Unverified Commit 7a87abb8 authored by tilman's avatar tilman
Browse files

rework readme and reqs

parent 0f79b364
images
.ENV
\ No newline at end of file
images
images
images
pose
playground
.DS_Store
.ENV
\ No newline at end of file
# Runtime image for the art-structure detection scripts.
# Built on top of the locally-built "openpose" base image (see the second
# Dockerfile in this repo), which provides OpenPose and its Python bindings.
FROM openpose
#RUN apt-get update &&\
#    apt-get install -y python3-setuptools python3-pip
COPY . /app
# Sanity-check the toolchain inherited from the base image, then install the
# app's Python deps. --no-cache-dir keeps pip's download cache out of the layer.
RUN ls -lisah /app &&\
    python3 -V &&\
    pip3 -V &&\
    pip3 install --no-cache-dir -r /app/requirements.txt
# Make the OpenPose python bindings importable and point at the model files.
ENV PYTHONPATH="/workspace/openpose/python:$PYTHONPATH"
ENV OPENPOSE_MODELS="/workspace/openpose/models/"
# Input/output directories read by detect_structures.py; mount host folders
# onto these paths at `docker run` time (see the example command below).
ENV IN_DIR="/images/in"
ENV OUT_DIR="/images/out"
#RUN ls -lisa /opt/openpose &&\
#    ls -lisa /opt/openpose/build &&\
#    ls -lisa /opt/openpose/models
#    ls -lisa /opt/openpose/build/python &&\
# Drop into a shell for interactive use; swap to the python3 ENTRYPOINT below
# to run the detector directly.
ENTRYPOINT ["/bin/bash"]
#ENTRYPOINT ["python3","/app/detect_structures.py"]
# docker run --rm -it -v "/Users/Tilman/Documents/Programme/Python/forschungspraktikum/art-structures-env/src/images/images_imdahl/:/images/in" -v "/Users/Tilman/Documents/Programme/Python/forschungspraktikum/art-structures-env/src/images/out/docker_out/:/images/out" image-composition-canvas
\ No newline at end of file
# Base image: builds CMU OpenPose (CPU-only) with Python bindings.
# Pinned to 18.04 for reproducibility (an untagged FROM floats with :latest):
# the Python 2 packages installed below (python-pip, python-dev, python) were
# dropped from the Ubuntu archive in 20.04, so newer bases cannot build this.
FROM ubuntu:18.04
# Build-time only: keep apt from prompting; ARG (not ENV) so it does not leak
# into the runtime environment of derived images.
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Europe/Berlin
# Install the compiler toolchain plus the Caffe/OpenPose build dependencies,
# then fetch the OpenPose sources. apt lists and pip caches are removed in the
# same layer that created them so they never persist in the image.
RUN apt-get -y update &&\
    apt-get install -y curl wget libopencv-dev git cmake build-essential libatlas-base-dev libprotobuf-dev \
    libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev \
    libgoogle-glog-dev liblmdb-dev python-setuptools python-dev python-pip \
    opencl-headers ocl-icd-opencl-dev libviennacl-dev \
    python python3 python3-setuptools python3-dev python3-pip &&\
    apt-get install -y --no-install-recommends libboost-all-dev &&\
    rm -rf /var/lib/apt/lists/* &&\
    pip install --no-cache-dir --upgrade numpy protobuf opencv-python &&\
    pip3 install --no-cache-dir --upgrade numpy protobuf opencv-python &&\
    git clone https://github.com/CMU-Perceptual-Computing-Lab/openpose /opt/openpose
# Debug aid: record the interpreter/tool versions present in the image.
RUN ls -lisah . &&\
    pwd &&\
    python3 -V &&\
    pip3 -V &&\
    python -V &&\
    pip -V
#RUN cd /opt/openpose && git submodule update --init --recursive --remote
#RUN mkdir -p /opt/openpose/models/pose/body_25 && cd /opt/openpose/models/pose/body_25 && wget http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/body_25/pose_iter_584000.caffemodel
#RUN mkdir -p /opt/openpose/models/face && cd /opt/openpose/models/face && wget http://posefs1.perception.cs.cmu.edu/OpenPose/models/face/pose_iter_116000.caffemodel
#RUN mkdir -p /opt/openpose/models/hand && cd /opt/openpose/models/hand && wget http://posefs1.perception.cs.cmu.edu/OpenPose/models/hand/pose_iter_102000.caffemodel
#RUN ls -lisah /opt/openpose/models &&\
#    ls -lisah /opt/openpose/models/pose/body_25 &&\
#    ls -lisah /opt/openpose/models/face &&\
#    ls -lisah /opt/openpose/models/hand
# Configure and compile OpenPose: CPU backend, Python bindings on, all model
# downloads off (models are mounted/downloaded separately, see above).
RUN mkdir -p /opt/openpose/build && cd /opt/openpose/build && pwd &&\
    cmake -DGPU_MODE=CPU_ONLY -DBUILD_PYTHON=ON -DDOWNLOAD_BODY_25_MODEL=OFF -DDOWNLOAD_BODY_COCO_MODEL=OFF -DDOWNLOAD_FACE_MODEL=OFF -DDOWNLOAD_HAND_MODEL=OFF .. &&\
    make -j`nproc`
\ No newline at end of file
## Installation ## Installation
* tested with `Python 3.7.5`, `pip 19.3.1` and ** !Important!: replace `all paths` with your own path to your local installation.*
* Install and build OpenPose (including OpenCV), for details see [official instructions](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/installation.md#installation) * Tested with `Python 3.7.5`, `pip 19.3.1` on `macOS 10.15`.
* make sure to append the openpose python bindings to your python path: `export PYTHONPATH=/path/to/openpose/build/python:$PYTHONPATH` !Important!: replace `/path/to/openpose` with your own path to your local openpose installation * Install and build OpenPose with python bindings (including OpenCV), for details see [official instructions](https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/doc/installation.md#installation).
* make sure to append the openpose python bindings to your python path: `export PYTHONPATH=/path/to/openpose/build/python:$PYTHONPATH`
* set env var for openpose models `export OPENPOSE_MODELS="/path/to/openpose/models/"` * set env var for openpose models `export OPENPOSE_MODELS="/path/to/openpose/models/"`
* install all python dependencies: 'pip install -r requirements.txt' * install all python dependencies: 'pip install -r requirements.txt'
## Usage ## Usage
* make sure you have set all env vars * make sure you have set all env vars
* start the main script: `python detect_structures.py` * start the main script: `IN_DIR="/path/to/folder/with/input/images" OUT_DIR="/output/folder/with/results" python detect_structures.py`
\ No newline at end of file \ No newline at end of file
...@@ -34,9 +34,15 @@ MAX_WIDTH = 1500 #px ...@@ -34,9 +34,15 @@ MAX_WIDTH = 1500 #px
KMEANS_AREA_MASK_THRESHOLD = 0.06 #0.08 #max 1.0 (percent of pixels), cur 0.09, sonntag mittag 0.05, #smaller threshold -> more colors, higher threshold -> less colors KMEANS_AREA_MASK_THRESHOLD = 0.06 #0.08 #max 1.0 (percent of pixels), cur 0.09, sonntag mittag 0.05, #smaller threshold -> more colors, higher threshold -> less colors
KMEANS_K = 7 #10 KMEANS_K = 7 #10
#KMEANS_K = 6 #10 #KMEANS_K = 6 #10
OUT_DIR = 'images/out/asana_task/eccv2020_coco/' OUT_DIR = os.environ['OUT_DIR'] if 'OUT_DIR' in os.environ else './images/out/asana_task/art_structure_inf/eccv2020_coco/'
#IN_DIR = "images/first_email/" # images from first email #IN_DIR = "images/first_email/" # images from first email
IN_DIR = "images/eccv2020_coco/" # images from imdahl IN_DIR = os.environ['IN_DIR'] if 'IN_DIR' in os.environ else "./images/eccv2020_coco/" # images from imdahl
print("in dir",IN_DIR, "out dir", OUT_DIR)
if os.path.dirname(__file__):
print("__file__",__file__)
os.chdir(os.path.dirname(__file__)) #make sure our curdir is the dir of the script so all relative paths will work
SKIP_OPENPOSE = False SKIP_OPENPOSE = False
OPENPOSE_DEMO_KEYPOINTS = np.array([[[4.7613028e+02, 3.3695804e+02, 9.0203685e-01],[5.3667474e+02, 3.8633786e+02, 6.6615295e-01],[5.1645105e+02, 3.8405157e+02, 5.1514143e-01],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[5.5459924e+02, 3.8859457e+02, 6.4240879e-01],[5.6353766e+02, 4.7384988e+02, 1.8810490e-01],[5.3886292e+02, 5.2543573e+02, 9.0144195e-02],[5.4566248e+02, 5.3215259e+02, 3.6083767e-01],[5.2768524e+02, 5.3213129e+02, 3.1196830e-01],[5.4556714e+02, 6.3534674e+02, 1.8182488e-01],[5.8149310e+02, 7.2958716e+02, 1.3625422e-01],[5.6579541e+02, 5.3216382e+02, 3.6866242e-01],[5.8822272e+02, 6.2862476e+02, 1.7708556e-01],[6.0843213e+02, 7.2955762e+02, 2.2736737e-01],[4.7597812e+02, 3.2798129e+02, 5.7176876e-01],[4.8729745e+02, 3.3027243e+02, 9.1296065e-01],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[5.2090784e+02, 3.3472034e+02, 7.7942842e-01],[5.7928674e+02, 7.5646222e+02, 2.0351715e-01],[5.9049512e+02, 7.5648248e+02, 2.0819387e-01],[6.2183606e+02, 7.3853394e+02, 1.7312977e-01],[5.8145673e+02, 7.5420642e+02, 1.2660497e-01],[5.7701074e+02, 7.5417773e+02, 1.2881383e-01],[5.8374255e+02, 7.3627380e+02, 9.4869599e-02]] OPENPOSE_DEMO_KEYPOINTS = np.array([[[4.7613028e+02, 3.3695804e+02, 9.0203685e-01],[5.3667474e+02, 3.8633786e+02, 6.6615295e-01],[5.1645105e+02, 3.8405157e+02, 5.1514143e-01],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[5.5459924e+02, 3.8859457e+02, 6.4240879e-01],[5.6353766e+02, 4.7384988e+02, 1.8810490e-01],[5.3886292e+02, 5.2543573e+02, 9.0144195e-02],[5.4566248e+02, 5.3215259e+02, 3.6083767e-01],[5.2768524e+02, 5.3213129e+02, 3.1196830e-01],[5.4556714e+02, 6.3534674e+02, 1.8182488e-01],[5.8149310e+02, 7.2958716e+02, 1.3625422e-01],[5.6579541e+02, 5.3216382e+02, 3.6866242e-01],[5.8822272e+02, 6.2862476e+02, 1.7708556e-01],[6.0843213e+02, 7.2955762e+02, 2.2736737e-01],[4.7597812e+02, 3.2798129e+02, 5.7176876e-01],[4.8729745e+02, 3.3027243e+02, 
9.1296065e-01],[0.0000000e+00, 0.0000000e+00, 0.0000000e+00],[5.2090784e+02, 3.3472034e+02, 7.7942842e-01],[5.7928674e+02, 7.5646222e+02, 2.0351715e-01],[5.9049512e+02, 7.5648248e+02, 2.0819387e-01],[6.2183606e+02, 7.3853394e+02, 1.7312977e-01],[5.8145673e+02, 7.5420642e+02, 1.2660497e-01],[5.7701074e+02, 7.5417773e+02, 1.2881383e-01],[5.8374255e+02, 7.3627380e+02, 9.4869599e-02]]
...@@ -75,7 +81,14 @@ os.chdir(OUT_DIR) #save images in this dir ...@@ -75,7 +81,14 @@ os.chdir(OUT_DIR) #save images in this dir
#for img_name in images: #for img_name in images:
# for img_name, img_bdcn in list(zip(images, images_bdcn)): # for img_name, img_bdcn in list(zip(images, images_bdcn)):
# for img_name in images[0:1]: # for img_name in images[0:1]:
for img_name in images: # print(images)
#filter out hidden files
images = list(filter(lambda e: os.path.basename(e)[0]!='.',images))
#filter images if we want to inspect single image:
# images = list(filter(lambda e: "Franziskus-Giotto1" in e,images))
for img_name in images: #beweinung
# for img_name, img_bdcn in (list(zip(images, images_bdcn))[15:16] if SKIP_OPENPOSE else list(zip(images, images_bdcn))): #jesus # for img_name, img_bdcn in (list(zip(images, images_bdcn))[15:16] if SKIP_OPENPOSE else list(zip(images, images_bdcn))): #jesus
# for img_name, img_bdcn in list(zip(images, images_bdcn))[6:7]: #fusswaschung # for img_name, img_bdcn in list(zip(images, images_bdcn))[6:7]: #fusswaschung
# Process Image # Process Image
...@@ -275,9 +288,9 @@ for img_name in images: ...@@ -275,9 +288,9 @@ for img_name in images:
# pass # pass
kernel = np.array([ kernel = np.array([
[0,0,1,0,0], [0,0,0.5,0,0],
[0,1,1,1,0], [0,1,1,1,0],
[0.5,1,1,1,1], [0.5,1,1,1,0.5],
[0,1,1,1,0], [0,1,1,1,0],
[0,0,0.5,0,0]], dtype=np.uint8) [0,0,0.5,0,0]], dtype=np.uint8)
...@@ -351,11 +364,20 @@ for img_name in images: ...@@ -351,11 +364,20 @@ for img_name in images:
if DRAW_FIRST_CONE_LAYER_BRIGTHER and len(combi) == 1: if DRAW_FIRST_CONE_LAYER_BRIGTHER and len(combi) == 1:
cv2.drawContours(overlay, [polyToArr(intersections[combi])], 0, (0,255,0), -1) cv2.drawContours(overlay, [polyToArr(intersections[combi])], 0, (0,255,0), -1)
img = overlay_two_image_v2(img, overlay, [0,0,0], (0.25)) img = overlay_two_image_v2(img, overlay, [0,0,0], (0.25))
intermediate_output_canvas = overlay_two_image_v2(img, overlay, [0,0,0], (alpha if is_not_last_level else 0.6)) intermediate_output_canvas = overlay_two_image_v2(intermediate_output_canvas, overlay, [0,0,0], (alpha if is_not_last_level else 0.6))
if BISEC_CONES: if BISEC_CONES:
cv2.drawContours(overlay, [polyToArr(intersections[combi])], 0, (color,0,(0 if is_not_last_level else 255)), -1) cv2.drawContours(overlay, [polyToArr(intersections[combi])], 0, (color,0,(0 if is_not_last_level else 255)), -1)
intermediate_output_canvas = overlay_two_image_v2(img, overlay, [0,0,0], (alpha if is_not_last_level else 0.6)) intermediate_output_canvas = overlay_two_image_v2(img, overlay, [0,0,0], (alpha if is_not_last_level else 0.6))
img = overlay_two_image_v2(img, overlay, [0,0,0], (alpha if is_not_last_level else 0.6)) img = overlay_two_image_v2(img, overlay, [0,0,0], (alpha if is_not_last_level else 0.6))
intermediate_output_canvas = overlay_two_image_v2(intermediate_output_canvas, overlay, [0,0,0], (alpha if is_not_last_level else 0.6))
# if(len(combi)==1 or not is_not_last_level):
# black_debug = np.array([[[0,0,0]]*len(img[0])]*len(img),np.uint8)
# cv2.drawContours(black_debug, [polyToArr(intersections[combi])], 0, (255,255,255), -1)
# cv2.namedWindow(str(combi), cv2.WINDOW_NORMAL)
# cv2.imshow(str(combi), black_debug)
if not is_not_last_level and GLOBAL_LINE: #draw centroid of last polygon if not is_not_last_level and GLOBAL_LINE: #draw centroid of last polygon
xy = (int(intersections[combi].centroid.x),int(intersections[combi].centroid.y)) xy = (int(intersections[combi].centroid.x),int(intersections[combi].centroid.y))
global_angle = getGlobalLineAngle(datum.poseKeypoints, CORRECTION_ANGLE) global_angle = getGlobalLineAngle(datum.poseKeypoints, CORRECTION_ANGLE)
...@@ -366,13 +388,61 @@ for img_name in images: ...@@ -366,13 +388,61 @@ for img_name in images:
# draw line with global gaze angle (special mean of all gaze angles) and through center of last intersection # draw line with global gaze angle (special mean of all gaze angles) and through center of last intersection
cv2.line(img, xy, (xy[0]+d[0],xy[1]-d[1]), (0,255,255), int(10*esz)) cv2.line(img, xy, (xy[0]+d[0],xy[1]-d[1]), (0,255,255), int(10*esz))
cv2.line(output_canvas, xy, (xy[0]+d[0],xy[1]-d[1]), (0,255,255), int(10*esz)) cv2.line(output_canvas, xy, (xy[0]+d[0],xy[1]-d[1]), (0,255,255), int(10*esz))
cv2.line(intermediate_output_canvas, xy, (xy[0]+d[0],xy[1]-d[1]), (0,255,255), int(10*esz))
cv2.line(img, xy, (xy[0]+d_l[0],xy[1]-d_l[1]), (0,255,255), int(10*esz)) cv2.line(img, xy, (xy[0]+d_l[0],xy[1]-d_l[1]), (0,255,255), int(10*esz))
cv2.line(output_canvas, xy, (xy[0]+d_l[0],xy[1]-d_l[1]), (0,255,255), int(10*esz)) cv2.line(output_canvas, xy, (xy[0]+d_l[0],xy[1]-d_l[1]), (0,255,255), int(10*esz))
cv2.line(intermediate_output_canvas, xy, (xy[0]+d_l[0],xy[1]-d_l[1]), (0,255,255), int(10*esz))
cv2.circle(img, xy, int(13*esz), (255,255,0), -1) cv2.circle(img, xy, int(13*esz), (255,255,0), -1)
cv2.circle(output_canvas, xy, int(13*esz), (255,255,0), -1) cv2.circle(output_canvas, xy, int(13*esz), (255,255,0), -1)
cv2.circle(intermediate_output_canvas, xy, int(13*esz), (255,255,0), -1)
# put markers on image border if centroid is outside of the image
if(xy[0]<0):
if(xy[1]<0):
# draw in top left: (0,0)
cv2.arrowedLine(img, (int(46*esz),int(46*esz)), (0,0), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (int(46*esz),int(46*esz)), (0,0), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (int(46*esz),int(46*esz)), (0,0), (255,255,0), int(5*esz))
if(xy[1]>len(img[0])):
# draw in bottom left (0,len(img[0]))
cv2.arrowedLine(img, (int(46*esz),len(img[0])-int(46*esz)), (0,len(img)), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (int(46*esz),len(img[0])-int(46*esz)), (0,len(img)), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (int(46*esz),len(img[0])-int(46*esz)), (0,len(img)), (255,255,0), int(5*esz))
else:
# draw on (0,xy[1])
cv2.arrowedLine(img, (int(46*esz),xy[1]), (0,xy[1]), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (int(46*esz),xy[1]), (0,xy[1]), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (int(46*esz),xy[1]), (0,xy[1]), (255,255,0), int(5*esz))
elif (xy[0]>len(img[0])):
if(xy[1]<0):
# draw in top right: (len(img),0)
cv2.arrowedLine(img, (len(img[0])-int(46*esz),int(46*esz)), (len(img[0]),0), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (len(img[0])-int(46*esz),int(46*esz)), (len(img[0]),0), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (len(img[0])-int(46*esz),int(46*esz)), (len(img[0]),0), (255,255,0), int(5*esz))
if(xy[1]>len(img[0])):
# draw in bottom right (len(img),len(img[0]))
cv2.arrowedLine(img, (len(img[0])-int(46*esz),len(img[0])-int(46*esz)), (len(img[0]),len(img)), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (len(img[0])-int(46*esz),len(img[0])-int(46*esz)), (len(img[0]),len(img)), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (len(img[0])-int(46*esz),len(img[0])-int(46*esz)), (len(img[0]),len(img)), (255,255,0), int(5*esz))
else:
# draw on (len(img),xy[1])
cv2.arrowedLine(img, (len(img[0])-int(46*esz),xy[1]), (len(img[0]),xy[1]), (255,255,0), int(5*esz))
cv2.arrowedLine(output_canvas, (len(img[0])-int(46*esz),xy[1]), (len(img[0]),xy[1]), (255,255,0), int(5*esz))
cv2.arrowedLine(intermediate_output_canvas, (len(img[0])-int(46*esz),xy[1]), (len(img[0]),xy[1]), (255,255,0), int(5*esz))
# maybe even simpler as i thought, just put marker on (len(img[0], xy[1])
# if(xy[0]>len(img[0])): #outside of canvas, to the right
# l = (len(img[0])-xy[0])/np.cos(global_angle)
# y_new = int(xy[1]+l*np.sin(global_angle))
# cv2.circle(img, (len(img[0]),y_new), int(13*esz), (255,235,50), -1)
# cv2.circle(output_canvas, (len(img[0]),y_new), int(13*esz), (255,235,50), -1)
# cv2.circle(intermediate_output_canvas, (len(img[0]),y_new), int(13*esz), (255,235,50), -1)
else: else:
print("WARNING-------------------WARNING: no intersections there") print("WARNING-------------------WARNING: no intersections there")
cv2.waitKey(0)
if BISEC_VECTORS: if BISEC_VECTORS:
bisecVectors = [poseToBisectVector(pose, CORRECTION_ANGLE) for pose in datum.poseKeypoints] bisecVectors = [poseToBisectVector(pose, CORRECTION_ANGLE) for pose in datum.poseKeypoints]
for bisecVector in bisecVectors: for bisecVector in bisecVectors:
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment