├── .gitignore
├── Additive_mixing_layers_extraction.py
├── Convexhull_simplification.py
├── GteDistPointTriangle.pyx
├── Our_preprocessing_pipeline-run-for-100MP_image.ipynb
├── Our_preprocessing_pipeline.ipynb
├── README.md
├── RGBXY_method.py
├── RGBXY_method_modified_for_huge_image.py
├── __init__.py
├── docker
├── Dockerfile
└── README.md
├── environment.yml
├── image-layer-updating-GUI
├── ChangeLog.txt
├── README.md
├── __init__.py
├── color_spiral.js
├── index.html
├── js
│ ├── Blob.js
│ ├── Detector.js
│ ├── canvas-toBlob.js
│ ├── controls
│ │ └── TrackballControls.js
│ ├── helpers.js
│ ├── libs
│ │ └── stats.min.js
│ ├── purl.js
│ ├── three.js
│ ├── three.min.js
│ └── websocket-client.js
├── make_color_spiral.py
├── oranges.jpg
├── palette tetrahedron.js
├── pigs.jpg
├── protocol.txt
├── runboth.sh
├── server.py
├── server_test.py
└── styles.css
├── pyopencl_example.py
├── requirements.frozen.txt
├── requirements.txt
├── test
├── turquoise groundtruth results
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-00.png
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-01.png
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-02.png
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-03.png
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-04.png
│ ├── turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-05.png
│ ├── turquoise-convexhull_vertices-06.js
│ └── turquoise-convexhull_vertices-06.png
└── turquoise.png
└── trimesh.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | example--mesh_obj_files.obj
3 | .ipynb_checkpoints
4 |
--------------------------------------------------------------------------------
/Additive_mixing_layers_extraction.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from __future__ import print_function, division
3 |
4 | import time
5 | import warnings
6 | import json
7 | import time
8 | import PIL.Image as Image
9 | from Convexhull_simplification import *
10 | import scipy.sparse
11 | import scipy.optimize
12 | import scipy
13 | from trimesh import *
14 |
15 |
16 |
17 | import pyximport
18 | pyximport.install(reload_support=True)
19 | from GteDistPointTriangle import *
20 |
21 |
22 | global DEMO
23 | DEMO=False
24 |
25 |
26 |
def Convert_scipy_convexhull_face_ind_to_basic_face_ind(hull):
    """Re-index a scipy ConvexHull's faces to hull-vertex order.

    scipy's ``hull.simplices`` index into ``hull.points``; this remaps them to
    indices into ``hull.points[hull.vertices]``, swapping the first two
    corners of any face whose winding disagrees with the outward facet normal
    stored in ``hull.equations`` (so every face ends up counter-clockwise).
    Returns the faces as a list of lists.
    """
    vertex_coords = hull.points[hull.vertices]
    lookup = -1 * np.ones(hull.points.shape[0], dtype=np.int32)
    lookup[hull.vertices] = np.arange(len(hull.vertices))
    remapped_faces = np.asarray([lookup[simplex] for simplex in hull.simplices])

    # Ensure each face is wound counter-clockwise w.r.t. its outward normal.
    for fi in range(len(remapped_faces)):
        a, b, c = vertex_coords[remapped_faces[fi]]
        outward = hull.equations[fi, :3]
        if np.dot(outward, np.cross(b - a, c - a)) < 0:
            remapped_faces[fi][[0, 1]] = remapped_faces[fi][[1, 0]]

    return remapped_faces.tolist()
46 |
47 |
### assume data is in range(0,1)
def Hull_Simplification_unspecified_M(data, output_prefix, start_save=10):
    """Iteratively decimate the convex hull of `data` (reshaped to N x 3).

    Each round collapses one edge via the progressive-hulls criterion, then
    rebuilds the hull. Once the vertex count drops to `start_save` or below,
    a JSON snapshot (vertices and faces scaled to 0-255) is written per size.
    Stops (returning None) when decimation stalls or reaches a tetrahedron.
    """
    hull = ConvexHull(data.reshape((-1, 3)))
    print ("original hull vertices number: ", len(hull.vertices))

    obj_path = output_prefix + "-mesh_obj_files.obj"
    write_convexhull_into_obj_file(hull, obj_path)

    for _ in range(5000):  # hard cap on decimation rounds
        mesh = TriMesh.FromOBJ_FileName(obj_path)
        previous_count = len(mesh.vs)
        mesh = remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh, option=2)
        hull = ConvexHull(mesh.vs)
        write_convexhull_into_obj_file(hull, obj_path)

        if len(hull.vertices) <= start_save:
            snapshot_name = output_prefix + "-%02d.js" % len(hull.vertices)
            with open(snapshot_name, 'w') as myfile:
                json.dump({'vs': (hull.points[hull.vertices].clip(0.0, 1.0) * 255).tolist(),
                           'faces': (hull.points[hull.simplices].clip(0.0, 1.0) * 255).tolist()},
                          myfile, indent=4)

        if len(hull.vertices) == previous_count or len(hull.vertices) == 4:
            return
77 |
78 |
def Hull_Simplification_old(arr, M, output_prefix):
    """Decimate the convex hull of `arr` (reshaped to N x 3) toward M vertices.

    Repeatedly collapses the cheapest edge (smallest added volume) and
    rebuilds the hull, stopping when the mesh reaches M vertices, decimation
    stalls, or the hull becomes a tetrahedron.

    Returns the final hull's vertices clipped to [0, 1].
    Raises RuntimeError if no stopping condition is met within the loop cap
    (the original code fell through to an unbound-name NameError here).
    """
    hull = ConvexHull(arr.reshape((-1, 3)))
    output_rawhull_obj_file = output_prefix + "-mesh_obj_files.obj"
    write_convexhull_into_obj_file(hull, output_rawhull_obj_file)
    mesh = TriMesh.FromOBJ_FileName(output_rawhull_obj_file)

    Final_hull = None
    max_loop = 5000
    for i in range(max_loop):
        # NOTE(review): old_num is taken from the mesh *before* the reload
        # below, matching the original iteration order.
        old_num = len(mesh.vs)
        mesh = TriMesh.FromOBJ_FileName(output_rawhull_obj_file)
        mesh = remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh, option=2)
        newhull = ConvexHull(mesh.vs)
        write_convexhull_into_obj_file(newhull, output_rawhull_obj_file)

        # Stop at target size, when decimation stalls, or at a tetrahedron.
        if len(mesh.vs) == M or len(newhull.vertices) == old_num or len(newhull.vertices) == 4:
            Final_hull = newhull
            break

    if Final_hull is None:
        raise RuntimeError("Hull simplification did not converge within %d iterations" % max_loop)

    Hull_vertices = Final_hull.points[Final_hull.vertices].clip(0, 1)
    return Hull_vertices
101 |
102 |
103 | # ##### assume arr is in range(0,1)
104 | # def Hull_Simplification_new(arr, M, output_prefix):
105 | # hull=ConvexHull(arr.reshape((-1,3)))
106 |
107 | # max_loop=5000
108 | # for i in range(max_loop):
109 |
110 | # mesh = TriMesh()
111 | # mesh.vs = hull.points[hull.vertices].tolist()
112 | # mesh.faces = Convert_scipy_convexhull_face_ind_to_basic_face_ind(hull)
113 | # mesh.topology_changed()
114 |
115 | # old_num=len(hull.vertices)
116 | # mesh=remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh,option=2)
117 | # hull=ConvexHull(mesh.vs)
118 |
119 | # if len(hull.vertices)==M or len(hull.vertices)==old_num or len(hull.vertices)==4:
120 | # Final_hull=hull
121 | # break
122 |
123 | # Hull_vertices=Final_hull.points[Final_hull.vertices].clip(0,1)
124 | # return Hull_vertices
125 |
126 |
127 |
128 | # def outsidehull_points_num_ratio(hull_vertices, points):
129 | # de=Delaunay(hull_vertices)
130 | # ind=de.find_simplex(points, tol=1e-8)
131 | # return (len(ind[ind<0])*1.0)/len(ind)
132 |
133 |
134 |
def outsidehull_points_distance(hull_vertices, points):
    """RMS distance from `points` (all pixel colors) to the hull spanned by
    `hull_vertices`.

    Points inside the hull contribute zero; for each outside point the
    distance to the nearest hull facet is used. Returns
    sqrt(sum(d^2) / len(points)).
    """
    hull = ConvexHull(hull_vertices)
    inside_test = Delaunay(hull_vertices)
    simplex_ids = inside_test.find_simplex(points, tol=1e-8)

    outside_distances = []
    for pi in range(points.shape[0]):
        if simplex_ids[pi] < 0:
            # Distance to the closest triangular facet of the hull.
            facet_dists = [DCPPointTriangle(points[pi], hull.points[tri])['distance']
                           for tri in hull.simplices]
            outside_distances.append(min(facet_dists))
    outside_distances = np.asarray(outside_distances)

    return ((outside_distances ** 2).sum() / len(points)) ** 0.5
152 |
153 |
154 |
def outsidehull_points_distance_for_using_origin_hull_vertices(hull_vertices, all_points, points):
    """Estimate RMS reconstruction error using only the original hull vertices.

    all_points: all pixel colors (only their outside-count is used).
    points: original convex-hull vertices of all pixel colors; distances to
    the simplified hull are measured for those lying outside it, then the
    squared-distance sum is rescaled by (outside pixels / outside vertices)
    before averaging over all pixels.

    Returns 0.0 when no original hull vertex lies outside the simplified
    hull (the original code raised ZeroDivisionError in that case).
    """
    hull = ConvexHull(hull_vertices)
    de = Delaunay(hull_vertices)

    ind1 = de.find_simplex(all_points, tol=1e-8)
    length1 = len(ind1[ind1 < 0])   # pixels outside the simplified hull

    ind = de.find_simplex(points, tol=1e-8)
    length = len(ind[ind < 0])      # original hull vertices outside

    if length == 0:
        # Everything is inside: zero reconstruction error, and the rescaling
        # ratio below would otherwise divide by zero.
        return 0.0

    total_distance = []
    for i in range(points.shape[0]):
        if ind[i] < 0:
            dist_list = []
            for j in range(hull.simplices.shape[0]):
                result = DCPPointTriangle(points[i], hull.points[hull.simplices[j]])
                dist_list.append(result['distance'])
            total_distance.append(min(dist_list))
    total_distance = np.asarray(total_distance)

    pixel_numbers = len(all_points)

    # Scale the vertex-based squared-distance sum up to pixel counts.
    return ((((total_distance ** 2).sum() * length1) / length) / pixel_numbers) ** 0.5
181 |
182 |
183 |
184 |
def outsidehull_points_distance_unique_data_version(hull_vertices, points, counts):
    """Weighted RMS distance from unique pixel colors to the given hull.

    `points` are unique colors and `counts` their per-color pixel
    multiplicities; this is equivalent to the all-pixels version but faster
    since each distinct color is measured once.
    """
    hull = ConvexHull(hull_vertices)
    inside_test = Delaunay(hull_vertices)
    simplex_ids = inside_test.find_simplex(points, tol=1e-8)

    distances = []
    for pi in range(points.shape[0]):
        if simplex_ids[pi] < 0:
            facet_dists = [DCPPointTriangle(points[pi], hull.points[tri])['distance']
                           for tri in hull.simplices]
            distances.append(min(facet_dists))
    distances = np.asarray(distances)

    # Weight each outside color's squared distance by its pixel count.
    return (((distances ** 2) * counts[simplex_ids < 0]).sum() / counts.sum()) ** 0.5
202 |
203 |
204 |
205 | from collections import Counter
206 |
def get_unique_colors_and_their_counts(arr):
    """Return the unique rows of an N x 3 color array and their counts.

    Uses np.unique(axis=0, return_counts=True), the fastest of the approaches
    previously tried here (a dict accumulator and collections.Counter were
    both slower); the dead alternatives have been removed.
    Rows come back in np.unique's lexicographically sorted order.
    """
    unique_colors, counts = np.unique(arr, axis=0, return_counts=True)
    return unique_colors, counts
237 |
238 |
239 |
240 |
241 |
### assume data is in range(0,1)
def Hull_Simplification_determined_version(data, output_prefix, num_thres=0.1, error_thres=2.0/255.0, SAVE=True, option="use_quantitized_colors"):
    """Simplify the convex hull of `data` until the reconstruction error would
    exceed `error_thres`, then return the last acceptable hull's vertices.

    data: image colors in [0, 1]; reshaped internally to N x 3.
    output_prefix: prefix for the temporary OBJ file and saved JSON hulls.
    num_thres: unused here (kept for interface compatibility with the
        commented-out outside-ratio criterion).
    error_thres: RMS color-distance threshold (default 2/255).
    SAVE: when True, dump the returned hull to "<prefix>-NN.js".
    option: which error metric to use once the hull is small (<= 10 vertices):
        "all_pixel_colors", "unique_pixel_colors", "origin_convexhull_vertices",
        or "use_quantitized_colors" (default; quantizes colors to 8-level bins
        first for speed).
    Returns the hull vertices clipped to [0, 1], or None if the loop cap is
    reached without triggering a stopping condition.
    NOTE(review): an unrecognized `option` leaves `reconstruction_errors`
    unbound once the hull shrinks to <= 10 vertices -> NameError.
    """
    # hull=ConvexHull(data.reshape((-1,3)), qhull_options="Qs")
    hull=ConvexHull(data.reshape((-1,3)))
    origin_vertices=hull.points[ hull.vertices ]
    print ("original hull vertices number: ", len(hull.vertices))
    # with open( output_prefix+"-original_hull_vertices.js", 'w' ) as myfile:
    #     json.dump({'vs': (hull.points[ hull.vertices ].clip(0.0,1.0)*255).tolist(),'faces': (hull.points[ hull.simplices ].clip(0.0,1.0)*255).tolist()}, myfile, indent = 4 )

    output_rawhull_obj_file=output_prefix+"-mesh_obj_files.obj"
    write_convexhull_into_obj_file(hull, output_rawhull_obj_file)

    # Precompute the unique-color histogram used by the error metric.
    if option=="unique_pixel_colors":
        unique_data, pixel_counts=get_unique_colors_and_their_counts(data.reshape((-1,3)))
        print (len(unique_data))
    elif option=="use_quantitized_colors":
        print (option)
        # Quantize each channel to the center of its 8-wide bin (4, 12, ..., 252)/255.
        new_data=(((data*255).round().astype(np.uint8)//8)*8+4)/255.0
        unique_data, pixel_counts=get_unique_colors_and_their_counts(new_data.reshape((-1,3)))
        print (len(unique_data))


    max_loop=5000
    for i in range(max_loop):
        if i%10==0:
            print ("loop: ", i)
        # Round-trip through the OBJ file: reload, collapse one edge, re-hull.
        mesh=TriMesh.FromOBJ_FileName(output_rawhull_obj_file)
        old_num=len(mesh.vs)
        old_vertices=mesh.vs  # kept so we can rebuild the previous hull on overshoot
        # print ("WHY1")
        mesh=remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh,option=2)
        # newhull=ConvexHull(mesh.vs, qhull_options="Qs")
        hull=ConvexHull(mesh.vs)
        write_convexhull_into_obj_file(hull, output_rawhull_obj_file)
        # print ("WHY2")

        # Only start measuring error once the hull is small enough to matter.
        if len(hull.vertices) <= 10:

            # outside_ratio=outsidehull_points_num_ratio(hull.points[ hull.vertices ].clip(0.0,1.0), data.reshape((-1,3)))
            # if outside_ratio>num_thres:

            if option=="all_pixel_colors": ### basic one.
                reconstruction_errors=outsidehull_points_distance(hull.points[ hull.vertices ].clip(0.0,1.0), data.reshape((-1,3)))

            elif option=="unique_pixel_colors": ### results should be same with above opiton, but faster
                reconstruction_errors=outsidehull_points_distance_unique_data_version(hull.points[ hull.vertices ].clip(0.0,1.0), unique_data, pixel_counts)
            elif option=="origin_convexhull_vertices":
                # reconstruction_errors=outsidehull_points_distance(hull.points[ hull.vertices ].clip(0.0,1.0), origin_vertices.reshape((-1,3))) ###error_thres may need to be pretty large. #### may use 10/255.0 to be threshold
                reconstruction_errors=outsidehull_points_distance_for_using_origin_hull_vertices(hull.points[ hull.vertices ].clip(0.0,1.0), data.reshape((-1,3)), origin_vertices.reshape((-1,3))) ### may use 5/255.0 to be threshold.

            elif option=="use_quantitized_colors":
                reconstruction_errors=outsidehull_points_distance_unique_data_version(hull.points[ hull.vertices ].clip(0.0,1.0), unique_data, pixel_counts)



            # Error just crossed the threshold: the *previous* hull was the
            # last acceptable one, so rebuild and return it.
            if reconstruction_errors>error_thres:

                oldhull=ConvexHull(old_vertices)

                if SAVE:
                    name = output_prefix + "-%02d.js" % len(oldhull.vertices)
                    with open( name, 'w' ) as myfile:
                        json.dump({'vs': (oldhull.points[ oldhull.vertices ].clip(0.0,1.0)*255).tolist(),'faces': (oldhull.points[ oldhull.simplices ].clip(0.0,1.0)*255).tolist()}, myfile, indent = 4 )

                return oldhull.points[ oldhull.vertices ].clip(0.0,1.0)


        # Decimation stalled or reached a tetrahedron: accept the current hull.
        if len(hull.vertices)==old_num or len(hull.vertices)==4:

            if SAVE:
                name = output_prefix + "-%02d.js" % len(hull.vertices)
                with open( name, 'w' ) as myfile:
                    json.dump({'vs': (hull.points[ hull.vertices ].clip(0.0,1.0)*255).tolist(),'faces': (hull.points[ hull.simplices ].clip(0.0,1.0)*255).tolist()}, myfile, indent = 4 )

            return hull.points[ hull.vertices ].clip(0.0,1.0)
318 |
319 |
320 |
321 |
322 |
323 |
324 |
325 |
def recover_ASAP_weights_using_scipy_delaunay(Hull_vertices, data, option=1):
    """Barycentric weights of each row of `data` w.r.t. a Delaunay
    triangulation of `Hull_vertices`.

    option=1: dense (len(data), len(Hull_vertices)) array (slow/huge for many
    vertices). option=2: sparse CSR built with Python loops. option=3: sparse
    CSR built fully vectorized (fastest).
    Adapted from https://codereview.stackexchange.com/questions/41024/faster-computation-of-barycentric-coordinates-for-many-points (Gareth Rees).

    NOTE(review): targets outside the hull get simplex index -1, which
    silently indexes the last simplex; callers appear to guarantee all
    targets lie inside — confirm before reusing elsewhere.
    """
    points = Hull_vertices
    targets = data
    dim = data.shape[1]

    t0 = time.time()
    # Delaunay triangulation of the hull vertices.
    tri = Delaunay(points)
    t1 = time.time()

    # Simplex containing each target (-1 when not found).
    tetrahedra = tri.find_simplex(targets, tol=1e-6)

    # Barycentric coordinates from scipy's precomputed affine transforms:
    # the first `dim` coordinates come from the transform, the last from the
    # affine constraint (weights sum to 1).
    X = tri.transform[tetrahedra, :dim]
    Y = targets - tri.transform[tetrahedra, dim]
    b = np.einsum('...jk,...k->...j', X, Y)
    barycoords = np.c_[b, 1 - b.sum(axis=1)]

    t2 = time.time()

    if option == 1:
        ############# dense scatter; this is slow for large size weights like N*1000
        weights_list = np.zeros((targets.shape[0], points.shape[0]))
        all_index = np.arange(len(targets))
        for s in range(len(tri.simplices)):
            hit = all_index[tetrahedra == s]
            weights_list[hit[:, None], np.array(tri.simplices[s])] = barycoords[hit, :]

    elif option == 2:
        row_ids = np.repeat(np.arange(len(data)).reshape((-1, 1)), len(tri.simplices[0]), 1).ravel().tolist()
        col_ids = []
        entries = []
        for r in range(len(data)):
            col_ids += tri.simplices[tetrahedra[r]].tolist()
            entries += barycoords[r].tolist()
        weights_list = scipy.sparse.coo_matrix((entries, (row_ids, col_ids)),
                                               shape=(len(data), len(Hull_vertices))).tocsr()

    elif option == 3:
        row_ids = np.repeat(np.arange(len(data)).reshape((-1, 1)), len(tri.simplices[0]), 1).ravel()
        col_ids = tri.simplices[tetrahedra].ravel()
        entries = barycoords.ravel()
        weights_list = scipy.sparse.coo_matrix((entries, (row_ids, col_ids)),
                                               shape=(len(data), len(Hull_vertices))).tocsr()

    t3 = time.time()

    print(t1 - t0, t2 - t1, t3 - t2)
    return weights_list
394 |
395 |
396 |
def Get_ASAP_weights_using_Tan_2016_triangulation_and_then_barycentric_coordinates(img_label_origin, origin_order_tetra_prime, outprefix, order=0):
    """Per-pixel mixing weights over a simplified palette hull.

    img_label_origin: H x W x 3 image with values in [0, 1] (not modified).
    origin_order_tetra_prime: palette vertices (K x 3) in original order.
    outprefix: prefix for the .js weights file and per-layer .png maps
        (written only when the module-level DEMO flag is False).
    order: 0 -> reorder so the palette color closest to black comes first;
        1 -> closest to white first; or an explicit index array/list/tuple.

    Pixels outside the hull are first projected to their closest facet point.
    The hull is then split into tetrahedra that all share vertex 0, each
    pixel is assigned to its containing tetrahedron, and barycentric
    coordinates within it become the (sparse) mixing weights.

    Returns an H x W x K weight array in the ORIGINAL palette order.
    """
    img_label = img_label_origin.copy()  ### do not modify img_label_origin

    if isinstance(order, (list, tuple, np.ndarray)):
        pass

    elif order == 0:  ## use black as first pigment
        diff = abs(origin_order_tetra_prime - np.array([[0, 0, 0]])).sum(axis=-1)
        order = np.argsort(diff)

    elif order == 1:  ## use white
        diff = abs(origin_order_tetra_prime - np.array([[1, 1, 1]])).sum(axis=-1)
        order = np.argsort(diff)

    tetra_prime = origin_order_tetra_prime[order]
    print(tetra_prime[0])

    img_shape = img_label.shape
    img_label = img_label.reshape((-1, 3))
    img_label_backup = img_label.copy()

    hull = ConvexHull(tetra_prime)
    test_inside = Delaunay(tetra_prime)
    label = test_inside.find_simplex(img_label, tol=1e-8)

    ### modify img_label[] to make all points are inside the simplified convexhull
    for i in range(img_label.shape[0]):
        if label[i] < 0:
            dist_list = []
            cloest_points = []
            for j in range(hull.simplices.shape[0]):
                result = DCPPointTriangle(img_label[i], hull.points[hull.simplices[j]])
                dist_list.append(result['distance'])
                cloest_points.append(result['closest'])
            dist_list = np.asarray(dist_list)
            index = np.argmin(dist_list)
            img_label[i] = cloest_points[index]

    ### assert: after projection every pixel must be inside the hull
    test_inside = Delaunay(tetra_prime)
    label = test_inside.find_simplex(img_label, tol=1e-8)
    assert(len(label[label == -1]) == 0)

    ### colors2xy dict: map each unique color to the pixel indices holding it
    colors2xy = {}
    unique_image_label = list(set(list(tuple(element) for element in img_label)))

    for element in unique_image_label:
        colors2xy.setdefault(tuple(element), [])

    for index in range(len(img_label)):
        element = img_label[index]
        colors2xy[tuple(element)].append(index)

    unique_colors = np.array(list(colors2xy.keys()))
    unique_image_label = unique_colors.copy()
    vertices_list = tetra_prime

    ### split the hull into tetrahedra that all share vertex 0 (first pigment):
    ### one tetrahedron per hull face not containing vertex 0
    tetra_pixel_dict = {}
    for face_vertex_ind in hull.simplices:
        if (face_vertex_ind != 0).all():
            i, j, k = face_vertex_ind
            tetra_pixel_dict.setdefault(tuple((i, j, k)), [])

    index_list = np.array(list(np.arange(len(unique_image_label))))

    for face_vertex_ind in hull.simplices:
        if (face_vertex_ind != 0).all():
            i, j, k = face_vertex_ind
            tetra = np.array([vertices_list[0], vertices_list[i], vertices_list[j], vertices_list[k]])
            try:
                #### use try here, because sometimes the tetra is nearly flat, will cause qhull error to stop, we do not want to stop, we just skip.
                test_Del = Delaunay(tetra)
                if len(index_list) != 0:
                    label = test_Del.find_simplex(unique_image_label[index_list], tol=1e-8)
                    chosen_index = list(index_list[label >= 0])
                    tetra_pixel_dict[tuple((i, j, k))] += chosen_index
                    index_list = np.array(list(set(index_list) - set(chosen_index)))
            except Exception as e:
                pass

    ### every unique color must have landed in exactly one tetrahedron
    assert(len(index_list) == 0)

    pixel_num = 0
    for key in tetra_pixel_dict:
        pixel_num += len(tetra_pixel_dict[key])
    assert(pixel_num == unique_image_label.shape[0])

    ### input is like (0,1,2,3,4) then shortest_path_order is (1,2,3,4), 0th is background color, usually is white
    shortest_path_order = tuple(np.arange(len(tetra_prime))[1:])

    unique_weights_list = np.zeros((unique_image_label.shape[0], len(tetra_prime)))

    for vertice_tuple in tetra_pixel_dict:
        # Sort the tetrahedron's three non-background vertices into global order.
        vertice_index_inglobalorder = np.asarray(shortest_path_order)[np.asarray(sorted(list(shortest_path_order).index(s) for s in vertice_tuple))]
        vertice_index_inglobalorder_tuple = tuple(list(vertice_index_inglobalorder))

        colors = np.array([vertices_list[0],
                           vertices_list[vertice_index_inglobalorder_tuple[0]],
                           vertices_list[vertice_index_inglobalorder_tuple[1]],
                           vertices_list[vertice_index_inglobalorder_tuple[2]]
                           ])

        pixel_index = np.array(tetra_pixel_dict[vertice_tuple])
        if len(pixel_index) != 0:
            arr = unique_image_label[pixel_index]
            Y = recover_ASAP_weights_using_scipy_delaunay(colors, arr)
            unique_weights_list[pixel_index[:, None], np.array([0] + list(vertice_index_inglobalorder_tuple))] = Y.reshape((arr.shape[0], -1))

    #### from unique weights to original shape weights
    mixing_weights = np.zeros((len(img_label), len(tetra_prime)))
    for index in range(len(unique_image_label)):
        element = unique_image_label[index]
        index_list = colors2xy[tuple(element)]
        mixing_weights[index_list, :] = unique_weights_list[index, :]

    origin_order_mixing_weights = np.ones(mixing_weights.shape)
    #### to make the weights order is same as orignal input vertex order
    origin_order_mixing_weights[:, order] = mixing_weights

    origin_order_mixing_weights = origin_order_mixing_weights.reshape((img_shape[0], img_shape[1], -1))

    ### report reconstruction error of (weights x palette) against the input
    temp = (origin_order_mixing_weights.reshape((img_shape[0], img_shape[1], -1, 1)) * origin_order_tetra_prime.reshape((1, 1, -1, 3))).sum(axis=2)
    img_diff = temp.reshape(img_label_origin.shape) * 255 - img_label_origin * 255
    # Qualified as np.* instead of relying on wildcard-imported numpy names.
    diff = np.square(img_diff.reshape((-1, 3))).sum(axis=-1)

    print('max diff: ', np.sqrt(diff).max())
    print('median diff', np.median(np.sqrt(diff)))
    print('RMSE: ', np.sqrt(diff.sum() / diff.shape[0]))

    if DEMO == False:
        mixing_weights_filename = outprefix + '-' + str(len(origin_order_tetra_prime)) + "-RGB_ASAP-using_Tan2016_triangulation_and_then_barycentric_coordinates-linear_mixing-weights.js"
        ### BUGFIX: was open(..., 'wb') — json.dump writes str, which raises
        ### TypeError on a binary file handle under Python 3.
        with open(mixing_weights_filename, 'w') as myfile:
            json.dump({'weights': origin_order_mixing_weights.tolist()}, myfile)

        for i in range(origin_order_mixing_weights.shape[-1]):
            mixing_weights_map_filename = outprefix + '-' + str(len(origin_order_tetra_prime)) + "-RGB_ASAP-using_Tan2016_triangulation_and_then_barycentric_coordinates-linear_mixing-weights_map-%02d.png" % i
            Image.fromarray((origin_order_mixing_weights[:, :, i] * 255).round().clip(0, 255).astype(np.uint8)).save(mixing_weights_map_filename)

    return origin_order_mixing_weights
566 |
567 |
--------------------------------------------------------------------------------
/Convexhull_simplification.py:
--------------------------------------------------------------------------------
1 | #####directly copy from SILD_convexhull_simplification-minimize_adding_volume_or_normalized_adding_volume.ipynb 2016.01.11
2 | #### and then remove many unrelated codes.
3 |
4 | from __future__ import print_function, division
5 |
6 | import numpy as np
7 | from scipy.spatial import ConvexHull
8 | from scipy.spatial import Delaunay
9 | from scipy.optimize import *
10 | from math import *
11 | import cvxopt
12 | import PIL.Image as Image
13 | import sys
14 |
15 | ######***********************************************************************************************
16 |
17 | #### 3D case: use method in paper: "Progressive Hulls for Intersection Applications"
18 | #### also using trimesh.py interface from yotam gingold
19 |
def visualize_hull(hull, groundtruth_hull=None):
    """Scatter-plot a 3D convex hull: red star vertices plus black wireframe.

    hull: scipy.spatial.ConvexHull to draw.
    groundtruth_hull: optional second hull overlaid as green circles.
    Blocks on plt.show(); matplotlib is imported lazily so the module loads
    without a display backend.
    """
    from matplotlib import pyplot as plt

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    vertex = hull.points[hull.vertices]
    ax.scatter(vertex[:, 0], vertex[:, 1], vertex[:, 2],
               marker='*', color='red', s=40, label='class')

    # Draw each triangular facet as a closed polyline.
    for simplex in hull.simplices:
        faces = hull.points[simplex]
        xs = list(faces[:, 0])
        xs.append(faces[0, 0])
        ys = list(faces[:, 1])
        ys.append(faces[0, 1])
        zs = list(faces[:, 2])
        zs.append(faces[0, 2])
        plt.plot(xs, ys, zs, 'k-')

    # BUGFIX/idiom: compare against None with `is not` (PEP 8); `!= None`
    # invokes __ne__, which can misbehave for array-like objects.
    if groundtruth_hull is not None:
        groundtruth_vertex = groundtruth_hull.points[groundtruth_hull.vertices]
        ax.scatter(groundtruth_vertex[:, 0], groundtruth_vertex[:, 1], groundtruth_vertex[:, 2],
                   marker='o', color='green', s=80, label='class')

    plt.title("3D Scatter Plot")
    plt.show()
58 |
59 |
60 |
61 |
62 | from trimesh import TriMesh
63 |
def write_convexhull_into_obj_file(hull, output_rawhull_obj_file):
    """Write a scipy ConvexHull to a Wavefront OBJ file.

    Hull vertices are re-indexed densely (OBJ face indices start at 1), and
    each face is flipped when necessary so its winding is counter-clockwise
    w.r.t. the outward facet normal stored in hull.equations.
    """
    hvertices = hull.points[hull.vertices]
    points_index = -1 * np.ones(hull.points.shape[0], dtype=np.int32)
    points_index[hull.vertices] = np.arange(len(hull.vertices))
    #### start from index 1 in obj files!!!!!
    hfaces = np.array([points_index[hface] for hface in hull.simplices]) + 1

    #### to make sure each face's points are in counter-clockwise order.
    for index in range(len(hfaces)):
        face = hvertices[hfaces[index] - 1]
        normals = hull.equations[index, :3]
        p0 = face[0]
        p1 = face[1]
        p2 = face[2]

        n = np.cross(p1 - p0, p2 - p0)
        if np.dot(normals, n) < 0:
            hfaces[index][[1, 0]] = hfaces[index][[0, 1]]

    # BUGFIX: context manager guarantees the file is closed even if a write
    # raises (the original leaked the handle on the error path).
    with open(output_rawhull_obj_file, 'w') as myfile:
        for index in range(hvertices.shape[0]):
            myfile.write('v ' + str(hvertices[index][0]) + ' ' + str(hvertices[index][1]) + ' ' + str(hvertices[index][2]) + '\n')
        for index in range(hfaces.shape[0]):
            myfile.write('f ' + str(hfaces[index][0]) + ' ' + str(hfaces[index][1]) + ' ' + str(hfaces[index][2]) + '\n')
89 |
90 |
91 |
92 |
def edge_normal_test(vertices, faces, old_face_index_list, v0_ind, v1_ind):
    """Test the faces around edge (v0_ind, v1_ind) against its average normal.

    The two faces containing both endpoints are the "central" faces; the
    average of their unit normals defines the edge normal. Returns 1 when
    every other neighboring face's (unnormalized) normal has a dot product
    with that average of at least -1e-5, else 0. Asserts there are exactly
    two central faces.
    """
    neighbor_faces = []
    central_faces = []

    for fi in old_face_index_list:
        face = faces[fi]
        leftover = list(np.array(face).copy())
        if v0_ind in leftover:
            leftover.remove(v0_ind)
        if v1_ind in leftover:
            leftover.remove(v1_ind)

        corners = np.asarray([np.asarray(vertices[vi]) for vi in face])
        if len(leftover) == 2:
            ### face touches only one endpoint of the edge
            neighbor_faces.append(corners)
        elif len(leftover) == 1:
            ##### face contains the whole edge -> central face
            central_faces.append(corners)

    assert(len(central_faces) == 2)
    if len(central_faces) + len(neighbor_faces) != len(old_face_index_list):
        print('error!!!!!!')

    unit_normals = []
    for face in central_faces:
        n = np.cross(face[1] - face[0], face[2] - face[0])
        unit_normals.append(n / np.sqrt(np.dot(n, n)))

    avg_edge_normal = np.average(np.array(unit_normals), axis=0)

    dot_values = []
    for face in neighbor_faces:
        n = np.cross(face[1] - face[0], face[2] - face[0])
        dot_values.append(np.dot(avg_edge_normal, n))

    return 1 if (np.array(dot_values) >= 0.0 - 1e-5).all() else 0
135 |
136 |
137 |
def compute_tetrahedron_volume(face, point):
    """Volume of the tetrahedron spanned by triangle `face` (3x3) and `point`."""
    normal = np.cross(face[1] - face[0], face[2] - face[0])
    return abs(np.dot(normal, point - face[0])) / 6.0
141 |
142 |
143 |
144 |
145 | #### this is different from function: remove_one_edge_by_finding_smallest_adding_volume(mesh)
146 | #### add some test conditions to accept new vertex.
147 | #### if option ==1, return a new convexhull.
148 | #### if option ==2, return a new mesh (using trimesh.py)
149 | def remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh, option):
150 |
151 | edges=mesh.get_edges()
152 | mesh.get_halfedges()
153 | faces=mesh.faces
154 | vertices=mesh.vs
155 | # print (len(vertices))
156 |
157 | temp_list1=[]
158 | temp_list2=[]
159 | count=0
160 |
161 | for edge_index in range(len(edges)):
162 |
163 | edge=edges[edge_index]
164 | vertex1=edge[0]
165 | vertex2=edge[1]
166 | face_index1=mesh.vertex_face_neighbors(vertex1)
167 | face_index2=mesh.vertex_face_neighbors(vertex2)
168 |
169 | face_index=list(set(face_index1) | set(face_index2))
170 | related_faces=[faces[index] for index in face_index]
171 | old_face_list=[]
172 |
173 |
174 | #### now find a point, so that for each face in related_faces will create a positive volume tetrahedron using this point.
175 | ### minimize c*x. w.r.t. A*x<=b
176 | c=np.zeros(3)
177 | A=[]
178 | b=[]
179 |
180 | for index in range(len(related_faces)):
181 | face=related_faces[index]
182 | p0=vertices[face[0]]
183 | p1=vertices[face[1]]
184 | p2=vertices[face[2]]
185 | old_face_list.append(np.asarray([p0,p1,p2]))
186 |
187 | n=np.cross(p1-p0,p2-p0)
188 |
189 | #### Currently use this line. without this line, test_fourcolors results are not good.
190 | n=n/np.sqrt(np.dot(n,n)) ##### use normalized face normals means distance, not volume
191 |
192 | A.append(n)
193 | b.append(np.dot(n,p0))
194 | c+=n
195 |
196 |
197 | ########### now use cvxopt.solvers.lp solver
198 |
199 | A=-np.asfarray(A)
200 | b=-np.asfarray(b)
201 |
202 | c=np.asfarray(c)
203 | cvxopt.solvers.options['show_progress'] = False
204 | cvxopt.solvers.options['glpk'] = dict(msg_lev='GLP_MSG_OFF')
205 | res = cvxopt.solvers.lp( cvxopt.matrix(c), cvxopt.matrix(A), cvxopt.matrix(b), solver='glpk' )
206 |
207 | if res['status']=='optimal':
208 | newpoint = np.asfarray( res['x'] ).squeeze()
209 |
210 |
211 | ######## using objective function to calculate (volume) or (distance to face) as priority.
212 | # volume=res['primal objective']+b.sum()
213 |
214 |
215 | ####### manually compute volume as priority,so no relation with objective function
216 | tetra_volume_list=[]
217 | for each_face in old_face_list:
218 | tetra_volume_list.append(compute_tetrahedron_volume(each_face,newpoint))
219 | volume=np.asarray(tetra_volume_list).sum()
220 |
221 |
222 |
223 | temp_list1.append((count, volume, vertex1, vertex2))
224 | temp_list2.append(newpoint)
225 | count+=1
226 |
227 | # else:
228 | # # print 'cvxopt.solvers.lp is not optimal ', res['status'], np.asfarray( res['x'] ).squeeze()
229 | # if res['status']!='unknown': ### means solver failed
230 | # ##### check our test to see if the solver fails normally
231 | # if edge_normal_test(vertices,faces,face_index,vertex1,vertex2)==1: ### means all normal dot value are positive
232 | # print '!!!edge_normal_neighbor_normal_dotvalue all positive, but solver fails'
233 |
234 | # print ("WHY3")
235 |
236 | if option==1:
237 | if len(temp_list1)==0:
238 | print ('all fails')
239 | hull=ConvexHull(mesh.vs)
240 | else:
241 | min_tuple=min(temp_list1,key=lambda x: x[1])
242 | # print min_tuple
243 | final_index=min_tuple[0]
244 | final_point=temp_list2[final_index]
245 | # print 'final_point ', final_point
246 | new_total_points=mesh.vs
247 | new_total_points.append(final_point)
248 |
249 | hull=ConvexHull(np.array(new_total_points))
250 | return hull
251 |
252 | if option==2:
253 |
254 | if len(temp_list1)==0:
255 | # print 'all fails'
256 | pass
257 | else:
258 | min_tuple=min(temp_list1,key=lambda x: x[1])
259 | # print min_tuple
260 | final_index=min_tuple[0]
261 | final_point=temp_list2[final_index]
262 | # print 'final_point ', final_point
263 |
264 | v1_ind=min_tuple[2]
265 | v2_ind=min_tuple[3]
266 |
267 | ## Collect all faces touching the edge (either vertex).
268 | face_index1=mesh.vertex_face_neighbors(v1_ind)
269 | face_index2=mesh.vertex_face_neighbors(v2_ind)
270 | face_index=list(set(face_index1) | set(face_index2))
271 | ## Collect the vertices of all faces touching the edge.
272 | related_faces_vertex_ind=[faces[index] for index in face_index]
273 |
274 | ## Check link conditions. If link conditions are violated, the resulting
275 | ## mesh wouldn't be manifold.
276 | if len( (set(mesh.vertex_vertex_neighbors(v1_ind)).intersection(set(mesh.vertex_vertex_neighbors(v2_ind)))) ) != 2:
277 | print( "Link condition violated. Should not remove edge." )
278 |
279 | ## Remove the edge's two vertices.
280 | ## This also removes faces attached to either vertex.
281 | ## All other vertices have new indices.
282 | old2new=mesh.remove_vertex_indices([v1_ind, v2_ind])
283 |
284 | ## The edge will collapse to a new vertex.
285 | ## That new vertex will be at the end.
286 | new_vertex_index=current_vertices_num=len(old2new[old2new!=-1])
287 |
288 | ## Fill the hole in the mesh by re-attaching
289 | ## all the deleted faces to either removed vertex
290 | ## to the new vertex.
291 | new_faces_vertex_ind=[]
292 |
293 | for face in related_faces_vertex_ind:
294 | ## Map old vertex indices to new ones.
295 | ## The removed vertices both collapse to the new vertex index.
296 | new_face=[new_vertex_index if x==v1_ind or x==v2_ind else old2new[x] for x in face]
297 | ## The two faces on either side of the collapsed edge will be degenerate.
298 | ## Two vertices in those faces will both be the same vertex (the new one).
299 | ## Don't add that face.
300 | if len(set(new_face))==len(new_face):
301 | new_faces_vertex_ind.append(new_face)
302 |
303 |
304 | ## Add the new vertex.
305 | ##### do not clip coordinates to[0,255]. when simplification done, clip.
306 | mesh.vs = np.vstack( ( mesh.vs, final_point ) )
307 |
308 |
309 | ##### clip coordinates during simplification!
310 | # mesh.vs.append(final_point.clip(0.0,255.0))
311 |
312 |
313 | ## Add the new faces.
314 | # for face in new_faces_vertex_ind: mesh.faces.append(face)
315 | mesh.faces = np.vstack( ( mesh.faces, new_faces_vertex_ind ) )
316 |
317 | ## Tell the mesh to regenerate the half-edge data structure.
318 | mesh.topology_changed()
319 |
320 | # print (len(mesh.vs))
321 |
322 | return mesh
323 |
324 |
325 |
326 |
327 |
328 |
329 | ############### using original image as input###############
330 |
331 |
332 |
if __name__=="__main__":

    ## Command line: python Additive_mixing_layers_extraction.py <image basename, no extension>
    ## Reads <basename>.png, iteratively simplifies its RGB convex hull, and writes
    ## intermediate palettes (.js/.png) plus the original hull as JSON.
    input_image_path=sys.argv[1]+".png"
    output_rawhull_obj_file=sys.argv[1]+"-rawconvexhull.obj"
    js_output_file=sys.argv[1]+"-final_simplified_hull.js"
    js_output_clip_file=sys.argv[1]+"-final_simplified_hull_clip.js"
    js_output_file_origin=sys.argv[1]+"-original_hull.js"
    E_vertice_num=4


    import time
    ## Hoisted: these were previously imported inside a conditional deeper in the loop,
    ## so the unconditional json.dump() at the end could raise NameError when that
    ## branch never executed.
    import json, os

    ## time.clock() was removed in Python 3.8; prefer perf_counter() when available.
    timer = time.perf_counter if hasattr(time, 'perf_counter') else time.clock
    start_time=timer()

    images=np.asfarray(Image.open(input_image_path).convert('RGB')).reshape((-1,3))
    hull=ConvexHull(images)
    origin_hull=hull
    # visualize_hull(hull)
    write_convexhull_into_obj_file(hull, output_rawhull_obj_file)


    N=500
    mesh=TriMesh.FromOBJ_FileName(output_rawhull_obj_file)
    print ('original vertices number:',len(mesh.vs))


    for i in range(N):

        print ('loop:', i)

        old_num=len(mesh.vs)
        mesh=TriMesh.FromOBJ_FileName(output_rawhull_obj_file)
        mesh=remove_one_edge_by_finding_smallest_adding_volume_with_test_conditions(mesh,option=2)
        newhull=ConvexHull(mesh.vs)
        write_convexhull_into_obj_file(newhull, output_rawhull_obj_file)

        print ('current vertices number:', len(mesh.vs))

        ## Save intermediate palettes (raw and clipped to [0,255]) once the hull is small.
        if len(newhull.vertices) <= 10:
            name = os.path.splitext( js_output_file )[0] + ( '-%02d.js' % len(newhull.vertices ))
            with open( name, 'w' ) as myfile:
                json.dump({'vs': newhull.points[ newhull.vertices ].tolist(),'faces': newhull.points[ newhull.simplices ].tolist()}, myfile, indent = 4 )

            name = os.path.splitext( js_output_clip_file )[0] + ( '-%02d.js' % len(newhull.vertices ))
            with open( name, 'w' ) as myfile:
                json.dump({'vs': newhull.points[ newhull.vertices ].clip(0.0,255.0).tolist(),'faces': newhull.points[ newhull.simplices ].clip(0.0,255.0).tolist()}, myfile, indent = 4 )

            pigments_colors=newhull.points[ newhull.vertices ].clip(0,255).round().astype(np.uint8)
            pigments_colors=pigments_colors.reshape((pigments_colors.shape[0],1,pigments_colors.shape[1]))
            Image.fromarray( pigments_colors ).save( os.path.splitext( js_output_clip_file )[0] + ( '-%02d.png' % len(newhull.vertices )) )

        ## Stop when simplification stalls or the target palette size is reached.
        if len(mesh.vs)==old_num or len(mesh.vs)<=E_vertice_num:
            print ('final vertices number', len(mesh.vs))
            break


    newhull=ConvexHull(mesh.vs)
    # visualize_hull(newhull)
    write_convexhull_into_obj_file(newhull, output_rawhull_obj_file)

    with open( js_output_file_origin, 'w' ) as myfile_origin:
        json.dump({'vs': origin_hull.points[ origin_hull.vertices ].tolist(),'faces': origin_hull.points[ origin_hull.simplices ].tolist()}, myfile_origin, indent = 4 )


    end_time=timer()

    print ('time: ', end_time-start_time)
--------------------------------------------------------------------------------
/GteDistPointTriangle.pyx:
--------------------------------------------------------------------------------
1 | # cython: language_level=3
2 | # cython: boundscheck=False
3 | # cython: wraparound=False
4 | # cython: infer_types=True
5 |
6 | '''
7 | Ported from: http://www.geometrictools.com/GTEngine/Include/GteDistPointTriangle.h
8 | '''
9 | from libc.math cimport sqrt
10 |
11 | ctypedef double Real
12 |
cdef scale( Real s, Real v[3], Real out[3] ):
    # out = s * v, componentwise (3-vector scale).
    cdef int k
    for k in range(3):
        out[k] = s*v[k]
17 |
cdef subtract( Real a[3], Real b[3], Real out[3] ):
    # out = a - b, componentwise (3-vector difference).
    cdef int k
    for k in range(3):
        out[k] = a[k] - b[k]
22 |
cdef add( Real a[3], Real b[3], Real out[3] ):
    # out = a + b, componentwise (3-vector sum).
    cdef int k
    for k in range(3):
        out[k] = a[k] + b[k]
27 |
28 |
cdef scale2( Real s, Real v[2], Real out[2] ):
    # out = s * v, componentwise (2-vector scale).
    cdef int k
    for k in range(2):
        out[k] = s*v[k]
32 |
cdef subtract2( Real a[2], Real b[2], Real out[2] ):
    # out = a - b, componentwise (2-vector difference).
    cdef int k
    for k in range(2):
        out[k] = a[k] - b[k]
36 |
37 |
cdef add2( Real a[2], Real b[2], Real out[2] ):
    # out = a + b, componentwise (2-vector sum).
    cdef int k
    for k in range(2):
        out[k] = a[k] + b[k]
41 |
42 |
43 |
44 |
cdef Real dot3( Real a[3], Real b[3] ):
    # Euclidean inner product of two 3-vectors.
    cdef Real total = 0.
    cdef int k
    for k in range(3):
        total += a[k]*b[k]
    return total
47 |
cdef GetMinEdge02( Real a11, Real b1, Real p[2] ):
    # Minimize the quadratic restricted to the triangle edge where the first
    # parameter is 0 (edge 0-2).  The unconstrained minimum along the edge is
    # -b1/a11; clamp it to the [0,1] parameter range and write it into p.
    p[0] = 0.
    p[1] = 0.

    if b1 >= 0.:
        p[1] = 0.
    elif (a11 + b1 <= 0.):
        p[1] = 1.
    else:
        p[1] = -b1 / a11
58 |
cdef GetMinEdge12( Real a01, Real a11, Real b1, Real f10, Real f01, Real p[2] ):
    # Minimize the quadratic restricted to the triangle edge where the
    # parameters sum to 1 (edge 1-2).  h0 and h1 are derivative values at the
    # edge endpoints; when the derivative changes sign between them, the
    # minimum is at the zero crossing h0/(h0-h1), otherwise it is clamped to
    # the nearer endpoint.  The result is written into p.
    cdef Real h0
    cdef Real h1

    p[0] = 0.
    p[1] = 0.

    h0 = a01 + b1 - f10
    if (h0 >= 0):
        p[1] = 0
    else:
        h1 = a11 + b1 - f01
        if (h1 <= 0):
            p[1] = 1
        else:
            p[1] = h0 / (h0 - h1)
    p[0] = 1. - p[1]
76 |
cdef GetMinInterior( Real p0[2], Real h0, Real p1[2], Real h1, Real p[2] ):
    # The derivative changes sign strictly inside the segment p0->p1
    # (h0 and h1 have opposite signs): interpolate to the zero crossing z
    # and write the interior minimizer into p.
    cdef Real z = h0 / (h0 - h1)

    # p = (1. - z) * p0 + z * p1

    cdef Real tmp1[2]
    cdef Real tmp2[2]

    scale2( 1. - z, p0, tmp1 )
    scale2( z, p1, tmp2 )

    add2( tmp1, tmp2, p )
89 |
90 |
cdef struct Result:
    # Output record of DCPPointTriangle.
    Real parameter[3]   # barycentric coordinates of the closest point on the triangle
    Real closest[3]     # the closest point itself
    Real distance       # Euclidean distance from the query point to `closest`
    Real sqrDistance    # squared distance (distance**2)
96 |
97 |
98 |
cpdef Result DCPPointTriangle( Real[::1] point_p, Real[:,::1] triangle_p ):
    '''
    Given a 3-dimensional point as a numpy.array
    and a triangle as a sequence of 3 same-dimensional points (also numpy.arrays),
    returns an object with properties:
    .distance: the distance from the point to the triangle
    .sqrDistance: the square of .distance
    .parameter[3]: the three barycentric coordinates for the closest point in the triangle (i.e. .closest = \sum_{i=0}^2 .parameter[i]*triangle[i])
    .closest: the closest point on the triangle to 'point'
    '''
    # Copy the input memoryviews into stack-allocated C arrays for fast access.
    cdef Real point[3]
    cdef Real triangle[3][3]

    for i in range(3):
        point[i]=point_p[i]

    for i in range(3):
        for j in range(3):
            triangle[i][j]=triangle_p[i,j]


    cdef Real p[2]
    cdef Real p0[2]
    cdef Real p1[2]

    cdef Real diff[3]
    subtract( point, triangle[0], diff )

    # Triangle is parameterized as triangle[0] + s*edge0 + t*edge1.
    cdef Real edge0[3]
    cdef Real edge1[3]
    subtract( triangle[1], triangle[0], edge0 )
    subtract( triangle[2], triangle[0], edge1 )



    # Coefficients of the quadratic distance function Q(s,t) in the (s,t)
    # parameter plane; f00/f10/f01 are derivative-related values at the
    # triangle corners used to classify which region the minimum lies in.
    cdef Real a00 = dot3(edge0, edge0)
    cdef Real a01 = dot3(edge0, edge1)
    cdef Real a11 = dot3(edge1, edge1)
    cdef Real b0 = -dot3(diff, edge0)
    cdef Real b1 = -dot3(diff, edge1)

    cdef Real f00 = b0
    cdef Real f10 = b0 + a00
    cdef Real f01 = b0 + a01

    # print f00, f10, f01

    cdef Real h0, h1, dt1

    ## Compute the endpoints p0 and p1 of the segment.  The segment is
    ## parameterized by L(z) = (1-z)*p0 + z*p1 for z in [0,1] and the
    ## directional derivative of half the quadratic on the segment is
    ## H(z) = dot(p1-p0,gradient[Q](L(z))/2), where gradient[Q]/2 = (F,G).
    ## By design, F(L(z)) = 0 for cases (2), (4), (5), and (6). Cases (1) and
    ## (3) can correspond to no-intersection or intersection of F = 0 with the
    ## triangle.
    if (f00 >= 0.):
        if (f01 >= 0.):
            ## (1) p0 = (0,0), p1 = (0,1), H(z) = G(L(z))
            GetMinEdge02(a11, b1, p)
        else:
            ## (2) p0 = (0,t10), p1 = (t01,1-t01), H(z) = (t11 - t10)*G(L(z))
            p0[0] = 0.
            p0[1] = f00 / (f00 - f01)
            p1[0] = f01 / (f01 - f10)
            p1[1] = 1. - p1[0]
            dt1 = p1[1] - p0[1]
            h0 = dt1 * (a11 * p0[1] + b1)
            if (h0 >= 0.):
                GetMinEdge02(a11, b1, p)
            else:
                h1 = dt1 * (a01 * p1[0] + a11 * p1[1] + b1)
                if (h1 <= 0.):
                    GetMinEdge12(a01, a11, b1, f10, f01, p)
                else:
                    GetMinInterior(p0, h0, p1, h1, p)
    elif (f01 <= 0.):
        if (f10 <= 0.):
            ## (3) p0 = (1,0), p1 = (0,1), H(z) = G(L(z)) - F(L(z))
            GetMinEdge12(a01, a11, b1, f10, f01, p)
        else:
            ## (4) p0 = (t00,0), p1 = (t01,1-t01), H(z) = t11*G(L(z))
            p0[0] = f00 / (f00 - f10)
            p0[1] = 0.
            p1[0] = f01 / (f01 - f10)
            p1[1] = 1. - p1[0]
            h0 = p1[1] * (a01 * p0[0] + b1)

            # print h0


            if (h0 >= 0.):
                p[0] = p0[0]  ## GetMinEdge01
                p[1] = p0[1]
            else:
                h1 = p1[1] * (a01 * p1[0] + a11 * p1[1] + b1)
                if (h1 <= 0.):
                    GetMinEdge12(a01, a11, b1, f10, f01, p)
                else:
                    GetMinInterior(p0, h0, p1, h1, p)
    elif (f10 <= 0.):
        ## (5) p0 = (0,t10), p1 = (t01,1-t01), H(z) = (t11 - t10)*G(L(z))
        p0[0] = 0.
        p0[1] = f00 / (f00 - f01)
        p1[0] = f01 / (f01 - f10)
        p1[1] = 1. - p1[0]
        dt1 = p1[1] - p0[1]
        h0 = dt1 * (a11 * p0[1] + b1)
        if (h0 >= 0.):
            GetMinEdge02(a11, b1, p)
        else:
            h1 = dt1 * (a01 * p1[0] + a11 * p1[1] + b1)
            if (h1 <= 0.):
                GetMinEdge12(a01, a11, b1, f10, f01, p)
            else:
                GetMinInterior(p0, h0, p1, h1, p)
    else:
        ## (6) p0 = (t00,0), p1 = (0,t11), H(z) = t11*G(L(z))
        p0[0] = f00 / (f00 - f10)
        p0[1] = 0.
        p1[0] = 0.
        p1[1] = f00 / (f00 - f01)
        h0 = p1[1] * (a01 * p0[0] + b1)
        if (h0 >= 0.):
            p[0] = p0[0]  ## GetMinEdge01
            p[1] = p0[1]
        else:
            h1 = p1[1] * (a11 * p1[1] + b1)
            if (h1 <= 0.):
                GetMinEdge02(a11, b1, p)
            else:
                GetMinInterior(p0, h0, p1, h1, p)

    # print p

    # p holds the (s,t) parameters of the closest point; convert to the
    # three barycentric coordinates and reconstruct the point itself.
    cdef Result result
    result.parameter[0] = 1. - p[0] - p[1]
    result.parameter[1] = p[0]
    result.parameter[2] = p[1]

    # result.closest = triangle[0] + p[0] * edge0 + p[1] * edge1
    cdef Real tmp1[3]
    cdef Real tmp2[3]
    scale( p[0], edge0, tmp1 )
    scale( p[1], edge1, tmp2 )
    add( tmp1, tmp2, result.closest)
    add( triangle[0], result.closest, result.closest)


    subtract(point, result.closest, diff)
    result.sqrDistance = dot3(diff, diff)
    result.distance = sqrt(result.sqrDistance)

    return result
253 |
--------------------------------------------------------------------------------
/Our_preprocessing_pipeline-run-for-100MP_image.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import time\n",
11 | "import scipy\n",
12 | "import json\n",
13 | "import Additive_mixing_layers_extraction\n",
14 | "from scipy.spatial import ConvexHull, Delaunay\n",
15 | "import scipy.sparse\n",
16 | "import PIL.Image as Image\n",
17 | "Image.MAX_IMAGE_PIXELS = 1000000000\n",
18 | "import RGBXY_method_modified_for_huge_image\n",
19 | "from numpy import *"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": null,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": []
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": null,
32 | "metadata": {},
33 | "outputs": [],
34 | "source": [
35 | "# ### 100MP images\n",
36 | "base_dir=\"./Large_images/\"\n",
37 | "import glob\n",
38 | "filepaths=glob.glob(base_dir+\"*.jpg\")\n",
39 | "print filepaths\n",
40 | "\n",
41 | "for filepath in filepaths:\n",
42 | " print \"#####################\"\n",
43 | " print filepath\n",
44 | " img=np.asfarray(Image.open(filepath).convert('RGB'))/255.0\n",
45 | " X,Y=np.mgrid[0:img.shape[0], 0:img.shape[1]]\n",
46 | " XY=np.dstack((X*1.0/img.shape[0],Y*1.0/img.shape[1]))\n",
47 | " data=np.dstack((img, XY))\n",
48 | " print len(data.reshape((-1,5)))\n",
49 | " \n",
50 | " start=time.time()\n",
51 | " palette_rgb=Additive_mixing_layers_extraction.Hull_Simplification_determined_version(img, filepath[:-4]+\"-convexhull_vertices\")\n",
52 | " end=time.time() \n",
53 | " M=len(palette_rgb)\n",
54 | " print \"palette size: \", M\n",
55 | " print \"palette extraction time: \", end-start\n",
56 | "\n",
57 | "\n",
58 | " ######### for RGBXY method\n",
59 | " start=time.time()\n",
60 | " data_hull=ConvexHull(data.reshape((-1,5)))\n",
61 | " start2=time.time()\n",
62 | " print \"5D convexhull vertices number: \", len(data_hull.vertices)\n",
63 | " print \"convexhull on 5D time: \", start2-start\n",
64 | " mixing_weights_2=RGBXY_method_modified_for_huge_image.recover_ASAP_weights_using_scipy_delaunay(data_hull.points[data_hull.vertices], data_hull.points)\n",
65 | " end=time.time()\n",
66 | " print \"total time: \", end-start\n",
67 | " \n"
68 | ]
69 | },
70 | {
71 | "cell_type": "code",
72 | "execution_count": null,
73 | "metadata": {},
74 | "outputs": [],
75 | "source": []
76 | }
77 | ],
78 | "metadata": {
79 | "kernelspec": {
80 | "display_name": "Python 2",
81 | "language": "python",
82 | "name": "python2"
83 | },
84 | "language_info": {
85 | "codemirror_mode": {
86 | "name": "ipython",
87 | "version": 2
88 | },
89 | "file_extension": ".py",
90 | "mimetype": "text/x-python",
91 | "name": "python",
92 | "nbconvert_exporter": "python",
93 | "pygments_lexer": "ipython2",
94 | "version": "2.7.14"
95 | }
96 | },
97 | "nbformat": 4,
98 | "nbformat_minor": 1
99 | }
100 |
--------------------------------------------------------------------------------
/Our_preprocessing_pipeline.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np\n",
10 | "import time\n",
11 | "import scipy\n",
12 | "import json\n",
13 | "import Additive_mixing_layers_extraction\n",
14 | "from scipy.spatial import ConvexHull, Delaunay\n",
15 | "import scipy.sparse\n",
16 | "import PIL.Image as Image\n",
17 | "import RGBXY_method\n",
18 | "from numpy import *\n",
19 | "Additive_mixing_layers_extraction.DEMO=True"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 2,
25 | "metadata": {},
26 | "outputs": [],
27 | "source": [
28 | "def save_weights(img, palette_rgb, mixing_weights, output_prefix):\n",
29 | " mixing_weights=mixing_weights.reshape((img.shape[0],img.shape[1],-1)).clip(0,1)\n",
30 | " temp=(mixing_weights.reshape((img.shape[0],img.shape[1],-1,1))*palette_rgb.reshape((1,1,-1,3))).sum(axis=2)\n",
31 | " img_diff=temp*255-img*255\n",
32 | " diff=np.square(img_diff.reshape((-1,3))).sum(axis=-1)\n",
33 | " print 'max diff: ', np.sqrt(diff).max()\n",
34 | " print 'median diff', np.median(np.sqrt(diff))\n",
35 | " rmse=np.sqrt(diff.sum()/diff.shape[0])\n",
36 | " print 'RMSE: ', np.sqrt(diff.sum()/diff.shape[0])\n",
37 | "\n",
38 | " import json\n",
39 | " mixing_weights_filename=output_prefix+\"-palette_size-\"+str(len(palette_rgb))+\"-mixing_weights.js\"\n",
40 | " with open(mixing_weights_filename,'wb') as myfile:\n",
41 | " json.dump({'weights': mixing_weights.tolist()}, myfile)\n",
42 | "\n",
43 | " for i in range(mixing_weights.shape[-1]):\n",
44 | " mixing_weights_map_filename=output_prefix+\"-palette_size-\"+str(len(palette_rgb))+\"-mixing_weights-%02d.png\" % i\n",
45 | " Image.fromarray((mixing_weights[:,:,i]*255).round().clip(0,255).astype(np.uint8)).save(mixing_weights_map_filename)\n",
46 | " return rmse\n",
47 | "\n",
48 | "def get_bigger_palette_to_show(palette):\n",
49 | " ##### palette shape is M*3\n",
50 | " c=50\n",
51 | " palette2=np.ones((1*c, len(palette)*c, 3))\n",
52 | " for i in range(len(palette)):\n",
53 | " palette2[:,i*c:i*c+c,:]=palette[i,:].reshape((1,1,-1))\n",
54 | " return palette2\n"
55 | ]
56 | },
57 | {
58 | "cell_type": "code",
59 | "execution_count": 3,
60 | "metadata": {},
61 | "outputs": [
62 | {
63 | "name": "stdout",
64 | "output_type": "stream",
65 | "text": [
66 | "['./test/turquoise.png']\n",
67 | "#####################\n",
68 | "./test/turquoise.png\n",
69 | "280800\n",
70 | "original hull vertices number: 97\n",
71 | "use_quantitized_colors\n",
72 | "1483\n",
73 | "loop: 0\n",
74 | "loop: 10\n",
75 | "loop: 20\n",
76 | "loop: 30\n",
77 | "loop: 40\n",
78 | "loop: 50\n",
79 | "loop: 60\n",
80 | "loop: 70\n",
81 | "loop: 80\n",
82 | "loop: 90\n",
83 | "palette size: 6\n",
84 | "palette extraction time: 26.7580060959\n",
85 | "convexhull on 5D time: 2.76049113274\n",
86 | "[0. 0. 0.]\n",
87 | "0.00050687789917 0.000232219696045 5.88893890381e-05\n",
88 | "0.000743865966797 0.00019097328186 5.10215759277e-05\n",
89 | "0.000753879547119 0.000230073928833 8.70227813721e-05\n",
90 | "0.000592231750488 0.000156879425049 3.2901763916e-05\n",
91 | "max diff: 7.861846444349073\n",
92 | "median diff 4.9227844771419234e-14\n",
93 | "RMSE: 0.8387862826258635\n",
94 | "5.4658498764 0.678758144379 0.0974788665771\n",
95 | "total time: 9.08978700638\n",
96 | "max diff: 7.861846444349664\n",
97 | "median diff 6.394884621840902e-14\n",
98 | "RMSE: 0.5309361214629531\n"
99 | ]
100 | }
101 | ],
102 | "source": [
103 | "base_dir=\"./test/\"\n",
104 | "\n",
105 | "import glob\n",
106 | "filepaths=glob.glob(base_dir+\"*.png\")\n",
107 | "print filepaths\n",
108 | "\n",
109 | "\n",
110 | "\n",
111 | "for filepath in filepaths:\n",
112 | " print \"#####################\"\n",
113 | " print filepath\n",
114 | " img=np.asfarray(Image.open(filepath).convert('RGB'))/255.0\n",
115 | " arr=img.copy()\n",
116 | " X,Y=np.mgrid[0:img.shape[0], 0:img.shape[1]]\n",
117 | " XY=np.dstack((X*1.0/img.shape[0],Y*1.0/img.shape[1]))\n",
118 | " data=np.dstack((img, XY))\n",
119 | " print len(data.reshape((-1,5)))\n",
120 | " \n",
121 | " start=time.time()\n",
122 | " palette_rgb=Additive_mixing_layers_extraction.Hull_Simplification_determined_version(img, filepath[:-4]+\"-convexhull_vertices\")\n",
123 | " end=time.time() \n",
124 | " M=len(palette_rgb)\n",
125 | " print \"palette size: \", M\n",
126 | " print \"palette extraction time: \", end-start\n",
127 | " \n",
128 | " palette_img=get_bigger_palette_to_show(palette_rgb)\n",
129 | " Image.fromarray((palette_img*255).round().astype(np.uint8)).save(filepath[:-4]+\"-convexhull_vertices.png\")\n",
130 | "\n",
131 | "\n",
132 | " ######### for RGBXY RGB black star triangulation.\n",
133 | " start=time.time()\n",
134 | " data_hull=ConvexHull(data.reshape((-1,5)))\n",
135 | " start2=time.time()\n",
136 | " print \"convexhull on 5D time: \", start2-start\n",
137 | " mixing_weights_1=Additive_mixing_layers_extraction.Get_ASAP_weights_using_Tan_2016_triangulation_and_then_barycentric_coordinates(img.reshape((-1,3))[data_hull.vertices].reshape((-1,1,3)), palette_rgb, None, order=0)\n",
138 | " mixing_weights_2=Additive_mixing_layers_extraction.recover_ASAP_weights_using_scipy_delaunay(data_hull.points[data_hull.vertices], data_hull.points, option=3)\n",
139 | "\n",
140 | " mixing_weights=mixing_weights_2.dot(mixing_weights_1.reshape((-1,M)))\n",
141 | " \n",
142 | " end=time.time()\n",
143 | " print \"total time: \", end-start\n",
144 | " \n",
145 | " mixing_weights=mixing_weights.reshape((img.shape[0],img.shape[1],-1)).clip(0,1)\n",
146 | " \n",
147 | " output_prefix=filepath[:-4]+'-RGBXY_RGB_black_star_ASAP'\n",
148 | " RMSE=save_weights(arr, palette_rgb, mixing_weights, output_prefix)\n"
149 | ]
150 | },
151 | {
152 | "cell_type": "code",
153 | "execution_count": null,
154 | "metadata": {},
155 | "outputs": [],
156 | "source": []
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": []
164 | }
165 | ],
166 | "metadata": {
167 | "kernelspec": {
168 | "display_name": "Python 2",
169 | "language": "python",
170 | "name": "python2"
171 | },
172 | "language_info": {
173 | "codemirror_mode": {
174 | "name": "ipython",
175 | "version": 2
176 | },
177 | "file_extension": ".py",
178 | "mimetype": "text/x-python",
179 | "name": "python",
180 | "nbconvert_exporter": "python",
181 | "pygments_lexer": "ipython2",
182 | "version": "2.7.14"
183 | }
184 | },
185 | "nbformat": 4,
186 | "nbformat_minor": 1
187 | }
188 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Efficient palette-based decomposition and recoloring of images via RGBXY-space geometry
2 |
3 | This code implements the pipeline described in the SIGGRAPH Asia 2018 paper ["Efficient palette-based decomposition and recoloring of images via RGBXY-space geometry"](https://cragl.cs.gmu.edu/fastlayers/) Jianchao Tan, Jose Echevarria, and Yotam Gingold.
4 |
5 | A different and simpler prototype implementation can be found in [this link](https://cragl.cs.gmu.edu/fastlayers/RGBXY_weights.py)
6 |
7 | ## Running the GUI
8 |
The GUI runs in the browser at <http://localhost:8000> (the port served by the HTTP server, and the one exposed by the Docker container).
10 | For that to work, you need to run a server. See "Running the Server" below.
11 |
12 | Load or drag-and-drop an image. Then compute the palette and weights. You can manipulate the palette colors in the 3D RGB-space view. You can save the palette and weights for recoloring by clicking the "Save Everything" button.
13 |
14 | Some videos of GUI usage can be found in [this link](https://cragl.cs.gmu.edu/fastlayers/)
15 |
16 | The `turquoise.png` image is copyright [Michelle Lee](https://cargocollective.com/michellelee).
17 |
18 | ### Image Recoloring
19 |
20 | You can perform global recoloring with the resulting layers with a different web GUI (no installation necessary).
21 |
1. Go to the image recoloring web GUI (linked from the project page above).
2. Drag and drop the original image: `<image>.png`
3. Drag and drop the palette: `<image>-automatic computed palette-modified.js`
4. Drag and drop the weights: `<image>-weights.js`
5. Click and drag the palette vertices in the 3D view to perform image recoloring.
27 |
28 | This image recoloring web GUI is also used in our previous project, [Decomposing Images into Layers via RGB-space Geometry](https://github.com/JianchaoTan/Decompose-Single-Image-Into-Layers).
29 |
30 | ## Running the Server
31 |
32 | ### With Docker
33 |
34 | You can run the server via Docker (no need to install any dependencies on your machine). You won't get an OpenCL implementation of the layer updating, but it is still quite fast.
35 |
36 | docker pull cragl/fastlayers
37 | docker run -p 8000:8000 -p 9988:9988 cragl/fastlayers
38 |
39 | ### Without Docker
40 |
41 | #### Installing dependencies
42 |
43 | You can install dependencies using either `conda` or `pip`.
44 |
45 | ##### Conda
46 |
47 | Install [Anaconda](https://www.anaconda.com/products/individual) or [Miniconda](https://docs.conda.io/en/latest/miniconda.html).
48 | (Miniconda is faster to install.) Choose the 64-bit Python 3.x version. Launch the Anaconda shell from the Start menu and navigate to this directory.
49 | Then:
50 |
51 | conda env create -f environment.yml
52 | conda activate fastlayers
53 |
54 | To update an already created environment if the `environment.yml` file changes, first activate and then run `conda env update --file environment.yml --prune`.
55 |
56 | ##### Pip
57 |
58 | Note: One of our dependencies, `cvxopt`, is broken on Apple Silicon with `pip`. Use the `conda` instructions above.
59 |
60 | Install Python 3.6+.
61 |
62 | (Optional) Create a virtual environment:
63 |
64 | python3 -m venv .venv
65 | source .venv/bin/activate
66 |
67 | Install dependencies:
68 |
69 | pip install -r requirements.txt
70 |
71 | (untested) If you want to install the exact version of the dependencies we used, run: `pip install -r requirements.frozen.txt`
72 |
73 | #### On your own
74 |
75 | * Python 3.6+
76 | * Python
77 | * NumPy
78 | * SciPy
79 | * Cython
80 | * [GLPK](https://www.gnu.org/software/glpk/) (`brew install glpk`)
81 | * cvxopt, built with the [GLPK](https://www.gnu.org/software/glpk/) linear programming solver interface (`CVXOPT_BUILD_GLPK=1 pip install cvxopt`)
82 | * PIL or Pillow (Python Image Library) (`pip install Pillow`)
83 | * pyopencl
84 | * websockets (`pip install websockets`)
85 |
86 | #### Compile the one Cython file
87 |
88 | cythonize -i GteDistPointTriangle.pyx
89 |
90 | #### Running the server
91 |
92 | Run the server:
93 |
94 | cd image-layer-updating-GUI
95 | ./runboth.sh
96 |
97 | If you are on Windows (untested), the `runboth.sh` script probably won't work. Instead, run the two Python server commands manually in two separate command lines:
98 |
99 | cd image-layer-updating-GUI
100 | python3 server.py
101 |
102 | and
103 |
104 | cd image-layer-updating-GUI
105 | python3 -m http.server
106 |
107 |
108 | ### Testing
109 |
110 | To test the whole pipeline without launching the GUI server, run `Our_preprocessing_pipeline.ipynb` as a Jupyter notebook.
111 |
112 | You can test if your installation is working by comparing your output to the `test/turquoise groundtruth results/` directory.
113 |
--------------------------------------------------------------------------------
/RGBXY_method.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 |
3 | import numpy as np
4 | import time
5 | import scipy
6 | import json
7 | import Additive_mixing_layers_extraction
8 | from scipy.spatial import ConvexHull, Delaunay
9 | import scipy.sparse
10 | from numpy import *
11 | import PIL.Image as Image
12 |
13 |
def RGBXY_extraction(filepath, palette_rgb, origin_image, mask=None, prefix=None, SAVE=True):
    """
    Decompose an image into per-palette-color mixing weights using the RGBXY method.

    filepath:     path of the input image; used only to derive output filenames.
    palette_rgb:  M*3 array of palette colors in [0,1].
    origin_image: row*col*3 image array in [0,1].
    mask:         optional row*col 0/1 array; when given, only pixels where
                  mask==1 are decomposed (masked foreground/background use).
    prefix:       extra tag inserted into output filenames when mask is given.
    SAVE:         when True, write the reconstructed image, the weights JSON,
                  and one grayscale weight-map PNG per palette color.

    Returns the mixing weights, clipped to [0,1], with shape
    (rows, cols, M) (rows/cols of the masked sub-image when mask is given).
    """
    M=len(palette_rgb)
    if mask is None: ### normal use
        img_copy=origin_image
        img=img_copy.copy() ### do not modify img_copy
        X,Y=np.mgrid[0:img.shape[0], 0:img.shape[1]]
    else: ### for masked foreground and background
        X,Y=np.where(mask==1)
        img_copy=origin_image[X,Y].reshape((1,-1,3))
        img=img_copy.copy() ### do not modify img_copy

    ## Append normalized pixel coordinates to RGB -> 5D RGBXY samples.
    XY=np.dstack((X*1.0/origin_image.shape[0],Y*1.0/origin_image.shape[1]))
    data=np.dstack((img, XY))

    start=time.time()
    data_hull=ConvexHull(data.reshape((-1,5)))

    ### RGB weights of the 5D hull vertices using star triangulation.
    print ("using star triangulation now!")
    mixing_weights_1=Additive_mixing_layers_extraction.Get_ASAP_weights_using_Tan_2016_triangulation_and_then_barycentric_coordinates(img.reshape((-1,3))[data_hull.vertices].reshape((-1,1,3)), palette_rgb, "None", order=0)

    #### RGBXY weights of every pixel over the 5D hull vertices.
    mixing_weights_2=Additive_mixing_layers_extraction.recover_ASAP_weights_using_scipy_delaunay(data_hull.points[data_hull.vertices], data_hull.points, option=3)

    end=time.time()
    print ("RGBXY method extract mixing weights using time: ", end-start)

    ## Compose the two factorizations: pixel->hull-vertex times hull-vertex->palette.
    mixing_weights=mixing_weights_2.dot(mixing_weights_1.reshape((-1,M)))
    mixing_weights=mixing_weights.reshape((img.shape[0],img.shape[1],-1)).clip(0,1)

    ## Reconstruct the image from the weights to report the approximation error.
    temp=(mixing_weights.reshape((img.shape[0],img.shape[1],-1,1))*palette_rgb.reshape((1,1,-1,3))).sum(axis=2)

    if SAVE:
        recover_name=filepath[:-4]+"-palette_size-"+str(len(palette_rgb))+"-recovered_image-using_5D_hull.png"
        Image.fromarray((temp*255).round().clip(0,255).astype(np.uint8)).save(recover_name)

    img_diff=temp*255-img_copy*255
    diff=square(img_diff.reshape((-1,3))).sum(axis=-1)
    rmse=sqrt(diff.sum()/diff.shape[0])
    print ('Reconstruction RMSE: ', rmse)


    if SAVE:
        if mask is None: ### normal image
            mixing_weights_filename=filepath[:-4]+"-palette_size-"+str(len(palette_rgb))+"-linear_mixing-weights-using_5D_hull.js"
            ## JSON is text: open in 'w'.  ('wb' makes json.dump raise TypeError on Python 3.)
            with open(mixing_weights_filename,'w') as myfile:
                json.dump({'weights': mixing_weights.tolist()}, myfile)
            for i in range(mixing_weights.shape[-1]):
                mixing_weights_map_filename=filepath[:-4]+"-palette_size-"+str(len(palette_rgb))+"-linear_mixing-weights_map-using_5D_hull-%02d.png" % i
                Image.fromarray((mixing_weights[:,:,i]*255).round().clip(0,255).astype(np.uint8)).save(mixing_weights_map_filename)
        else: ### for foreground and background image

            ### map back to original shape to show weights map as image format.
            weights_map=np.zeros((origin_image.shape[0],origin_image.shape[1], M))
            weights_map[np.where(mask==1)]=mixing_weights.reshape((-1,M))

            mixing_weights_filename=filepath[:-4]+"-"+prefix+"-palette_size-"+str(len(palette_rgb))+"-linear_mixing-weights-using_5D_hull.js"
            with open(mixing_weights_filename,'w') as myfile:
                json.dump({'weights': weights_map.tolist()}, myfile)

            for i in range(M):
                mixing_weights_map_filename=filepath[:-4]+"-"+prefix+"-palette_size-"+str(len(palette_rgb))+"-linear_mixing-weights_map-using_5D_hull-%02d.png" % i
                Image.fromarray((weights_map[:,:,i]*255).round().clip(0,255).astype(np.uint8)).save(mixing_weights_map_filename)



    return mixing_weights
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
--------------------------------------------------------------------------------
/RGBXY_method_modified_for_huge_image.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import time
3 | import scipy
4 | from scipy.spatial import ConvexHull, Delaunay
5 | import scipy.sparse
6 | from numpy import *
7 |
def recover_ASAP_weights_using_scipy_delaunay(Hull_vertices, data):
    """Express each point in `data` as barycentric weights of `Hull_vertices`.

    Based on https://codereview.stackexchange.com/questions/41024/faster-computation-of-barycentric-coordinates-for-many-points (Gareth Rees).

    Parameters
    ----------
    Hull_vertices : (V, D) array
        Vertices of the simplified convex hull.
    data : (N, D) array
        Points to decompose (processed in chunks to bound peak memory on
        huge images, which is the purpose of this module).

    Returns
    -------
    scipy.sparse.csr_matrix of shape (N, V)
        Row i holds the barycentric weights of data[i] w.r.t. the D+1
        corners of its containing Delaunay simplex; other columns are 0.

    NOTE(review): points that fall outside the hull make find_simplex
    return -1, which silently indexes the *last* simplex below — same as
    the original code; callers should pass points inside the hull.
    """
    start = time.time()
    # Triangulate the hull vertices so every query point can be located
    # inside exactly one simplex.
    tri = Delaunay(Hull_vertices)
    end = time.time()
    print("delaunay time: ", end - start)

    ndim = data.shape[1]
    num_vertices = len(Hull_vertices)

    CHUNK_SIZE = 1000
    # Ceiling division: the original `range(len(data)/CHUNK_SIZE)` is a
    # TypeError on Python 3 and, as floor division, dropped the final
    # partial chunk of len(data) % CHUNK_SIZE points.
    num_chunks = (len(data) + CHUNK_SIZE - 1) // CHUNK_SIZE

    chunk_matrices = []
    for i in range(num_chunks):
        if i % 1000 == 0:
            print(i)

        targets = data[i * CHUNK_SIZE:(i + 1) * CHUNK_SIZE]

        # Simplex containing each target (-1 if not found; tol absorbs
        # points numerically just outside the hull).
        tetrahedra = tri.find_simplex(targets, tol=1e-6)

        # Affine transformation of the simplex containing each target...
        X = tri.transform[tetrahedra, :ndim]
        # ...and each target's offset from that simplex's origin vertex.
        Y = targets - tri.transform[tetrahedra, ndim]

        # First D barycentric coordinates; the last one is 1 - sum(rest).
        b = np.einsum('...jk,...k->...j', X, Y)
        barycoords = np.c_[b, 1. - b.sum(axis=1)]

        # Scatter the D+1 per-point weights into a sparse (chunk x V) matrix.
        corners_per_simplex = len(tri.simplices[0])
        rows = np.repeat(np.arange(len(targets)).reshape((-1, 1)), corners_per_simplex, 1).ravel()
        cols = tri.simplices[tetrahedra].ravel()
        vals = barycoords.ravel()
        chunk_matrices.append(
            scipy.sparse.coo_matrix((vals, (rows, cols)),
                                    shape=(len(targets), num_vertices)).tocsr())

    if not chunk_matrices:
        # Empty input: return an empty (0, V) sparse matrix.
        return scipy.sparse.csr_matrix((0, num_vertices))

    # The original computed each chunk's weights and then discarded them
    # without returning anything; stack and return them instead.
    return scipy.sparse.vstack(chunk_matrices).tocsr()
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/__init__.py
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python runtime as a parent image
2 | FROM python:3.7
3 |
4 | # Install git and GLPK, which are outside of Python requirements.txt
5 | RUN apt-get update && apt-get install -y libglpk-dev
6 | # Also install a CPU OpenCL implementation.
7 | # UPDATE: pocl-opencl-icd doesn't implement enough of OpenCL. I get the error:
8 | ## Device side queue is unimplemented (clCreateCommandQueueWithProperties.c:93)
9 | # RUN apt-get update && apt-get install -y libglpk-dev pocl-opencl-icd
10 |
11 | # Set the working directory to /app
12 | WORKDIR /app
13 |
14 | # Copy the code
15 | COPY . /app/fastLayerDecomposition/
16 |
17 | WORKDIR /app/fastLayerDecomposition
18 |
19 | # Install Python dependencies via requirements.txt
20 | RUN CVXOPT_BUILD_GLPK=1 pip3 install -r requirements.txt
21 |
22 | # Run the server
23 | WORKDIR /app/fastLayerDecomposition/image-layer-updating-GUI/
24 |
25 | # Expose port 9988 for the websocket
26 | EXPOSE 9988
27 | # Expose port 8000 for the GUI
28 | EXPOSE 8000
29 |
30 | # Tell PyOpenCL to use device 0 and not prompt.
31 | # ENV PYOPENCL_CTX='0'
32 |
33 | CMD sh runboth.sh
34 |
35 | ## Create fastlayer image by running the following next to the Dockerfile:
36 | # docker build -f docker/Dockerfile -t fastlayer https://github.com/CraGL/fastLayerDecomposition.git
37 | ## Run fastlayer with:
38 | # docker run -p 8000:8000 -p 9988:9988 --rm -it fastlayer
39 | ## Open your web browser to: http://localhost:8000/
40 |
--------------------------------------------------------------------------------
/docker/README.md:
--------------------------------------------------------------------------------
1 | ## Easy installation
2 |
3 | You don't need to clone or download this, since the Dockerfile has been pushed to Docker Hub. You can simply install via:
4 |
5 | docker pull cragl/fastlayers
6 |
7 | and run the server via:
8 |
9 | docker run -p 8000:8000 -p 9988:9988 cragl/fastlayers
10 |
11 | Open your web browser to: http://localhost:8000/
12 |
13 | This Docker installation doesn't use OpenCL.
14 |
15 | ## Without DockerHub
16 |
17 | If you'd like to build without docker hub, you can run the following without checking out this repository:
18 |
19 | docker build -f docker/Dockerfile -t fastlayers https://github.com/CraGL/fastLayerDecomposition.git
20 |
21 | If you want to edit the code, check out the repository, `cd fastLayerDecomposition`, and then:
22 |
23 | docker build -f docker/Dockerfile -t fastlayers .
24 |
25 | Run the server via:
26 |
27 | docker run -p 8000:8000 -p 9988:9988 fastlayers
28 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: fastlayers
2 | channels:
3 | - conda-forge
4 | - defaults
5 | dependencies:
6 | - python=3.8
7 | - numpy
8 | - scipy
9 | - pillow
10 | - cvxopt
11 | - cython
12 | - websockets
13 | - pip
14 | - git
15 | - pip:
16 | - pyopencl
17 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/ChangeLog.txt:
--------------------------------------------------------------------------------
1 | ## 2018-01-15
2 |
3 | You can now drag additional overlay vertices. For example, dragging the following will add a few smaller, black-rimmed vertices:
4 |
5 | {
6 | "clear": false, "vertex_radius": 0.007843, "vertex_rim_radius": 0.013, "vertex_rim_color": "black",
7 | "vs": [[95, 155, 156], [96, 154, 156], [95, 153, 155], [93, 138, 139], [77, 102, 104]]
8 | }
9 |
10 | `"clear": false` means to also show these vertices rather than clearing the old ones; `"clear": true` replaces them.
11 | The other properties control the size of the color, the size of its rim, and the color of its rim.
12 |
13 | ## 2015-12-05
14 |
15 | You can save the modified overlay mesh.
16 |
17 | You can save the camera orientation. You can load a camera orientation by dragging-and-dropping the saved file.
18 |
19 | Screenshots are saved to the downloads folder along with the camera orientation.
20 |
21 |
22 | ## 2015-12-03
23 |
24 | Now, you can drop a JSON file on it with weights, and you can drag the vertices around.
25 |
26 | The format is a JSON file containing a dictionary with a key named 'weights' whose value is an array of arrays of arrays, where the outermost array is the rows of the image, then the columns of the image, and finally the weights for that pixel.
27 |
28 | Here is an example:
29 |
30 | { "weights": [
31 | [ [ pixel00_weight0, pixel00_weight1, pixel00_weight2, ... ], [ pixel01_weight0, pixel01_weight1, ... ], ... ],
32 | [ [ pixel10_weight0, pixel10_weight1, ... ], ... ], ... ],
33 | ...
34 | ]
35 | }
36 |
37 | If you were in Python, you could do this:
38 |
39 | import numpy as np
40 | import json
41 | weights = np.zeros( ( N, M, num_weights ) )
42 | ## Fill `weights` with weights.
43 | with open( 'weights.json', 'w' ) as f: json.dump( { 'weights': weights.tolist() }, f )
44 |
45 | Check it out:
46 | https://yig.github.io/image-rgb-in-3D
47 |
48 | Also, screenshots are turned on by default.
49 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/README.md:
--------------------------------------------------------------------------------
1 | # RGB is 3D
2 |
3 | Visualize the colors of an image as a 3D RGB point cloud.
4 |
5 | [Try it.](https://yig.github.io/image-rgb-in-3D)
6 | (Requires WebGL.)
7 |
8 | The image to load can be passed as an 'image' GET parameter or uploaded directly from your computer.
9 |
10 |
11 | ## Credits
12 |
13 | Code is based off of [THREE.js example webgl_buffergeometry_particles](http://threejs.org/examples/webgl_buffergeometry_particles.html).
14 |
15 | [Oranges](http://www.ars.usda.gov/is/graphics/photos/k3644-12.html) and [pigs](http://www.ars.usda.gov/is/graphics/photos/mar05/k9455-9.htm)
16 | photos are public domain, from the US Department of Agriculture. (Pigs photo is by Scott Bauer.)
17 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/image-layer-updating-GUI/__init__.py
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/Blob.js:
--------------------------------------------------------------------------------
1 | /* Blob.js
2 | * A Blob implementation.
3 | * 2014-07-24
4 | *
5 | * By Eli Grey, http://eligrey.com
6 | * By Devin Samarin, https://github.com/dsamarin
7 | * License: MIT
8 | * See https://github.com/eligrey/Blob.js/blob/master/LICENSE.md
9 | */
10 |
11 | /*global self, unescape */
12 | /*jslint bitwise: true, regexp: true, confusion: true, es5: true, vars: true, white: true,
13 | plusplus: true */
14 |
15 | /*! @source http://purl.eligrey.com/github/Blob.js/blob/master/Blob.js */
16 |
// Vendored third-party polyfill (Eli Grey's Blob.js, MIT) — provides a
// Blob constructor on browsers that only expose the older BlobBuilder
// API. Keep in sync with upstream; avoid local logic edits.
(function (view) {
	"use strict";

	view.URL = view.URL || view.webkitURL;

	// Native Blob + URL present and constructible: nothing to polyfill.
	if (view.Blob && view.URL) {
		try {
			new Blob;
			return;
		} catch (e) {}
	}

	// Internally we use a BlobBuilder implementation to base Blob off of
	// in order to support older browsers that only have BlobBuilder
	var BlobBuilder = view.BlobBuilder || view.WebKitBlobBuilder || view.MozBlobBuilder || (function(view) {
		var
			  get_class = function(object) {
				return Object.prototype.toString.call(object).match(/^\[object\s(.*)\]$/)[1];
			}
			, FakeBlobBuilder = function BlobBuilder() {
				this.data = [];
			}
			, FakeBlob = function Blob(data, type, encoding) {
				this.data = data;
				this.size = data.length;
				this.type = type;
				this.encoding = encoding;
			}
			, FBB_proto = FakeBlobBuilder.prototype
			, FB_proto = FakeBlob.prototype
			, FileReaderSync = view.FileReaderSync
			, FileException = function(type) {
				this.code = this[this.name = type];
			}
			, file_ex_codes = (
				  "NOT_FOUND_ERR SECURITY_ERR ABORT_ERR NOT_READABLE_ERR ENCODING_ERR "
				+ "NO_MODIFICATION_ALLOWED_ERR INVALID_STATE_ERR SYNTAX_ERR"
			).split(" ")
			, file_ex_code = file_ex_codes.length
			, real_URL = view.URL || view.webkitURL || view
			, real_create_object_URL = real_URL.createObjectURL
			, real_revoke_object_URL = real_URL.revokeObjectURL
			, URL = real_URL
			, btoa = view.btoa
			, atob = view.atob

			, ArrayBuffer = view.ArrayBuffer
			, Uint8Array = view.Uint8Array

			, origin = /^[\w-]+:\/*\[?[\w\.:-]+\]?(?::[0-9]+)?/
		;
		FakeBlob.fake = FB_proto.fake = true;
		// Assign numeric codes (1-based) to the FileException constants.
		while (file_ex_code--) {
			FileException.prototype[file_ex_codes[file_ex_code]] = file_ex_code + 1;
		}
		// Polyfill URL
		if (!real_URL.createObjectURL) {
			URL = view.URL = function(uri) {
				var
					  uri_info = document.createElementNS("http://www.w3.org/1999/xhtml", "a")
					, uri_origin
				;
				uri_info.href = uri;
				if (!("origin" in uri_info)) {
					if (uri_info.protocol.toLowerCase() === "data:") {
						uri_info.origin = null;
					} else {
						uri_origin = uri.match(origin);
						uri_info.origin = uri_origin && uri_origin[1];
					}
				}
				return uri_info;
			};
		}
		// Fake blobs become data: URIs; real blobs delegate to the native URL API.
		URL.createObjectURL = function(blob) {
			var
				  type = blob.type
				, data_URI_header
			;
			if (type === null) {
				type = "application/octet-stream";
			}
			if (blob instanceof FakeBlob) {
				data_URI_header = "data:" + type;
				if (blob.encoding === "base64") {
					return data_URI_header + ";base64," + blob.data;
				} else if (blob.encoding === "URI") {
					return data_URI_header + "," + decodeURIComponent(blob.data);
				} if (btoa) {
					return data_URI_header + ";base64," + btoa(blob.data);
				} else {
					return data_URI_header + "," + encodeURIComponent(blob.data);
				}
			} else if (real_create_object_URL) {
				return real_create_object_URL.call(real_URL, blob);
			}
		};
		// data: URIs need no revocation; everything else goes to the native API.
		URL.revokeObjectURL = function(object_URL) {
			if (object_URL.substring(0, 5) !== "data:" && real_revoke_object_URL) {
				real_revoke_object_URL.call(real_URL, object_URL);
			}
		};
		// Append a part (ArrayBuffer/typed array, Blob/File, FakeBlob, or
		// anything string-coercible) to the builder as a binary string.
		FBB_proto.append = function(data/*, endings*/) {
			var bb = this.data;
			// decode data to a binary string
			if (Uint8Array && (data instanceof ArrayBuffer || data instanceof Uint8Array)) {
				var
					  str = ""
					, buf = new Uint8Array(data)
					, i = 0
					, buf_len = buf.length
				;
				for (; i < buf_len; i++) {
					str += String.fromCharCode(buf[i]);
				}
				bb.push(str);
			} else if (get_class(data) === "Blob" || get_class(data) === "File") {
				if (FileReaderSync) {
					var fr = new FileReaderSync;
					bb.push(fr.readAsBinaryString(data));
				} else {
					// async FileReader won't work as BlobBuilder is sync
					throw new FileException("NOT_READABLE_ERR");
				}
			} else if (data instanceof FakeBlob) {
				if (data.encoding === "base64" && atob) {
					bb.push(atob(data.data));
				} else if (data.encoding === "URI") {
					bb.push(decodeURIComponent(data.data));
				} else if (data.encoding === "raw") {
					bb.push(data.data);
				}
			} else {
				if (typeof data !== "string") {
					data += ""; // convert unsupported types to strings
				}
				// decode UTF-16 to binary string
				bb.push(unescape(encodeURIComponent(data)));
			}
		};
		FBB_proto.getBlob = function(type) {
			if (!arguments.length) {
				type = null;
			}
			return new FakeBlob(this.data.join(""), type, "raw");
		};
		FBB_proto.toString = function() {
			return "[object BlobBuilder]";
		};
		FB_proto.slice = function(start, end, type) {
			var args = arguments.length;
			if (args < 3) {
				type = null;
			}
			return new FakeBlob(
				  this.data.slice(start, args > 1 ? end : this.data.length)
				, type
				, this.encoding
			);
		};
		FB_proto.toString = function() {
			return "[object Blob]";
		};
		FB_proto.close = function() {
			this.size = 0;
			delete this.data;
		};
		return FakeBlobBuilder;
	}(view));

	// Public Blob constructor built on top of whichever BlobBuilder was found.
	view.Blob = function(blobParts, options) {
		var type = options ? (options.type || "") : "";
		var builder = new BlobBuilder();
		if (blobParts) {
			for (var i = 0, len = blobParts.length; i < len; i++) {
				if (Uint8Array && blobParts[i] instanceof Uint8Array) {
					builder.append(blobParts[i].buffer);
				}
				else {
					builder.append(blobParts[i]);
				}
			}
		}
		var blob = builder.getBlob(type);
		if (!blob.slice && blob.webkitSlice) {
			blob.slice = blob.webkitSlice;
		}
		return blob;
	};

	var getPrototypeOf = Object.getPrototypeOf || function(object) {
		return object.__proto__;
	};
	view.Blob.prototype = getPrototypeOf(new view.Blob());
}(typeof self !== "undefined" && self || typeof window !== "undefined" && window || this.content || this));
212 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/Detector.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @author alteredq / http://alteredqualia.com/
3 | * @author mr.doob / http://mrdoob.com/
4 | */
5 |
// Vendored three.js helper: feature detection for browser capabilities
// used by the GUI (canvas 2D, WebGL, Web Workers, File API), plus
// helpers that build/insert a "WebGL not supported" message element.
var Detector = {

	canvas: !! window.CanvasRenderingContext2D,
	// Try to obtain a WebGL context on a scratch canvas; any failure means no WebGL.
	webgl: ( function () { try { var canvas = document.createElement( 'canvas' ); return !! ( window.WebGLRenderingContext && ( canvas.getContext( 'webgl' ) || canvas.getContext( 'experimental-webgl' ) ) ); } catch( e ) { return false; } } )(),
	workers: !! window.Worker,
	fileapi: window.File && window.FileReader && window.FileList && window.Blob,

	// Build (but do not insert) a styled <div> explaining that WebGL is unavailable.
	getWebGLErrorMessage: function () {

		var element = document.createElement( 'div' );
		element.id = 'webgl-error-message';
		element.style.fontFamily = 'monospace';
		element.style.fontSize = '13px';
		element.style.fontWeight = 'normal';
		element.style.textAlign = 'center';
		element.style.background = '#fff';
		element.style.color = '#000';
		element.style.padding = '1.5em';
		element.style.width = '400px';
		element.style.margin = '5em auto 0';

		if ( ! this.webgl ) {

			// NOTE(review): the upstream string literals contain <br/> and
			// <a href=...> HTML tags that appear stripped in this listing,
			// leaving the literals broken across lines — restore from the
			// upstream three.js Detector.js before editing this file.
			element.innerHTML = window.WebGLRenderingContext ? [
				'Your graphics card does not seem to support WebGL.
',
				'Find out how to get it here.'
			].join( '\n' ) : [
				'Your browser does not seem to support WebGL.
',
				'Find out how to get it here.'
			].join( '\n' );

		}

		return element;

	},

	// Insert the error message element into `parameters.parent`
	// (default: document.body) with id `parameters.id` (default: 'oldie').
	addGetWebGLMessage: function ( parameters ) {

		var parent, id, element;

		parameters = parameters || {};

		parent = parameters.parent !== undefined ? parameters.parent : document.body;
		id = parameters.id !== undefined ? parameters.id : 'oldie';

		element = Detector.getWebGLErrorMessage();
		element.id = id;

		parent.appendChild( element );

	}

};

// browserify support
if ( typeof module === 'object' ) {

	module.exports = Detector;

}
67 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/canvas-toBlob.js:
--------------------------------------------------------------------------------
1 | /* canvas-toBlob.js
2 | * A canvas.toBlob() implementation.
3 | * 2016-05-26
4 | *
5 | * By Eli Grey, http://eligrey.com and Devin Samarin, https://github.com/eboyjr
6 | * License: MIT
7 | * See https://github.com/eligrey/canvas-toBlob.js/blob/master/LICENSE.md
8 | */
9 |
10 | /*global self */
11 | /*jslint bitwise: true, regexp: true, confusion: true, es5: true, vars: true, white: true,
12 | plusplus: true */
13 |
14 | /*! @source http://purl.eligrey.com/github/canvas-toBlob.js/blob/master/canvas-toBlob.js */
15 |
// Vendored third-party polyfill (Eli Grey's canvas-toBlob.js, MIT):
// adds HTMLCanvasElement.prototype.toBlob/toBlobHD on browsers that
// lack them, by decoding the canvas's toDataURL output into a Blob.
// Keep in sync with upstream; avoid local logic edits.
(function(view) {
	"use strict";
	var
		  Uint8Array = view.Uint8Array
		, HTMLCanvasElement = view.HTMLCanvasElement
		, canvas_proto = HTMLCanvasElement && HTMLCanvasElement.prototype
		, is_base64_regex = /\s*;\s*base64\s*(?:;|$)/i
		, to_data_url = "toDataURL"
		, base64_ranks
		// Hand-rolled base64 decoder: maps 4 input characters to 3 output
		// bytes, honoring '=' padding (char code 61).
		, decode_base64 = function(base64) {
			var
				  len = base64.length
				, buffer = new Uint8Array(len / 4 * 3 | 0)
				, i = 0
				, outptr = 0
				, last = [0, 0]
				, state = 0
				, save = 0
				, rank
				, code
				, undef
			;
			while (len--) {
				code = base64.charCodeAt(i++);
				rank = base64_ranks[code-43];
				if (rank !== 255 && rank !== undef) {
					last[1] = last[0];
					last[0] = code;
					save = (save << 6) | rank;
					state++;
					if (state === 4) {
						buffer[outptr++] = save >>> 16;
						if (last[1] !== 61 /* padding character */) {
							buffer[outptr++] = save >>> 8;
						}
						if (last[0] !== 61 /* padding character */) {
							buffer[outptr++] = save;
						}
						state = 0;
					}
				}
			}
			// 2/3 chance there's going to be some null bytes at the end, but that
			// doesn't really matter with most image formats.
			// If it somehow matters for you, truncate the buffer up outptr.
			return buffer;
		}
	;
	if (Uint8Array) {
		// Lookup table from (charCode - 43) to base64 rank; -1 wraps to 255
		// in a Uint8Array and marks characters that are not part of the alphabet.
		base64_ranks = new Uint8Array([
			  62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -1
			, -1, -1, 0, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9
			, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25
			, -1, -1, -1, -1, -1, -1, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35
			, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51
		]);
	}
	if (HTMLCanvasElement && (!canvas_proto.toBlob || !canvas_proto.toBlobHD)) {
		if (!canvas_proto.toBlob)
		canvas_proto.toBlob = function(callback, type /*, ...args*/) {
			  if (!type) {
				type = "image/png";
			} if (this.mozGetAsFile) {
				// Firefox fast path.
				callback(this.mozGetAsFile("canvas", type));
				return;
			} if (this.msToBlob && /^\s*image\/png\s*(?:$|;)/i.test(type)) {
				// IE fast path (PNG only).
				callback(this.msToBlob());
				return;
			}

			var
				  args = Array.prototype.slice.call(arguments, 1)
				, dataURI = this[to_data_url].apply(this, args)
				, header_end = dataURI.indexOf(",")
				, data = dataURI.substring(header_end + 1)
				, is_base64 = is_base64_regex.test(dataURI.substring(0, header_end))
				, blob
			;
			if (Blob.fake) {
				// no reason to decode a data: URI that's just going to become a data URI again
				blob = new Blob
				if (is_base64) {
					blob.encoding = "base64";
				} else {
					blob.encoding = "URI";
				}
				blob.data = data;
				blob.size = data.length;
			} else if (Uint8Array) {
				if (is_base64) {
					blob = new Blob([decode_base64(data)], {type: type});
				} else {
					blob = new Blob([decodeURIComponent(data)], {type: type});
				}
			}
			callback(blob);
		};

		if (!canvas_proto.toBlobHD && canvas_proto.toDataURLHD) {
			// toBlobHD temporarily reroutes toBlob through toDataURLHD.
			canvas_proto.toBlobHD = function() {
				to_data_url = "toDataURLHD";
				var blob = this.toBlob();
				to_data_url = "toDataURL";
				return blob;
			}
		} else {
			canvas_proto.toBlobHD = canvas_proto.toBlob;
		}
	}
}(typeof self !== "undefined" && self || typeof window !== "undefined" && window || this.content || this));
126 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/controls/TrackballControls.js:
--------------------------------------------------------------------------------
1 | /**
2 | * @author Eberhard Graether / http://egraether.com/
3 | * @author Mark Lundin / http://mark-lundin.com
4 | */
5 |
6 | THREE.TrackballControls = function ( object, domElement ) {
7 |
8 | var _this = this;
9 | var STATE = { NONE: -1, ROTATE: 0, ZOOM: 1, PAN: 2, TOUCH_ROTATE: 3, TOUCH_ZOOM_PAN: 4 };
10 |
11 | this.object = object;
12 | this.domElement = ( domElement !== undefined ) ? domElement : document;
13 |
14 | // API
15 |
16 | this.enabled = true;
17 |
18 | this.screen = { left: 0, top: 0, width: 0, height: 0 };
19 |
20 | this.rotateSpeed = 1.0;
21 | this.zoomSpeed = 1.2;
22 | this.panSpeed = 0.3;
23 |
24 | this.noRotate = false;
25 | this.noZoom = false;
26 | this.noPan = false;
27 | this.noRoll = false;
28 |
29 | this.staticMoving = false;
30 | this.dynamicDampingFactor = 0.2;
31 |
32 | this.minDistance = 0;
33 | this.maxDistance = Infinity;
34 |
35 | this.keys = [ 65 /*A*/, 83 /*S*/, 68 /*D*/ ];
36 |
37 | // internals
38 |
39 | this.target = new THREE.Vector3();
40 |
41 | var EPS = 0.000001;
42 |
43 | var lastPosition = new THREE.Vector3();
44 |
45 | var _state = STATE.NONE,
46 | _prevState = STATE.NONE,
47 |
48 | _eye = new THREE.Vector3(),
49 |
50 | _rotateStart = new THREE.Vector3(),
51 | _rotateEnd = new THREE.Vector3(),
52 |
53 | _zoomStart = new THREE.Vector2(),
54 | _zoomEnd = new THREE.Vector2(),
55 |
56 | _touchZoomDistanceStart = 0,
57 | _touchZoomDistanceEnd = 0,
58 |
59 | _panStart = new THREE.Vector2(),
60 | _panEnd = new THREE.Vector2();
61 |
62 | // for reset
63 |
64 | this.target0 = this.target.clone();
65 | this.position0 = this.object.position.clone();
66 | this.up0 = this.object.up.clone();
67 |
68 | // events
69 |
70 | var changeEvent = { type: 'change' };
71 | var startEvent = { type: 'start'};
72 | var endEvent = { type: 'end'};
73 |
74 |
75 | // methods
76 |
77 | this.handleResize = function () {
78 |
79 | if ( this.domElement === document ) {
80 |
81 | this.screen.left = 0;
82 | this.screen.top = 0;
83 | this.screen.width = window.innerWidth;
84 | this.screen.height = window.innerHeight;
85 |
86 | } else {
87 |
88 | var box = this.domElement.getBoundingClientRect();
89 | // adjustments come from similar code in the jquery offset() function
90 | var d = this.domElement.ownerDocument.documentElement;
91 | this.screen.left = box.left + window.pageXOffset - d.clientLeft;
92 | this.screen.top = box.top + window.pageYOffset - d.clientTop;
93 | this.screen.width = box.width;
94 | this.screen.height = box.height;
95 |
96 | }
97 |
98 | };
99 |
100 | this.handleEvent = function ( event ) {
101 |
102 | if ( typeof this[ event.type ] == 'function' ) {
103 |
104 | this[ event.type ]( event );
105 |
106 | }
107 |
108 | };
109 |
110 | var getMouseOnScreen = ( function () {
111 |
112 | var vector = new THREE.Vector2();
113 |
114 | return function ( pageX, pageY ) {
115 |
116 | vector.set(
117 | ( pageX - _this.screen.left ) / _this.screen.width,
118 | ( pageY - _this.screen.top ) / _this.screen.height
119 | );
120 |
121 | return vector;
122 |
123 | };
124 |
125 | }() );
126 |
127 | var getMouseProjectionOnBall = ( function () {
128 |
129 | var vector = new THREE.Vector3();
130 | var objectUp = new THREE.Vector3();
131 | var mouseOnBall = new THREE.Vector3();
132 |
133 | return function ( pageX, pageY ) {
134 |
135 | mouseOnBall.set(
136 | ( pageX - _this.screen.width * 0.5 - _this.screen.left ) / (_this.screen.width*.5),
137 | ( _this.screen.height * 0.5 + _this.screen.top - pageY ) / (_this.screen.height*.5),
138 | 0.0
139 | );
140 |
141 | var length = mouseOnBall.length();
142 |
143 | if ( _this.noRoll ) {
144 |
145 | if ( length < Math.SQRT1_2 ) {
146 |
147 | mouseOnBall.z = Math.sqrt( 1.0 - length*length );
148 |
149 | } else {
150 |
151 | mouseOnBall.z = .5 / length;
152 |
153 | }
154 |
155 | } else if ( length > 1.0 ) {
156 |
157 | mouseOnBall.normalize();
158 |
159 | } else {
160 |
161 | mouseOnBall.z = Math.sqrt( 1.0 - length * length );
162 |
163 | }
164 |
165 | _eye.copy( _this.object.position ).sub( _this.target );
166 |
167 | vector.copy( _this.object.up ).setLength( mouseOnBall.y )
168 | vector.add( objectUp.copy( _this.object.up ).cross( _eye ).setLength( mouseOnBall.x ) );
169 | vector.add( _eye.setLength( mouseOnBall.z ) );
170 |
171 | return vector;
172 |
173 | };
174 |
175 | }() );
176 |
177 | this.rotateCamera = (function(){
178 |
179 | var axis = new THREE.Vector3(),
180 | quaternion = new THREE.Quaternion();
181 |
182 |
183 | return function () {
184 |
185 | var angle = Math.acos( _rotateStart.dot( _rotateEnd ) / _rotateStart.length() / _rotateEnd.length() );
186 |
187 | if ( angle ) {
188 |
189 | axis.crossVectors( _rotateStart, _rotateEnd ).normalize();
190 |
191 | angle *= _this.rotateSpeed;
192 |
193 | quaternion.setFromAxisAngle( axis, -angle );
194 |
195 | _eye.applyQuaternion( quaternion );
196 | _this.object.up.applyQuaternion( quaternion );
197 |
198 | _rotateEnd.applyQuaternion( quaternion );
199 |
200 | if ( _this.staticMoving ) {
201 |
202 | _rotateStart.copy( _rotateEnd );
203 |
204 | } else {
205 |
206 | quaternion.setFromAxisAngle( axis, angle * ( _this.dynamicDampingFactor - 1.0 ) );
207 | _rotateStart.applyQuaternion( quaternion );
208 |
209 | }
210 |
211 | }
212 | }
213 |
214 | }());
215 |
216 | this.zoomCamera = function () {
217 |
218 | if ( _state === STATE.TOUCH_ZOOM_PAN ) {
219 |
220 | var factor = _touchZoomDistanceStart / _touchZoomDistanceEnd;
221 | _touchZoomDistanceStart = _touchZoomDistanceEnd;
222 | _eye.multiplyScalar( factor );
223 |
224 | } else {
225 |
226 | var factor = 1.0 + ( _zoomEnd.y - _zoomStart.y ) * _this.zoomSpeed;
227 |
228 | if ( factor !== 1.0 && factor > 0.0 ) {
229 |
230 | _eye.multiplyScalar( factor );
231 |
232 | if ( _this.staticMoving ) {
233 |
234 | _zoomStart.copy( _zoomEnd );
235 |
236 | } else {
237 |
238 | _zoomStart.y += ( _zoomEnd.y - _zoomStart.y ) * this.dynamicDampingFactor;
239 |
240 | }
241 |
242 | }
243 |
244 | }
245 |
246 | };
247 |
248 | this.panCamera = (function(){
249 |
250 | var mouseChange = new THREE.Vector2(),
251 | objectUp = new THREE.Vector3(),
252 | pan = new THREE.Vector3();
253 |
254 | return function () {
255 |
256 | mouseChange.copy( _panEnd ).sub( _panStart );
257 |
258 | if ( mouseChange.lengthSq() ) {
259 |
260 | mouseChange.multiplyScalar( _eye.length() * _this.panSpeed );
261 |
262 | pan.copy( _eye ).cross( _this.object.up ).setLength( mouseChange.x );
263 | pan.add( objectUp.copy( _this.object.up ).setLength( mouseChange.y ) );
264 |
265 | _this.object.position.add( pan );
266 | _this.target.add( pan );
267 |
268 | if ( _this.staticMoving ) {
269 |
270 | _panStart.copy( _panEnd );
271 |
272 | } else {
273 |
274 | _panStart.add( mouseChange.subVectors( _panEnd, _panStart ).multiplyScalar( _this.dynamicDampingFactor ) );
275 |
276 | }
277 |
278 | }
279 | }
280 |
281 | }());
282 |
283 | this.checkDistances = function () {
284 |
285 | if ( !_this.noZoom || !_this.noPan ) {
286 |
287 | if ( _eye.lengthSq() > _this.maxDistance * _this.maxDistance ) {
288 |
289 | _this.object.position.addVectors( _this.target, _eye.setLength( _this.maxDistance ) );
290 |
291 | }
292 |
293 | if ( _eye.lengthSq() < _this.minDistance * _this.minDistance ) {
294 |
295 | _this.object.position.addVectors( _this.target, _eye.setLength( _this.minDistance ) );
296 |
297 | }
298 |
299 | }
300 |
301 | };
302 |
303 | this.update = function () {
304 |
305 | _eye.subVectors( _this.object.position, _this.target );
306 |
307 | if ( !_this.noRotate ) {
308 |
309 | _this.rotateCamera();
310 |
311 | }
312 |
313 | if ( !_this.noZoom ) {
314 |
315 | _this.zoomCamera();
316 |
317 | }
318 |
319 | if ( !_this.noPan ) {
320 |
321 | _this.panCamera();
322 |
323 | }
324 |
325 | _this.object.position.addVectors( _this.target, _eye );
326 |
327 | _this.checkDistances();
328 |
329 | _this.object.lookAt( _this.target );
330 |
331 | if ( lastPosition.distanceToSquared( _this.object.position ) > EPS ) {
332 |
333 | _this.dispatchEvent( changeEvent );
334 |
335 | lastPosition.copy( _this.object.position );
336 |
337 | }
338 |
339 | };
340 |
341 | this.reset = function () {
342 |
343 | _state = STATE.NONE;
344 | _prevState = STATE.NONE;
345 |
346 | _this.target.copy( _this.target0 );
347 | _this.object.position.copy( _this.position0 );
348 | _this.object.up.copy( _this.up0 );
349 |
350 | _eye.subVectors( _this.object.position, _this.target );
351 |
352 | _this.object.lookAt( _this.target );
353 |
354 | _this.dispatchEvent( changeEvent );
355 |
356 | lastPosition.copy( _this.object.position );
357 |
358 | };
359 |
360 | // listeners
361 |
362 | function keydown( event ) {
363 |
364 | if ( _this.enabled === false ) return;
365 |
366 | window.removeEventListener( 'keydown', keydown );
367 |
368 | _prevState = _state;
369 |
370 | if ( _state !== STATE.NONE ) {
371 |
372 | return;
373 |
374 | } else if ( event.keyCode === _this.keys[ STATE.ROTATE ] && !_this.noRotate ) {
375 |
376 | _state = STATE.ROTATE;
377 |
378 | } else if ( event.keyCode === _this.keys[ STATE.ZOOM ] && !_this.noZoom ) {
379 |
380 | _state = STATE.ZOOM;
381 |
382 | } else if ( event.keyCode === _this.keys[ STATE.PAN ] && !_this.noPan ) {
383 |
384 | _state = STATE.PAN;
385 |
386 | }
387 |
388 | }
389 |
390 | function keyup( event ) {
391 |
392 | if ( _this.enabled === false ) return;
393 |
394 | _state = _prevState;
395 |
396 | window.addEventListener( 'keydown', keydown, false );
397 |
398 | }
399 |
400 | function mousedown( event ) {
401 |
402 | if ( _this.enabled === false ) return;
403 |
404 | event.preventDefault();
405 | event.stopPropagation();
406 |
407 | if ( _state === STATE.NONE ) {
408 |
409 | _state = event.button;
410 |
411 | }
412 |
413 | if ( _state === STATE.ROTATE && !_this.noRotate ) {
414 |
415 | _rotateStart.copy( getMouseProjectionOnBall( event.pageX, event.pageY ) );
416 | _rotateEnd.copy( _rotateStart );
417 |
418 | } else if ( _state === STATE.ZOOM && !_this.noZoom ) {
419 |
420 | _zoomStart.copy( getMouseOnScreen( event.pageX, event.pageY ) );
421 | _zoomEnd.copy(_zoomStart);
422 |
423 | } else if ( _state === STATE.PAN && !_this.noPan ) {
424 |
425 | _panStart.copy( getMouseOnScreen( event.pageX, event.pageY ) );
426 | _panEnd.copy(_panStart)
427 |
428 | }
429 |
430 | document.addEventListener( 'mousemove', mousemove, false );
431 | document.addEventListener( 'mouseup', mouseup, false );
432 |
433 | _this.dispatchEvent( startEvent );
434 |
435 | }
436 |
437 | function mousemove( event ) {
438 |
439 | if ( _this.enabled === false ) return;
440 |
441 | event.preventDefault();
442 | event.stopPropagation();
443 |
444 | if ( _state === STATE.ROTATE && !_this.noRotate ) {
445 |
446 | _rotateEnd.copy( getMouseProjectionOnBall( event.pageX, event.pageY ) );
447 |
448 | } else if ( _state === STATE.ZOOM && !_this.noZoom ) {
449 |
450 | _zoomEnd.copy( getMouseOnScreen( event.pageX, event.pageY ) );
451 |
452 | } else if ( _state === STATE.PAN && !_this.noPan ) {
453 |
454 | _panEnd.copy( getMouseOnScreen( event.pageX, event.pageY ) );
455 |
456 | }
457 |
458 | }
459 |
	// End a mouse drag: clear the interaction state and detach the
	// document-level listeners installed by mousedown().
	function mouseup( event ) {

		if ( _this.enabled === false ) return;

		event.preventDefault();
		event.stopPropagation();

		_state = STATE.NONE;

		document.removeEventListener( 'mousemove', mousemove );
		document.removeEventListener( 'mouseup', mouseup );
		_this.dispatchEvent( endEvent );

	}
474 |
	// Convert a wheel event into a zoom delta, normalizing across the two
	// legacy wheel-event conventions.
	function mousewheel( event ) {

		if ( _this.enabled === false ) return;

		event.preventDefault();
		event.stopPropagation();

		var delta = 0;

		if ( event.wheelDelta ) { // WebKit / Opera / Explorer 9

			delta = event.wheelDelta / 40;

		} else if ( event.detail ) { // Firefox

			delta = - event.detail / 3;

		}

		// Accumulate into the zoom start point; zoomCamera() consumes it.
		_zoomStart.y += delta * 0.01;
		_this.dispatchEvent( startEvent );
		_this.dispatchEvent( endEvent );

	}
499 |
	// Begin a touch gesture: one finger rotates; two fingers combine
	// pinch-zoom (tracked by inter-finger distance) with pan (tracked by the
	// midpoint of the two touches).
	function touchstart( event ) {

		if ( _this.enabled === false ) return;

		switch ( event.touches.length ) {

			case 1:
				_state = STATE.TOUCH_ROTATE;
				_rotateStart.copy( getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ) );
				_rotateEnd.copy( _rotateStart );
				break;

			case 2:
				_state = STATE.TOUCH_ZOOM_PAN;
				var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
				var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
				_touchZoomDistanceEnd = _touchZoomDistanceStart = Math.sqrt( dx * dx + dy * dy );

				var x = ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX ) / 2;
				var y = ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY ) / 2;
				_panStart.copy( getMouseOnScreen( x, y ) );
				_panEnd.copy( _panStart );
				break;

			default:
				_state = STATE.NONE;

		}
		_this.dispatchEvent( startEvent );


	}
532 |
	// Track an in-progress touch gesture: update the rotate end point for one
	// finger, or the pinch distance and pan midpoint for two fingers.
	function touchmove( event ) {

		if ( _this.enabled === false ) return;

		event.preventDefault();
		event.stopPropagation();

		switch ( event.touches.length ) {

			case 1:
				_rotateEnd.copy( getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ) );
				break;

			case 2:
				var dx = event.touches[ 0 ].pageX - event.touches[ 1 ].pageX;
				var dy = event.touches[ 0 ].pageY - event.touches[ 1 ].pageY;
				_touchZoomDistanceEnd = Math.sqrt( dx * dx + dy * dy );

				var x = ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX ) / 2;
				var y = ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY ) / 2;
				_panEnd.copy( getMouseOnScreen( x, y ) );
				break;

			default:
				_state = STATE.NONE;

		}

	}
562 |
	// Finish a touch gesture: snap start points to the final end points so no
	// residual delta is applied, then clear the state and signal the end.
	function touchend( event ) {

		if ( _this.enabled === false ) return;

		switch ( event.touches.length ) {

			case 1:
				_rotateEnd.copy( getMouseProjectionOnBall( event.touches[ 0 ].pageX, event.touches[ 0 ].pageY ) );
				_rotateStart.copy( _rotateEnd );
				break;

			case 2:
				_touchZoomDistanceStart = _touchZoomDistanceEnd = 0;

				var x = ( event.touches[ 0 ].pageX + event.touches[ 1 ].pageX ) / 2;
				var y = ( event.touches[ 0 ].pageY + event.touches[ 1 ].pageY ) / 2;
				_panEnd.copy( getMouseOnScreen( x, y ) );
				_panStart.copy( _panEnd );
				break;

		}

		_state = STATE.NONE;
		_this.dispatchEvent( endEvent );

	}
589 |
590 | this.domElement.addEventListener( 'contextmenu', function ( event ) { event.preventDefault(); }, false );
591 |
592 | this.domElement.addEventListener( 'mousedown', mousedown, false );
593 |
594 | this.domElement.addEventListener( 'mousewheel', mousewheel, false );
595 | this.domElement.addEventListener( 'DOMMouseScroll', mousewheel, false ); // firefox
596 |
597 | this.domElement.addEventListener( 'touchstart', touchstart, false );
598 | this.domElement.addEventListener( 'touchend', touchend, false );
599 | this.domElement.addEventListener( 'touchmove', touchmove, false );
600 |
601 | window.addEventListener( 'keydown', keydown, false );
602 | window.addEventListener( 'keyup', keyup, false );
603 |
604 | this.handleResize();
605 |
606 | // force an update at start
607 | this.update();
608 |
609 | };
610 |
// Inherit dispatchEvent/addEventListener etc. from THREE.EventDispatcher.
THREE.TrackballControls.prototype = Object.create( THREE.EventDispatcher.prototype );
THREE.TrackballControls.prototype.constructor = THREE.TrackballControls;
613 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/helpers.js:
--------------------------------------------------------------------------------
1 | "use strict";
2 |
3 | /*
4 | Convert an array of arrays into a row-major flat buffer.
5 | */
/*
Pack an array of equal-length rows into a single row-major Float32Array.
Row/column sizes are taken from the outer array and its first row.
*/
function flatten_Float32Array( array ) {
	const nrows = array.length;
	const ncols = array[0].length;

	const flat = new Float32Array( nrows*ncols );
	for( let r = 0; r < nrows; ++r ) {
		const base = r*ncols;
		const src = array[r];
		for( let c = 0; c < ncols; ++c ) {
			flat[base + c] = src[c];
		}
	}
	return flat;
}
/*
Pack an array of equal-length rows into a single row-major Int32Array.
Row/column sizes are taken from the outer array and its first row.
*/
function flatten_Int32Array( array ) {
	const nrows = array.length;
	const ncols = array[0].length;

	const flat = new Int32Array( nrows*ncols );
	for( let r = 0; r < nrows; ++r ) {
		const base = r*ncols;
		const src = array[r];
		for( let c = 0; c < ncols; ++c ) {
			flat[base + c] = src[c];
		}
	}
	return flat;
}
30 | /*
31 | Expand a row-major flat buffer into an array of arrays with n rows.
32 | */
/*
Expand a row-major flat buffer into an array of arrays with n rows.
`data` is anything a Float32Array can be constructed from (e.g. an
ArrayBuffer); the column count is inferred from the buffer length.
Logs an error (but still proceeds) if the length is not divisible by nrows.
*/
function inflate_Float32Array_2D( data, nrows ) {
	let array = new Float32Array( data );

	// FIX: the error message previously named the wrong function
	// ("inflate_Float32Array()"), which made the log misleading.
	if( array.length % nrows !== 0 ) console.error( "inflate_Float32Array_2D() called but dimensions are impossible." );

	var ncols = array.length / nrows;
	var result = Array(nrows);
	for( let row = 0; row < nrows; ++row ) {
		result[row] = Array(ncols);
		for( let col = 0; col < ncols; ++col ) {
			result[row][col] = array[row*ncols+col];
		}
	}

	return result;
}
49 | /*
50 | Expand a row-major flat buffer into a length-N array of length-M arrays of length-K arrays.
51 | */
/*
Expand a row-major flat buffer into a length-N array of length-M arrays of
length-K arrays. The incoming bytes are uint8; each is normalized to a
float in [0,1]. Logs an error (but still proceeds) on a size mismatch.
*/
function inflate_Float32Array_3D( data, N, M, K ) {
	// HACK: Actually we're getting Uint8; convert to Float32.
	// let array = new Float32Array( data );
	let array = new Uint8Array( data );

	// FIX: the error message previously named the wrong function
	// ("inflate_Float32Array()"), which made the log misleading.
	if( array.length !== N*M*K ) console.error( "inflate_Float32Array_3D() called but dimensions are impossible." );

	var result = Array(N);
	for( let row = 0; row < N; ++row ) {
		result[row] = Array(M);
		for( let col = 0; col < M; ++col ) {
			result[row][col] = Array(K);
			for( let channel = 0; channel < K; ++channel ) {
				// Normalize uint8 [0,255] to float [0,1].
				result[row][col][channel] = array[ (row*M+col)*K + channel ]/255.0;
			}
		}
	}

	return result;
}
72 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/libs/stats.min.js:
--------------------------------------------------------------------------------
// stats.js - http://github.com/mrdoob/stats.js
// NOTE: vendored, minified third-party library (REVISION 11). Do not edit by
// hand; obtain readable source and updates from the upstream repository.
var Stats=function(){var l=Date.now(),m=l,g=0,n=Infinity,o=0,h=0,p=Infinity,q=0,r=0,s=0,f=document.createElement("div");f.id="stats";f.addEventListener("mousedown",function(b){b.preventDefault();t(++s%2)},!1);f.style.cssText="width:80px;opacity:0.9;cursor:pointer";var a=document.createElement("div");a.id="fps";a.style.cssText="padding:0 0 3px 3px;text-align:left;background-color:#002";f.appendChild(a);var i=document.createElement("div");i.id="fpsText";i.style.cssText="color:#0ff;font-family:Helvetica,Arial,sans-serif;font-size:9px;font-weight:bold;line-height:15px";
i.innerHTML="FPS";a.appendChild(i);var c=document.createElement("div");c.id="fpsGraph";c.style.cssText="position:relative;width:74px;height:30px;background-color:#0ff";for(a.appendChild(c);74>c.children.length;){var j=document.createElement("span");j.style.cssText="width:1px;height:30px;float:left;background-color:#113";c.appendChild(j)}var d=document.createElement("div");d.id="ms";d.style.cssText="padding:0 0 3px 3px;text-align:left;background-color:#020;display:none";f.appendChild(d);var k=document.createElement("div");
k.id="msText";k.style.cssText="color:#0f0;font-family:Helvetica,Arial,sans-serif;font-size:9px;font-weight:bold;line-height:15px";k.innerHTML="MS";d.appendChild(k);var e=document.createElement("div");e.id="msGraph";e.style.cssText="position:relative;width:74px;height:30px;background-color:#0f0";for(d.appendChild(e);74>e.children.length;)j=document.createElement("span"),j.style.cssText="width:1px;height:30px;float:left;background-color:#131",e.appendChild(j);var t=function(b){s=b;switch(s){case 0:a.style.display=
"block";d.style.display="none";break;case 1:a.style.display="none",d.style.display="block"}};return{REVISION:11,domElement:f,setMode:t,begin:function(){l=Date.now()},end:function(){var b=Date.now();g=b-l;n=Math.min(n,g);o=Math.max(o,g);k.textContent=g+" MS ("+n+"-"+o+")";var a=Math.min(30,30-30*(g/200));e.appendChild(e.firstChild).style.height=a+"px";r++;b>m+1E3&&(h=Math.round(1E3*r/(b-m)),p=Math.min(p,h),q=Math.max(q,h),i.textContent=h+" FPS ("+p+"-"+q+")",a=Math.min(30,30-30*(h/100)),c.appendChild(c.firstChild).style.height=
a+"px",m=b,r=0);return b},update:function(){l=this.end()}}};
7 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/purl.js:
--------------------------------------------------------------------------------
1 | /*
2 | * Purl (A JavaScript URL parser) v2.3.1
 * Developed and maintained by Mark Perkins, mark@allmarkedup.com
4 | * Source repository: https://github.com/allmarkedup/jQuery-URL-Parser
5 | * Licensed under an MIT-style license. See https://github.com/allmarkedup/jQuery-URL-Parser/blob/master/LICENSE for details.
6 | */
7 |
8 | ;(function(factory) {
9 | if (typeof define === 'function' && define.amd) {
10 | define(factory);
11 | } else {
12 | window.purl = factory();
13 | }
14 | })(function() {
15 |
    // Map from tag name to the attribute on that tag which carries a URL
    // (used by the jQuery plugin to extract a URL from a selected element).
    var tag2attr = {
        a : 'href',
        img : 'src',
        form : 'action',
        base : 'href',
        script : 'src',
        iframe : 'src',
        link : 'href',
        embed : 'src',
        object : 'data'
    },

    key = ['source', 'protocol', 'authority', 'userInfo', 'user', 'password', 'host', 'port', 'relative', 'path', 'directory', 'file', 'query', 'fragment'], // keys available to query

    aliases = { 'anchor' : 'fragment' }, // aliases for backwards compatibility

    parser = {
        strict : /^(?:([^:\/?#]+):)?(?:\/\/((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?))?((((?:[^?#\/]*\/)*)([^?#]*))(?:\?([^#]*))?(?:#(.*))?)/, //less intuitive, more accurate to the specs
        loose : /^(?:(?![^:@]+:[^:@\/]*@)([^:\/?#.]+):)?(?:\/\/)?((?:(([^:@]*):?([^:@]*))?@)?([^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/ // more intuitive, fails on relative paths and deviates from specs
    },

    // Matches purely numeric strings (array indices inside query-string keys).
    isint = /^[0-9]+$/;
38 |
    // Split a URL into its components via the strict or loose regex, then
    // derive parsed query/fragment parameter maps and path/fragment segments.
    // Returns { attr: {...}, param: {...}, seg: {...} }.
    function parseUri( url, strictMode ) {
        var str = decodeURI( url ),
            res = parser[ strictMode || false ? 'strict' : 'loose' ].exec( str ),
            uri = { attr : {}, param : {}, seg : {} },
            i = 14;

        // The regexes produce 14 capture groups, in the same order as `key`.
        while ( i-- ) {
            uri.attr[ key[i] ] = res[i] || '';
        }

        // build query and fragment parameters
        uri.param['query'] = parseString(uri.attr['query']);
        uri.param['fragment'] = parseString(uri.attr['fragment']);

        // split path and fragment into segments
        uri.seg['path'] = uri.attr.path.replace(/^\/+|\/+$/g,'').split('/');
        uri.seg['fragment'] = uri.attr.fragment.replace(/^\/+|\/+$/g,'').split('/');

        // compile a 'base' domain attribute
        uri.attr['base'] = uri.attr.host ? (uri.attr.protocol ? uri.attr.protocol+'://'+uri.attr.host : uri.attr.host) + (uri.attr.port ? ':'+uri.attr.port : '') : '';

        return uri;
    }
62 |
63 | function getAttrName( elm ) {
64 | var tn = elm.tagName;
65 | if ( typeof tn !== 'undefined' ) return tag2attr[tn.toLowerCase()];
66 | return tn;
67 | }
68 |
69 | function promote(parent, key) {
70 | if (parent[key].length === 0) return parent[key] = {};
71 | var t = {};
72 | for (var i in parent[key]) t[i] = parent[key][i];
73 | parent[key] = t;
74 | return t;
75 | }
76 |
    // Recursively assign `val` into the nested structure described by `parts`
    // (the pieces of a bracketed query key such as a[b][0], split on '[').
    // Arrays are promoted to objects when a non-numeric index appears.
    function parse(parts, parent, key, val) {
        var part = parts.shift();
        if (!part) {
            // No more nesting: store the value, merging repeats into arrays.
            if (isArray(parent[key])) {
                parent[key].push(val);
            } else if ('object' == typeof parent[key]) {
                parent[key] = val;
            } else if ('undefined' == typeof parent[key]) {
                parent[key] = val;
            } else {
                parent[key] = [parent[key], val];
            }
        } else {
            var obj = parent[key] = parent[key] || [];
            if (']' == part) {
                // "key[]" push syntax.
                if (isArray(obj)) {
                    if ('' !== val) obj.push(val);
                } else if ('object' == typeof obj) {
                    obj[keys(obj).length] = val;
                } else {
                    obj = parent[key] = [parent[key], val];
                }
            } else if (~part.indexOf(']')) {
                part = part.substr(0, part.length - 1);
                if (!isint.test(part) && isArray(obj)) obj = promote(parent, key);
                parse(parts, obj, part, val);
                // key
                // NOTE(review): this branch and the else below are identical;
                // kept as-is to match upstream purl.js behavior exactly.
            } else {
                if (!isint.test(part) && isArray(obj)) obj = promote(parent, key);
                parse(parts, obj, part, val);
            }
        }
    }
110 |
    // Merge one key/value pair into the accumulator `parent` (which wraps its
    // result in `parent.base`). Bracketed keys are delegated to parse();
    // plain keys go through set(), promoting the base to an object when a
    // non-numeric key arrives.
    function merge(parent, key, val) {
        if (~key.indexOf(']')) {
            var parts = key.split('[');
            parse(parts, parent, 'base', val);
        } else {
            if (!isint.test(key) && isArray(parent.base)) {
                var t = {};
                for (var k in parent.base) t[k] = parent.base[k];
                parent.base = t;
            }
            if (key !== '') {
                set(parent.base, key, val);
            }
        }
        return parent;
    }
127 |
    // Parse a query/fragment string ("a=1&b[0]=2;c") into a nested
    // object/array structure. Pairs split on '&' or ';'.
    function parseString(str) {
        return reduce(String(str).split(/&|;/), function(ret, pair) {
            try {
                pair = decodeURIComponent(pair.replace(/\+/g, ' '));
            } catch(e) {
                // ignore
            }
            // Split each pair at the '=' — but when the key is bracketed
            // (a[x]=v), split at the last top-level brace position instead so
            // '=' characters inside brackets stay part of the key.
            var eql = pair.indexOf('='),
                brace = lastBraceInKey(pair),
                key = pair.substr(0, brace || eql),
                val = pair.substr(brace || eql, pair.length);

            val = val.substr(val.indexOf('=') + 1, val.length);

            // A bare token with no '=' becomes a key with an empty value.
            if (key === '') {
                key = pair;
                val = '';
            }

            return merge(ret, key, val);
        }, { base: {} }).base;
    }
150 |
151 | function set(obj, key, val) {
152 | var v = obj[key];
153 | if (typeof v === 'undefined') {
154 | obj[key] = val;
155 | } else if (isArray(v)) {
156 | v.push(val);
157 | } else {
158 | obj[key] = [v, val];
159 | }
160 | }
161 |
162 | function lastBraceInKey(str) {
163 | var len = str.length,
164 | brace,
165 | c;
166 | for (var i = 0; i < len; ++i) {
167 | c = str[i];
168 | if (']' == c) brace = false;
169 | if ('[' == c) brace = true;
170 | if ('=' == c && !brace) return i;
171 | }
172 | }
173 |
174 | function reduce(obj, accumulator){
175 | var i = 0,
176 | l = obj.length >> 0,
177 | curr = arguments[2];
178 | while (i < l) {
179 | if (i in obj) curr = accumulator.call(undefined, curr, obj[i], i, obj);
180 | ++i;
181 | }
182 | return curr;
183 | }
184 |
185 | function isArray(vArg) {
186 | return Object.prototype.toString.call(vArg) === "[object Array]";
187 | }
188 |
189 | function keys(obj) {
190 | var key_array = [];
191 | for ( var prop in obj ) {
192 | if ( obj.hasOwnProperty(prop) ) key_array.push(prop);
193 | }
194 | return key_array;
195 | }
196 |
    // Public entry point: parse `url` (defaults to the current location) and
    // return an accessor object over its attributes, parameters and segments.
    // Calling purl(true) parses the current location in strict mode.
    function purl( url, strictMode ) {
        if ( arguments.length === 1 && url === true ) {
            strictMode = true;
            url = undefined;
        }
        strictMode = strictMode || false;
        url = url || window.location.toString();

        return {

            data : parseUri(url, strictMode),

            // get various attributes from the URI
            attr : function( attr ) {
                attr = aliases[attr] || attr;
                return typeof attr !== 'undefined' ? this.data.attr[attr] : this.data.attr;
            },

            // return query string parameters
            param : function( param ) {
                return typeof param !== 'undefined' ? this.data.param.query[param] : this.data.param.query;
            },

            // return fragment parameters
            fparam : function( param ) {
                return typeof param !== 'undefined' ? this.data.param.fragment[param] : this.data.param.fragment;
            },

            // return path segments (1-indexed; negative counts from the end)
            segment : function( seg ) {
                if ( typeof seg === 'undefined' ) {
                    return this.data.seg.path;
                } else {
                    seg = seg < 0 ? this.data.seg.path.length + seg : seg - 1; // negative segments count from the end
                    return this.data.seg.path[seg];
                }
            },

            // return fragment segments (1-indexed; negative counts from the end)
            fsegment : function( seg ) {
                if ( typeof seg === 'undefined' ) {
                    return this.data.seg.fragment;
                } else {
                    seg = seg < 0 ? this.data.seg.fragment.length + seg : seg - 1; // negative segments count from the end
                    return this.data.seg.fragment[seg];
                }
            }

        };

    }
    // Optional jQuery integration: adds $(sel).url() (parsing the element's
    // URL-bearing attribute per tag2attr) and $.url. No-op when $ is absent.
    purl.jQuery = function($){
        if ($ != null) {
            $.fn.url = function( strictMode ) {
                var url = '';
                if ( this.length ) {
                    url = $(this).attr( getAttrName(this[0]) ) || '';
                }
                return purl( url, strictMode );
            };

            $.url = purl;
        }
    };
262 |
263 | purl.jQuery(window.jQuery);
264 |
265 | return purl;
266 |
267 | });
268 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/js/websocket-client.js:
--------------------------------------------------------------------------------
1 | // @flow
2 |
3 | /**
4 | * An asynchronous WebSocket client.
5 | * @example
6 | * // Set up connection.
7 | * const webSocketClient = new WebSocketClient;
8 | * // Connect.
9 | * await webSocketClient.connect('ws://www.example.com/');
10 | * // Send is synchronous.
11 | * webSocketClient.send('Hello!');
12 | * // Receive is asynchronous.
13 | * console.log(await webSocketClient.receive());
14 | * // See if there are any more messages received.
15 | * if (webSocketClient.dataAvailable !== 0) {
16 | * console.log(await webSocketClient.receive());
17 | * }
18 | * // Close the connection.
19 | * await webSocketClient.disconnect();
20 | */
class WebSocketClient {

  constructor() {
    this._reset();
  }

  /**
   * Whether a connection is currently open.
   * @returns true if the connection is open.
   */
  get connected() {
    // Checking != null also checks against undefined.
    return this._socket != null && this._socket.readyState === WebSocket.OPEN;
  }

  /**
   * The number of messages available to receive.
   * @returns The number of queued messages that can be retrieved with {@link #receive}
   */
  get dataAvailable() {
    return this._receiveDataQueue.length;
  }

  /**
   * Sets up a WebSocket connection to specified url. Resolves when the
   * connection is established. Can be called again to reconnect to any url.
   */
  connect(url, protocols) {
    // Closing any prior connection first ensures queues are reset before
    // the new socket starts delivering messages.
    return this.disconnect().then(() => {
      this._reset();

      this._socket = new WebSocket(url, protocols);
      this._socket.binaryType = 'arraybuffer';
      return this._setupListenersOnConnect();
    });
  }

  /**
   * Send data through the websocket.
   * Must be connected. See {@link #connected}.
   */
  send(data) {
    if (!this.connected) {
      throw this._closeEvent || new Error('Not connected.');
    }

    this._socket.send(data);
  }

  /**
   * Asynchronously receive data from the websocket.
   * Resolves immediately if there is buffered, unreceived data.
   * Otherwise, resolves with the next received message,
   * or rejects if disconnected.
   * @returns A promise that resolves with the data received.
   */
  receive() {
    if (this._receiveDataQueue.length !== 0) {
      return Promise.resolve(this._receiveDataQueue.shift());
    }

    if (!this.connected) {
      return Promise.reject(this._closeEvent || new Error('Not connected.'));
    }

    // No buffered data: park a resolver that handleMessage (or the close
    // handler) will settle later.
    const receivePromise = new Promise((resolve, reject) => {
      this._receiveCallbacksQueue.push({ resolve, reject });
    });

    return receivePromise;
  }

  /**
   * Initiates the close handshake if there is an active connection.
   * Returns a promise that will never reject.
   * The promise resolves once the WebSocket connection is closed.
   */
  disconnect(code, reason) {
    if(!this.connected) {
      return Promise.resolve(this._closeEvent);
    }

    return new Promise((resolve, reject) => {
      // It's okay to call resolve/reject multiple times in a promise.
      const callbacks = {
        resolve: dummy => {
          // Make sure this object always stays in the queue
          // until callbacks.reject() (which is resolve) is called.
          this._receiveCallbacksQueue.push(callbacks);
        },

        reject: resolve
      };

      this._receiveCallbacksQueue.push(callbacks);
      // After this, we will imminently get a close event.
      // Therefore, this promise will resolve.
      this._socket.close(code, reason);
    });
  }

  /**
   * Sets up the event listeners, which do the bulk of the work.
   * Resolves when the socket opens; rejects on a socket error.
   * @private
   */
  _setupListenersOnConnect() {
    const socket = this._socket;

    return new Promise((resolve, reject) => {

      const handleMessage = event => {
        const messageEvent = event;
        // The cast was necessary because Flow's libdef's don't contain
        // a MessageEventListener definition.

        // Hand the message straight to a waiting receive() if any;
        // otherwise buffer it for a later receive().
        if (this._receiveCallbacksQueue.length !== 0) {
          this._receiveCallbacksQueue.shift().resolve(messageEvent.data);
          return;
        }

        this._receiveDataQueue.push(messageEvent.data);
      };

      const handleOpen = event => {
        socket.addEventListener('message', handleMessage);
        socket.addEventListener('close', event => {
          this._closeEvent = event;

          // Whenever a close event fires, the socket is effectively dead.
          // It's impossible for more messages to arrive.
          // If there are any promises waiting for messages, reject them.
          while (this._receiveCallbacksQueue.length !== 0) {
            this._receiveCallbacksQueue.shift().reject(this._closeEvent);
          }
        });
        resolve();
      };

      socket.addEventListener('error', reject);
      socket.addEventListener('open', handleOpen);
    });
  }

  /**
   * Clear the message/callback queues and any recorded close event.
   * @private
   */
  _reset() {
    this._receiveDataQueue = [];
    this._receiveCallbacksQueue = [];
    this._closeEvent = null;
  }
}
173 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/make_color_spiral.py:
--------------------------------------------------------------------------------
"""Generate a 1000-sample RGB color spiral and save it to color_spiral.js.

Red and green trace three full turns of a circle while blue increases
linearly, producing a helix through RGB space. The output JSON has the
shape {"float_colors": [[r, g, b], ...]} with channel values in [0, 1].
"""
from numpy import *
import json

ts = linspace(0,1,1000)
colors = asarray([ sin(6*pi*ts)*.5+.5, cos(6*pi*ts)*.5+.5, ts ]).T
# BUG FIX: json.dump() writes str, so the file must be opened in text mode
# ('w'); the original 'wb' raises TypeError under Python 3. A `with` block
# also guarantees the file is flushed and closed.
with open("color_spiral.js", 'w') as f:
    json.dump({'float_colors': colors.tolist()}, f)
7 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/oranges.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/image-layer-updating-GUI/oranges.jpg
--------------------------------------------------------------------------------
/image-layer-updating-GUI/palette tetrahedron.js:
--------------------------------------------------------------------------------
1 | {"vs":[[0,255,0],[255,0,0],[255,255,255],[0,0,0]],"faces":[[[255,0,0],[255,255,255],[0,0,0]],[[0,255,0],[255,255,255],[0,0,0]],[[0,255,0],[255,0,0],[0,0,0]],[[0,255,0],[255,0,0],[255,255,255]]]}
--------------------------------------------------------------------------------
/image-layer-updating-GUI/pigs.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/image-layer-updating-GUI/pigs.jpg
--------------------------------------------------------------------------------
/image-layer-updating-GUI/protocol.txt:
--------------------------------------------------------------------------------
1 | # Startup
2 | Browser:
3 | // Actually, use the WebSocketClient library because it supports await: https://github.com/jcao219/websocket-async
4 | https://github.com/yig/websocket-async/
5 | let socket = new WebSocket( "ws://localhost:4567" );
6 |
7 | # User loads an image. This happens in the browser.
8 | Browser:
9 | socket.send( "load-image" );
10 | socket.send( image_data );
11 | Server:
12 | // Create a server with the python websockets library: http://websockets.readthedocs.io/en/stable/intro.html
13 | if socket receives "load-image":
14 | compute RGBXY deconstruction
15 |
16 | # User changes a palette.
17 | User can load a palette by dragging and dropping onto the browser like before.
18 | User can edit a palette by interacting with the GUI to add a new color, delete a color, or modify a color.
19 |
20 | Browser:
21 | socket.send( "palette" )
22 | socket.send( "[ [ R, G, B ], [ R, G, B ], ... ]" )
23 | Server:
24 | if socket receives "palette":
25 | compute layer decomposition
26 | socket.send( convex_hull_edges )
27 | socket.send( len( layers ) )
28 | for layer in layers:
29 | socket.send( "layer #", layer )
30 | Browser:
31 | convex_hull_edges = socket.receive()
32 | num_layers = socket.receive()
	for i in range(num_layers):
34 | layers[i] = socket.receive()
35 | create an image from the data
36 | draw reconstruction
37 |
38 | # User requests an automatic palette.
39 | Browser:
40 | socket.send( "automatically-compute-palette" );
41 | Server:
42 | if socket receives "automatically-compute-palette":
43 | compute automatic palette
44 | socket.send( palette )
45 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/runboth.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ## Kill both with control-c
4 | ## From: https://superuser.com/questions/1118204/how-to-use-ctrlc-to-kill-all-background-processes-started-in-a-bash-script/1118226
5 | ## Use INT not SIGINT
6 | ## From: https://unix.stackexchange.com/questions/314554/why-do-i-get-an-error-message-when-trying-to-trap-a-sigint-signal
7 | trap 'kill $BGPID; exit' INT
8 | python3 server.py &
9 | BGPID=$!
10 | python3 -m http.server
11 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/server.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import asyncio
4 | import websockets
5 | import json
6 | import numpy as np
7 | import sys
8 | import os
9 | _parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
10 | sys.path.insert(0, _parent_dir)
11 |
12 | import Additive_mixing_layers_extraction
13 | Additive_mixing_layers_extraction.DEMO=True
14 | import RGBXY_method
15 |
16 | from scipy.spatial import ConvexHull
17 |
## A flag for using OpenCL.
## In particular for Docker, pocl doesn't implement something we need,
## so let's not install it in Docker and instead run without OpenCL.
## When using pocl, I get:
## Device side queue is unimplemented (clCreateCommandQueueWithProperties.c:93)
USE_OPENCL = False
try:
    import pyopencl_example
    if len( pyopencl_example.cl.get_platforms() ) > 0:
        USE_OPENCL = True
except Exception:
    ## Best-effort probe: any failure (missing module, broken OpenCL install)
    ## simply leaves USE_OPENCL False. FIX: a bare `except:` also swallowed
    ## KeyboardInterrupt/SystemExit; catch Exception instead.
    pass
print( "Using OpenCL:", USE_OPENCL )
30 |
31 | async def layer_server( websocket, path ):
32 | the_image = None
33 | RGBXY_mixing_weights=None
34 | data_hull=None
35 |
36 | async for msg in websocket:
37 | print (msg)
38 | if msg == "load-image":
39 | ## Receive parameters from the websocket.
40 | width_and_height = await websocket.recv()
41 | data = await websocket.recv()
42 |
43 | ## Parse the parameters.
44 | # print (width_and_height)
45 | width = int( width_and_height.split()[0] )
46 | height = int( width_and_height.split()[1] )
47 | the_image_new = np.frombuffer( data, dtype = np.uint8 ).reshape(height, width, 4 ).copy()
48 |
49 | ## Skip load image if we already have this exact image.
50 | if the_image is not None and np.all(the_image == the_image_new):
51 | print( "Skipping duplicate load-image." )
52 | continue
53 | else:
54 | the_image = the_image_new
55 |
56 | # the_image = np.frombuffer( data, dtype = np.uint8 ).copy()
57 | print( "Received an image with", the_image.nbytes, "bytes." )
58 |
59 | # Compute RGBXY_mixing_weights.
60 | print( "Computing RGBXY mixing weights ..." )
61 | X,Y=np.mgrid[0:the_image.shape[0], 0:the_image.shape[1]]
62 | XY=np.dstack((X*1.0/the_image.shape[0],Y*1.0/the_image.shape[1]))
63 | RGBXY_data=np.dstack((the_image[:,:,:3]/255.0, XY))
64 | print( "\tConvexHull 5D..." )
65 | data_hull=ConvexHull(RGBXY_data.reshape((-1,5)))
66 | print( "\t...finished" )
67 | print( "\tComputing W_RGBXY..." )
68 | RGBXY_mixing_weights=Additive_mixing_layers_extraction.recover_ASAP_weights_using_scipy_delaunay(data_hull.points[data_hull.vertices], data_hull.points, option=3)
69 | print( "\t...finished" )
70 | print( "... finished." )
71 |
72 |
73 | elif msg == "palette":
74 | ## Receive parameters from the websocket.
75 | print (the_image.shape)
76 | palette = await websocket.recv()
77 | print (palette)
78 |
79 | ## Parse the parameters.
80 | palette = json.loads( palette )
81 | palette = np.asarray(palette)/255.0
82 |
83 | print (palette)
84 | ## Compute something.
85 | # hull=ConvexHull(palette)
86 | # convex_hull_edges = hull.points[hull.simplices]
87 |
88 | num_layers = len( palette )
89 | print (num_layers)
90 |
91 |
92 | ### compute RGB_mixing_weights and use pyopencl version code to dot product with sparse RGBXY_mixing_weights
93 | img_data=(the_image[:,:,:3].reshape((-1,3))[data_hull.vertices]).reshape((-1,1,3))/255.0
94 | print (img_data.shape)
95 | #### delaunay triangulation.
96 | # w_rgb=RGBXY_method.run_one_ASAP(palette, img_data, None)
97 | #### star triangulation using close to black pigment as first color
98 | w_rgb=Additive_mixing_layers_extraction.Get_ASAP_weights_using_Tan_2016_triangulation_and_then_barycentric_coordinates(img_data, palette, "None", order=0)
99 |
100 | w_rgb=w_rgb.reshape((-1,num_layers))
101 |
102 | if USE_OPENCL:
103 | w_rgbxy_values=RGBXY_mixing_weights.data
104 | w_rgbxy_values=w_rgbxy_values.reshape((-1,6))
105 | w_rgbxy_indices=RGBXY_mixing_weights.indices.reshape((-1,6))
106 |
107 | mult, _ = pyopencl_example.prepare_openCL_multiplication( w_rgb, w_rgbxy_values, w_rgbxy_indices )
108 | final_mixing_weights=mult(w_rgb)
109 | else:
110 | final_mixing_weights = RGBXY_mixing_weights.dot( w_rgb )
111 |
112 | layers=final_mixing_weights.reshape((the_image.shape[0], the_image.shape[1], num_layers))
113 | print (layers.shape)
114 | ## Send data back.
115 | # print ("send hull edges")
116 | # await websocket.send( json.dumps( convex_hull_edges.tolist() ) )
117 | print( "Sending weights..." )
118 | # await websocket.send( json.dumps( layers.tolist() ) )
119 | # await websocket.send( np.ascontiguousarray( layers, np.float32 ).tobytes() )
120 | ## HACK: Send uint8 for speed.
121 | await websocket.send( np.ascontiguousarray( ( layers*255. ).round().clip(0,255), np.uint8 ).tobytes() )
122 | print( "... finished." )
123 |
124 | elif msg == "automatically-compute-palette":
125 | ## Receive parameters from the websocket.
126 | # No additional parameters. Compute an automatic palette for `the_image`.
127 | ## Compute palette.
128 | palette=Additive_mixing_layers_extraction.Hull_Simplification_determined_version(the_image[:,:,:3].reshape((-1,3))/255.0, "./example-", SAVE=False)
129 | hull=ConvexHull(palette)
130 | print ("finish compute palette")
131 | ## Send data back.
132 | await websocket.send( json.dumps( {'vs': (palette*255).tolist(), 'faces': (hull.points[hull.simplices]*255).tolist() } ) )
133 |
134 | elif msg == "user-choose-number-compute-palette":
135 | palette_size=await websocket.recv()
136 | palette_size=int(palette_size)
137 | print ("user choose palette size is: ", palette_size)
138 | ## Compute palette.
139 | palette=Additive_mixing_layers_extraction.Hull_Simplification_old(the_image[:,:,:3].reshape((-1,3))/255.0, palette_size, "./example-")
140 | hull=ConvexHull(palette)
141 | print ("finish compute palette")
142 | ## Send data back.
143 | await websocket.send( json.dumps( {'vs': (palette*255).tolist(), 'faces': (hull.points[hull.simplices]*255).tolist() } ) )
144 |
145 | elif msg == "random-add-one-more-color":
146 | palette = await websocket.recv()
147 | print (palette)
148 |
149 | ## Parse the parameters.
150 | palette = json.loads( palette )
151 | palette = np.asarray(palette)/255.0
152 |
153 | print (palette)
154 |
155 | hull=ConvexHull(palette)
156 | print ("finish compute palette")
157 | ## Send data back.
158 | await websocket.send( json.dumps( {'vs': (palette*255).tolist(), 'faces': (hull.points[hull.simplices]*255).tolist() } ) )
159 |
160 | else:
161 | print( "Unknown message:", msg )
162 |
163 |
164 |
port_websocket = 9988  # default WebSocket port; can be overridden with --port
port_http = 8000  # port for the optional static-file HTTP server (disabled below)
167 |
168 | ## Also start an http server on port 8000
169 | def serve_http( port ):
170 | import os
171 | pid = os.fork()
172 | ## If we are the child, serve files
173 | if pid == 0:
174 | import http.server
175 | import socketserver
176 | ## Via: https://stackoverflow.com/questions/4465959/python-errno-98-address-already-in-use/25529620#25529620
177 | socketserver.TCPServer.allow_reuse_address
178 | Handler = http.server.SimpleHTTPRequestHandler
179 | with socketserver.TCPServer(("localhost", port), Handler) as httpd:
180 | print("Serving HTTP on port", port)
181 | httpd.serve_forever()
182 |
## This is too annoying because of address re-use.
# serve_http( port_http )

## Allow the websocket port to be chosen on the command line.
import argparse
parser = argparse.ArgumentParser( description = "A compute server for interactive layer editing." )
parser.add_argument( "--port", type = int, default = port_websocket, help="The port to listen on." )
args = parser.parse_args()
port_websocket = args.port

print("WebSocket server on port", port_websocket )
## Listen on all interfaces; max_size = None removes the websocket message
## size limit so entire images can arrive in a single message.
start_server = websockets.serve( layer_server, '0.0.0.0', port_websocket, max_size = None )
asyncio.get_event_loop().run_until_complete(start_server)
## Block forever handling connections.
asyncio.get_event_loop().run_forever()
196 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/server_test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import asyncio
4 | import websockets
5 | # import json
6 | # import numpy as np
7 |
async def layer_server( websocket, path ):
    """Debugging stub: print every message received on the websocket."""
    async for message in websocket:
        print( message )
11 |
## Start a websocket echo-to-stdout server for manual testing.
port_websocket = 9988
print("WebSocket server on port", port_websocket )
## Bind only to localhost; max_size = None disables the message size limit.
start_server = websockets.serve( layer_server, 'localhost', port_websocket, max_size = None )
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
17 |
--------------------------------------------------------------------------------
/image-layer-updating-GUI/styles.css:
--------------------------------------------------------------------------------
1 |
2 | body {
3 | color: #cccccc;
4 | font-family:Monospace;
5 | font-size:13px;
6 |
7 | background-color: #333;
8 | margin: 0px;
9 | overflow: hidden;
10 |
11 | line-height: 20px;
12 |
13 | padding: 5px;
14 | }
15 |
16 |
17 |
18 | #container
19 | {
20 | width: 280px;
21 | height: 280px;
22 | z-index: 10;
23 | }
24 |
25 | #info {
26 | float: left;
27 | width: 50%;
28 | }
29 | .image_holder
30 | {
31 | float: left;
32 | }
33 | .image
34 | {
35 | max-width: 200px;
36 | height: auto;
37 | }
38 | #image_name_holder
39 | {
40 | max-width: 200px;
41 | }
42 |
43 | #reconstruction_holder
44 | {
45 | display: none;
46 | padding-left: 5px;
47 | }
48 | #img_difference_holder
49 | {
50 | display: none;
51 | padding-left: 5px;
52 | }
53 |
54 | #layers
55 | {
56 | overflow:scroll;
57 | float: left;
58 | width: 50%;
59 | height: 100%;
60 | }
61 | #layer_holder
62 | {
63 | line-height: 0px;
64 |
65 | background-image: linear-gradient(45deg, #444 25%, transparent 25%, transparent 75%, #444 75%), linear-gradient(45deg, #444 25%, #333 25%, #333 75%, #444 75%);
66 | background-size: 10px 10px;
67 | background-position: 0 0, 25px 25px;
68 | }
69 |
70 | #layer_holder img
71 | {
72 | max-width: 160px;
73 | margin: 2px;
74 | }
75 |
76 | .widget
77 | {
78 | clear: both;
79 | padding-top: 5px;
80 | }
81 |
82 | a {
83 |
84 | color: #0080ff;
85 | }
86 |
--------------------------------------------------------------------------------
/pyopencl_example.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import print_function, division
5 |
6 | import numpy as np
7 | import pyopencl as cl
8 | import time
9 |
def prepare_openCL_multiplication( w_rgb, w_rgbxy_values, w_rgbxy_indices ):
    """
    Set up an OpenCL context and buffers for repeatedly multiplying a sparse
    RGBXY weight matrix (6 nonzeros per row, given as `w_rgbxy_values` and
    `w_rgbxy_indices`, both (npix, 6)) by a dense `w_rgb` matrix
    (nmiddle, nlayers).

    Returns a pair of closures:
      actually_multiply( new_rgb_data ) -> (npix, nlayers) float32 product,
      get_times() -> array of wall-clock times of each multiplication.
    """
    ## rgbxy indices and values correspond.
    assert w_rgbxy_values.shape == w_rgbxy_indices.shape
    
    ## Our kernel function expects 32-bit values:
    w_rgb = w_rgb.astype(np.float32)
    w_rgbxy_values = w_rgbxy_values.astype(np.float32)
    w_rgbxy_indices = w_rgbxy_indices.astype(np.int32)
    
    npix = w_rgbxy_values.shape[0]
    
    ## Pad the number of rows up to a multiple of 16 so a local work-group
    ## size can evenly divide the global size below.
    padding = 16
    if padding is not None:
        npix_padded = ((npix-1)//padding+1)*padding
        print( "npix:", npix )
        print( "npix padded to %s:" % padding, npix_padded )
        print( "npix (mod) %s:" % padding, npix % padding )
        print( "npix padded (mod) %s:" % padding, npix_padded % padding )
        if npix != npix_padded:
            ## Add a few extra rows. Make sure to keep the dtype unchanged.
            w_rgbxy_values = np.append( w_rgbxy_values, np.zeros((npix_padded-npix,w_rgbxy_values.shape[1]),dtype=w_rgbxy_values.dtype), axis = 0 )
            w_rgbxy_indices = np.append( w_rgbxy_indices, np.zeros((npix_padded-npix,w_rgbxy_indices.shape[1]),dtype=w_rgbxy_indices.dtype), axis = 0 )
            # w_rgbxy_values = np.append( w_rgbxy_values, np.tile( w_rgbxy_values[-1:], (npix_padded-npix,1) ), axis = 0 )
            # w_rgbxy_indices = np.append( w_rgbxy_indices, np.tile( w_rgbxy_indices[-1:], (npix_padded-npix,1) ), axis = 0 )
    
    ## NOTE(review): with any `device` value other than 'ask'/'gpu'/'cpu',
    ## `my_gpu_devices` would be unbound and this raises NameError before the
    ## RuntimeError below; harmless while device is hard-coded to 'gpu'.
    device = 'gpu'
    if device == 'ask':
        ## Ask the user:
        ctx = cl.create_some_context()
    else:
        ## Choose CPU or GPU automatically.
        platform = cl.get_platforms()
        if device == 'gpu':
            my_gpu_devices = platform[0].get_devices(device_type=cl.device_type.GPU)
        ## Fall back to the CPU when no GPU device was found.
        if device == 'cpu' or len(my_gpu_devices) == 0:
            my_gpu_devices = platform[0].get_devices(device_type=cl.device_type.CPU)
        if len(my_gpu_devices) == 0:
            raise RuntimeError( "Unknown device: %s" % device )
        print( my_gpu_devices )
        ctx = cl.Context(devices=my_gpu_devices)
    
    queue = cl.CommandQueue(ctx)
    
    ## The sparse matrix (values and indices) is uploaded once and reused.
    mf = cl.mem_flags
    w_rgbxy_values_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=w_rgbxy_values)
    w_rgbxy_indices_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=w_rgbxy_indices)
    
    ## NO_COPY: share host memory with the device (USE_HOST_PTR) instead of
    ## copying buffers on every multiplication.
    NO_COPY = True
    if NO_COPY:
        w_rgb_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.USE_HOST_PTR, hostbuf=w_rgb)
    else:
        w_rgb_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=w_rgb)
    
    output_shape=(w_rgbxy_values.shape[0], w_rgb.shape[1])
    final_matrix = np.empty(output_shape).astype(np.float32)
    
    if NO_COPY:
        dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY | mf.USE_HOST_PTR, hostbuf=final_matrix )
    else:
        dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, final_matrix.nbytes )
    
    ## One work-item per output element (i = padded pixel row, j = layer);
    ## each accumulates the 6-nonzero sparse dot product for its element.
    prg = cl.Program(ctx, """
    __kernel void multiplymatrices(const unsigned int size, __global const float * w_rgb, __global const float * w_rgbxy_values, __global const int * w_rgbxy_indices, __global float * res) {
    
    int i = get_global_id(0);
    int j = get_global_id(1);
    
    res[i * size + j] = 0;
    
    for (int k = 0; k < 6; k++)
    {
    res[i * size + j] += w_rgbxy_values[i * 6 + k] * w_rgb[ w_rgbxy_indices[i * 6 + k] * size + j];
    }
    
    }
    """).build()
    
    ## NOTE(review): `reps` is unused in this function; repetition is handled
    ## by the caller (see openCL_multiplication).
    reps = 5
    
    all_times = []
    def actually_multiply( new_rgb_data ):
        ## Run one sparse-times-dense multiplication with updated RGB weights.
        nonlocal w_rgb_buf
        
        ## Overwrite in place so the USE_HOST_PTR buffer sees the new data.
        w_rgb[:] = new_rgb_data
        
        t0 = time.time()
        ## If we were really running this interactively, we would update w_rgb
        ## and keep w_rgbxy the same.
        if not NO_COPY:
            w_rgb_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=w_rgb)
        ## Run the code.
        ## Automatic
        localsize = None
        print( 'global size:', output_shape )
        if output_shape[0] % 4 == 0: localsize = (4,w_rgb.shape[1])
        # localsize = (2,1)
        print( 'local size:', localsize )
        event = prg.multiplymatrices(queue, output_shape, localsize, np.int32(output_shape[1]), w_rgb_buf, w_rgbxy_values_buf, w_rgbxy_indices_buf, dest_buf )
        ## Copy the result back.
        if NO_COPY:
            event.wait()
        else:
            cl.enqueue_copy(queue, final_matrix, dest_buf)
        t1= time.time()
        delta_t=t1-t0
        all_times.append( delta_t )
        
        # print( final_matrix[:10,:10] )
        
        # print( np.average( np.asarray( all_times ) ) )
        print( "Latest time:", delta_t )
        
        ## Trim the padding rows added above before returning.
        return final_matrix[:npix]
    
    def get_times():
        ## Wall-clock duration of every multiplication so far.
        return np.asarray( all_times )
    
    return actually_multiply, get_times
128 |
def openCL_multiplication( w_rgb, w_rgbxy_values, w_rgbxy_indices ):
    """
    Convenience wrapper: prepare the OpenCL multiplier once, run it several
    times to collect timing samples, and return the last product together
    with the recorded times.
    """
    mult, get_times = prepare_openCL_multiplication( w_rgb, w_rgbxy_values, w_rgbxy_indices )

    ## Repeat the multiplication so the timing statistics are meaningful.
    repetitions = 5
    final_matrix = None
    for _ in range( repetitions ):
        final_matrix = mult( w_rgb )

    print( final_matrix[:10,:10] )

    return final_matrix, get_times()
138 |
if __name__=="__main__":
    
    ## Synthetic benchmark: ~6 million "pixels", 3000 intermediate (hull)
    ## vertices, 6 palette layers.
    npix = 6*1000*1000
    nmiddle = 3000
    nlayers = 6
    
    ## Deterministic random test data.
    np.random.seed(0)
    w_rgbxy_values=np.random.random(npix*6).reshape((npix,6))*1.0
    w_rgb=np.random.random(nmiddle*nlayers).reshape((nmiddle,nlayers))*1.0
    ## BUGFIX: the original used .round(), which maps values in
    ## [nmiddle-0.5, nmiddle) to nmiddle -- one past the last valid row of
    ## w_rgb -- and the OpenCL kernel performs no bounds checking.
    ## Truncation via astype keeps every index in [0, nmiddle-1].
    w_rgbxy_indices=(np.random.random((npix,6))*nmiddle).astype(np.int32)
    final_matrix, times = openCL_multiplication( w_rgb, w_rgbxy_values, w_rgbxy_indices )
    
    print( 'OpenCL Multiplication times:' )
    print( times )
    print( 'min:', times.min() )
    print( 'max:', times.max() )
    print( 'average:', np.average( times ) )
156 |
--------------------------------------------------------------------------------
/requirements.frozen.txt:
--------------------------------------------------------------------------------
1 | numpy==1.16.3
2 | scipy==1.2.1
3 | Cython==0.29.7
4 | cvxopt==1.2.3
5 | Pillow==6.0.0
6 | pyopencl==2018.2.5
7 | websockets==7.0
8 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | Cython
4 | cvxopt
5 | Pillow
6 | pyopencl
7 | websockets
8 |
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-00.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-00.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-01.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-02.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-03.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-04.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-RGBXY_RGB_black_star_ASAP-palette_size-6-mixing_weights-05.png
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-convexhull_vertices-06.js:
--------------------------------------------------------------------------------
1 | {
2 | "vs": [
3 | [
4 | 255.0,
5 | 230.210430715785,
6 | 216.89041195049998
7 | ],
8 | [
9 | 231.205572599745,
10 | 205.363653746505,
11 | 143.77181964873
12 | ],
13 | [
14 | 0.0,
15 | 0.0,
16 | 0.0
17 | ],
18 | [
19 | 227.176852911045,
20 | 77.25346920243,
21 | 65.094289607505
22 | ],
23 | [
24 | 172.889631799125,
25 | 96.35591054838,
26 | 147.94547201666998
27 | ],
28 | [
29 | 56.55975738417,
30 | 225.20025570636002,
31 | 211.307605045695
32 | ]
33 | ],
34 | "faces": [
35 | [
36 | [
37 | 172.889631799125,
38 | 96.35591054838,
39 | 147.94547201666998
40 | ],
41 | [
42 | 56.55975738417,
43 | 225.20025570636002,
44 | 211.307605045695
45 | ],
46 | [
47 | 0.0,
48 | 0.0,
49 | 0.0
50 | ]
51 | ],
52 | [
53 | [
54 | 172.889631799125,
55 | 96.35591054838,
56 | 147.94547201666998
57 | ],
58 | [
59 | 56.55975738417,
60 | 225.20025570636002,
61 | 211.307605045695
62 | ],
63 | [
64 | 255.0,
65 | 230.210430715785,
66 | 216.89041195049998
67 | ]
68 | ],
69 | [
70 | [
71 | 231.205572599745,
72 | 205.363653746505,
73 | 143.77181964873
74 | ],
75 | [
76 | 56.55975738417,
77 | 225.20025570636002,
78 | 211.307605045695
79 | ],
80 | [
81 | 0.0,
82 | 0.0,
83 | 0.0
84 | ]
85 | ],
86 | [
87 | [
88 | 231.205572599745,
89 | 205.363653746505,
90 | 143.77181964873
91 | ],
92 | [
93 | 56.55975738417,
94 | 225.20025570636002,
95 | 211.307605045695
96 | ],
97 | [
98 | 255.0,
99 | 230.210430715785,
100 | 216.89041195049998
101 | ]
102 | ],
103 | [
104 | [
105 | 227.176852911045,
106 | 77.25346920243,
107 | 65.094289607505
108 | ],
109 | [
110 | 172.889631799125,
111 | 96.35591054838,
112 | 147.94547201666998
113 | ],
114 | [
115 | 255.0,
116 | 230.210430715785,
117 | 216.89041195049998
118 | ]
119 | ],
120 | [
121 | [
122 | 227.176852911045,
123 | 77.25346920243,
124 | 65.094289607505
125 | ],
126 | [
127 | 231.205572599745,
128 | 205.363653746505,
129 | 143.77181964873
130 | ],
131 | [
132 | 255.0,
133 | 230.210430715785,
134 | 216.89041195049998
135 | ]
136 | ],
137 | [
138 | [
139 | 227.176852911045,
140 | 77.25346920243,
141 | 65.094289607505
142 | ],
143 | [
144 | 172.889631799125,
145 | 96.35591054838,
146 | 147.94547201666998
147 | ],
148 | [
149 | 0.0,
150 | 0.0,
151 | 0.0
152 | ]
153 | ],
154 | [
155 | [
156 | 227.176852911045,
157 | 77.25346920243,
158 | 65.094289607505
159 | ],
160 | [
161 | 231.205572599745,
162 | 205.363653746505,
163 | 143.77181964873
164 | ],
165 | [
166 | 0.0,
167 | 0.0,
168 | 0.0
169 | ]
170 | ]
171 | ]
172 | }
--------------------------------------------------------------------------------
/test/turquoise groundtruth results/turquoise-convexhull_vertices-06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise groundtruth results/turquoise-convexhull_vertices-06.png
--------------------------------------------------------------------------------
/test/turquoise.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CraGL/fastLayerDecomposition/e3ddd15086b56dc293d945a5513c3bd58b32a582/test/turquoise.png
--------------------------------------------------------------------------------
/trimesh.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | from numpy import *
3 |
def mag2( vec ):
    """Return the squared length of `vec`, i.e. dot( vec, vec )."""
    squared_length = dot( vec, vec )
    return squared_length
def mag( vec ):
    """Return the Euclidean length of `vec`."""
    return sqrt( mag2( vec ) )
8 |
9 | class TriMesh( object ):
    def __init__( self ):
        """Create an empty triangle mesh with no cached derived data."""
        ## Core data: vertex positions and triangles (triples of vertex indices).
        self.vs = []
        self.faces = []
        
        ## Lazily computed caches; None means "not computed yet".
        self.__face_normals = None
        self.__face_areas = None
        self.__vertex_normals = None
        self.__vertex_areas = None
        self.__edges = None
        
        ## Half-edge connectivity caches (built by update_halfedges()).
        self.__halfedges = None
        self.__vertex_halfedges = None
        self.__face_halfedges = None
        self.__edge_halfedges = None
        self.__directed_edge2he_index = None
        
        ## NOTE(review): semantics defined by callers; within this class it is
        ## only copied by __deepcopy__().
        self.lifetime_counter = 0
27 |
28 | def copy( self ):
29 | import copy
30 | return copy.deepcopy( self )
31 |
    def __deepcopy__( self, memodict ):
        """
        Deep-copy protocol hook used by copy.deepcopy() (and self.copy()).
        Copies vertices, faces, uvs (if present), and the cheap cached arrays;
        the half-edge caches are deliberately not copied and will be rebuilt
        lazily on the copy.
        """
        result = TriMesh()
        
        ## Make a copy of vs and faces using array().
        ## But in case they weren't stored as arrays, return them as the type they were.
        ## This is important if they were lists, and someone expected to be able to call
        ## .append() or .extend() on them.
        result.vs = array( self.vs )
        if not isinstance( self.vs, ndarray ):
            result.vs = type( self.vs )( result.vs )
        
        result.faces = array( self.faces )
        if not isinstance( self.faces, ndarray ):
            result.faces = type( self.faces )( result.faces )
        
        ## uvs is an optional attribute; copy it only if the source has one.
        if hasattr( self, 'uvs' ):
            result.uvs = array( self.uvs )
            if not isinstance( self.uvs, ndarray ):
                result.uvs = type( self.uvs )( result.uvs )
        
        ## I could skip copying these cached values, but they are usually needed for rendering
        ## and copy quickly.
        if self.__face_normals is not None:
            result.__face_normals = self.__face_normals.copy()
        if self.__face_areas is not None:
            result.__face_areas = self.__face_areas.copy()
        if self.__vertex_normals is not None:
            result.__vertex_normals = self.__vertex_normals.copy()
        if self.__vertex_areas is not None:
            result.__vertex_areas = self.__vertex_areas.copy()
        if self.__edges is not None:
            result.__edges = list( self.__edges )
        
        ## I will skip copying these cached values, because they copy slowly and are
        ## not as commonly needed. They'll still be regenerated as needed.
        '''
        if self.__halfedges is not None:
            from copy import copy
            result.__halfedges = [ copy( he ) for he in self.__halfedges ]
        if self.__vertex_halfedges is not None:
            result.__vertex_halfedges = list( self.__vertex_halfedges )
        if self.__face_halfedges is not None:
            result.__face_halfedges = list( self.__face_halfedges )
        if self.__edge_halfedges is not None:
            result.__edge_halfedges = list( self.__edge_halfedges )
        if self.__directed_edge2he_index is not None:
            result.__directed_edge2he_index = dict( self.__directed_edge2he_index )
        '''
        
        result.lifetime_counter = self.lifetime_counter
        
        return result
84 |
    def update_face_normals_and_areas( self ):
        """
        Recompute self.__face_normals (unit normals, orientation from the
        vertex order of each face) and self.__face_areas (triangle areas).
        
        NOTE(review): a degenerate (zero-area) face produces a zero-length
        cross product, so the normalization below divides by zero — confirm
        inputs exclude degenerate triangles.
        """
        if self.__face_normals is None: self.__face_normals = zeros( ( len( self.faces ), 3 ) )
        if self.__face_areas is None: self.__face_areas = zeros( len( self.faces ) )
        
        ## We need subtraction between vertices.
        ## Convert vertices to arrays once here, or else we'd have to call asarray()
        ## ~6 times for each vertex.
        ## NOTE: If self.vs is already an array, then this code does nothing.
        ## TODO Q: Should I set self.vs = asarray( self.vs )? It might violate someone's
        ## assumption that self.vs is whatever indexable type they left it.
        ## In particular, this violates the ability of someone to .append() or .extend()
        ## self.vs.
        vs = asarray( self.vs )
        fs = asarray( self.faces, dtype = int )
        
        ## Slow:
        '''
        for f in xrange( len( self.faces ) ):
            face = self.faces[f]
            n = cross(
                vs[ face[1] ] - vs[ face[0] ],
                vs[ face[2] ] - vs[ face[1] ]
                )
            nmag = mag( n )
            self.__face_normals[f] = (1./nmag) * n
            self.__face_areas[f] = .5 * nmag
        '''
        ## ~Slow
        
        ## Fast:
        ## Cross product magnitude is twice the triangle area.
        self.__face_normals = cross( vs[ fs[:,1] ] - vs[ fs[:,0] ], vs[ fs[:,2] ] - vs[ fs[:,1] ] )
        self.__face_areas = sqrt((self.__face_normals**2).sum(axis=1))
        self.__face_normals /= self.__face_areas[:,newaxis]
        self.__face_areas *= 0.5
        ## ~Fast
        
        assert len( self.faces ) == len( self.__face_normals )
        assert len( self.faces ) == len( self.__face_areas )
123 |
124 |
    def get_face_normals( self ):
        """Return per-face unit normals, computing them lazily on first access."""
        if self.__face_normals is None: self.update_face_normals_and_areas()
        return self.__face_normals
    
    ## Read-only attribute access: mesh.face_normals
    face_normals = property( get_face_normals )
130 |
131 |
    def get_face_areas( self ):
        """Return per-face triangle areas, computing them lazily on first access."""
        if self.__face_areas is None: self.update_face_normals_and_areas()
        return self.__face_areas
    
    ## Read-only attribute access: mesh.face_areas
    face_areas = property( get_face_areas )
137 |
138 |
139 | def update_vertex_normals( self ):
140 | if self.__vertex_normals is None: self.__vertex_normals = zeros( ( len(self.vs), 3 ) )
141 |
142 | ## Slow:
143 | '''
144 | for vi in xrange( len( self.vs ) ):
145 | self.__vertex_normals[vi] = 0.
146 |
147 | for fi in self.vertex_face_neighbors( vi ):
148 | ## This matches the OpenMesh FAST vertex normals.
149 | #self.__vertex_normals[vi] += self.face_normals[ fi ]
150 | ## Area weighted
151 | self.__vertex_normals[vi] += self.face_normals[ fi ] * self.face_areas[ fi ]
152 |
153 | ## Now normalize the normals
154 | #self.__vertex_normals[vi] *= 1./mag( self.__vertex_normals[vi] )
155 | self.__vertex_normals *= 1./sqrt( ( self.__vertex_normals**2 ).sum(1) ).reshape( (len(self.vs), 1) )
156 | '''
157 | ## ~Slow
158 |
159 | ## Fast:
160 | fs = asarray( self.faces, dtype = int )
161 | ## This matches the OpenMesh FAST vertex normals.
162 | #fns = self.face_normals
163 | ## Area weighted
164 | fns = self.face_normals * self.face_areas[:,newaxis]
165 |
166 | self.__vertex_normals[:] = 0.
167 | ## I wish this worked, but it doesn't do the right thing with aliasing
168 | ## (when the same element appears multiple times in the slice).
169 | #self.__vertex_normals[ fs[:,0] ] += fns
170 | #self.__vertex_normals[ fs[:,1] ] += fns
171 | #self.__vertex_normals[ fs[:,2] ] += fns
172 | import itertools
173 | for c in (0,1,2):
174 | for i, n in itertools.izip( fs[:,c], fns ):
175 | self.__vertex_normals[ i ] += n
176 |
177 | self.__vertex_normals /= sqrt( ( self.__vertex_normals**2 ).sum(axis=1) )[:,newaxis]
178 | ## ~Fast
179 |
180 | assert len( self.vs ) == len( self.__vertex_normals )
181 |
    def get_vertex_normals( self ):
        """Return per-vertex unit normals, computing them lazily on first access."""
        if self.__vertex_normals is None: self.update_vertex_normals()
        return self.__vertex_normals
    
    ## Read-only attribute access: mesh.vertex_normals
    vertex_normals = property( get_vertex_normals )
187 |
188 |
189 | def update_vertex_areas( self ):
190 | if self.__vertex_areas is None: self.__vertex_areas = zeros( len(self.vs) )
191 |
192 | ## Slow:
193 | '''
194 | for vi in xrange( len( self.vs ) ):
195 | ## Try to compute proper area (if we have laplacian editing around).
196 | ## (This only matters for obtuse triangles.)
197 | try:
198 | #raise ImportError
199 | import laplacian_editing
200 | cot_alpha, cot_beta, area = laplacian_editing.cotangentWeights(
201 | self.vs[ vi ],
202 | [ self.vs[ vni ] for vni in self.vertex_vertex_neighbors( vi ) ],
203 | self.vertex_is_boundary( vi )
204 | )
205 | self.__vertex_areas[vi] = area
206 |
207 | ## Otherwise use 1/3 of the incident faces' areas
208 | except ImportError:
209 | self.__vertex_areas[vi] = 0.
210 | for fi in self.vertex_face_neighbors( vi ):
211 | self.__vertex_areas[vi] += self.face_areas[ fi ]
212 |
213 | self.__vertex_areas[vi] *= 1./3.
214 | '''
215 | ## ~Slow
216 |
217 | ## Fast:
218 | ## NOTE: This does not use laplacian_editing's so-called mixed area
219 | ## computation even if the module is present!
220 | ## (This only matters for obtuse triangles.)
221 | self.__vertex_areas[:] = 0.
222 |
223 | fs = asarray( self.faces, dtype = int )
224 | fas = self.__face_areas
225 | ## I wish this worked, but it doesn't do the right thing with aliasing
226 | ## (when the same element appears multiple times in the slice).
227 | #self.__vertex_areas[ fs[:,0] ] += fas
228 | #self.__vertex_areas[ fs[:,1] ] += fas
229 | #self.__vertex_areas[ fs[:,2] ] += fas
230 | import itertools
231 | for c in (0,1,2):
232 | for i, area in itertools.izip( fs[:,c], fas ):
233 | self.__vertex_areas[ i ] += area
234 |
235 | self.__vertex_areas /= 3.
236 | ## ~Fast
237 |
238 | assert len( self.vs ) == len( self.__vertex_areas )
239 |
    def get_vertex_areas( self ):
        """Return per-vertex areas, computing them lazily on first access."""
        if self.__vertex_areas is None: self.update_vertex_areas()
        return self.__vertex_areas
    
    ## Read-only attribute access: mesh.vertex_areas
    vertex_areas = property( get_vertex_areas )
245 |
246 |
247 | def update_edge_list( self ):
248 | #from sets import Set, ImmutableSet
249 | Set, ImmutableSet = set, frozenset
250 |
251 | ## We need a set of set-pairs of vertices, because edges are bidirectional.
252 | edges = Set()
253 | for face in self.faces:
254 | edges.add( ImmutableSet( ( face[0], face[1] ) ) )
255 | edges.add( ImmutableSet( ( face[1], face[2] ) ) )
256 | edges.add( ImmutableSet( ( face[2], face[0] ) ) )
257 |
258 | self.__edges = [ tuple( edge ) for edge in edges ]
259 |
    def get_edges( self ):
        """Return the undirected edge list as (i, j) tuples, built lazily."""
        if self.__edges is None: self.update_edge_list()
        return self.__edges
    
    ## Read-only attribute access: mesh.edges
    edges = property( get_edges )
265 |
266 |
    class HalfEdge( object ):
        """One half-edge record; every field is an index, -1 meaning none/boundary."""
        def __init__( self ):
            self.to_vertex = -1    # index of the vertex this half-edge points to
            self.face = -1         # index of the face to its left (-1 on the boundary)
            self.edge = -1         # index of the undirected edge it belongs to
            self.opposite_he = -1  # index of the oppositely-oriented half-edge
            self.next_he = -1      # index of the next half-edge around the face
274 |
275 | def update_halfedges( self ):
276 | '''
277 | Generates all half edge data structures for the mesh given by its vertices 'self.vs'
278 | and faces 'self.faces'.
279 |
280 | untested
281 | '''
282 |
283 | self.__halfedges = []
284 | self.__vertex_halfedges = None
285 | self.__face_halfedges = None
286 | self.__edge_halfedges = None
287 | self.__directed_edge2he_index = {}
288 |
289 | __directed_edge2face_index = {}
290 | for fi, face in enumerate( self.faces ):
291 | __directed_edge2face_index[ (face[0], face[1]) ] = fi
292 | __directed_edge2face_index[ (face[1], face[2]) ] = fi
293 | __directed_edge2face_index[ (face[2], face[0]) ] = fi
294 |
295 | def directed_edge2face_index( edge ):
296 | result = __directed_edge2face_index.get( edge, -1 )
297 |
298 | ## If result is -1, then there's no such face in the mesh.
299 | ## The edge must be a boundary edge.
300 | ## In this case, the reverse orientation edge must have a face.
301 | if -1 == result:
302 | assert edge[::-1] in __directed_edge2face_index
303 |
304 | return result
305 |
306 | self.__vertex_halfedges = [None] * len( self.vs )
307 | self.__face_halfedges = [None] * len( self.faces )
308 | self.__edge_halfedges = [None] * len( self.edges )
309 |
310 | for ei, edge in enumerate( self.edges ):
311 | he0 = self.HalfEdge()
312 | ## The face will be -1 if it is a boundary half-edge.
313 | he0.face = directed_edge2face_index( edge )
314 | he0.to_vertex = edge[1]
315 | he0.edge = ei
316 |
317 | he1 = self.HalfEdge()
318 | ## The face will be -1 if it is a boundary half-edge.
319 | he1.face = directed_edge2face_index( edge[::-1] )
320 | he1.to_vertex = edge[0]
321 | he1.edge = ei
322 |
323 | ## Add the HalfEdge structures to the list.
324 | he0index = len( self.__halfedges )
325 | self.__halfedges.append( he0 )
326 | he1index = len( self.__halfedges )
327 | self.__halfedges.append( he1 )
328 |
329 | ## Now we can store the opposite half-edge index.
330 | he0.opposite_he = he1index
331 | he1.opposite_he = he0index
332 |
333 | ## Also store the index in our __directed_edge2he_index map.
334 | assert edge not in self.__directed_edge2he_index
335 | assert edge[::-1] not in self.__directed_edge2he_index
336 | self.__directed_edge2he_index[ edge ] = he0index
337 | self.__directed_edge2he_index[ edge[::-1] ] = he1index
338 |
339 | ## If the vertex pointed to by a half-edge doesn't yet have an out-going
340 | ## halfedge, store the opposite halfedge.
341 | ## Also, if the vertex is a boundary vertex, make sure its
342 | ## out-going halfedge a boundary halfedge.
343 | ## NOTE: Halfedge data structure can't properly handle butterfly vertices.
344 | ## If the mesh has butterfly vertices, there will be multiple outgoing
345 | ## boundary halfedges. Because we have to pick one as the vertex's outgoing
346 | ## halfedge, we can't iterate over all neighbors, only a single wing of the
347 | ## butterfly.
348 | if self.__vertex_halfedges[ he0.to_vertex ] is None or -1 == he1.face:
349 | self.__vertex_halfedges[ he0.to_vertex ] = he0.opposite_he
350 | if self.__vertex_halfedges[ he1.to_vertex ] is None or -1 == he0.face:
351 | self.__vertex_halfedges[ he1.to_vertex ] = he1.opposite_he
352 |
353 | ## If the face pointed to by a half-edge doesn't yet have a
354 | ## halfedge pointing to it, store the halfedge.
355 | if -1 != he0.face and self.__face_halfedges[ he0.face ] is None:
356 | self.__face_halfedges[ he0.face ] = he0index
357 | if -1 != he1.face and self.__face_halfedges[ he1.face ] is None:
358 | self.__face_halfedges[ he1.face ] = he1index
359 |
360 | ## Store one of the half-edges for the edge.
361 | assert self.__edge_halfedges[ ei ] is None
362 | self.__edge_halfedges[ ei ] = he0index
363 |
364 | ## Now that all the half-edges are created, set the remaining next_he field.
365 | ## We can't yet handle boundary halfedges, so store them for later.
366 | boundary_heis = []
367 | for hei, he in enumerate( self.__halfedges ):
368 | ## Store boundary halfedges for later.
369 | if -1 == he.face:
370 | boundary_heis.append( hei )
371 | continue
372 |
373 | face = self.faces[ he.face ]
374 | i = he.to_vertex
375 | j = face[ ( list(face).index( i ) + 1 ) % 3 ]
376 |
377 | he.next_he = self.__directed_edge2he_index[ (i,j) ]
378 |
379 | ## Make a map from vertices to boundary halfedges (indices) originating from them.
380 | ## NOTE: There will only be multiple originating boundary halfedges at butterfly vertices.
381 | vertex2outgoing_boundary_hei = {}
382 | #from sets import Set
383 | Set = set
384 | for hei in boundary_heis:
385 | originating_vertex = self.__halfedges[ self.__halfedges[ hei ].opposite_he ].to_vertex
386 | vertex2outgoing_boundary_hei.setdefault(
387 | originating_vertex, Set()
388 | ).add( hei )
389 | if len( vertex2outgoing_boundary_hei[ originating_vertex ] ) > 1:
390 | print('Butterfly vertex encountered')
391 |
392 | ## For each boundary halfedge, make its next_he one of the boundary halfedges
393 | ## originating at its to_vertex.
394 | for hei in boundary_heis:
395 | he = self.__halfedges[ hei ]
396 | for outgoing_hei in vertex2outgoing_boundary_hei[ he.to_vertex ]:
397 | he.next_he = outgoing_hei
398 | vertex2outgoing_boundary_hei[ he.to_vertex ].remove( outgoing_hei )
399 | break
400 |
401 | assert False not in [ 0 == len( out_heis ) for out_heis in vertex2outgoing_boundary_hei.values() ]
402 |
403 | def he_index2directed_edge( self, he_index ):
404 | '''
405 | Given the index of a HalfEdge, returns the corresponding directed edge (i,j).
406 |
407 | untested
408 | '''
409 |
410 | he = self.halfedges[ he_index ]
411 | return ( self.halfedges[ he.opposite_he ].to_vertex, he.to_vertex )
412 |
413 | def directed_edge2he_index( self, edge ):
414 | '''
415 | Given a directed edge (i,j), returns the index of the HalfEdge class in
416 | halfedges().
417 |
418 | untested
419 | '''
420 |
421 | if self.__directed_edge2he_index is None: self.update_halfedges()
422 |
423 | edge = tuple( edge )
424 | return self.__directed_edge2he_index[ edge ]
425 |
426 | def get_halfedges( self ):
427 | '''
428 | Returns a list of all HalfEdge classes.
429 |
430 | untested
431 | '''
432 |
433 | if self.__halfedges is None: self.update_halfedges()
434 | return self.__halfedges
435 |
436 | halfedges = property( get_halfedges )
437 |
438 | def vertex_vertex_neighbors( self, vertex_index ):
439 | '''
440 | Returns the vertex neighbors (as indices) of the vertex 'vertex_index'.
441 |
442 | untested
443 | '''
444 |
445 | ## It's important to access self.halfedges first (which calls get_halfedges()),
446 | ## so that we're sure all halfedge info is generated.
447 | halfedges = self.halfedges
448 | result = []
449 | start_he = halfedges[ self.__vertex_halfedges[ vertex_index ] ]
450 | he = start_he
451 | while True:
452 | result.append( he.to_vertex )
453 |
454 | he = halfedges[ halfedges[ he.opposite_he ].next_he ]
455 | if he is start_he: break
456 |
457 | return result
458 |
459 | def vertex_valence( self, vertex_index ):
460 | '''
461 | Returns the valence (number of vertex neighbors) of vertex with index 'vertex_index'.
462 |
463 | untested
464 | '''
465 |
466 | return len( self.vertex_vertex_neighbors( vertex_index ) )
467 |
468 | def vertex_face_neighbors( self, vertex_index ):
469 | '''
470 | Returns the face neighbors (as indices) of the vertex 'vertex_index'.
471 |
472 | untested
473 | '''
474 |
475 | ## It's important to access self.halfedges first (which calls get_halfedges()),
476 | ## so that we're sure all halfedge info is generated.
477 | halfedges = self.halfedges
478 | result = []
479 | start_he = halfedges[ self.__vertex_halfedges[ vertex_index ] ]
480 | he = start_he
481 | while True:
482 | if -1 != he.face: result.append( he.face )
483 |
484 | he = halfedges[ halfedges[ he.opposite_he ].next_he ]
485 | if he is start_he: break
486 |
487 | return result
488 |
489 | def vertex_is_boundary( self, vertex_index ):
490 | '''
491 | Returns whether the vertex with given index is on the boundary.
492 |
493 | untested
494 | '''
495 |
496 | ## It's important to access self.halfedges first (which calls get_halfedges()),
497 | ## so that we're sure all halfedge info is generated.
498 | halfedges = self.halfedges
499 | return -1 == halfedges[ self.__vertex_halfedges[ vertex_index ] ].face
500 |
501 | def boundary_vertices( self ):
502 | '''
503 | Returns a list of the vertex indices on the boundary.
504 |
505 | untested
506 | '''
507 |
508 | result = []
509 | for hei, he in enumerate( self.halfedges ):
510 | if -1 == he.face:
511 | # result.extend( self.he_index2directed_edge( hei ) )
512 | result.append( he.to_vertex )
513 | result.append( self.halfedges[ he.opposite_he ].to_vertex )
514 |
515 | #from sets import ImmutableSet
516 | ImmutableSet = frozenset
517 | return list(ImmutableSet( result ))
518 |
519 | def boundary_edges( self ):
520 | '''
521 | Returns a list of boundary edges (i,j). If (i,j) is in the result, (j,i) will not be.
522 |
523 | untested
524 | '''
525 |
526 | result = []
527 | for hei, he in enumerate( self.halfedges ):
528 | if -1 == he.face:
529 | result.append( self.he_index2directed_edge( hei ) )
530 | return result
531 |
532 | def positions_changed( self ):
533 | '''
534 | Notify the object that vertex positions changed.
535 | All position-related structures (normals, areas) will be marked for re-calculation.
536 | '''
537 |
538 | self.__face_normals = None
539 | self.__face_areas = None
540 | self.__vertex_normals = None
541 | self.__vertex_areas = None
542 |
543 | self.lifetime_counter += 1
544 |
545 |
546 | def topology_changed( self ):
547 | '''
548 | Notify the object that topology (faces or #vertices) changed.
549 | All topology-related structures (halfedges, edge lists) as well as position-related
550 | structures (normals, areas) will be marked for re-calculation.
551 | '''
552 |
553 | ## Set mesh.vs to an array so that subsequent calls to asarray() on it are no-ops.
554 | self.vs = asarray( self.vs )
555 |
556 |
557 | #### jianchao's modification begin
558 | # self.vs=list(self.vs)
559 | #### jianchao's modification end
560 |
561 |
562 | self.__edges = None
563 | self.__halfedges = None
564 | self.__vertex_halfedges = None
565 | self.__face_halfedges = None
566 | self.__edge_halfedges = None
567 | self.__directed_edge2he_index = None
568 |
569 | self.positions_changed()
570 |
571 | def get_dangling_vertices( self ):
572 | '''
573 | Returns vertex indices in TriMesh 'mesh' that belong to no faces.
574 | '''
575 |
576 | ## Slow:
577 | '''
578 | brute_vertex_face_valence = [ 0 ] * len( self.vs )
579 | for i,j,k in self.faces:
580 | brute_vertex_face_valence[ i ] += 1
581 | brute_vertex_face_valence[ j ] += 1
582 | brute_vertex_face_valence[ k ] += 1
583 | return [ i for i in xrange( len( self.vs ) ) if 0 == brute_vertex_face_valence[i] ]
584 | '''
585 | ## ~Slow
586 |
587 | ## Fast:
588 | '''
589 | brute_vertex_face_valence = zeros( len( self.vs ), dtype = int )
590 | self.faces = asarray( self.faces )
591 | brute_vertex_face_valence[ self.faces[:,0] ] += 1
592 | brute_vertex_face_valence[ self.faces[:,1] ] += 1
593 | brute_vertex_face_valence[ self.faces[:,2] ] += 1
594 | return where( brute_vertex_face_valence == 0 )[0]
595 | '''
596 | ## ~Fast
597 |
598 | ## Faster:
599 | vertex_has_face = zeros( len( self.vs ), dtype = bool )
600 | self.faces = asarray( self.faces )
601 | vertex_has_face[ self.faces.ravel() ] = True
602 | return where( vertex_has_face == 0 )[0]
603 | ## ~Faster
604 |
605 | def remove_vertex_indices( self, vertex_indices_to_remove ):
606 | '''
607 | Removes vertices in the list of indices 'vertex_indices_to_remove'.
608 | Also removes faces containing the vertices and dangling vertices.
609 |
610 | Returns an array mapping vertex indices before the call
611 | to vertex indices after the call or -1 if the vertex was removed.
612 |
613 | used
614 | '''
615 |
616 | ## I can't assert this here because I call this function recursively to remove dangling
617 | ## vertices.
618 | ## Also, someone manipulating the mesh might want to do the same thing (call this
619 | ## function on dangling vertices).
620 | #assert 0 == len( self.get_dangling_vertices() )
621 |
622 |
623 | if 0 == len( vertex_indices_to_remove ): return arange( len( self.vs ) )
624 |
625 |
626 | ## Slow:
627 | '''
628 | ## Make a map from old to new vertices. This is the return value.
629 | old2new = [ -1 ] * len( self.vs )
630 | last_index = 0
631 | for i in xrange( len( self.vs ) ):
632 | if i not in vertex_indices_to_remove:
633 | old2new[ i ] = last_index
634 | last_index += 1
635 |
636 | ## Remove vertices from vs, faces, edges, and optionally uvs.
637 | self.vs = [ pt for i, pt in enumerate( self.vs ) if old2new[i] != -1 ]
638 | if hasattr( self, 'uvs' ):
639 | self.uvs = [ uv for i, uv in enumerate( self.uvs ) if old2new[i] != -1 ]
640 | ## UPDATE: We have half-edge info, so we have to call 'topology_changed()' to
641 | ## regenerate the half-edge info, and 'topology_changed()' implies
642 | ## 'geometry_changed()', so updating anything but '.vs', '.faces'
643 | ## and '.uvs' is a waste unless I can precisely update the
644 | ## halfedge data structures.
645 | #self.__vertex_normals = asarray( [ vn for i, vn in enumerate( self.__vertex_normals ) if old2new[i] != -1 ] )
646 | #self.__edges = [ ( old2new[i], old2new[j] ) for i,j in self.__edges ]
647 | #self.__edges = [ edge for edge in self.__edges if -1 not in edge ]
648 | self.faces = [ ( old2new[i], old2new[j], old2new[k] ) for i,j,k in self.faces ]
649 | #self.__face_normals = [ n for i,n in enumerate( self.__face_normals ) if -1 not in self.faces[i] ]
650 | #self.__face_areas = [ n for i,n in enumerate( self.__face_areas ) if -1 not in self.faces[i] ]
651 | self.faces = [ tri for tri in self.faces if -1 not in tri ]
652 | '''
653 | ## ~Slow
654 |
655 |
656 | ## Fast:
657 | ## Make a map from old to new vertices. This is the return value.
658 | old2new = -ones( len( self.vs ), dtype = int )
659 | ## Later versions of numpy.setdiff1d(), such as 2.0, return a unique, sorted array
660 | ## and do not assume that inputs are unique.
661 | ## Earlier versions, such as 1.4, require unique inputs and don't say
662 | ## anything about sorted output.
663 | ## (We don't know that 'vertex_indices_to_remove' is unique!)
664 | keep_vertices = sort( setdiff1d( arange( len( self.vs ) ), unique( vertex_indices_to_remove ) ) )
665 | old2new[ keep_vertices ] = arange( len( keep_vertices ) )
666 |
667 | ## Remove vertices from vs, faces, edges, and optionally uvs.
668 | ## Fast:
669 | self.vs = asarray( self.vs )
670 | self.vs = self.vs[ keep_vertices, : ]
671 | if hasattr( self, 'uvs' ):
672 | self.uvs = asarray( self.uvs )
673 | self.uvs = self.uvs[ keep_vertices, : ]
674 |
675 | self.faces = asarray( self.faces )
676 | self.faces = old2new[ self.faces ]
677 | self.faces = self.faces[ ( self.faces != -1 ).all( axis = 1 ) ]
678 | ## ~Fast
679 |
680 |
681 | ## Now that we have halfedge info, just call topology changed and everything but
682 | ## 'vs' and 'faces' will be regenerated.
683 | self.topology_changed()
684 |
685 | ## Remove dangling vertices created by removing faces incident to vertices in 'vertex_indices_to_remove'.
686 | ## We only need to call this once, because a dangling vertex has no faces, so its removal
687 | ## won't remove any faces, so no new dangling vertices can be created.
688 | dangling = self.get_dangling_vertices()
689 | if len( dangling ) > 0:
690 | old2new_recurse = self.remove_vertex_indices( dangling )
691 | assert 0 == len( self.get_dangling_vertices() )
692 |
693 | '''
694 | for i in xrange( len( old2new ) ):
695 | if -1 != old2new[i]: old2new[i] = old2new_recurse[ old2new[ i ] ]
696 | '''
697 | old2new[ old2new != -1 ] = old2new_recurse[ old2new ]
698 |
699 |
700 | ### jianchao's modification begin
701 | # self.vs=list(self.vs)
702 | # self.faces=list(self.faces)
703 | ### jianchao's modification end
704 |
705 |
706 | return old2new
707 |
708 | def remove_face_indices( self, face_indices_to_remove ):
709 | '''
710 | Removes faces in the list of indices 'face_indices_to_remove'.
711 | Also removes dangling vertices.
712 |
713 | Returns an array mapping face indices before the call
714 | to face indices after the call or -1 if the face was removed.
715 |
716 | used
717 | '''
718 |
719 | if 0 == len( face_indices_to_remove ): return arange( len( self.faces ) )
720 |
721 |
722 | ## Fast:
723 | ## Make a map from old to new faces. This is the return value.
724 | old2new = -ones( len( self.faces ), dtype = int )
725 | ## Later versions of numpy.setdiff1d(), such as 2.0, return a unique, sorted array
726 | ## and do not assume that inputs are unique.
727 | ## Earlier versions, such as 1.4, require unique inputs and don't say
728 | ## anything about sorted output.
729 | ## (We don't know that 'face_indices_to_remove' is unique!)
730 | keep_faces = sort( setdiff1d( arange( len( self.faces ) ), unique( face_indices_to_remove ) ) )
731 | old2new[ keep_faces ] = arange( len( keep_faces ) )
732 |
733 | ## Remove vertices from vs, faces, edges, and optionally uvs.
734 | ## Fast:
735 | self.faces = asarray( self.faces )
736 | self.faces = self.faces[ keep_faces, : ]
737 | ## ~Fast
738 |
739 |
740 | ## Now that we have halfedge info, just call topology changed and everything but
741 | ## 'vs' and 'faces' will be regenerated.
742 | self.topology_changed()
743 |
744 | ## Remove dangling vertices created by removing faces incident to vertices.
745 | ## Since we are only removing dangling vertices, 'self.faces' can't be affected,
746 | ## so we don't need to worry about the 'old2new' map.
747 | dangling = self.get_dangling_vertices()
748 | if len( dangling ) > 0:
749 | self.remove_vertex_indices( dangling )
750 | assert 0 == len( self.get_dangling_vertices() )
751 |
752 | return old2new
753 |
754 |
755 | def append( self, mesh ):
756 | '''
757 | Given a mesh, with two properties,
758 | .vs, containing a list of 3d vertices
759 | .faces, containing a list of triangles as triplets of indices into .vs
760 | appends 'mesh's vertices and faces to self.vs and self.faces.
761 | '''
762 |
763 | ## mesh's vertices are going to be copied to the end of self.vs;
764 | ## All vertex indices in mesh.faces will need to be offset by the current
765 | ## number of vertices in self.vs.
766 | vertex_offset = len( self.vs )
767 |
768 | self.vs = list( self.vs ) + list( mesh.vs )
769 | self.faces = list( self.faces ) + list( asarray( mesh.faces, dtype = int ) + vertex_offset )
770 |
771 |
772 | ## If there are uvs, concatenate them.
773 |
774 | ## First, if self is an empty mesh (without uv's), and the mesh to append-to has uv's,
775 | ## create an empty .uvs property in self.
776 | if not hasattr( self, 'uvs' ) and hasattr( mesh, 'uvs' ) and len( self.vs ) == 0:
777 | self.uvs = []
778 |
779 | if hasattr( self, 'uvs' ) and hasattr( mesh, 'uvs' ):
780 | self.uvs = list( self.uvs ) + list( mesh.uvs )
781 | elif hasattr( self, 'uvs' ):
782 | del self.uvs
783 |
784 |
785 | ## We're almost done, we only need to call topology_changed().
786 | ## However, let's see if we can keep some properties that are slow to regenerate.
787 | self__face_normals = self.__face_normals
788 | self__face_areas = self.__face_areas
789 | self__vertex_normals = self.__vertex_normals
790 | self__vertex_areas = self.__vertex_areas
791 |
792 | self.topology_changed()
793 |
794 | if self__face_normals is not None and mesh.__face_normals is not None:
795 | self.__face_normals = append( self__face_normals, mesh.__face_normals, axis = 0 )
796 | if self__face_areas is not None and mesh.__face_areas is not None:
797 | self.__face_areas = append( self__face_areas, mesh.__face_areas, axis = 0 )
798 | if self__vertex_normals is not None and mesh.__vertex_normals is not None:
799 | self.__vertex_normals = append( self__vertex_normals, mesh.__vertex_normals, axis = 0 )
800 | if self__vertex_areas is not None and mesh.__vertex_areas is not None:
801 | self.__vertex_areas = append( self__vertex_areas, mesh.__vertex_areas, axis = 0 )
802 |
803 |
804 | def FromTriMeshes( meshes ):
805 | '''
806 | Given a sequence of meshes, each with two properties,
807 | .vs, containing a list of 3d vertices
808 | .faces, containing a list of triangles as triplets of indices into .vs
809 | returns a single TriMesh object containing all meshes concatenated together.
810 | '''
811 |
812 | result = TriMesh()
813 | for mesh in meshes:
814 | result.append( mesh )
815 |
816 | ## Reset the lifetime counter
817 | result.lifetime_counter = 0
818 | return result
819 |
820 | FromTriMeshes = staticmethod( FromTriMeshes )
821 |
822 |
823 | def FromOBJ_FileName( obj_fname ):
824 | if obj_fname.endswith( '.gz' ):
825 | import gzip
826 | f = gzip.open( obj_fname )
827 | else:
828 | f = open( obj_fname )
829 | return TriMesh.FromOBJ_Lines( f )
830 |
831 | FromOBJ_FileName = staticmethod( FromOBJ_FileName )
832 |
833 |
834 | def FromOBJ_Lines( obj_lines ):
835 | '''
836 | Given lines from an OBJ file, return a new TriMesh object.
837 |
838 | tested
839 | '''
840 |
841 | result = TriMesh()
842 |
843 | ## NOTE: We only handle faces and vertex positions.
844 | for line in obj_lines:
845 | line = line.strip()
846 |
847 | sline = line.split()
848 | ## Skip blank lines
849 | if not sline: continue
850 |
851 | elif sline[0] == 'v':
852 | result.vs.append( [ float(v) for v in sline[1:] ] )
853 | ## Vertices must have three coordinates.
854 | ## UPDATE: Let's be flexible about this.
855 | # assert len( result.vs[-1] ) == 3
856 |
857 | elif sline[0] == 'f':
858 | ## The split('/')[0] means we record only the vertex coordinate indices
859 | ## for each face.
860 | face_vertex_ids = [ int( c.split('/')[0] ) for c in sline[1:] ]
861 | ## Faces must be triangles.
862 | assert len( face_vertex_ids ) == 3
863 |
864 | ## Face vertex indices cannot be zero.
865 | ## UPDATE: Do this assert once at the end. The following code
866 | ## will convert the 0 to -1.
867 | # assert not any([ ind == 0 for ind in face_vertex_ids ])
868 |
869 | ## Subtract one from positive indices, and use relative addressing for negative
870 | ## indices.
871 | face_vertex_ids = [
872 | ( ind-1 ) if ( ind >= 0 ) else ( len(result.vs) + ind )
873 | for ind in face_vertex_ids
874 | ]
875 |
876 | ## UPDATE: Do this assert once at the end.
877 | # assert all([ ind < len( result.vs ) for ind in face_vertex_ids ])
878 | result.faces.append( face_vertex_ids )
879 |
880 | result.vs = asarray( result.vs )
881 | result.faces = asarray( result.faces, dtype = int )
882 | assert logical_and( result.faces >= 0, result.faces < len( result.vs ) ).all()
883 |
884 | return result
885 |
886 | FromOBJ_Lines = staticmethod( FromOBJ_Lines )
887 |
888 |
889 | def write_OBJ( self, fname, header_comment = None ):
890 | '''
891 | Writes the data out to an OBJ file named 'fname'.
892 | Optional comment 'header_comment' is printed at the
893 | top of the OBJ file, after prepending the OBJ comment
894 | marker at the head of each line.
895 |
896 | tested
897 | '''
898 |
899 |
900 | ## Estimate for mesh size:
901 | ## 16 bytes for a vertex row,
902 | ## optionally 16 bytes for a uv row,
903 | ## 12/20 bytes for a face row with/without uv's.
904 | ## Assuming no uv's and 2 faces per vertex,
905 | ## a 1MB mesh is made of (1024*1024/(16+2*12)) = 26214 vertices.
906 | ## If we have uv's, then we will reach 1MB with (1024*1024/(2*16+2*20)) = 14563 vertices.
907 | ## Print a warning if we're going to save a mesh much larger than a megabyte.
908 | if len( self.vs ) > 15000:
909 | print( 'Writing a large OBJ to:', fname )
910 |
911 |
912 | out = open( fname, 'w' )
913 |
914 | if header_comment is None:
915 | import sys
916 | header_comment = 'Written by ' + ' '.join([ arg.replace('\n',r'\n') for arg in sys.argv ])
917 |
918 | ## Print the header comment.
919 | for line in header_comment.split('\n'):
920 | out.write( '## %s\n' % (line,) )
921 | out.write( '\n' )
922 |
923 |
924 | ## Print vertices.
925 | for v in self.vs:
926 | out.write( 'v %r %r %r\n' % tuple(v) )
927 | out.write( '\n' )
928 |
929 |
930 | ## Print uv's if we have them.
931 | if hasattr( self, 'uvs' ):
932 | for uv in self.uvs:
933 | out.write( 'vt %r %r\n' % tuple(uv) )
934 | out.write( '\n' )
935 |
936 | ## Print faces with uv's.
937 | for f in self.faces:
938 | #out.write( 'f %s/%s %s/%s %s/%s\n' % tuple( ( asarray(f,dtype=int) + 1 ).repeat(2) ) )
939 | out.write( 'f %s/%s %s/%s %s/%s\n' % ( f[0]+1,f[0]+1, f[1]+1,f[1]+1, f[2]+1,f[2]+1 ) )
940 | else:
941 | ## Print faces without uv's.
942 | for f in self.faces:
943 | #out.write( 'f %s %s %s\n' % tuple(asarray(f,dtype=int) + 1) )
944 | out.write( 'f %s %s %s\n' % ( f[0]+1, f[1]+1, f[2]+1 ) )
945 |
946 |
947 | out.close()
948 |
949 | print( 'OBJ written to:', fname)
950 |
951 | def write_OFF( self, fname ):
952 | '''
953 | Writes the data out to an OFF file named 'fname'.
954 | '''
955 |
956 | out = file( fname, 'w' )
957 |
958 | out.write( 'OFF\n' )
959 | out.write( '%d %d 0\n' % ( len( self.vs ), len( self.faces ) ) )
960 |
961 | for v in self.vs:
962 | out.write( '%r %r %r\n' % tuple(v) )
963 | for f in self.faces:
964 | out.write( '3 %s %s %s\n' % tuple(f) )
965 |
966 | out.close()
967 |
968 | print( 'OFF written to:', fname)
969 |
## We can't pickle anything that doesn't have a name visible at module scope.
## In order to allow pickling of class TriMesh, we'll make a reference to the inner HalfEdge class
## here at the module level.  (pickle looks up classes by their module-level name.)
HalfEdge = TriMesh.HalfEdge
974 |
def main():
    '''Command-line entry point: load an OBJ mesh if a path was given.'''
    import sys
    ## Nothing to do without an OBJ path on the command line.
    if len( sys.argv ) <= 1: return
    mesh = TriMesh.FromOBJ_FileName( sys.argv[1] )
    # mesh.write_OBJ( sys.argv[2] )
980 |
## Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
983 |
--------------------------------------------------------------------------------