├── .gitattributes
├── .gitignore
├── IndividualTreeExtraction.py
├── PointwiseDirectionPrediction.py
├── README.md
├── accessible_region
│   └── AccessibleRegionGrowing.py
├── backbone_network
│   ├── BatchSampleGenerator.py
│   ├── Loss.py
│   ├── PDE_net.py
│   └── PDE_net_training.py
├── figs
│   └── overall_workflow.png
├── utils
│   ├── py_util.py
│   └── tf_util.py
├── voxel_region_grow
│   ├── VoxelRegionGrow.pyx
│   └── VoxelRegionGrow_Setup.py
└── voxel_traversal
    └── VoxelTraversalAlgorithm.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # celery beat schedule file
95 | celerybeat-schedule
96 |
97 | # SageMath parsed files
98 | *.sage.py
99 |
100 | # Environments
101 | .env
102 | .venv
103 | env/
104 | venv/
105 | ENV/
106 | env.bak/
107 | venv.bak/
108 |
109 | # Spyder project settings
110 | .spyderproject
111 | .spyproject
112 |
113 | # Rope project settings
114 | .ropeproject
115 |
116 | # mkdocs documentation
117 | /site
118 |
119 | # mypy
120 | .mypy_cache/
121 | .dmypy.json
122 | dmypy.json
123 |
124 | # Pyre type checker
125 | .pyre/
126 |
--------------------------------------------------------------------------------
/IndividualTreeExtraction.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 | import numpy as np
7 | import os
8 | import sys
9 | import matplotlib.pyplot as plt
10 | from mpl_toolkits.mplot3d import Axes3D
11 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
12 | sys.path.append(BASE_DIR)
13 | sys.path.append(os.path.join(BASE_DIR, 'voxel_traversal'))
14 | sys.path.append(os.path.join(BASE_DIR, 'accessible_region'))
15 | sys.path.append(os.path.join(BASE_DIR, 'utils'))
16 | import py_util
17 | import VoxelTraversalAlgorithm as VTA
18 | import AccessibleRegionGrowing as ARG
19 | import PointwiseDirectionPrediction as PDE_net
20 |
21 |
22 | def show_AR_RG(voxels1, voxels2):
23 | fig = plt.figure()
24 |     ax = fig.add_subplot(111, projection='3d')
25 | ####accessible region
26 | ax.voxels(voxels2, facecolors='red', edgecolor='k', alpha=0.9)
27 | ####region growing results
28 | ax.voxels(voxels1, facecolors='green', edgecolor='k')
29 | plt.show()
30 |
31 | ############################################################
32 | def compute_object_center(sample_xyz):
33 | min_xyz = np.min(sample_xyz, axis=0)
34 | max_xyz = np.max(sample_xyz, axis=0)
35 |     delta_central_xyz = (max_xyz - min_xyz) / 2.0
36 |     central_xyz = delta_central_xyz + min_xyz
37 | return central_xyz
38 |
39 | ############################################################
40 | def object_xoy_bounding(xyz, object_xyz, sphere_level, bounding_order=1):
41 |
42 | min_xy = np.min(object_xyz[:, :2], axis=0)
43 | max_xy = np.max(object_xyz[:, :2], axis=0)
44 | delta_xy = (max_xy - min_xy) / sphere_level
45 | min_xy += bounding_order * delta_xy
46 | max_xy -= bounding_order * delta_xy
47 |     modify_object_index_x = np.where((xyz[:, 0] >= min_xy[0]) & (xyz[:, 0] < max_xy[0]))
48 |     modify_object_index_y = np.where((xyz[:, 1] >= min_xy[1]) & (xyz[:, 1] < max_xy[1]))
49 | modify_object_index_xy = np.intersect1d(modify_object_index_x[0], modify_object_index_y[0])
50 | modify_object_index_xy = list(modify_object_index_xy)
51 | return modify_object_index_xy
52 |
53 | def direction_vote_voxels(points, directions, voxel_size, num_voxel_xyz, min_xyz):
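    # Each point casts a ray from its position along its predicted direction; every voxel
    # the ray traverses (computed by the voxel traversal algorithm) receives one vote, and
    # the indices of the contributing start points are recorded per voxel.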
54 |     num_points = np.size(points, 0)
55 | output_voxel_direction_count = np.zeros((int(num_voxel_xyz[0]), int(num_voxel_xyz[1]), int(num_voxel_xyz[2])), dtype=int)
56 |
57 | ######
58 | per_voxel_direction_start_points = [[[[] for _ in range(int(num_voxel_xyz[2]))] for _ in range(int(num_voxel_xyz[1]))] for _ in range(int(num_voxel_xyz[0]))]
59 | ####
60 |     for i in range(num_points):
61 | visited_voxels = VTA.voxel_traversal(points[i, :], directions[i, :], min_xyz, num_voxel_xyz, voxel_size)
62 | for j in range(len(visited_voxels)):
63 | output_voxel_direction_count[int(visited_voxels[j][0]), int(visited_voxels[j][1]), int(visited_voxels[j][2])] += 1
64 | per_voxel_direction_start_points[int(visited_voxels[j][0])][int(visited_voxels[j][1])][int(visited_voxels[j][2])].append(i)
65 |
66 | return output_voxel_direction_count, per_voxel_direction_start_points
67 |
68 | def center_detection_xoy(voxel_direction_count, num_voxel_xyz, center_direction_count_th):
69 |
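    # A voxel in the xoy vote map is a center candidate when its count reaches
    # center_direction_count_th and strictly exceeds all 8 in-plane neighbors.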
70 | numVoxel_x = num_voxel_xyz[0]
71 | numVoxel_y = num_voxel_xyz[1]
72 | object_center_voxel_list = []
73 |
74 | for i in range(int(numVoxel_x - 2)):
75 | for j in range(int(numVoxel_y - 2)):
76 | temp_object_voxel_dir_count = voxel_direction_count[i + 1, j + 1]
77 |
78 | if temp_object_voxel_dir_count < center_direction_count_th:
79 | continue
80 |
81 | temp_neighbors = [voxel_direction_count[i, j], voxel_direction_count[i + 1, j],
82 | voxel_direction_count[i + 2, j],
83 | voxel_direction_count[i, j + 1], voxel_direction_count[i + 2, j + 1],
84 | voxel_direction_count[i, j + 2], voxel_direction_count[i + 1, j + 2],
85 | voxel_direction_count[i + 2, j + 2]]
86 | max_neighbors = np.max(np.array(temp_neighbors))
87 |
88 | if temp_object_voxel_dir_count > max_neighbors:
89 | object_center_voxel_list.append([i + 1, j + 1])
90 |
91 | return np.vstack(object_center_voxel_list)
92 |
93 | ############################################################
94 | def center_detection(data, voxel_size, angle_threshold, center_direction_count_th=20):
95 | '''detect the tree centers'''
96 |
97 | object_xyz_list = []
98 | xyz = data[:, :3]
99 | directions = data[:, 3:]
100 | min_xyz = np.min(xyz, axis=0)
101 | max_xyz = np.max(xyz, axis=0)
102 | delta_xyz = max_xyz - min_xyz
103 | num_voxel_xyz = np.ceil(delta_xyz / voxel_size)
104 |
105 | #######################################################################
106 | ############################Center Detection###########################
107 | #######################################################################
108 | output_voxel_direction_count, per_voxel_direction_start_points = direction_vote_voxels(xyz,
109 | directions,
110 | voxel_size,
111 | num_voxel_xyz,
112 | min_xyz)
113 | #####centers in xoy plane
114 | output_voxel_direction_count_xoy = np.sum(output_voxel_direction_count, axis=2)
115 | object_centers_xoy = center_detection_xoy(output_voxel_direction_count_xoy,
116 | num_voxel_xyz[:2],
117 | center_direction_count_th)
118 |
119 | ####centers in z-axis
120 | for i in range(np.size(object_centers_xoy, 0)):
121 | temp_object_center_xoy = object_centers_xoy[i, :]
122 | ####
123 | temp_centre_xyz = np.array([temp_object_center_xoy[0], temp_object_center_xoy[1]])
124 | temp_centre_xyz = temp_centre_xyz * voxel_size + min_xyz[:2] # + voxel_size / 2.0
125 | ####
126 | center_xbottom = temp_centre_xyz[0] - voxel_size / 2.0
127 | center_xup = temp_centre_xyz[0] + voxel_size / 2.0
128 | center_ybottom = temp_centre_xyz[1] - voxel_size / 2.0
129 | center_yup = temp_centre_xyz[1] + voxel_size / 2.0
130 |         x_valid_range = np.where((xyz[:, 0] > center_xbottom) & (xyz[:, 0] < center_xup))
131 |         y_valid_range = np.where((xyz[:, 1] > center_ybottom) & (xyz[:, 1] < center_yup))
132 |         xy_intersection_index = list(set(x_valid_range[0]).intersection(set(y_valid_range[0])))
133 |
134 | ####discard the fake centers
135 | if len(xy_intersection_index) == 0:
136 | continue
137 | #####
138 | output_voxel_direction_count_z = output_voxel_direction_count[temp_object_center_xoy[0], temp_object_center_xoy[1], :]
139 | temp_index = np.where(output_voxel_direction_count_z == np.max(output_voxel_direction_count_z))
140 | object_xyz_list.append([temp_object_center_xoy[0], temp_object_center_xoy[1], temp_index[0][0]])
141 |
142 | object_xyz_list = np.vstack(object_xyz_list)
143 | object_xyz_list = object_xyz_list * voxel_size + min_xyz # + voxel_size / 2.0
144 |
145 | ####### further refine detected centers using intersection directions
146 | ####### Note that the following steps have not been discussed in our paper #############
147 | ####### If higher efficiency is required, these steps can be discarded ###############
148 | objectVoxelMask_list = []
149 | for i in range(np.size(object_xyz_list, 0)):
150 |
151 | center_xyz = object_xyz_list[i, :]
152 | _, _, objectVoxelMask = individual_tree_separation(xyz,
153 | directions,
154 | center_xyz,
155 | voxel_size,
156 | min_xyz,
157 | num_voxel_xyz,
158 | angle_threshold)
159 |
160 | objectVoxelMask_index = np.where(objectVoxelMask == True)
161 | if np.size(objectVoxelMask_index[0], 0) == 0:
162 | continue
163 | temp_objectvoxels = []
164 | for j in range(np.size(objectVoxelMask_index[0], 0)):
165 | temp_objectvoxel_index = [objectVoxelMask_index[0][j], objectVoxelMask_index[1][j], objectVoxelMask_index[2][j]]
166 | temp_objectvoxels.append(temp_objectvoxel_index)
167 | objectVoxelMask_list.append(temp_objectvoxels)
168 |
169 | #######
170 | final_object_center_index = []
171 | for i in range(len(objectVoxelMask_list)):
172 | #####
173 | temp_object_voxels = np.vstack(objectVoxelMask_list[i])
174 | #####copy array
175 | temp_all_object_voxels = objectVoxelMask_list[:]
176 | del temp_all_object_voxels[i]
177 |
178 | #######
179 | for j in range(len(temp_all_object_voxels)):
180 |
181 | temp_remain_object_voxels = np.vstack(temp_all_object_voxels[j])
182 | temp_intersection = np.array([x for x in set(tuple(x) for x in temp_object_voxels) & set(tuple(x) for x in temp_remain_object_voxels)])
183 |
184 | if np.size(temp_intersection, 0) > 0:
185 | temp_object_voxels = set(tuple(x) for x in temp_object_voxels).difference(set(tuple(x) for x in temp_intersection))
186 | temp_object_voxels = np.array([list(x) for x in temp_object_voxels])
187 |
188 | if np.size(temp_object_voxels, 0) == 0:
189 |                     break
190 | if np.size(temp_object_voxels, 0) >= 3:
191 | final_object_center_index.append(i)
192 |
193 | object_xyz_list = object_xyz_list[final_object_center_index, :]
194 | print('Num of Tree Centers: %d'%int(np.size(object_xyz_list, 0)))
195 | return object_xyz_list
196 |
197 | ############################################################
198 | def individual_tree_separation(xyz, directions, center_xyz, voxel_size, min_xyz, num_voxel_xyz,
199 |                                angle_threshold, visualization=False):
200 |
201 | #####generate accessible region
202 | accessible_region, accessible_index = ARG.detect_accessible_region(xyz, directions, center_xyz,
203 | voxel_size, angle_threshold)
204 | #####
205 | #####voxelize accessible region
206 | accessible_region_voxels, seed_voxel, valid_voxels, voxel2point_index_list = ARG.voxelization(accessible_region,
207 | accessible_index,
208 | voxel_size,
209 | center_xyz,
210 | min_xyz,
211 | num_voxel_xyz)
212 | ###########
213 | output_voxels_v2 = np.array(accessible_region_voxels)
214 | output_voxels_v2 = output_voxels_v2.astype(bool)
215 |
216 | ####voxel-based region growing
217 |     objectMask, objectMaskVoxelIndex = ARG.voxel_region_grow(accessible_region_voxels, seed_voxel)
218 |
219 |     ###########visualization
220 |     objectMask = np.array(objectMask)
221 |     objectMask = objectMask.astype(bool)
222 |     if visualization:
223 |         show_AR_RG(objectMask, output_voxels_v2)
224 |
225 |     ######refine seed voxels
226 |     index_voxel2point = [valid_voxels.index(tempMaskIndex) for tempMaskIndex in objectMaskVoxelIndex]
227 |     ######
228 |     temp_object_xyz_index = []
229 |     for temp_index_voxel2point in index_voxel2point:
230 |         temp_object_xyz_index += voxel2point_index_list[temp_index_voxel2point]
231 |     #####
232 |     object_result = xyz[temp_object_xyz_index, :]
233 |     return object_result, temp_object_xyz_index, objectMask
234 |
235 | def individual_tree_extraction(PDE_net_model_path, test_data_path, result_path, voxel_size, Nd, ARe):
236 | '''Individual Tree Extraction'''
237 | ####restore trained PDE-net
238 | sess, PDE_net_ops = PDE_net.restore_trained_model(NUM_POINT, PDE_net_model_path)
239 | ####
240 | file_list = os.listdir(test_data_path)
241 | for i in range(len(file_list)):
242 | tree_index = 0
243 | filename, _ = os.path.splitext(file_list[i])
244 | print('Separating ' + filename + '...')
245 | #### data[x, y, z] original coordinates
246 | testdata = py_util.load_data(test_data_path + file_list[i])[:, :3]
247 | ####normalized coordinates
248 | nor_testdata = py_util.normalize(testdata)
249 | ####Pointwise direction prediction
250 | xyz_direction = PDE_net.prediction(sess, nor_testdata, PDE_net_ops)
251 | ####tree center detection
252 | object_center_list = center_detection(xyz_direction, voxel_size, ARe, Nd)
253 |
254 | ####for single tree clusters
255 | if np.size(object_center_list, axis=0) <= 1:
256 | ####random colors
257 | num_pointIntree = np.size(xyz_direction, axis=0)
258 | color = np.random.randint(0, 255, size=3)
259 | ####assign tree labels
260 | temp_tree_label = np.ones([num_pointIntree, 1]) * tree_index
261 | color = np.ones([num_pointIntree, 3]) * color
262 | ######
263 | individualtree = np.concatenate([testdata[:, :3], color, temp_tree_label], axis=-1)
264 | np.savetxt(result_path + file_list[i], individualtree, fmt='%.4f')
265 | tree_index += 1
266 | continue
267 |
268 | ####for multi tree clusters
269 | extracted_object_list = []
270 | object_color_list = []
271 | temp_tree_id = 0
272 | for j in range(np.size(object_center_list, 0)):
273 |
274 | xyz = xyz_direction[:, :3]
275 | directions = xyz_direction[:, 3:]
276 | ####
277 | min_xyz = np.min(xyz, axis=0)
278 | max_xyz = np.max(xyz, axis=0)
279 | delta_xyz = max_xyz - min_xyz
280 | num_voxel_xyz = np.ceil(delta_xyz / voxel_size)
281 | ####
282 | center_xyz = object_center_list[j, :]
283 | ####use padding to fix the situation where the tree center voxel is empty
284 | center_xyz_padding = np.array([[center_xyz[0], center_xyz[1], center_xyz[2]],
285 | [center_xyz[0], center_xyz[1], center_xyz[2] - voxel_size],
286 | [center_xyz[0], center_xyz[1], center_xyz[2] + voxel_size]])
287 | directions_padding = np.array([[0.0, 0.0, 0.0],
288 | [0.0, 0.0, 1.0],
289 | [0.0, 0.0, -1.0]])
290 | center_direction_padding = np.concatenate([center_xyz_padding, directions_padding], axis=-1)
291 |
292 | xyz = np.concatenate([center_xyz_padding, xyz], axis=0)
293 | directions = np.concatenate([directions_padding, directions], axis=0)
294 | xyz_direction = np.concatenate([center_direction_padding, xyz_direction], axis=0)
295 |
296 |             #### pad testdata the same way, only to keep the indexes aligned
297 | testdata = np.concatenate([testdata[:3, :], testdata], axis=0)
298 | ####
299 | object_result, temp_object_xyz_index, _ = individual_tree_separation(xyz,
300 | directions,
301 | center_xyz,
302 | voxel_size,
303 | min_xyz,
304 | num_voxel_xyz,
305 | ARe,
306 |                                                                               visualization=False)
307 | ####refine the NULL growing results
308 | if np.size(object_result, 0) == 0: continue
309 | ###fix the discontinuity of the voxel in the vertical direction of tree centers
310 | modify_object_index_xy = object_xoy_bounding(xyz, object_result, 8, bounding_order=1)
311 | temp_object_xyz_index += modify_object_index_xy
312 | temp_object_xyz_index = list(set(temp_object_xyz_index))
313 |
314 | #####remove padding points
315 | real_object_xyz_index = [i for i in temp_object_xyz_index if i > 2]
316 | object_result = testdata[real_object_xyz_index, :3]
317 | ####generate random color for extracted individual tree points
318 | num_pointInObject = np.size(object_result, axis=0)
319 | color = np.random.randint(0, 255, size=3)
320 | object_color_list.append(color)
321 | ####assign a tree label for each individual tree
322 | temp_object_label = np.ones([num_pointInObject, 1]) * temp_tree_id
323 | color = np.ones([num_pointInObject, 3]) * color
324 | extracted_object_list.append(np.concatenate([object_result, color, temp_object_label], axis=-1))
325 | ####
326 | temp_tree_id += 1
327 | ####delete the extracted individual tree points
328 | testdata = np.delete(testdata, temp_object_xyz_index, axis=0)
329 | xyz_direction = np.delete(xyz_direction, temp_object_xyz_index, axis=0)
330 |
331 | ####use the nearest neighbor assignment to refine those points with large errors
332 | for k in range(np.size(xyz_direction, 0)):
333 | temp_remain_xyz_nor = xyz_direction[k, :3]
334 | temp_remain_xyz = testdata[k, :3]
335 | temp_distances = np.sqrt(np.sum(np.asarray(temp_remain_xyz_nor - object_center_list) ** 2, axis=1))
336 | nearestObjectCenter = np.where(temp_distances == np.min(temp_distances))
337 | color = object_color_list[int(nearestObjectCenter[0])]
338 | temp_remain_xyz_label = np.expand_dims(np.concatenate([temp_remain_xyz, color, nearestObjectCenter[0]], axis=-1), axis=0)
339 | extracted_object_list.append(temp_remain_xyz_label)
340 | ####output the final results
341 | np.savetxt(result_path + filename + '.txt', np.vstack(extracted_object_list), fmt='%.4f')
342 |
343 |
344 | if __name__ == '__main__':
345 |
346 | NUM_POINT = 4096
347 | Nd = 80
348 | ARe = np.pi / 9.0
349 | voxel_size = 0.08
350 | #######
351 |     PDE_net_model_path = './backbone_network/pre_trained_PDE_net/'
352 | test_data_path = './data/test/'
353 | result_path = './result/'
354 | if not os.path.exists(result_path): os.mkdir(result_path)
355 |
356 | #######extract individual trees from tree clusters
357 | individual_tree_extraction(PDE_net_model_path, test_data_path, result_path, voxel_size, Nd, ARe)
358 |
--------------------------------------------------------------------------------
/PointwiseDirectionPrediction.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 |
7 | import os
8 | import sys
9 | import numpy as np
10 | import tensorflow as tf
11 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
12 | sys.path.append(os.path.join(BASE_DIR, 'utils'))
13 | sys.path.append(os.path.join(BASE_DIR, 'backbone_network'))
14 | import PDE_net
15 |
16 |
17 | def restore_trained_model(NUM_POINT, MODEL_DIR, BATCH_SIZE=1):
18 |
19 | with tf.Graph().as_default(), tf.device('/cpu:0'):
20 | batch = tf.Variable(0, trainable=False)
21 |
22 | with tf.variable_scope(tf.get_variable_scope()):
23 | with tf.device('/gpu:0'):
24 | pointclouds = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
25 | is_training = tf.placeholder(tf.bool, shape=())
26 |
27 | #####DirectionEmbedding
28 | PDE = PDE_net.get_model_RRFSegNet('PDE_net',
29 | pointclouds,
30 | is_training=is_training,
31 | k=20)
32 |
33 | # PDE = PDE_net.get_model_DGCNN('PDE_net',
34 | # pointclouds,
35 | # is_training=is_training,
36 | # k=20)
37 |
38 | PDE = tf.nn.l2_normalize(PDE, axis=2, epsilon=1e-20)
39 |
40 | saver = tf.train.Saver(tf.global_variables())
41 | # Create a session
42 | config = tf.ConfigProto()
43 | config.gpu_options.allow_growth = True
44 | config.allow_soft_placement = True
45 | sess = tf.Session(config=config)
46 | saver.restore(sess, tf.train.latest_checkpoint(MODEL_DIR))
47 |
48 | PDE_net_ops = {'pointclouds': pointclouds,
49 | 'is_training': is_training,
50 | 'PDE': PDE,
51 | 'step': batch}
52 | return sess, PDE_net_ops
53 |
54 |
55 | def prediction(sess, testdata, ops):
56 |
57 | testdata = np.expand_dims(testdata, axis=0)
58 | feed_dict = {ops['pointclouds']: testdata,
59 | ops['is_training']: False}
60 |
61 | pde_ = sess.run(ops['PDE'], feed_dict=feed_dict)
62 | testdata = np.squeeze(testdata)
63 | pde_ = np.squeeze(pde_)
64 | ####################
65 | xyz_direction = np.concatenate([testdata, pde_], -1)
66 | return xyz_direction
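
# Minimal usage sketch (the model path follows IndividualTreeExtraction.py and is an assumption):
#   sess, ops = restore_trained_model(NUM_POINT=4096, MODEL_DIR='./backbone_network/pre_trained_PDE_net/')
#   xyz_direction = prediction(sess, normalized_points, ops)  # (N, 6): [x, y, z, dx, dy, dz]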
67 |
68 |
69 |
70 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Individual Tree Extraction
2 |
3 | This is an implementation of our paper published in the ISPRS Journal of Photogrammetry and Remote Sensing:
4 |
5 | [**Individual Tree Extraction from Urban Mobile Laser Scanning Point Clouds Using Deep Pointwise Direction Embedding**](https://www.sciencedirect.com/science/article/pii/S0924271621000654?via%3Dihub)
6 |
7 | Haifeng Luo, Kourosh Khoshelham, Chongcheng Chen, and Hanxian He
8 |
9 | # Introduction:
10 | In this paper, we propose a novel top-down approach to extract individual trees from urban MLS point clouds. Firstly, a semantic segmentation deep network is applied to segment tree points from raw urban MLS point clouds, and then the segmented tree points are further grouped into a set of tree clusters using Euclidean distance clustering. Next, a pointwise direction embedding deep network (PDE-net) is proposed to predict the direction vectors pointing to tree centers for each tree cluster to enhance the boundaries of instance-level trees. After that, a direction aggregation-based strategy is developed to detect the tree centers for each tree cluster, and the clusters are classified into single-tree clusters and multi-tree clusters based on the number of detected tree centers. Finally, the single-tree clusters are directly extracted as individual trees, while the multi-tree clusters are further separated into instance-level trees based on our proposed accessible region growing algorithm combining the embedded pointwise directions and detected tree centers.
11 |
12 | This repo does not contain the source code for semantic segmentation. In theory, any existing semantic segmentation framework can be used to extract the tree points, such as DGCNN, PointASNL, or MS-RRFSegNet.
13 |
14 | ![Overall workflow](figs/overall_workflow.png)
15 |
16 | # Requirements:
17 | Python 3.6
18 | TensorFlow 1.8
19 | Cython
20 |
21 | # Train PDE-net:
22 |
23 | To train the PDE-net:
24 |
25 | cd backbone_network
26 | python PDE_net_training.py
27 |
28 | * Data format of training samples: [x, y, z, tree_id]
29 |
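For illustration, a training file is plain text with one point per line and the tree instance id appended (the values below are invented):

    12.357  4.881  1.602  0
    12.401  4.902  1.655  0
    15.210  6.034  2.118  1
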
30 | # Compile Voxel-based Region Growing:
31 |
32 | cd voxel_region_grow
33 | python VoxelRegionGrow_Setup.py build_ext --inplace
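
This builds the `VoxelRegionGrow` Cython extension in place; `accessible_region/AccessibleRegionGrowing.py` imports it for the voxel-based region growing step.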
34 |
35 | # Extract Individual Trees:
36 |
37 | To extract the individual trees from tree clusters:
38 |
39 | python IndividualTreeExtraction.py
40 |
41 | * Data format of the input tree clusters: [x, y, z]
42 |
43 | * Data format of the output results: [x, y, z, r, g, b, tree_id]
44 |
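As a minimal sketch (the file name is hypothetical), the output can be reloaded and grouped by tree id with NumPy:

    import numpy as np

    result = np.loadtxt('result/tree_cluster.txt')
    xyz, rgb, tree_id = result[:, :3], result[:, 3:6], result[:, 6].astype(int)
    trees = {t: xyz[tree_id == t] for t in np.unique(tree_id)}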
45 |
46 | # Citation:
47 |
48 | If you find this project useful for your research, please kindly cite our paper:
49 |
50 | @article{luo2021individual,
51 | title={Individual tree extraction from urban mobile laser scanning point clouds using deep pointwise direction embedding},
52 | author={Luo, Haifeng and Khoshelham, Kourosh and Chen, Chongcheng and He, Hanxian},
53 | journal={ISPRS Journal of Photogrammetry and Remote Sensing},
54 | volume={175},
55 | pages={326--339},
56 | year={2021},
57 | publisher={Elsevier}
58 | }
59 |
60 | # Contact
61 |
62 | If you have any questions, please contact [Haifeng Luo](mailto:h.feng.luo@outlook.com).
63 |
--------------------------------------------------------------------------------
/accessible_region/AccessibleRegionGrowing.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 |
7 | import numpy as np
8 | import os
9 | import sys
10 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
11 | ROOT_DIR = os.path.dirname(BASE_DIR)
12 | sys.path.append(os.path.join(ROOT_DIR, 'voxel_region_grow'))
13 | import VoxelRegionGrow
14 |
15 |
16 | def detect_accessible_region(input_xyz, point_directions, center_xyz, voxel_size, angle_threshold=np.pi / 9):
17 | """
18 | Generate Accessible Region
19 | """
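    # A point belongs to the accessible region of a detected center when the angle between
    # its predicted direction and the point-to-center vector is below angle_threshold; a
    # small vertical column of points around the center is always included as well.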
20 | #####
21 | temp_point2center_vector = center_xyz - input_xyz
22 | ######
23 | temp_point2center_vector_L2 = np.linalg.norm(temp_point2center_vector, axis=1)
24 | temp_point2center_vector_L2[temp_point2center_vector_L2 == 0.0] = 1e-25
25 | point_directions_L2 = np.linalg.norm(point_directions, axis=1)
26 | point_directions_L2[point_directions_L2 == 0.0] = 1e-25
27 |
28 | ######
29 |     angles_point2center_vector_and_vote = np.arccos(np.clip(np.sum(np.multiply(point_directions, temp_point2center_vector), axis=1) /
30 |                                                              (point_directions_L2 * temp_point2center_vector_L2), -1.0, 1.0))
31 | #####
32 | accessible_index = np.where(angles_point2center_vector_and_vote < angle_threshold)
33 | #####
34 | x_range_bottom = center_xyz[0] - 1.5 * voxel_size
35 | x_range_up = center_xyz[0] + 1.5 * voxel_size
36 | y_range_bottom = center_xyz[1] - 1.5 * voxel_size
37 | y_range_up = center_xyz[1] + 1.5 * voxel_size
38 |     vertical_neighbors_index_x = np.where((input_xyz[:, 0] >= x_range_bottom) & (input_xyz[:, 0] <= x_range_up))
39 |     vertical_neighbors_index_y = np.where((input_xyz[:, 1] >= y_range_bottom) & (input_xyz[:, 1] <= y_range_up))
40 | vertical_neighbors_index_xy = np.intersect1d(vertical_neighbors_index_x[0], vertical_neighbors_index_y[0])
41 | ###
42 | accessible_index = list(accessible_index[0]) + list(vertical_neighbors_index_xy)
43 | accessible_index = list(set(accessible_index))
44 | #####
45 | accessible_region = input_xyz[accessible_index, :]
46 |
47 | return accessible_region, accessible_index
48 |
49 | ############################################################
50 | def voxelization(accessible_region, accessible_index, voxel_size, center_xyz, min_xyz, num_voxel_xyz):
51 |
52 |
53 | ###seed position
54 | seed_x = center_xyz[0]
55 | seed_y = center_xyz[1]
56 | seed_z = center_xyz[2]
57 | seed_voxel_id_x = int(np.floor((seed_x - min_xyz[0]) / voxel_size))
58 | seed_voxel_id_y = int(np.floor((seed_y - min_xyz[1]) / voxel_size))
59 | seed_voxel_id_z = int(np.floor((seed_z - min_xyz[2]) / voxel_size))
60 | seed_voxel = [seed_voxel_id_x, seed_voxel_id_y, seed_voxel_id_z]
61 |
62 | #######init voxels
63 | output_voxels = np.zeros((int(num_voxel_xyz[0]), int(num_voxel_xyz[1]), int(num_voxel_xyz[2])), dtype=int)
64 | ######
65 | valid_voxel_position = []
66 | voxel2point_index_list = []
67 | for i in range(int(num_voxel_xyz[0])):
68 | if i == 0:
69 | temp_x_range = np.where((accessible_region[:, 0] >= min_xyz[0] + i * voxel_size)
70 |                                     & (accessible_region[:, 0] <= min_xyz[0] + (i + 1) * voxel_size))
71 | else:
72 | temp_x_range = np.where((accessible_region[:, 0] > min_xyz[0] + i * voxel_size)
73 |                                     & (accessible_region[:, 0] <= min_xyz[0] + (i + 1) * voxel_size))
74 |
75 | if np.size(temp_x_range[0]) == 0:
76 | continue
77 | else:
78 | for j in range(int(num_voxel_xyz[1])):
79 | if j == 0:
80 | temp_y_range = np.where((accessible_region[:, 1] >= min_xyz[1] + j * voxel_size)
81 |                                             & (accessible_region[:, 1] <= min_xyz[1] + (j + 1) * voxel_size))
82 | else:
83 | temp_y_range = np.where((accessible_region[:, 1] > min_xyz[1] + j * voxel_size)
84 |                                             & (accessible_region[:, 1] <= min_xyz[1] + (j + 1) * voxel_size))
85 | if np.size(temp_y_range[0]) == 0:
86 | continue
87 | else:
88 | xy_intersect = np.intersect1d(temp_x_range[0], temp_y_range[0])
89 | if np.size(xy_intersect) == 0:
90 | continue
91 | else:
92 | for k in range(int(num_voxel_xyz[2])):
93 | if k == 0:
94 | temp_z_range = np.where((accessible_region[:, 2] >= min_xyz[2] + k * voxel_size)
95 |                                                         & (accessible_region[:, 2] <= min_xyz[2] + (
96 | k + 1) * voxel_size))
97 | else:
98 | temp_z_range = np.where((accessible_region[:, 2] > min_xyz[2] + k * voxel_size)
99 |                                                         & (accessible_region[:, 2] <= min_xyz[2] + (
100 | k + 1) * voxel_size))
101 |
102 | if np.size(temp_z_range[0]) == 0:
103 | continue
104 | else:
105 | xy_z_intersect = np.intersect1d(xy_intersect, temp_z_range[0])
106 | if np.size(xy_z_intersect) != 0:
107 | valid_voxel_position.append([i, j, k])
108 | ######
109 | temp_voxel2point_index_list = [accessible_index[l] for l in list(xy_z_intersect)]
110 | voxel2point_index_list.append(temp_voxel2point_index_list)
111 | output_voxels[i, j, k] = 1
112 |
113 | return output_voxels, seed_voxel, valid_voxel_position, voxel2point_index_list
114 |
115 | ############################################################
116 | def voxel_region_grow(output_voxels, seed):
117 |     voxelRG = VoxelRegionGrow.Build(output_voxels)
118 |     objectMask = voxelRG.Run(seed)
119 |     objectMaskVoxelIndex = np.vstack(np.where(np.array(objectMask) == 1)).T
120 |     objectMaskVoxelIndex = [list(tempMaskVoxelIndex) for tempMaskVoxelIndex in objectMaskVoxelIndex]
121 |     return objectMask, objectMaskVoxelIndex
122 |
123 |
124 |
125 |
126 |
127 |
128 |
--------------------------------------------------------------------------------
/backbone_network/BatchSampleGenerator.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 |
7 | import numpy as np
8 | import os
9 | import sys
10 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
11 | ROOT_DIR = os.path.dirname(BASE_DIR)
12 | sys.path.append(os.path.join(ROOT_DIR, 'utils'))
13 | import py_util
14 |
15 | def minibatch_generator(trainingdata_path, batch_size, train_set, num_points):
16 | '''
17 | Generator for PDE-net training and validation
18 | '''
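    # Per training file: normalize xyz, derive the pointwise direction label
    # center - point for every tree instance, shuffle, crop to num_points, and
    # yield [xyz, direction_label, object_label] once batch_size files accumulate.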
19 |
20 | while True:
21 | train_xyz_data = []
22 | direction_label_data = []
23 | object_label_data = []
24 | batch = 0
25 |         for i in range(len(train_set)):
26 | batch += 1
27 | url = train_set[i]
28 | temp_point_set = py_util.load_data(trainingdata_path+url)
29 |
30 | #####
31 | temp_xyz = temp_point_set[:, :3]
32 | temp_xyz = py_util.normalize(temp_xyz)
33 |
34 | object_label = temp_point_set[:, 3]
35 | unique_object_label = np.unique(object_label)
36 |
37 | temp_multi_objects_sample = []
38 | for j in range(np.size(unique_object_label)):
39 | ###for each object
40 | temp_index = np.where(object_label == unique_object_label[j])
41 | temp_index_object_xyz = temp_xyz[temp_index[0], :]
42 | ###object_label
43 | temp_object_label = np.expand_dims(object_label[temp_index[0]], axis=-1)
44 | ###center point
45 | temp_object_center_xyz = py_util.compute_object_center(temp_index_object_xyz)
46 |                 ### delta_x + x = center_point ----> delta_x = center_point - x
47 |                 temp_direction_label = temp_object_center_xyz - temp_index_object_xyz
48 |                 ####[x, y, z, delta_x, delta_y, delta_z]
49 | temp_xyz_direction_label_concat = np.concatenate([temp_index_object_xyz,
50 | temp_direction_label,
51 | temp_object_label],
52 | axis=-1)
53 | ####
54 | temp_multi_objects_sample.append(temp_xyz_direction_label_concat)
55 |
56 | temp_multi_objects_sample = np.vstack(temp_multi_objects_sample)
57 | ###
58 | temp_multi_objects_sample = py_util.shuffle_data(temp_multi_objects_sample)
59 | temp_multi_objects_sample = temp_multi_objects_sample[:num_points, :]
60 | ###
61 | training_xyz = temp_multi_objects_sample[:, :3]
62 | training_direction_label = temp_multi_objects_sample[:, 3:-1]
63 | training_object_label = temp_multi_objects_sample[:, -1]
64 |
65 | train_xyz_data.append(training_xyz)
66 | direction_label_data.append(training_direction_label)
67 | object_label_data.append(training_object_label)
68 |
69 | if batch % batch_size == 0:
70 | train_xyz_data = np.array(train_xyz_data)
71 | direction_label_data = np.array(direction_label_data)
72 | object_label_data = np.array(object_label_data)
73 | yield [train_xyz_data, direction_label_data, object_label_data]
74 | train_xyz_data = []
75 | direction_label_data = []
76 | object_label_data = []
77 | batch = 0
--------------------------------------------------------------------------------
/backbone_network/Loss.py:
--------------------------------------------------------------------------------
1 | """
2 | Loss functions for training the PDE-net
3 |
4 | Created on Mon July 11 18:50:39 2020
5 |
6 | @author: Haifeng Luo
7 | """
8 | import tensorflow as tf
9 |
10 |
11 | def slack_based_direction_loss(pre_direction, gt_direction, sigma=0.955):
12 | '''
13 | Error Slack-based Direction Loss
14 | '''
15 | gt_direction = tf.nn.l2_normalize(gt_direction, axis=2, epsilon=1e-20)
16 | pre_direction = tf.nn.l2_normalize(pre_direction, axis=2, epsilon=1e-20)
17 |
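    # Error slack: after normalization the reduce_sum below is the cosine similarity, so the
    # per-point loss is max(0, sigma - cos(pre, gt)); similarities above sigma cost nothing.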
18 | loss = tf.subtract(sigma, tf.reduce_sum(tf.multiply(pre_direction, gt_direction), axis=2))
19 | tmp = tf.zeros_like(loss)
20 | condition = tf.greater(loss, 0.0)
21 | loss = tf.where(condition, loss, tmp)
22 | loss = tf.reduce_mean(loss)
23 | return loss
24 |
25 |
26 | def direction_loss(pre_direction, gt_direction):
27 | '''
28 | Plain Direction Loss
29 | '''
30 | gt_direction = tf.nn.l2_normalize(gt_direction, axis=2, epsilon=1e-20)
31 | pre_direction = tf.nn.l2_normalize(pre_direction, axis=2, epsilon=1e-20)
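    # Negative mean cosine similarity: minimizing aligns predictions with the labels.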
32 | loss = -tf.reduce_mean(tf.reduce_sum(tf.multiply(pre_direction, gt_direction), axis=2))
33 |
34 | return loss
--------------------------------------------------------------------------------
/backbone_network/PDE_net.py:
--------------------------------------------------------------------------------
1 | """
2 | In theory, the backbone network can be any semantic segmentation
3 | framework that directly takes discrete points as input.
4 |
5 | Created on Mon July 11 18:50:39 2020
6 |
7 | @author: Haifeng Luo
8 | """
9 | import tensorflow as tf
10 | import os
11 | import sys
12 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
13 | ROOT_DIR = os.path.dirname(BASE_DIR)
14 | sys.path.append(os.path.join(ROOT_DIR, 'utils'))
15 | import tf_util
16 |
17 |
18 | def relation_reasoning_layers(name, inputs, is_training, bn_decay, nodes_list, weight_decay, is_dist):
19 | '''
20 | relation feature reasoning layers
21 | '''
22 | with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
23 | net = tf_util.conv2d(inputs, nodes_list[0], [1, 1],
24 | padding='VALID', stride=[1, 1],
25 | bn=True, is_training=is_training,
26 | scope='mpl_g1', bn_decay=bn_decay,
27 | weight_decay=weight_decay, is_dist=is_dist)
28 | net = tf_util.conv2d(net, nodes_list[1], [1, 1],
29 | padding='VALID', stride=[1, 1],
30 | bn=True, is_training=is_training,
31 | scope='mpl_g2', bn_decay=bn_decay,
32 | weight_decay=weight_decay, is_dist=is_dist)
33 | # net = tf.reduce_sum(net, axis=-2, keep_dims=True)
34 | net = tf.reduce_max(net, axis=-2, keep_dims=True)
35 |
36 | net = tf_util.conv2d(net, nodes_list[2], [1, 1],
37 | padding='VALID', stride=[1, 1],
38 | bn=True, is_training=is_training,
39 | scope='mpl_f1', bn_decay=bn_decay,
40 | weight_decay=weight_decay, is_dist=is_dist)
41 | return net
42 |
43 | def get_model_RRFSegNet(name, points, is_training, k=20, is_dist=True, weight_decay=0.0004,
44 | bn_decay=None, reuse=tf.AUTO_REUSE):
45 | ''' RRFSegNet-based Backbone Network (PDE-net) '''
46 |
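    # Pipeline: kNN graph on xyz -> two relation-reasoning blocks -> 1024-d global
    # feature (max-pooled and tiled per point) -> 1x1 conv head regressing a
    # 3-vector direction per point.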
47 | with tf.variable_scope(name, reuse=reuse):
48 | num_point = points.get_shape()[1].value
49 | Position = points[:, :, :3]
50 | adj = tf_util.pairwise_distance(Position)
51 | nn_idx = tf_util.knn(adj, k=k)
52 | ### layer_1
53 |         relation_features1 = tf_util.get_relation_features(points, nn_idx=nn_idx, k=k)
54 | net_1 = relation_reasoning_layers('layer_1', relation_features1,
55 | is_training=is_training, bn_decay=bn_decay,
56 | nodes_list=[64, 64, 64],
57 | weight_decay=weight_decay,
58 | is_dist=is_dist)
59 | ### layer_2
60 |         relation_features2 = tf_util.get_relation_features(net_1, nn_idx=nn_idx, k=k)
61 |         net_2 = relation_reasoning_layers('layer_2', relation_features2,
62 | is_training=is_training, bn_decay=bn_decay,
63 | nodes_list=[128, 128, 128],
64 | weight_decay=weight_decay,
65 | is_dist=is_dist)
66 |
67 | ###generate global features
68 | global_net = tf_util.conv2d(tf.concat([net_1, net_2], axis=-1), 1024, [1, 1],
69 | padding='VALID', stride=[1, 1], weight_decay=weight_decay,
70 | bn=True, is_training=is_training,
71 | scope='mpl_global', bn_decay=bn_decay, is_dist=is_dist)
72 |
73 | global_net = tf.reduce_max(global_net, axis=1, keep_dims=True)
74 | global_net = tf.tile(global_net, [1, num_point, 1, 1])
75 |
76 | ###
77 | concat = tf.concat(axis=3, values=[global_net, net_1, net_2])
78 |
79 | # CONV
80 | net = tf_util.conv2d(concat, 256, [1, 1], padding='VALID', stride=[1, 1],
81 | bn=True, is_training=is_training, scope='dir/conv1',
82 | weight_decay=weight_decay, is_dist=is_dist, bn_decay=bn_decay)
83 | net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
84 | net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
85 | bn=True, is_training=is_training, scope='dir/conv2', is_dist=is_dist)
86 | net = tf_util.conv2d(net, 3, [1, 1], padding='VALID', stride=[1, 1],
87 | bn=True, activation_fn=None, is_training=is_training,
88 | scope='dir/conv3', is_dist=is_dist)
89 | net = tf.squeeze(net, axis=2)
90 |
91 | return net
92 |
93 |
94 | def get_model_DGCNN(name, point_cloud, is_training, is_dist=False,
95 | weight_decay=0.0001, bn_decay=None, k=20, reuse=tf.AUTO_REUSE):
96 | '''DGCNN-based backbone network (PDE-net)'''
97 |
98 | with tf.variable_scope(name, reuse=reuse):
99 |
100 | num_point = point_cloud.get_shape()[1].value
101 | input_image = tf.expand_dims(point_cloud, -1)
102 | input_point_cloud = tf.expand_dims(point_cloud, -2)
103 | adj = tf_util.pairwise_distance(point_cloud[:, :, :3])
104 | nn_idx = tf_util.knn(adj, k=k)
105 | ###
106 | edge_feature1 = tf_util.get_edge_feature(input_image, nn_idx=nn_idx, k=k)
107 | net = tf_util.conv2d(edge_feature1, 64, [1, 1],
108 | padding='VALID', stride=[1, 1],
109 | bn=True, is_training=is_training, weight_decay=weight_decay,
110 | scope='adj_conv1', bn_decay=bn_decay, is_dist=is_dist)
111 | net_1 = tf.reduce_max(net, axis=-2, keep_dims=True)
112 |
113 | edge_feature2 = tf_util.get_edge_feature(net_1, nn_idx=nn_idx, k=k)
114 | net = tf_util.conv2d(edge_feature2, 64, [1,1],
115 | padding='VALID', stride=[1,1],
116 | bn=True, is_training=is_training, weight_decay=weight_decay,
117 | scope='adj_conv3', bn_decay=bn_decay, is_dist=is_dist)
118 | net_2 = tf.reduce_max(net, axis=-2, keep_dims=True)
119 |
120 | edge_feature3 = tf_util.get_edge_feature(net_2, nn_idx=nn_idx, k=k)
121 | net = tf_util.conv2d(edge_feature3, 64, [1,1],
122 | padding='VALID', stride=[1,1],
123 | bn=True, is_training=is_training, weight_decay=weight_decay,
124 | scope='adj_conv5', bn_decay=bn_decay, is_dist=is_dist)
125 | net_3 = tf.reduce_max(net, axis=-2, keep_dims=True)
126 |
127 | net = tf_util.conv2d(tf.concat([net_1, net_2, net_3], axis=-1), 1024, [1, 1],
128 | padding='VALID', stride=[1,1],
129 | bn=True, is_training=is_training,
130 | scope='adj_conv7', bn_decay=bn_decay, is_dist=is_dist)
131 | out_max = tf_util.max_pool2d(net, [num_point, 1], padding='VALID', scope='maxpool')
132 | expand = tf.tile(out_max, [1, num_point, 1, 1])
133 |
134 | ##############
135 | net = tf.concat(axis=3, values=[expand, net_1, net_2, net_3, input_point_cloud])
136 | ############
137 | net = tf_util.conv2d(net, 512, [1, 1], padding='VALID', stride=[1, 1],
138 | bn=True, is_training=is_training, scope='dir/conv1', is_dist=is_dist)
139 | net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp1')
140 | net = tf_util.conv2d(net, 64, [1, 1], padding='VALID', stride=[1, 1],
141 | bn=True, is_training=is_training, scope='dir/conv2', is_dist=is_dist)
142 | net = tf_util.dropout(net, keep_prob=0.7, is_training=is_training, scope='dp2')
143 | net = tf_util.conv2d(net, 3, [1, 1], padding='VALID', stride=[1, 1],
144 | bn=True, activation_fn=None, is_training=is_training,
145 | scope='dir/conv3', is_dist=is_dist)
146 | net = tf.squeeze(net, axis=2)
147 | return net
148 |
--------------------------------------------------------------------------------
/backbone_network/PDE_net_training.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 | import argparse
7 | import os
8 | import sys
9 | import tensorflow as tf
10 | from tqdm import tqdm
11 | import BatchSampleGenerator as BSG
12 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
13 | ROOT_DIR = os.path.dirname(BASE_DIR)
14 | sys.path.append(os.path.join(ROOT_DIR, 'utils'))
15 | import py_util
16 | import PDE_net
17 | import Loss
18 |
19 |
20 | parser = argparse.ArgumentParser()
21 | parser.add_argument('--log_dir', default='pre_trained_PDE_net', help='Log dir [default: log]')
22 | parser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')
23 | parser.add_argument('--max_epoch', type=int, default=100, help='Epoch to run [default: 100]')
24 | parser.add_argument('--batch_size', type=int, default=12, help='Batch Size during training for each GPU [default: 12]')
25 | parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
26 | parser.add_argument('--decay_step', type=int, default=50000, help='Decay step for lr decay [default: 50000]')
27 | parser.add_argument('--decay_rate', type=float, default=0.95, help='Decay rate for lr decay [default: 0.95]')
28 | parser.add_argument('--training_data_path',
29 |                     default='./data/training_data/',
30 |                     help='Path to the source training-data files')
31 | parser.add_argument('--validating_data_path',
32 |                     default='./data/validating_data/',
33 |                     help='Path to the source validating-data files')
34 |
35 | FLAGS = parser.parse_args()
36 | TRAIN_DATA_PATH = FLAGS.training_data_path
37 | VALIDATION_PATH = FLAGS.validating_data_path
38 |
39 |
40 | BATCH_SIZE = FLAGS.batch_size
41 | NUM_POINT = FLAGS.num_point
42 | MAX_EPOCH = FLAGS.max_epoch
43 | BASE_LEARNING_RATE = FLAGS.learning_rate
44 | DECAY_STEP = FLAGS.decay_step
45 | DECAY_RATE = FLAGS.decay_rate
46 |
47 | BN_INIT_DECAY = 0.5
48 | BN_DECAY_DECAY_RATE = 0.5
49 | BN_DECAY_DECAY_STEP = float(DECAY_STEP)
50 | BN_DECAY_CLIP = 0.99
51 |
52 | LOG_DIR = FLAGS.log_dir
53 | if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
54 | LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
55 | LOG_FOUT.write(str(FLAGS) + '\n')
56 |
57 | def log_string(out_str):
58 | LOG_FOUT.write(out_str + '\n')
59 | LOG_FOUT.flush()
60 | print(out_str)
61 |
62 | def get_learning_rate(batch):
63 | learning_rate = tf.train.exponential_decay(
64 | BASE_LEARNING_RATE, # Base learning rate.
65 | batch * BATCH_SIZE, # Current index into the dataset.
66 | DECAY_STEP, # Decay step.
67 | DECAY_RATE, # Decay rate.
68 | staircase=True)
69 | learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!!
70 | return learning_rate
71 |
72 | def get_bn_decay(batch):
73 | bn_momentum = tf.train.exponential_decay(
74 | BN_INIT_DECAY,
75 | batch * BATCH_SIZE,
76 | BN_DECAY_DECAY_STEP,
77 | BN_DECAY_DECAY_RATE,
78 | staircase=False)
79 | bn_decay = tf.minimum(BN_DECAY_CLIP, 1 - bn_momentum)
80 | return bn_decay
81 |
82 | def train():
83 | with tf.Graph().as_default(), tf.device('/cpu:0'):
84 | batch = tf.Variable(0, trainable=False)
85 | bn_decay = get_bn_decay(batch)
86 | learning_rate = get_learning_rate(batch)
87 |
88 | with tf.variable_scope(tf.get_variable_scope()):
89 | with tf.device('/gpu:0'):
90 | pointclouds = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
91 | direction_labels = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINT, 3))
92 | is_training = tf.placeholder(tf.bool, shape=())
93 |
94 | #####DirectionEmbedding
95 | DeepPointwiseDirections = PDE_net.get_model_RRFSegNet('PDE_net',
96 | pointclouds,
97 | is_training=is_training,
98 | weight_decay=0.0001,
99 | bn_decay=bn_decay,
100 | k=20)
101 |
102 | #####DirectionEmbedding
103 | # DeepPointwiseDirections = PDE_net.get_model_DGCNN('PDE_net',
104 | # pointclouds,
105 | # is_training=is_training,
106 | # weight_decay=0.0001,
107 | # bn_decay=bn_decay,
108 | # k=20)
109 | ######
110 | loss_esd = Loss.slack_based_direction_loss(DeepPointwiseDirections, direction_labels)
111 | loss_pd = Loss.direction_loss(DeepPointwiseDirections, direction_labels)
112 | loss = 1 * loss_esd + 0 * loss_pd + tf.add_n(tf.get_collection('losses'))
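                #### weights 1/0 select the error slack-based loss only; tf.add_n sums
                #### the L2 weight-decay terms collected under 'losses' in tf_util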
113 |
114 | ###optimizer--Adam
115 | train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=batch)
116 |
117 | saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)
118 | # Create a session
119 | config = tf.ConfigProto()
120 | config.gpu_options.allow_growth = True
121 | config.allow_soft_placement = True
122 | sess = tf.Session(config=config)
123 | #####
124 | init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
125 | sess.run(init)
126 |
127 | ops = {'learning_rate': learning_rate,
128 | 'pointclouds': pointclouds,
129 | 'direction_labels': direction_labels,
130 | 'is_training': is_training,
131 | 'loss': loss,
132 | 'loss_esd': loss_esd,
133 | 'loss_pd': loss_pd,
134 | 'train_op': train_op,
135 | 'step': batch}
136 |
137 | init_loss = 999.999
138 | for epoch in range(MAX_EPOCH):
139 | log_string('**** EPOCH %03d ****' % (epoch))
140 | sys.stdout.flush()
141 |
142 | ####training data generator
143 | train_set = py_util.get_data_set(TRAIN_DATA_PATH)
144 |         generator_training = BSG.minibatch_generator(TRAIN_DATA_PATH, BATCH_SIZE, train_set, NUM_POINT)
145 |
146 | ####validating data generator
147 | val_set = py_util.get_data_set(VALIDATION_PATH)
148 |         generator_val = BSG.minibatch_generator(VALIDATION_PATH, BATCH_SIZE, val_set, NUM_POINT)
149 |
150 |         ##### training steps
151 | temp_loss = train_one_epoch(sess, epoch, train_set, generator_training, ops)
152 | #####validating steps
153 | validation(sess, val_set, generator_val, ops)
154 |
155 |         #### saving the trained models
156 | if temp_loss < init_loss:
157 | saver.save(sess, os.path.join(LOG_DIR, 'epoch_' + str(epoch) + '.ckpt'))
158 | init_loss = temp_loss
159 |
160 | def train_one_epoch(sess, epoch, train_set, generator, ops):
161 | """ ops: dict mapping from string to tf ops """
162 |
163 | num_batches_training = len(train_set) // (BATCH_SIZE)
164 | print('-----------------training--------------------')
165 | print('training steps: %d'%num_batches_training)
166 |
167 | total_loss = 0
168 | total_loss_esd = 0
169 | total_loss_pd = 0
170 | for i in tqdm(range(num_batches_training)):
171 | ###
172 | batch_train_data, batch_direction_label_data, _ = next(generator)
173 | ###
174 | feed_dict = {ops['pointclouds']: batch_train_data,
175 | ops['direction_labels']: batch_direction_label_data,
176 | ops['is_training']: True}
177 |
178 | _, lr, loss, loss_esd_, loss_pd_ = sess.run([ops['train_op'], ops['learning_rate'], ops['loss'],
179 | ops['loss_esd'], ops['loss_pd']], feed_dict=feed_dict)
180 | total_loss += loss
181 | total_loss_esd += loss_esd_
182 | total_loss_pd += loss_pd_
183 |
184 | if i % 20 == 0:
185 |             print('lr: %f, loss: %f, loss_esd: %f, loss_pd: %f' % (lr, loss, loss_esd_, loss_pd_))
186 |
187 |     print('training_log_epoch_%d' % epoch)
188 |     log_string('epoch: %d, loss: %f, loss_esd: %f, loss_pd: %f' % (epoch, total_loss/(num_batches_training),
189 |                                                                    total_loss_esd/(num_batches_training),
190 |                                                                    total_loss_pd/(num_batches_training)))
191 | return total_loss/(num_batches_training)
192 |
193 |
194 | def validation(sess, test_set, generator, ops):
195 |
196 | num_batches_testing = len(test_set) // (BATCH_SIZE)
197 | total_loss = 0
198 | total_loss_esd = 0
199 | total_loss_pd = 0
200 | for _ in tqdm(range(num_batches_testing)):
201 | ###
202 | batch_test_data, batch_direction_label_data, _ = next(generator)
203 | ###
204 | feed_dict = {ops['pointclouds']: batch_test_data,
205 | ops['direction_labels']: batch_direction_label_data,
206 | ops['is_training']: False,
207 | }
208 | loss_, loss_esd_, loss_pd_ = sess.run([ops['loss'], ops['loss_esd'], ops['loss_pd']], feed_dict=feed_dict)
209 | total_loss += loss_
210 | total_loss_esd += loss_esd_
211 | total_loss_pd += loss_pd_
212 |
213 | log_string('val loss: %f, loss_esd: %f, loss_pd: %f'%(total_loss/num_batches_testing,
214 | total_loss_esd/num_batches_testing,
215 | total_loss_pd/num_batches_testing))
216 |
217 | if __name__ == "__main__":
218 | train()
219 | LOG_FOUT.close()
220 |
--------------------------------------------------------------------------------
/figs/overall_workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HiphonL/IndividualTreeExtraction/88a102a7c5d4e6932c3753a3b4c613a8abffbf97/figs/overall_workflow.png
--------------------------------------------------------------------------------
/utils/py_util.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 | import numpy as np
7 | import random
8 | import math
9 | import os
10 |
11 | def load_data(path):
12 | try:
13 | return np.load(path)
14 |     except Exception:
15 | return np.loadtxt(path)
16 |
17 | def get_data_set(data_path):
18 | files_set = os.listdir(data_path)
19 | random.shuffle(files_set)
20 | return files_set
21 |
22 |
23 | def get_train_val_set(trainingdata_path, val_rate=0.20):
24 | train_set = []
25 | val_set = []
26 | all_train_set = os.listdir(trainingdata_path)
27 | random.shuffle(all_train_set)
28 | total_num = len(all_train_set)
29 | val_num = int(val_rate * total_num)
30 | for j in range(len(all_train_set)):
31 | if j < val_num:
32 | val_set.append(all_train_set[j])
33 | else:
34 | train_set.append(all_train_set[j])
35 | return train_set, val_set
36 |
37 |
38 | def normalize(sample_xyz):
39 | min_xyz = np.min(sample_xyz, axis=0)
40 | max_xyz = np.max(sample_xyz, axis=0)
41 |     delta_central_xyz = (max_xyz - min_xyz)/2.0
42 |     central_xyz = delta_central_xyz + min_xyz
43 | n_data = sample_xyz - central_xyz
44 | # normalize into unit sphere
45 | n_data /= np.max(np.linalg.norm(n_data, axis=1))
46 | return n_data
47 |
48 | def compute_object_center(sample_xyz):
49 | min_xyz = np.min(sample_xyz, axis=0)
50 | max_xyz = np.max(sample_xyz, axis=0)
51 |     delta_central_xyz = (max_xyz - min_xyz) / 2.0
52 |     central_xyz = delta_central_xyz + min_xyz
53 | return central_xyz
54 |
55 |
56 | def jitter_point_cloud(sample_xyz, Jitter_argument, sigma=0.001, clip=0.05):
57 | if np.random.random() < Jitter_argument:
58 | N, C = sample_xyz.shape
59 | assert(clip > 0)
60 | jittered_data = np.clip(sigma * np.random.randn(N, C), -1*clip, clip)
61 | sample_xyz += jittered_data
62 | return sample_xyz
63 |
64 | def shuffle_data(data):
65 | idx = np.arange(np.size(data, 0))
66 | np.random.shuffle(idx)
67 | return data[idx, ...]
68 |
69 | def ratation(sample_xyz, Rotation_argument):
70 | if np.random.random() < Rotation_argument:
71 | ###
72 | rot = random.uniform(0, 2 * math.pi)
73 | rotation_matrix = [[math.cos(rot), math.sin(rot), 0],
74 | [-math.sin(rot), math.cos(rot), 0],
75 | [0, 0, 1]]
76 | sample_xyz = np.dot(sample_xyz, rotation_matrix)
77 | return sample_xyz
78 |
79 | def ratation_angle(sample_xyz, angle):
80 |     rot = angle / 180.0 * math.pi
81 | rotation_matrix = [[math.cos(rot), math.sin(rot), 0],
82 | [-math.sin(rot), math.cos(rot), 0],
83 | [0, 0, 1]]
84 | sample_xyz = np.dot(sample_xyz, rotation_matrix)
85 | return sample_xyz
86 |
87 | def transfer_xy(sample_xyz, x_d, y_d):
88 | temp_ones = np.ones([np.size(sample_xyz, 0), 1])
89 | sample_xyz = np.concatenate([sample_xyz, temp_ones], axis=-1)
90 |
91 | transfer_matrix = [[1, 0, 0, 0],
92 | [0, 1, 0, 0],
93 |                        [0, 0, 1, 0],
94 | [x_d, y_d, 0, 1]]
95 | sample_xyz = np.dot(sample_xyz, transfer_matrix)
96 | return sample_xyz[:, :3]
97 |
98 | def farthest_point_sample(xyz, npoint):
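    # Greedy farthest point sampling: repeatedly add the point farthest from the
    # current selection (tracking per-point min distance); returns npoint indices.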
99 | N, _ = xyz.shape
100 | centroids = []
101 | distance = np.ones(N) * 1e10
102 | farthest = np.random.randint(0, N)
103 | for i in range(npoint):
104 | centroids.append(farthest)
105 | centroid = xyz[farthest, :]
106 | dist = np.sum((xyz - centroid) ** 2, -1)
107 | mask = dist < distance
108 | distance[mask] = dist[mask]
109 |         farthest = int(np.argmax(distance))
110 | return centroids
111 |
--------------------------------------------------------------------------------
/utils/tf_util.py:
--------------------------------------------------------------------------------
1 | """ Wrapper functions for TensorFlow layers.
2 |
3 | Author: Charles R. Qi
4 | Date: November 2016
5 |
6 | Updated by Yue Wang and Yongbin Sun
7 | """
8 |
9 | import numpy as np
10 | import tensorflow as tf
11 | from itertools import combinations
12 | from scipy.special import comb, perm
13 |
14 |
15 | def _variable_on_cpu(name, shape, initializer, use_fp16=False, trainable=True):
16 | """Helper to create a Variable stored on CPU memory.
17 | Args:
18 | name: name of the variable
19 | shape: list of ints
20 | initializer: initializer for Variable
21 | Returns:
22 | Variable Tensor
23 | """
24 | with tf.device('/cpu:0'):
25 | dtype = tf.float16 if use_fp16 else tf.float32
26 | var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype, trainable=trainable)
27 | return var
28 |
29 | def _variable_with_weight_decay(name, shape, stddev, wd, use_xavier=True):
30 | """Helper to create an initialized Variable with weight decay.
31 |
32 | Note that the Variable is initialized with a truncated normal distribution.
33 | A weight decay is added only if one is specified.
34 |
35 | Args:
36 | name: name of the variable
37 | shape: list of ints
38 | stddev: standard deviation of a truncated Gaussian
39 | wd: add L2Loss weight decay multiplied by this float. If None, weight
40 | decay is not added for this Variable.
41 | use_xavier: bool, whether to use xavier initializer
42 |
43 | Returns:
44 | Variable Tensor
45 | """
46 | if use_xavier:
47 | initializer = tf.contrib.layers.xavier_initializer()
48 | else:
49 | initializer = tf.truncated_normal_initializer(stddev=stddev)
50 | var = _variable_on_cpu(name, shape, initializer)
51 | if wd is not None:
52 | weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
53 | tf.add_to_collection('losses', weight_decay)
54 | return var
55 |
56 |
57 | def conv1d(inputs,
58 | num_output_channels,
59 | kernel_size,
60 | scope,
61 | stride=1,
62 | padding='SAME',
63 | use_xavier=True,
64 | stddev=1e-3,
65 | weight_decay=0.0,
66 | activation_fn=tf.nn.relu,
67 | bn=False,
68 | bn_decay=None,
69 | is_training=None,
70 | is_dist=False):
71 | """ 1D convolution with non-linear operation.
72 |
73 | Args:
74 | inputs: 3-D tensor variable BxLxC
75 | num_output_channels: int
76 | kernel_size: int
77 | scope: string
78 | stride: int
79 | padding: 'SAME' or 'VALID'
80 | use_xavier: bool, use xavier_initializer if true
81 | stddev: float, stddev for truncated_normal init
82 | weight_decay: float
83 | activation_fn: function
84 | bn: bool, whether to use batch norm
85 | bn_decay: float or float tensor variable in [0,1]
86 | is_training: bool Tensor variable
87 |
88 | Returns:
89 | Variable tensor
90 | """
91 | with tf.variable_scope(scope) as sc:
92 | num_in_channels = inputs.get_shape()[-1].value
93 | kernel_shape = [kernel_size,
94 | num_in_channels, num_output_channels]
95 | kernel = _variable_with_weight_decay('weights',
96 | shape=kernel_shape,
97 | use_xavier=use_xavier,
98 | stddev=stddev,
99 | wd=weight_decay)
100 | outputs = tf.nn.conv1d(inputs, kernel,
101 | stride=stride,
102 | padding=padding)
103 | biases = _variable_on_cpu('biases', [num_output_channels],
104 | tf.constant_initializer(0.0))
105 | outputs = tf.nn.bias_add(outputs, biases)
106 |
107 | if bn:
108 | outputs = batch_norm_for_conv1d(outputs, is_training,
109 | bn_decay=bn_decay, scope='bn', is_dist=is_dist)
110 |
111 | if activation_fn is not None:
112 | outputs = activation_fn(outputs)
113 | return outputs
114 |
115 |
116 | def conv2d(inputs,
117 | num_output_channels,
118 | kernel_size,
119 | scope,
120 | stride=[1, 1],
121 | padding='SAME',
122 | use_xavier=True,
123 | stddev=1e-3,
124 | weight_decay=0.0,
125 | activation_fn=tf.nn.relu,
126 | bn=False,
127 | bn_decay=None,
128 | is_training=None,
129 | is_dist=False):
130 | """ 2D convolution with non-linear operation.
131 |
132 | Args:
133 | inputs: 4-D tensor variable BxHxWxC
134 | num_output_channels: int
135 | kernel_size: a list of 2 ints
136 | scope: string
137 | stride: a list of 2 ints
138 | padding: 'SAME' or 'VALID'
139 | use_xavier: bool, use xavier_initializer if true
140 | stddev: float, stddev for truncated_normal init
141 | weight_decay: float
142 | activation_fn: function
143 | bn: bool, whether to use batch norm
144 | bn_decay: float or float tensor variable in [0,1]
145 | is_training: bool Tensor variable
146 |
147 | Returns:
148 | Variable tensor
149 | """
150 | with tf.variable_scope(scope) as sc:
151 | kernel_h, kernel_w = kernel_size
152 | num_in_channels = inputs.get_shape()[-1].value
153 | kernel_shape = [kernel_h, kernel_w,
154 | num_in_channels, num_output_channels]
155 | kernel = _variable_with_weight_decay('weights',
156 | shape=kernel_shape,
157 | use_xavier=use_xavier,
158 | stddev=stddev,
159 | wd=weight_decay)
160 | stride_h, stride_w = stride
161 | outputs = tf.nn.conv2d(inputs, kernel,
162 | [1, stride_h, stride_w, 1],
163 | padding=padding)
164 | biases = _variable_on_cpu('biases', [num_output_channels],
165 | tf.constant_initializer(0.0))
166 | outputs = tf.nn.bias_add(outputs, biases)
167 |
168 | if bn:
169 | outputs = batch_norm_for_conv2d(outputs, is_training,
170 | bn_decay=bn_decay, scope='bn', is_dist=is_dist)
171 |
172 | if activation_fn is not None:
173 | outputs = activation_fn(outputs)
174 | return outputs
175 |
176 |
177 | def conv2d_transpose(inputs,
178 | num_output_channels,
179 | kernel_size,
180 | scope,
181 | stride=[1, 1],
182 | padding='SAME',
183 | use_xavier=True,
184 | stddev=1e-3,
185 | weight_decay=0.0,
186 | activation_fn=tf.nn.relu,
187 | bn=False,
188 | bn_decay=None,
189 | is_training=None,
190 | is_dist=False):
191 | """ 2D convolution transpose with non-linear operation.
192 |
193 | Args:
194 | inputs: 4-D tensor variable BxHxWxC
195 | num_output_channels: int
196 | kernel_size: a list of 2 ints
197 | scope: string
198 | stride: a list of 2 ints
199 | padding: 'SAME' or 'VALID'
200 | use_xavier: bool, use xavier_initializer if true
201 | stddev: float, stddev for truncated_normal init
202 | weight_decay: float
203 | activation_fn: function
204 | bn: bool, whether to use batch norm
205 | bn_decay: float or float tensor variable in [0,1]
206 | is_training: bool Tensor variable
207 |
208 | Returns:
209 | Variable tensor
210 |
211 |   Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) has the same shape as a
212 | """
213 | with tf.variable_scope(scope) as sc:
214 | kernel_h, kernel_w = kernel_size
215 | num_in_channels = inputs.get_shape()[-1].value
216 | kernel_shape = [kernel_h, kernel_w,
217 |                     num_output_channels, num_in_channels]  # output/input channels reversed relative to conv2d
218 | kernel = _variable_with_weight_decay('weights',
219 | shape=kernel_shape,
220 | use_xavier=use_xavier,
221 | stddev=stddev,
222 | wd=weight_decay)
223 | stride_h, stride_w = stride
224 |
225 | # from slim.convolution2d_transpose
226 | def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
227 | dim_size *= stride_size
228 |
229 | if padding == 'VALID' and dim_size is not None:
230 | dim_size += max(kernel_size - stride_size, 0)
231 | return dim_size
232 |
233 |     # calculate output shape
234 | batch_size = inputs.get_shape()[0].value
235 | height = inputs.get_shape()[1].value
236 | width = inputs.get_shape()[2].value
237 | out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
238 | out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
239 | output_shape = [batch_size, out_height, out_width, num_output_channels]
240 |
241 | outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
242 | [1, stride_h, stride_w, 1],
243 | padding=padding)
244 | biases = _variable_on_cpu('biases', [num_output_channels],
245 | tf.constant_initializer(0.0))
246 | outputs = tf.nn.bias_add(outputs, biases)
247 |
248 | if bn:
249 | outputs = batch_norm_for_conv2d(outputs, is_training,
250 | bn_decay=bn_decay, scope='bn', is_dist=is_dist)
251 |
252 | if activation_fn is not None:
253 | outputs = activation_fn(outputs)
254 | return outputs
255 |
256 |
257 |
258 | def conv3d(inputs,
259 | num_output_channels,
260 | kernel_size,
261 | scope,
262 | stride=[1, 1, 1],
263 | padding='SAME',
264 | use_xavier=True,
265 | stddev=1e-3,
266 | weight_decay=0.0,
267 | activation_fn=tf.nn.relu,
268 | bn=False,
269 | bn_decay=None,
270 | is_training=None,
271 | is_dist=False):
272 | """ 3D convolution with non-linear operation.
273 |
274 | Args:
275 | inputs: 5-D tensor variable BxDxHxWxC
276 | num_output_channels: int
277 | kernel_size: a list of 3 ints
278 | scope: string
279 | stride: a list of 3 ints
280 | padding: 'SAME' or 'VALID'
281 | use_xavier: bool, use xavier_initializer if true
282 | stddev: float, stddev for truncated_normal init
283 | weight_decay: float
284 | activation_fn: function
285 | bn: bool, whether to use batch norm
286 | bn_decay: float or float tensor variable in [0,1]
287 | is_training: bool Tensor variable
288 |
289 | Returns:
290 | Variable tensor
291 | """
292 | with tf.variable_scope(scope) as sc:
293 | kernel_d, kernel_h, kernel_w = kernel_size
294 | num_in_channels = inputs.get_shape()[-1].value
295 | kernel_shape = [kernel_d, kernel_h, kernel_w,
296 | num_in_channels, num_output_channels]
297 | kernel = _variable_with_weight_decay('weights',
298 | shape=kernel_shape,
299 | use_xavier=use_xavier,
300 | stddev=stddev,
301 | wd=weight_decay)
302 | stride_d, stride_h, stride_w = stride
303 | outputs = tf.nn.conv3d(inputs, kernel,
304 | [1, stride_d, stride_h, stride_w, 1],
305 | padding=padding)
306 | biases = _variable_on_cpu('biases', [num_output_channels],
307 | tf.constant_initializer(0.0))
308 | outputs = tf.nn.bias_add(outputs, biases)
309 |
310 | if bn:
311 | outputs = batch_norm_for_conv3d(outputs, is_training,
312 | bn_decay=bn_decay, scope='bn', is_dist=is_dist)
313 |
314 | if activation_fn is not None:
315 | outputs = activation_fn(outputs)
316 | return outputs
317 |
318 | def fully_connected(inputs,
319 | num_outputs,
320 | scope,
321 | use_xavier=True,
322 | stddev=1e-3,
323 | weight_decay=0.0,
324 | activation_fn=tf.nn.relu,
325 | bn=False,
326 | bn_decay=None,
327 | is_training=None,
328 | is_dist=False):
329 | """ Fully connected layer with non-linear operation.
330 |
331 | Args:
332 | inputs: 2-D tensor BxN
333 | num_outputs: int
334 |
335 | Returns:
336 | Variable tensor of size B x num_outputs.
337 | """
338 | with tf.variable_scope(scope) as sc:
339 | num_input_units = inputs.get_shape()[-1].value
340 | weights = _variable_with_weight_decay('weights',
341 | shape=[num_input_units, num_outputs],
342 | use_xavier=use_xavier,
343 | stddev=stddev,
344 | wd=weight_decay)
345 | outputs = tf.matmul(inputs, weights)
346 | biases = _variable_on_cpu('biases', [num_outputs],
347 | tf.constant_initializer(0.0))
348 | outputs = tf.nn.bias_add(outputs, biases)
349 |
350 | if bn:
351 | outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn', is_dist=is_dist)
352 |
353 | if activation_fn is not None:
354 | outputs = activation_fn(outputs)
355 | return outputs
356 |
357 |
358 | def max_pool2d(inputs,
359 | kernel_size,
360 | scope,
361 | stride=[2, 2],
362 | padding='VALID'):
363 | """ 2D max pooling.
364 |
365 | Args:
366 | inputs: 4-D tensor BxHxWxC
367 | kernel_size: a list of 2 ints
368 | stride: a list of 2 ints
369 |
370 | Returns:
371 | Variable tensor
372 | """
373 | with tf.variable_scope(scope) as sc:
374 | kernel_h, kernel_w = kernel_size
375 | stride_h, stride_w = stride
376 | outputs = tf.nn.max_pool(inputs,
377 | ksize=[1, kernel_h, kernel_w, 1],
378 | strides=[1, stride_h, stride_w, 1],
379 | padding=padding,
380 | name=sc.name)
381 | return outputs
382 |
383 | def avg_pool2d(inputs,
384 | kernel_size,
385 | scope,
386 | stride=[2, 2],
387 | padding='VALID'):
388 | """ 2D avg pooling.
389 |
390 | Args:
391 | inputs: 4-D tensor BxHxWxC
392 | kernel_size: a list of 2 ints
393 | stride: a list of 2 ints
394 |
395 | Returns:
396 | Variable tensor
397 | """
398 | with tf.variable_scope(scope) as sc:
399 | kernel_h, kernel_w = kernel_size
400 | stride_h, stride_w = stride
401 | outputs = tf.nn.avg_pool(inputs,
402 | ksize=[1, kernel_h, kernel_w, 1],
403 | strides=[1, stride_h, stride_w, 1],
404 | padding=padding,
405 | name=sc.name)
406 | return outputs
407 |
408 |
409 | def max_pool3d(inputs,
410 | kernel_size,
411 | scope,
412 | stride=[2, 2, 2],
413 | padding='VALID'):
414 | """ 3D max pooling.
415 |
416 | Args:
417 | inputs: 5-D tensor BxDxHxWxC
418 | kernel_size: a list of 3 ints
419 | stride: a list of 3 ints
420 |
421 | Returns:
422 | Variable tensor
423 | """
424 | with tf.variable_scope(scope) as sc:
425 | kernel_d, kernel_h, kernel_w = kernel_size
426 | stride_d, stride_h, stride_w = stride
427 | outputs = tf.nn.max_pool3d(inputs,
428 | ksize=[1, kernel_d, kernel_h, kernel_w, 1],
429 | strides=[1, stride_d, stride_h, stride_w, 1],
430 | padding=padding,
431 | name=sc.name)
432 | return outputs
433 |
434 | def avg_pool3d(inputs,
435 | kernel_size,
436 | scope,
437 | stride=[2, 2, 2],
438 | padding='VALID'):
439 | """ 3D avg pooling.
440 |
441 | Args:
442 | inputs: 5-D tensor BxDxHxWxC
443 | kernel_size: a list of 3 ints
444 | stride: a list of 3 ints
445 |
446 | Returns:
447 | Variable tensor
448 | """
449 | with tf.variable_scope(scope) as sc:
450 | kernel_d, kernel_h, kernel_w = kernel_size
451 | stride_d, stride_h, stride_w = stride
452 | outputs = tf.nn.avg_pool3d(inputs,
453 | ksize=[1, kernel_d, kernel_h, kernel_w, 1],
454 | strides=[1, stride_d, stride_h, stride_w, 1],
455 | padding=padding,
456 | name=sc.name)
457 | return outputs
458 |
459 |
460 |
461 |
462 |
463 | def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
464 | """ Batch normalization on convolutional maps and beyond...
465 | Ref.: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow
466 |
467 | Args:
468 |       inputs:        Tensor, k-D input of shape [..., C], e.g. BC, BHWC or BDHWC
469 |       is_training:   boolean tf.Variable, true indicates training phase
470 |       scope:         string, variable scope
471 |       moments_dims:  a list of ints, indicating dimensions for moments calculation
472 |       bn_decay:      float or float tensor variable, controlling moving average weight
473 | Return:
474 | normed: batch-normalized maps
475 | """
476 | with tf.variable_scope(scope) as sc:
477 | num_channels = inputs.get_shape()[-1].value
478 | beta = tf.Variable(tf.constant(0.0, shape=[num_channels]),
479 | name='beta', trainable=True)
480 | gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
481 | name='gamma', trainable=True)
482 | batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
483 | decay = bn_decay if bn_decay is not None else 0.9
484 | ema = tf.train.ExponentialMovingAverage(decay=decay)
485 | # Operator that maintains moving averages of variables.
486 | ema_apply_op = tf.cond(is_training,
487 | lambda: ema.apply([batch_mean, batch_var]),
488 | lambda: tf.no_op())
489 |
490 | # Update moving average and return current batch's avg and var.
491 | def mean_var_with_update():
492 | with tf.control_dependencies([ema_apply_op]):
493 | return tf.identity(batch_mean), tf.identity(batch_var)
494 |
495 | # ema.average returns the Variable holding the average of var.
496 | mean, var = tf.cond(is_training,
497 | mean_var_with_update,
498 | lambda: (ema.average(batch_mean), ema.average(batch_var)))
499 | normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
500 | return normed
501 |
502 |
503 | def batch_norm_dist_template(inputs, is_training, scope, moments_dims, bn_decay):
504 | """ The batch normalization for distributed training.
505 | Args:
506 |       inputs:        Tensor, k-D input of shape [..., C], e.g. BC, BHWC or BDHWC
507 |       is_training:   boolean tf.Variable, true indicates training phase
508 |       scope:         string, variable scope
509 |       moments_dims:  a list of ints, indicating dimensions for moments calculation
510 |       bn_decay:      float or float tensor variable, controlling moving average weight
511 | Return:
512 | normed: batch-normalized maps
513 | """
514 | with tf.variable_scope(scope) as sc:
515 | num_channels = inputs.get_shape()[-1].value
516 | beta = _variable_on_cpu('beta', [num_channels], initializer=tf.zeros_initializer())
517 | gamma = _variable_on_cpu('gamma', [num_channels], initializer=tf.ones_initializer())
518 |
519 | pop_mean = _variable_on_cpu('pop_mean', [num_channels], initializer=tf.zeros_initializer(), trainable=False)
520 | pop_var = _variable_on_cpu('pop_var', [num_channels], initializer=tf.ones_initializer(), trainable=False)
521 |
522 | def train_bn_op():
523 | batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
524 | decay = bn_decay if bn_decay is not None else 0.9
525 | train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
526 | train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
527 | with tf.control_dependencies([train_mean, train_var]):
528 | return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, gamma, 1e-3)
529 |
530 | def test_bn_op():
531 | return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, gamma, 1e-3)
532 |
533 | normed = tf.cond(is_training,
534 | train_bn_op,
535 | test_bn_op)
536 | return normed
537 |
538 |
539 |
540 | def batch_norm_for_fc(inputs, is_training, bn_decay, scope, is_dist=False):
541 | """ Batch normalization on FC data.
542 |
543 | Args:
544 | inputs: Tensor, 2D BxC input
545 |       is_training: boolean tf.Variable, true indicates training phase
546 |       bn_decay:    float or float tensor variable, controlling moving average weight
547 |       scope:       string, variable scope
548 |       is_dist:     bool, true selects the distributed batch-norm scheme
549 | Return:
550 | normed: batch-normalized maps
551 | """
552 | if is_dist:
553 | return batch_norm_dist_template(inputs, is_training, scope, [0,], bn_decay)
554 | else:
555 | return batch_norm_template(inputs, is_training, scope, [0,], bn_decay)
556 |
557 |
558 | def batch_norm_for_conv1d(inputs, is_training, bn_decay, scope, is_dist=False):
559 | """ Batch normalization on 1D convolutional maps.
560 |
561 | Args:
562 | inputs: Tensor, 3D BLC input maps
563 |       is_training: boolean tf.Variable, true indicates training phase
564 |       bn_decay:    float or float tensor variable, controlling moving average weight
565 |       scope:       string, variable scope
566 |       is_dist:     bool, true selects the distributed batch-norm scheme
567 | Return:
568 | normed: batch-normalized maps
569 | """
570 | if is_dist:
571 | return batch_norm_dist_template(inputs, is_training, scope, [0,1], bn_decay)
572 | else:
573 | return batch_norm_template(inputs, is_training, scope, [0,1], bn_decay)
574 |
575 |
576 |
577 |
578 | def batch_norm_for_conv2d(inputs, is_training, bn_decay, scope, is_dist=False):
579 | """ Batch normalization on 2D convolutional maps.
580 |
581 | Args:
582 | inputs: Tensor, 4D BHWC input maps
583 |       is_training: boolean tf.Variable, true indicates training phase
584 |       bn_decay:    float or float tensor variable, controlling moving average weight
585 |       scope:       string, variable scope
586 |       is_dist:     bool, true selects the distributed batch-norm scheme
587 | Return:
588 | normed: batch-normalized maps
589 | """
590 | if is_dist:
591 | return batch_norm_dist_template(inputs, is_training, scope, [0,1,2], bn_decay)
592 | else:
593 | return batch_norm_template(inputs, is_training, scope, [0,1,2], bn_decay)
594 |
595 |
596 |
597 | def batch_norm_for_conv3d(inputs, is_training, bn_decay, scope, is_dist=False):
598 | """ Batch normalization on 3D convolutional maps.
599 |
600 | Args:
601 | inputs: Tensor, 5D BDHWC input maps
602 |       is_training: boolean tf.Variable, true indicates training phase
603 |       bn_decay:    float or float tensor variable, controlling moving average weight
604 |       scope:       string, variable scope
605 |       is_dist:     bool, true selects the distributed batch-norm scheme
606 | Return:
607 | normed: batch-normalized maps
608 | """
609 | if is_dist:
610 | return batch_norm_dist_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
611 | else:
612 | return batch_norm_template(inputs, is_training, scope, [0,1,2,3], bn_decay)
613 |
614 |
615 | def dropout(inputs,
616 | is_training,
617 | scope,
618 | keep_prob=0.5,
619 | noise_shape=None):
620 | """ Dropout layer.
621 |
622 | Args:
623 | inputs: tensor
624 | is_training: boolean tf.Variable
625 | scope: string
626 | keep_prob: float in [0,1]
627 | noise_shape: list of ints
628 |
629 | Returns:
630 | tensor variable
631 | """
632 | with tf.variable_scope(scope) as sc:
633 | outputs = tf.cond(is_training,
634 | lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
635 | lambda: inputs)
636 | return outputs
637 |
638 |
639 | def pairwise_distance(point_cloud):
640 |   """Compute the pairwise squared Euclidean distance matrix of a point cloud.
641 |
642 | Args:
643 | point_cloud: tensor (batch_size, num_points, num_dims)
644 |
645 | Returns:
646 |     pairwise squared distance: (batch_size, num_points, num_points)
647 | """
648 | og_batch_size = point_cloud.get_shape().as_list()[0]
649 | point_cloud = tf.squeeze(point_cloud)
650 | if og_batch_size == 1:
651 | point_cloud = tf.expand_dims(point_cloud, 0)
652 |
653 | point_cloud_transpose = tf.transpose(point_cloud, perm=[0, 2, 1])
654 | point_cloud_inner = tf.matmul(point_cloud, point_cloud_transpose)
655 | point_cloud_inner = -2*point_cloud_inner
656 | point_cloud_square = tf.reduce_sum(tf.square(point_cloud), axis=-1, keep_dims=True)
657 |   point_cloud_square_transpose = tf.transpose(point_cloud_square, perm=[0, 2, 1])
658 |   return point_cloud_square + point_cloud_inner + point_cloud_square_transpose
659 |
660 | def knn(adj_matrix, k=5):
661 | """Get KNN based on the pairwise distance.
662 | Args:
663 |     adj_matrix: pairwise distance matrix, (batch_size, num_points, num_points)
664 | k: int
665 |
666 | Returns:
667 | nearest neighbors: (batch_size, num_points, k)
668 | """
669 | neg_adj = -adj_matrix
670 |   _, nn_idx = tf.nn.top_k(neg_adj, k=k+1)  # k+1 because the nearest "neighbour" is the point itself, dropped below
671 | return nn_idx[:, :, 1:]
672 |
673 | def get_edge_feature(point_cloud, nn_idx, k=20):
674 | """Construct edge feature for each point
675 | Args:
676 | point_cloud: (batch_size, num_points, 1, num_dims)
677 | nn_idx: (batch_size, num_points, k)
678 | k: int
679 |
680 | Returns:
681 | edge features: (batch_size, num_points, k, num_dims)
682 | """
683 | og_batch_size = point_cloud.get_shape().as_list()[0]
684 | point_cloud = tf.squeeze(point_cloud)
685 | if og_batch_size == 1:
686 | point_cloud = tf.expand_dims(point_cloud, 0)
687 |
688 | point_cloud_central = point_cloud
689 |
690 | point_cloud_shape = point_cloud.get_shape()
691 | batch_size = point_cloud_shape[0].value
692 | num_points = point_cloud_shape[1].value
693 | num_dims = point_cloud_shape[2].value
694 |
695 | idx_ = tf.range(batch_size) * num_points
696 | idx_ = tf.reshape(idx_, [batch_size, 1, 1])
697 |
698 | point_cloud_flat = tf.reshape(point_cloud, [-1, num_dims])
699 | point_cloud_neighbors = tf.gather(point_cloud_flat, nn_idx+idx_)
700 | point_cloud_central = tf.expand_dims(point_cloud_central, axis=-2)
701 |
702 | point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])
703 |
704 | edge_feature = tf.concat([point_cloud_central, point_cloud_neighbors-point_cloud_central], axis=-1)
705 | return edge_feature
706 |
707 |
708 |
709 | def gather_tensor_along_2nd_axis(batch_input, batch_indices):
710 |     """Gather along the 2nd (point) axis: batch_input [batch, npoint, dim] and
711 |     batch_indices [batch, nindex] -> output [batch, nindex, dim]."""
712 | [bat_size, point_num, d1] = batch_input.get_shape()
713 | [_, index_num] = batch_indices.get_shape()
714 | bat_size_range = tf.range(bat_size)
715 | bat_size_range_flat = tf.reshape(bat_size_range, [-1, 1])
716 | bat_size_range_flat_repeat = tf.tile(bat_size_range_flat, [1, int(index_num)])
717 | bat_size_range_flat_repeat = tf.reshape(bat_size_range_flat_repeat, [-1])
718 | indices_2d_flat = tf.cast(tf.reshape(batch_indices, [-1]), dtype=tf.int32)
719 | indices_2d_flat_repeat = bat_size_range_flat_repeat*int(point_num) + indices_2d_flat
720 |
721 | batch_input = tf.reshape(batch_input, [-1, int(d1)])
722 | batch_input_new = tf.gather(batch_input, indices_2d_flat_repeat)
723 |
724 | batch_input_new = tf.reshape(batch_input_new, [bat_size, int(index_num), int(d1)])
725 | return batch_input_new
726 |
727 |
728 | def get_relation_features(point_features, nn_idx, k):
729 |     """Relation features: each point's own feature concatenated with pairs of ring-adjacent, centred neighbour features -> (batch, num_points, k, 3*num_dims)."""
730 | og_batch_size = point_features.get_shape().as_list()[0]
731 | point_features = tf.squeeze(point_features)
732 | if og_batch_size == 1:
733 | point_features = tf.expand_dims(point_features, 0)
734 |
735 | point_cloud_central = point_features
736 |
737 | point_cloud_shape = point_features.get_shape()
738 | batch_size = point_cloud_shape[0].value
739 | num_points = point_cloud_shape[1].value
740 | num_dims = point_cloud_shape[2].value
741 |
742 | idx_ = tf.range(batch_size) * num_points
743 | idx_ = tf.reshape(idx_, [batch_size, 1, 1])
744 |
745 | point_cloud_flat = tf.reshape(point_features, [-1, num_dims])
746 | point_cloud_neighbors = tf.gather(point_cloud_flat, nn_idx+idx_)
747 | point_cloud_central = tf.expand_dims(point_cloud_central, axis=-2)
748 |
749 | point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])
750 | point_cloud_neighbors = point_cloud_neighbors - point_cloud_central
751 |
752 | #############
753 | # rank_state_sums = int(comb(k, 2))
754 | # list_rank_state = list(combinations(list(range(k)), 2))
755 | #############
756 |
757 | num_vertex_pairs = k
758 | vertex_pairs_list = [(i, i + 1) for i in range(k - 1)]
759 | vertex_pairs_list.append((k - 1, 0))
760 | for i in range(num_vertex_pairs):
761 |
762 | temp_concat = tf.concat([point_cloud_neighbors[:, :, vertex_pairs_list[i][0], :],
763 | point_cloud_neighbors[:, :, vertex_pairs_list[i][1], :]], axis=-1)
764 |
765 | temp_concat = tf.expand_dims(temp_concat, -2)
766 | if i == 0:
767 | relation_features = temp_concat
768 | else:
769 | relation_features = tf.concat([relation_features, temp_concat], axis=-2)
770 |
771 | point_features = tf.expand_dims(point_features, axis=-2)
772 | point_features = tf.tile(point_features, [1, 1, num_vertex_pairs, 1])
773 | relation_features = tf.concat([point_features, relation_features], axis=-1)
774 | return relation_features
775 |
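776 |
777 | if __name__ == '__main__':
778 |     # Minimal smoke-test sketch (an addition, not part of the original module):
779 |     # it wires pairwise_distance -> knn -> get_edge_feature into conv2d the way
780 |     # a DGCNN-style edge-conv block would, assuming a TF 1.x runtime. All
781 |     # shapes and the scope name are illustrative placeholders.
782 |     batch_size, num_points, num_dims, k = 2, 128, 3, 5
783 |     pts = tf.placeholder(tf.float32, shape=(batch_size, num_points, num_dims))
784 |     is_training = tf.placeholder(tf.bool, shape=())
785 |     adj = pairwise_distance(pts)                    # (B, N, N) squared distances
786 |     nn_idx = knn(adj, k=k)                          # (B, N, k) neighbour indices
787 |     edge_feat = get_edge_feature(pts, nn_idx, k=k)  # (B, N, k, 2*num_dims)
788 |     net = conv2d(edge_feat, 64, [1, 1], scope='example_edge_conv',
789 |                  bn=True, bn_decay=0.9, is_training=is_training)
790 |     with tf.Session() as sess:
791 |         sess.run(tf.global_variables_initializer())
792 |         feed = {pts: np.random.rand(batch_size, num_points, num_dims).astype(np.float32),
793 |                 is_training: True}
794 |         print(sess.run(net, feed_dict=feed).shape)  # expected: (2, 128, 5, 64)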
--------------------------------------------------------------------------------
/voxel_region_grow/VoxelRegionGrow.pyx:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from collections import deque
3 | import numpy as np
4 | cimport numpy as np
5 | cimport cython
6 |
7 | cdef class Build:
8 |
9 | cdef int[:,:,:] voxels
10 | cdef int[:,:,:] outputMask
11 | cdef int num_voxel_x
12 | cdef int num_voxel_y
13 | cdef int num_voxel_z
14 | cdef queue
15 |
16 | def __cinit__(self, int[:,:,:] voxels):
17 | self.voxels = voxels
18 | self.outputMask = np.zeros_like(self.voxels)
19 | self.queue = deque()
20 | self.num_voxel_x = voxels.shape[0]
21 | self.num_voxel_y = voxels.shape[1]
22 | self.num_voxel_z = voxels.shape[2]
23 |
24 | def Run(self, seed):
25 | cdef int newItem[3]
26 | cdef int neighbors[26][3]
27 |         # Grow only from an occupied seed voxel; an empty seed should not
28 |         # enqueue itself and leak a region in from its occupied neighbours.
29 |         if self.voxels[seed[0], seed[1], seed[2]] == 1:
30 |             self.outputMask[seed[0], seed[1], seed[2]] = 1
31 |             self.queue.append((seed[0], seed[1], seed[2]))
32 | while len(self.queue) != 0:
33 | newItem = self.queue.pop()
34 | neighbors = [[newItem[0]-1, newItem[1]-1, newItem[2]-1], [newItem[0]-1, newItem[1]-1, newItem[2]], [newItem[0]-1, newItem[1]-1, newItem[2]+1],
35 | [newItem[0]-1, newItem[1], newItem[2]-1], [newItem[0]-1, newItem[1], newItem[2]], [newItem[0]-1, newItem[1], newItem[2]+1],
36 | [newItem[0]-1, newItem[1]+1, newItem[2]-1], [newItem[0]-1, newItem[1]+1, newItem[2]], [newItem[0]-1, newItem[1]+1, newItem[2]+1],
37 | [newItem[0], newItem[1]-1, newItem[2]-1], [newItem[0], newItem[1]-1, newItem[2]], [newItem[0], newItem[1]-1, newItem[2]+1],
38 | [newItem[0], newItem[1], newItem[2]-1], [newItem[0], newItem[1], newItem[2]+1], [newItem[0], newItem[1]+1, newItem[2]-1],
39 | [newItem[0], newItem[1]+1, newItem[2]], [newItem[0], newItem[1]+1, newItem[2]+1], [newItem[0]+1, newItem[1]-1, newItem[2]-1],
40 | [newItem[0]+1, newItem[1]-1, newItem[2]], [newItem[0]+1, newItem[1]-1, newItem[2]+1], [newItem[0]+1, newItem[1], newItem[2]-1],
41 | [newItem[0]+1, newItem[1], newItem[2]], [newItem[0]+1, newItem[1], newItem[2]+1], [newItem[0]+1, newItem[1]+1, newItem[2]-1],
42 | [newItem[0]+1, newItem[1]+1, newItem[2]], [newItem[0]+1, newItem[1]+1, newItem[2]+1]
43 | ]
44 | for neighbor in neighbors:
45 | self.checkNeighbour(neighbor[0], neighbor[1], neighbor[2])
46 |
47 | return self.outputMask
48 |
49 | cdef checkNeighbour(self, int x, int y, int z):
50 | cdef int voxelValue
51 |
52 | if (x < self.num_voxel_x and y < self.num_voxel_y and z < self.num_voxel_z and x > -1 and y > -1 and z > -1):
53 |
54 | voxelValue = self.voxels[x, y, z]
55 | if self.isVoxelAcceptable(voxelValue) and self.outputMask[x,y,z] == 0:
56 | self.outputMask[x,y,z] = 1
57 | self.queue.append((x, y, z))
58 |
59 | cdef isVoxelAcceptable(self, int voxelValue):
60 | if voxelValue == 1:
61 | return True
62 | return False
63 |
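64 |
65 | # Usage sketch (an assumed workflow, not part of the original module):
66 | # compile the extension first, e.g.
67 | #   python VoxelRegionGrow_Setup.py build_ext --inplace
68 | # then, from Python:
69 | #   import numpy as np
70 | #   import VoxelRegionGrow
71 | #   voxels = np.zeros((8, 8, 8), dtype=np.int32)  # dtype must match the int[:,:,:] memoryview
72 | #   voxels[2:5, 2:5, 2:5] = 1                     # a single occupied blob
73 | #   grower = VoxelRegionGrow.Build(voxels)
74 | #   mask = np.asarray(grower.Run((3, 3, 3)))      # 26-connected region grown from the seed
75 | #   assert mask.sum() == voxels.sum()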
--------------------------------------------------------------------------------
/voxel_region_grow/VoxelRegionGrow_Setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 |
7 | from distutils.core import setup
8 | from Cython.Build import cythonize
9 | import numpy
10 |
11 | setup(
12 | ext_modules=cythonize("VoxelRegionGrow.pyx"),
13 | include_dirs=[numpy.get_include()]
14 | )
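15 |
16 | # Typical invocation (assumed; compiles the extension in place next to the sources):
17 | #   python VoxelRegionGrow_Setup.py build_ext --inplace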
--------------------------------------------------------------------------------
/voxel_traversal/VoxelTraversalAlgorithm.py:
--------------------------------------------------------------------------------
1 | """
2 | Created on Mon July 11 18:50:39 2020
3 |
4 | @author: Haifeng Luo
5 | """
6 | import numpy as np
7 | import matplotlib.pyplot as plt
8 | from mpl_toolkits.mplot3d import Axes3D
9 |
10 | def show_voxel(voxels):
11 | fig = plt.figure()
12 |     ax = fig.add_subplot(111, projection='3d')  # fig.gca(projection=...) is removed in newer matplotlib
13 | ax.voxels(voxels, facecolors='red', edgecolor='k')
14 | plt.show()
15 |
16 | ############################################################
17 | def voxel_traversal(start_point, directions, min_xyz, num_voxel_xyz, voxel_size):
18 | '''
19 | voxel traversal for tree center detection
20 | '''
21 | current_voxel_x = int(np.floor((start_point[0] - min_xyz[0]) / voxel_size))
22 | current_voxel_y = int(np.floor((start_point[1] - min_xyz[1]) / voxel_size))
23 | current_voxel_z = int(np.floor((start_point[2] - min_xyz[2]) / voxel_size))
24 |
25 | stepX = 1 if directions[0] >= 0 else -1
26 | stepY = 1 if directions[1] >= 0 else -1
27 | stepZ = 1 if directions[2] >= 0 else -1
28 |     # nearest boundary per axis: the upper face when stepping +1, the lower face when stepping -1
29 |     next_voxel_boundary_x = (current_voxel_x + max(stepX, 0)) * voxel_size + min_xyz[0]
30 |     next_voxel_boundary_y = (current_voxel_y + max(stepY, 0)) * voxel_size + min_xyz[1]
31 |     next_voxel_boundary_z = (current_voxel_z + max(stepZ, 0)) * voxel_size + min_xyz[2]
32 |
33 | tMaxX = (next_voxel_boundary_x - start_point[0]) / directions[0] if directions[0] != 0 else float('inf')
34 | tMaxY = (next_voxel_boundary_y - start_point[1]) / directions[1] if directions[1] != 0 else float('inf')
35 | tMaxZ = (next_voxel_boundary_z - start_point[2]) / directions[2] if directions[2] != 0 else float('inf')
36 |
37 | tDeltaX = voxel_size / directions[0] * stepX if directions[0] != 0 else float('inf')
38 | tDeltaY = voxel_size / directions[1] * stepY if directions[1] != 0 else float('inf')
39 | tDeltaZ = voxel_size / directions[2] * stepZ if directions[2] != 0 else float('inf')
40 |
41 | visited_voxels = []
42 | visited_voxels.append([current_voxel_x, current_voxel_y, current_voxel_z])
43 |     while (0 <= current_voxel_x < num_voxel_xyz[0] and
44 |            0 <= current_voxel_y < num_voxel_xyz[1] and
45 |            0 <= current_voxel_z < num_voxel_xyz[2]):
46 |
47 | if tMaxX < tMaxY:
48 | if tMaxX < tMaxZ:
49 | current_voxel_x += stepX
50 | tMaxX += tDeltaX
51 | else:
52 | current_voxel_z += stepZ
53 | tMaxZ += tDeltaZ
54 | else:
55 | if tMaxY < tMaxZ:
56 | current_voxel_y += stepY
57 | tMaxY += tDeltaY
58 | else:
59 | current_voxel_z += stepZ
60 | tMaxZ += tDeltaZ
61 |         if (0 <= current_voxel_x < num_voxel_xyz[0] and
62 |                 0 <= current_voxel_y < num_voxel_xyz[1] and
63 |                 0 <= current_voxel_z < num_voxel_xyz[2]):
64 | visited_voxels.append([current_voxel_x, current_voxel_y, current_voxel_z])
65 |
66 | return visited_voxels
67 |
68 |
69 | def show_direction_aggregation(points, directions, voxel_size, num_voxel_xyz, min_xyz):
70 |     num_points = np.size(points, 0)
71 |     # aggregate the voxels traversed by each point's direction ray
72 |     output_voxels = np.zeros((int(num_voxel_xyz[0]), int(num_voxel_xyz[1]), int(num_voxel_xyz[2])), dtype=int)
73 |     for i in range(num_points):
74 | visited_voxels = voxel_traversal(points[i, :], directions[i, :], min_xyz, num_voxel_xyz, voxel_size)
75 |
76 | for j in range(len(visited_voxels)):
77 | output_voxels[int(visited_voxels[j][0]), int(visited_voxels[j][1]), int(visited_voxels[j][2])] = 1
78 |
79 |         if i == 5:  # visualize after aggregating the directions of the first six points
80 | output_voxels_v2 = np.array(output_voxels)
81 | output_voxels_v2 = output_voxels_v2.astype(bool)
82 | show_voxel(output_voxels_v2)
83 |
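84 |
85 | if __name__ == '__main__':
86 |     # Minimal self-check sketch (an addition, not part of the original module):
87 |     # trace one ray through a small grid; the start point, direction and grid
88 |     # size below are illustrative placeholders.
89 |     min_xyz = np.array([0.0, 0.0, 0.0])
90 |     num_voxel_xyz = np.array([10, 10, 10])
91 |     voxel_size = 1.0
92 |     start = np.array([0.5, 0.5, 0.5])
93 |     direction = np.array([1.0, 0.5, 0.25])
94 |     visited = voxel_traversal(start, direction, min_xyz, num_voxel_xyz, voxel_size)
95 |     print(visited[0], visited[-1], len(visited))  # begins at [0, 0, 0]
96 |     # consecutive voxels must differ by exactly one step along exactly one axis
97 |     for a, b in zip(visited[:-1], visited[1:]):
98 |         assert sum(abs(u - v) for u, v in zip(a, b)) == 1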
--------------------------------------------------------------------------------