├── GLtree ├── interval_tree.py ├── octree.py └── octree_vis_only.py ├── LICENSE ├── README.md ├── asset └── scene0708_00.gif ├── model ├── model2d.py └── model3d.py ├── utils ├── ply_utils.py └── vis_utils.py ├── vis_sequence.py └── vis_sequence_GLtree.py /GLtree/interval_tree.py: -------------------------------------------------------------------------------- 1 | """ 2 | A classic (not left-leaning) Red-Black Tree implementation, supporting addition and deletion. 3 | """ 4 | import time 5 | from queue import Queue 6 | import numpy as np 7 | import math 8 | # The possible Node colors 9 | BLACK = 'BLACK' 10 | RED = 'RED' 11 | NIL = 'NIL' 12 | 13 | 14 | class Node: 15 | def __init__(self, mark_value,min_value,max_value, color, parent, left=None, right=None,isEdge=False): 16 | self.min_value = min_value 17 | self.max_value = max_value 18 | 19 | self.small_node=None 20 | self.big_node=None 21 | 22 | self.mark_value = mark_value 23 | self.color = color 24 | self.parent = parent 25 | self.left = left 26 | self.right = right 27 | self.isEdge=isEdge 28 | self.set_list=[set(),None,None] 29 | 30 | def __repr__(self): 31 | return '{color} {val} Node'.format(color=self.color, val=self.value) 32 | 33 | def __iter__(self): 34 | if self.left.color != NIL: 35 | yield from self.left.__iter__() 36 | 37 | yield self.mark_value 38 | 39 | if self.right.color != NIL: 40 | yield from self.right.__iter__() 41 | 42 | def __eq__(self, other): 43 | if self.color == NIL and self.color == other.color: 44 | return True 45 | 46 | if self.parent is None or other.parent is None: 47 | parents_are_same = self.parent is None and other.parent is None 48 | else: 49 | parents_are_same = self.parent.mark_value == other.parent.mark_value and self.parent.color == other.parent.color 50 | return self.mark_value == other.mark_value and self.color == other.color and parents_are_same 51 | 52 | def has_children(self) -> bool: 53 | """ Returns a boolean indicating if the node has children """ 54 | return 
bool(self.get_children_count()) 55 | 56 | def get_children_count(self) -> int: 57 | """ Returns the number of NOT NIL children the node has """ 58 | if self.color == NIL: 59 | return 0 60 | return sum([int(self.left.color != NIL), int(self.right.color != NIL)]) 61 | 62 | 63 | class RedBlackTree: 64 | # every node has null nodes as children initially, create one such object for easy management 65 | NIL_LEAF = Node(mark_value=None,min_value=None,max_value=None, color=NIL, parent=None) 66 | 67 | def __init__(self,block_threshold): 68 | self.count = 0 69 | self.root = None 70 | self.block_num=10 71 | self.max_distance=block_threshold 72 | self.ROTATIONS = { 73 | # Used for deletion and uses the sibling's relationship with his parent as a guide to the rotation 74 | 'L': self._right_rotation, 75 | 'R': self._left_rotation 76 | } 77 | 78 | 79 | def bulid_connection_left_to_right(self,left_node,right_node): 80 | left_node.big_node=right_node 81 | right_node.small_node=left_node 82 | left_node.set_list[2]=right_node.set_list[0] 83 | right_node.set_list[1]=left_node.set_list[0] 84 | 85 | 86 | 87 | def add(self,value): 88 | 89 | node,find_node=self.insert_node(value) 90 | if find_node and not node.isEdge: 91 | return node 92 | node.isEdge=False 93 | count_iter=0 94 | 95 | right_temp_node,find_node=self.insert_node(value+self.max_distance*(count_iter+1)) 96 | self.bulid_connection_left_to_right(node,right_temp_node) 97 | count_iter+=1 98 | 99 | while count_iter bool: 170 | """ Returns a boolean indicating if the given value is present in the tree """ 171 | return bool(self.find_node(value)) 172 | 173 | def ceil(self, value) -> int or None: 174 | """ 175 | Given a value, return the closest value that is equal or bigger than it, 176 | returning None when no such exists 177 | """ 178 | if self.root is None: return None 179 | last_found_val = None if self.root.value < value else self.root.value 180 | 181 | def find_ceil(node): 182 | nonlocal last_found_val 183 | if node == 
self.NIL_LEAF: 184 | return None 185 | if node.value == value: 186 | last_found_val = node.value 187 | return node.value 188 | elif node.value < value: 189 | # go right 190 | return find_ceil(node.right) 191 | else: 192 | # this node is bigger, save its value and go left 193 | last_found_val = node.value 194 | 195 | return find_ceil(node.left) 196 | find_ceil(self.root) 197 | return last_found_val 198 | 199 | def floor(self, value) -> int or None: 200 | """ 201 | Given a value, return the closest value that is equal or less than it, 202 | returning None when no such exists 203 | """ 204 | if self.root is None: return None 205 | last_found_val = None if self.root.value > value else self.root.value 206 | 207 | def find_floor(node): 208 | nonlocal last_found_val 209 | if node == self.NIL_LEAF: 210 | return None 211 | if node.value == value: 212 | last_found_val = node.value 213 | return node.value 214 | elif node.value < value: 215 | # this node is smaller, save its value and go right, trying to find a cloer one 216 | last_found_val = node.value 217 | 218 | return find_floor(node.right) 219 | else: 220 | return find_floor(node.left) 221 | 222 | find_floor(self.root) 223 | return last_found_val 224 | 225 | def _remove(self, node): 226 | """ 227 | Receives a node with 0 or 1 children (typically some sort of successor) 228 | and removes it according to its color/children 229 | :param node: Node with 0 or 1 children 230 | """ 231 | left_child = node.left 232 | right_child = node.right 233 | not_nil_child = left_child if left_child != self.NIL_LEAF else right_child 234 | if node == self.root: 235 | if not_nil_child != self.NIL_LEAF: 236 | # if we're removing the root and it has one valid child, simply make that child the root 237 | self.root = not_nil_child 238 | self.root.parent = None 239 | self.root.color = BLACK 240 | else: 241 | self.root = None 242 | elif node.color == RED: 243 | if not node.has_children(): 244 | # Red node with no children, the simplest remove 245 | 
self._remove_leaf(node) 246 | else: 247 | """ 248 | Since the node is red he cannot have a child. 249 | If he had a child, it'd need to be black, but that would mean that 250 | the black height would be bigger on the one side and that would make our tree invalid 251 | """ 252 | raise Exception('Unexpected behavior') 253 | else: # node is black! 254 | if right_child.has_children() or left_child.has_children(): # sanity check 255 | raise Exception('The red child of a black node with 0 or 1 children' 256 | ' cannot have children, otherwise the black height of the tree becomes invalid! ') 257 | if not_nil_child.color == RED: 258 | """ 259 | Swap the values with the red child and remove it (basically un-link it) 260 | Since we're a node with one child only, we can be sure that there are no nodes below the red child. 261 | """ 262 | node.value = not_nil_child.value 263 | node.left = not_nil_child.left 264 | node.right = not_nil_child.right 265 | else: # BLACK child 266 | # 6 cases :o 267 | self._remove_black_node(node) 268 | 269 | def _remove_leaf(self, leaf): 270 | """ Simply removes a leaf node by making it's parent point to a NIL LEAF""" 271 | if leaf.mark_value >= leaf.parent.mark_value: 272 | # in those weird cases where they're equal due to the successor swap 273 | leaf.parent.right = self.NIL_LEAF 274 | else: 275 | leaf.parent.left = self.NIL_LEAF 276 | 277 | def _remove_black_node(self, node): 278 | """ 279 | Loop through each case recursively until we reach a terminating case. 280 | What we're left with is a leaf node which is ready to be deleted without consequences 281 | """ 282 | self.__case_1(node) 283 | self._remove_leaf(node) 284 | 285 | def __case_1(self, node): 286 | """ 287 | Case 1 is when there's a double black node on the root 288 | Because we're at the root, we can simply remove it 289 | and reduce the black height of the whole tree. 
290 | 291 | __|10B|__ __10B__ 292 | / \ ==> / \ 293 | 9B 20B 9B 20B 294 | """ 295 | if self.root == node: 296 | node.color = BLACK 297 | return 298 | self.__case_2(node) 299 | 300 | def __case_2(self, node): 301 | """ 302 | Case 2 applies when 303 | the parent is BLACK 304 | the sibling is RED 305 | the sibling's children are BLACK or NIL 306 | It takes the sibling and rotates it 307 | 308 | 40B 60B 309 | / \ --CASE 2 ROTATE--> / \ 310 | |20B| 60R LEFT ROTATE 40R 80B 311 | DBL BLACK IS 20----^ / \ SIBLING 60R / \ 312 | 50B 80B |20B| 50B 313 | (if the sibling's direction was left of it's parent, we would RIGHT ROTATE it) 314 | Now the original node's parent is RED 315 | and we can apply case 4 or case 6 316 | """ 317 | parent = node.parent 318 | sibling, direction = self._get_sibling(node) 319 | if sibling.color == RED and parent.color == BLACK and sibling.left.color != RED and sibling.right.color != RED: 320 | self.ROTATIONS[direction](node=None, parent=sibling, grandfather=parent) 321 | parent.color = RED 322 | sibling.color = BLACK 323 | return self.__case_1(node) 324 | self.__case_3(node) 325 | 326 | def __case_3(self, node): 327 | """ 328 | Case 3 deletion is when: 329 | the parent is BLACK 330 | the sibling is BLACK 331 | the sibling's children are BLACK 332 | Then, we make the sibling red and 333 | pass the double black node upwards 334 | 335 | Parent is black 336 | ___50B___ Sibling is black ___50B___ 337 | / \ Sibling's children are black / \ 338 | 30B 80B CASE 3 30B |80B| Continue with other cases 339 | / \ / \ ==> / \ / \ 340 | 20B 35R 70B |90B|<---REMOVE 20B 35R 70R X 341 | / \ / \ 342 | 34B 37B 34B 37B 343 | """ 344 | parent = node.parent 345 | sibling, _ = self._get_sibling(node) 346 | if (sibling.color == BLACK and parent.color == BLACK 347 | and sibling.left.color != RED and sibling.right.color != RED): 348 | # color the sibling red and forward the double black node upwards 349 | # (call the cases again for the parent) 350 | sibling.color = RED 351 | 
return self.__case_1(parent) # start again 352 | 353 | self.__case_4(node) 354 | 355 | def __case_4(self, node): 356 | """ 357 | If the parent is red and the sibling is black with no red children, 358 | simply swap their colors 359 | DB-Double Black 360 | __10R__ __10B__ The black height of the left subtree has been incremented 361 | / \ / \ And the one below stays the same 362 | DB 15B ===> X 15R No consequences, we're done! 363 | / \ / \ 364 | 12B 17B 12B 17B 365 | """ 366 | parent = node.parent 367 | if parent.color == RED: 368 | sibling, direction = self._get_sibling(node) 369 | if sibling.color == BLACK and sibling.left.color != RED and sibling.right.color != RED: 370 | parent.color, sibling.color = sibling.color, parent.color # switch colors 371 | return # Terminating 372 | self.__case_5(node) 373 | 374 | def __case_5(self, node): 375 | """ 376 | Case 5 is a rotation that changes the circumstances so that we can do a case 6 377 | If the closer node is red and the outer BLACK or NIL, we do a left/right rotation, depending on the orientation 378 | This will showcase when the CLOSER NODE's direction is RIGHT 379 | 380 | ___50B___ __50B__ 381 | / \ / \ 382 | 30B |80B| <-- Double black 35B |80B| Case 6 is now 383 | / \ / \ Closer node is red (35R) / \ / applicable here, 384 | 20B 35R 70R X Outer is black (20B) 30R 37B 70R so we redirect the node 385 | / \ So we do a LEFT ROTATION / \ to it :) 386 | 34B 37B on 35R (closer node) 20B 34B 387 | """ 388 | sibling, direction = self._get_sibling(node) 389 | closer_node = sibling.right if direction == 'L' else sibling.left 390 | outer_node = sibling.left if direction == 'L' else sibling.right 391 | if closer_node.color == RED and outer_node.color != RED and sibling.color == BLACK: 392 | if direction == 'L': 393 | self._left_rotation(node=None, parent=closer_node, grandfather=sibling) 394 | else: 395 | self._right_rotation(node=None, parent=closer_node, grandfather=sibling) 396 | closer_node.color = BLACK 397 | 
sibling.color = RED 398 | 399 | self.__case_6(node) 400 | 401 | def __case_6(self, node): 402 | """ 403 | Case 6 requires 404 | SIBLING to be BLACK 405 | OUTER NODE to be RED 406 | Then, does a right/left rotation on the sibling 407 | This will showcase when the SIBLING's direction is LEFT 408 | 409 | Double Black 410 | __50B__ | __35B__ 411 | / \ | / \ 412 | SIBLING--> 35B |80B| <- 30R 50R 413 | / \ / / \ / \ 414 | 30R 37B 70R Outer node is RED 20B 34B 37B 80B 415 | / \ Closer node doesn't / 416 | 20B 34B matter 70R 417 | Parent doesn't 418 | matter 419 | So we do a right rotation on 35B! 420 | """ 421 | sibling, direction = self._get_sibling(node) 422 | outer_node = sibling.left if direction == 'L' else sibling.right 423 | 424 | def __case_6_rotation(direction): 425 | parent_color = sibling.parent.color 426 | self.ROTATIONS[direction](node=None, parent=sibling, grandfather=sibling.parent) 427 | # new parent is sibling 428 | sibling.color = parent_color 429 | sibling.right.color = BLACK 430 | sibling.left.color = BLACK 431 | 432 | if sibling.color == BLACK and outer_node.color == RED: 433 | return __case_6_rotation(direction) # terminating 434 | 435 | raise Exception('We should have ended here, something is wrong') 436 | 437 | def _try_rebalance(self, node): 438 | """ 439 | Given a red child node, determine if there is a need to rebalance (if the parent is red) 440 | If there is, rebalance it 441 | """ 442 | parent = node.parent 443 | value = node.mark_value 444 | if (parent is None # what the fuck? 
(should not happen) 445 | or parent.parent is None # parent is the root 446 | or (node.color != RED or parent.color != RED)): # no need to rebalance 447 | return 448 | grandfather = parent.parent 449 | node_dir = 'L' if parent.mark_value > value else 'R' 450 | parent_dir = 'L' if grandfather.mark_value > parent.mark_value else 'R' 451 | uncle = grandfather.right if parent_dir == 'L' else grandfather.left 452 | general_direction = node_dir + parent_dir 453 | 454 | if uncle == self.NIL_LEAF or uncle.color == BLACK: 455 | # rotate 456 | if general_direction == 'LL': 457 | self._right_rotation(node, parent, grandfather, to_recolor=True) 458 | elif general_direction == 'RR': 459 | self._left_rotation(node, parent, grandfather, to_recolor=True) 460 | elif general_direction == 'LR': 461 | self._right_rotation(node=None, parent=node, grandfather=parent) 462 | # due to the prev rotation, our node is now the parent 463 | self._left_rotation(node=parent, parent=node, grandfather=grandfather, to_recolor=True) 464 | elif general_direction == 'RL': 465 | self._left_rotation(node=None, parent=node, grandfather=parent) 466 | # due to the prev rotation, our node is now the parent 467 | self._right_rotation(node=parent, parent=node, grandfather=grandfather, to_recolor=True) 468 | else: 469 | raise Exception("{} is not a valid direction!".format(general_direction)) 470 | else: # uncle is RED 471 | self._recolor(grandfather) 472 | 473 | def __update_parent(self, node, parent_old_child, new_parent): 474 | """ 475 | Our node 'switches' places with the old child 476 | Assigns a new parent to the node. 
477 | If the new_parent is None, this means that our node becomes the root of the tree 478 | """ 479 | node.parent = new_parent 480 | if new_parent: 481 | # Determine the old child's position in order to put node there 482 | if new_parent.mark_value > parent_old_child.mark_value: 483 | new_parent.left = node 484 | else: 485 | new_parent.right = node 486 | else: 487 | self.root = node 488 | 489 | def _right_rotation(self, node, parent, grandfather, to_recolor=False): 490 | grand_grandfather = grandfather.parent 491 | self.__update_parent(node=parent, parent_old_child=grandfather, new_parent=grand_grandfather) 492 | 493 | old_right = parent.right 494 | parent.right = grandfather 495 | grandfather.parent = parent 496 | 497 | grandfather.left = old_right # save the old right values 498 | old_right.parent = grandfather 499 | 500 | if to_recolor: 501 | parent.color = BLACK 502 | node.color = RED 503 | grandfather.color = RED 504 | 505 | def _left_rotation(self, node, parent, grandfather, to_recolor=False): 506 | grand_grandfather = grandfather.parent 507 | self.__update_parent(node=parent, parent_old_child=grandfather, new_parent=grand_grandfather) 508 | 509 | old_left = parent.left 510 | parent.left = grandfather 511 | grandfather.parent = parent 512 | 513 | grandfather.right = old_left # save the old left values 514 | old_left.parent = grandfather 515 | 516 | if to_recolor: 517 | parent.color = BLACK 518 | node.color = RED 519 | grandfather.color = RED 520 | 521 | def _recolor(self, grandfather): 522 | grandfather.right.color = BLACK 523 | grandfather.left.color = BLACK 524 | if grandfather != self.root: 525 | grandfather.color = RED 526 | self._try_rebalance(grandfather) 527 | 528 | def _find_parent(self, value): 529 | """ Finds a place for the value in our binary tree""" 530 | def inner_find(parent): 531 | """ 532 | Return the appropriate parent node for our new node as well as the side it should be on 533 | """ 534 | if parent.max_value >= value >= parent.min_value: 
535 | return parent, None 536 | elif parent.max_value < value: 537 | if parent.right.color == NIL: # no more to go 538 | return parent, 'R' 539 | return inner_find(parent.right) 540 | elif value < parent.min_value: 541 | if parent.left.color == NIL: # no more to go 542 | return parent, 'L' 543 | return inner_find(parent.left) 544 | 545 | return inner_find(self.root) 546 | 547 | def find_node(self, value): 548 | def inner_find(root): 549 | if root is None or root == self.NIL_LEAF: 550 | return None 551 | if value > root.max_value: 552 | return inner_find(root.right) 553 | elif value < root.min_value: 554 | return inner_find(root.left) 555 | else: 556 | return root 557 | 558 | found_node = inner_find(self.root) 559 | return found_node 560 | 561 | def _find_in_order_successor(self, node): 562 | right_node = node.right 563 | left_node = right_node.left 564 | if left_node == self.NIL_LEAF: 565 | return right_node 566 | while left_node.left != self.NIL_LEAF: 567 | left_node = left_node.left 568 | return left_node 569 | 570 | def _get_sibling(self, node): 571 | """ 572 | Returns the sibling of the node, as well as the side it is on 573 | e.g 574 | 575 | 20 (A) 576 | / \ 577 | 15(B) 25(C) 578 | 579 | _get_sibling(25(C)) => 15(B), 'R' 580 | """ 581 | parent = node.parent 582 | if node.mark_value >= parent.mark_value: 583 | sibling = parent.left 584 | direction = 'L' 585 | else: 586 | sibling = parent.right 587 | direction = 'R' 588 | return sibling, direction 589 | 590 | def all_points_from_tree(self,return_color=False,return_label=False): 591 | root_node=self.root 592 | all_points_set=set() 593 | node_q=Queue() 594 | node_q.put(root_node) 595 | while not node_q.empty(): 596 | temp_node=node_q.get() 597 | all_points_set=all_points_set|temp_node.set_list[0] 598 | if temp_node.left.mark_value is not None: 599 | node_q.put(temp_node.left) 600 | if temp_node.right.mark_value is not None: 601 | node_q.put(temp_node.right) 602 | if return_color: 603 | 
node_point=np.zeros((len(all_points_set),6)) 604 | if return_label: 605 | node_point=np.zeros((len(all_points_set),4)) 606 | 607 | point_count=0 608 | for point in all_points_set: 609 | if return_label: 610 | has_same=False 611 | for branch_node in point.branch_array: 612 | if branch_node is not None and branch_node.pred_result==point.pred_result: 613 | has_same=True 614 | break 615 | if not has_same: 616 | continue 617 | node_point[point_count,3]=point.pred_result 618 | 619 | node_point[point_count,:3]=point.point_coor 620 | 621 | 622 | if return_color: 623 | node_point[point_count,3:]=point.point_color 624 | 625 | point_count+=1 626 | return node_point[:point_count,] 627 | -------------------------------------------------------------------------------- /GLtree/octree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from queue import Queue 3 | import time 4 | class point3D: 5 | def __init__(self,point_coor,feature_2d,max_octree_threshold): 6 | self.point_coor=point_coor 7 | self.feature_fuse=feature_2d 8 | self.branch_array=[None, None, None, None, None, None, None, None] 9 | self.branch_distance=np.full((8),max_octree_threshold) 10 | self.result_feature=np.zeros((128)) 11 | self.pred_result=-1 12 | self.frame_id=0 13 | self.scan_times=0 14 | self.uncertainty=1 15 | 16 | 17 | 18 | def findNearPoint(self,near_node_num,max_node): 19 | 20 | neighbor_2dfeature=np.zeros((max_node+1,128)) 21 | neighbor_node=np.zeros((max_node+1,3)) 22 | count = 0 23 | neighbor_node[count, :] = 0 24 | neighbor_2dfeature[count] = self.feature_fuse 25 | find_queue = Queue() 26 | count += 1 27 | 28 | for i,node in enumerate(self.branch_array): 29 | if node is not None: 30 | neighbor_node[count, :] = node.point_coor - self.point_coor 31 | neighbor_2dfeature[count] = node.feature_fuse 32 | if node.branch_array[i] is not None: 33 | find_queue.put((i,node.branch_array[i])) 34 | count += 1 35 | 36 | while not find_queue.empty() and 
count=near_node_num: 47 | sample=np.random.choice(count,near_node_num,replace=False) 48 | else: 49 | sample=np.random.choice(count,near_node_num,replace=True) 50 | sample[0] = 0 51 | return neighbor_2dfeature[sample,:].T,neighbor_node[sample,:].T,count -------------------------------------------------------------------------------- /GLtree/octree_vis_only.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from queue import Queue 3 | import time 4 | class point3D: 5 | def __init__(self,point_coor,point_color): 6 | self.point_coor=point_coor 7 | self.point_color=point_color 8 | self.branch_array=[None, None, None, None, None, None, None, None] 9 | self.branch_distance=np.full((8),0.15) 10 | self.frame_id=0 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 by Jiazhao Zhang 2 | 3 | FusionAwareConv is licensed under a 4 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. 5 | 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 8 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 9 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 10 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 11 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 12 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 13 | THE SOFTWARE. 14 | 15 | 16 | 17 | 18 | Attribution-NonCommercial-ShareAlike 4.0 International 19 | 20 | ======================================================================= 21 | 22 | Creative Commons Corporation ("Creative Commons") is not a law firm and 23 | does not provide legal services or legal advice. 
Distribution of 24 | Creative Commons public licenses does not create a lawyer-client or 25 | other relationship. Creative Commons makes its licenses and related 26 | information available on an "as-is" basis. Creative Commons gives no 27 | warranties regarding its licenses, any material licensed under their 28 | terms and conditions, or any related information. Creative Commons 29 | disclaims all liability for damages resulting from their use to the 30 | fullest extent possible. 31 | 32 | Using Creative Commons Public Licenses 33 | 34 | Creative Commons public licenses provide a standard set of terms and 35 | conditions that creators and other rights holders may use to share 36 | original works of authorship and other material subject to copyright 37 | and certain other rights specified in the public license below. The 38 | following considerations are for informational purposes only, are not 39 | exhaustive, and do not form part of our licenses. 40 | 41 | Considerations for licensors: Our public licenses are 42 | intended for use by those authorized to give the public 43 | permission to use material in ways otherwise restricted by 44 | copyright and certain other rights. Our licenses are 45 | irrevocable. Licensors should read and understand the terms 46 | and conditions of the license they choose before applying it. 47 | Licensors should also secure all rights necessary before 48 | applying our licenses so that the public can reuse the 49 | material as expected. Licensors should clearly mark any 50 | material not subject to the license. This includes other CC- 51 | licensed material, or material used under an exception or 52 | limitation to copyright. More considerations for licensors: 53 | wiki.creativecommons.org/Considerations_for_licensors 54 | 55 | Considerations for the public: By using one of our public 56 | licenses, a licensor grants the public permission to use the 57 | licensed material under specified terms and conditions. 
If 58 | the licensor's permission is not necessary for any reason--for 59 | example, because of any applicable exception or limitation to 60 | copyright--then that use is not regulated by the license. Our 61 | licenses grant only permissions under copyright and certain 62 | other rights that a licensor has authority to grant. Use of 63 | the licensed material may still be restricted for other 64 | reasons, including because others have copyright or other 65 | rights in the material. A licensor may make special requests, 66 | such as asking that all changes be marked or described. 67 | Although not required by our licenses, you are encouraged to 68 | respect those requests where reasonable. More_considerations 69 | for the public: 70 | wiki.creativecommons.org/Considerations_for_licensees 71 | 72 | ======================================================================= 73 | 74 | Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 75 | Public License 76 | 77 | By exercising the Licensed Rights (defined below), You accept and agree 78 | to be bound by the terms and conditions of this Creative Commons 79 | Attribution-NonCommercial-ShareAlike 4.0 International Public License 80 | ("Public License"). To the extent this Public License may be 81 | interpreted as a contract, You are granted the Licensed Rights in 82 | consideration of Your acceptance of these terms and conditions, and the 83 | Licensor grants You such rights in consideration of benefits the 84 | Licensor receives from making the Licensed Material available under 85 | these terms and conditions. 86 | 87 | 88 | Section 1 -- Definitions. 89 | 90 | a. 
Adapted Material means material subject to Copyright and Similar 91 | Rights that is derived from or based upon the Licensed Material 92 | and in which the Licensed Material is translated, altered, 93 | arranged, transformed, or otherwise modified in a manner requiring 94 | permission under the Copyright and Similar Rights held by the 95 | Licensor. For purposes of this Public License, where the Licensed 96 | Material is a musical work, performance, or sound recording, 97 | Adapted Material is always produced where the Licensed Material is 98 | synched in timed relation with a moving image. 99 | 100 | b. Adapter's License means the license You apply to Your Copyright 101 | and Similar Rights in Your contributions to Adapted Material in 102 | accordance with the terms and conditions of this Public License. 103 | 104 | c. BY-NC-SA Compatible License means a license listed at 105 | creativecommons.org/compatiblelicenses, approved by Creative 106 | Commons as essentially the equivalent of this Public License. 107 | 108 | d. Copyright and Similar Rights means copyright and/or similar rights 109 | closely related to copyright including, without limitation, 110 | performance, broadcast, sound recording, and Sui Generis Database 111 | Rights, without regard to how the rights are labeled or 112 | categorized. For purposes of this Public License, the rights 113 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 114 | Rights. 115 | 116 | e. Effective Technological Measures means those measures that, in the 117 | absence of proper authority, may not be circumvented under laws 118 | fulfilling obligations under Article 11 of the WIPO Copyright 119 | Treaty adopted on December 20, 1996, and/or similar international 120 | agreements. 121 | 122 | f. Exceptions and Limitations means fair use, fair dealing, and/or 123 | any other exception or limitation to Copyright and Similar Rights 124 | that applies to Your use of the Licensed Material. 125 | 126 | g. 
License Elements means the license attributes listed in the name 127 | of a Creative Commons Public License. The License Elements of this 128 | Public License are Attribution, NonCommercial, and ShareAlike. 129 | 130 | h. Licensed Material means the artistic or literary work, database, 131 | or other material to which the Licensor applied this Public 132 | License. 133 | 134 | i. Licensed Rights means the rights granted to You subject to the 135 | terms and conditions of this Public License, which are limited to 136 | all Copyright and Similar Rights that apply to Your use of the 137 | Licensed Material and that the Licensor has authority to license. 138 | 139 | j. Licensor means the individual(s) or entity(ies) granting rights 140 | under this Public License. 141 | 142 | k. NonCommercial means not primarily intended for or directed towards 143 | commercial advantage or monetary compensation. For purposes of 144 | this Public License, the exchange of the Licensed Material for 145 | other material subject to Copyright and Similar Rights by digital 146 | file-sharing or similar means is NonCommercial provided there is 147 | no payment of monetary compensation in connection with the 148 | exchange. 149 | 150 | l. Share means to provide material to the public by any means or 151 | process that requires permission under the Licensed Rights, such 152 | as reproduction, public display, public performance, distribution, 153 | dissemination, communication, or importation, and to make material 154 | available to the public including in ways that members of the 155 | public may access the material from a place and at a time 156 | individually chosen by them. 157 | 158 | m. 
Sui Generis Database Rights means rights other than copyright 159 | resulting from Directive 96/9/EC of the European Parliament and of 160 | the Council of 11 March 1996 on the legal protection of databases, 161 | as amended and/or succeeded, as well as other essentially 162 | equivalent rights anywhere in the world. 163 | 164 | n. You means the individual or entity exercising the Licensed Rights 165 | under this Public License. Your has a corresponding meaning. 166 | 167 | 168 | Section 2 -- Scope. 169 | 170 | a. License grant. 171 | 172 | 1. Subject to the terms and conditions of this Public License, 173 | the Licensor hereby grants You a worldwide, royalty-free, 174 | non-sublicensable, non-exclusive, irrevocable license to 175 | exercise the Licensed Rights in the Licensed Material to: 176 | 177 | a. reproduce and Share the Licensed Material, in whole or 178 | in part, for NonCommercial purposes only; and 179 | 180 | b. produce, reproduce, and Share Adapted Material for 181 | NonCommercial purposes only. 182 | 183 | 2. Exceptions and Limitations. For the avoidance of doubt, where 184 | Exceptions and Limitations apply to Your use, this Public 185 | License does not apply, and You do not need to comply with 186 | its terms and conditions. 187 | 188 | 3. Term. The term of this Public License is specified in Section 189 | 6(a). 190 | 191 | 4. Media and formats; technical modifications allowed. The 192 | Licensor authorizes You to exercise the Licensed Rights in 193 | all media and formats whether now known or hereafter created, 194 | and to make technical modifications necessary to do so. The 195 | Licensor waives and/or agrees not to assert any right or 196 | authority to forbid You from making technical modifications 197 | necessary to exercise the Licensed Rights, including 198 | technical modifications necessary to circumvent Effective 199 | Technological Measures. 
For purposes of this Public License, 200 | simply making modifications authorized by this Section 2(a) 201 | (4) never produces Adapted Material. 202 | 203 | 5. Downstream recipients. 204 | 205 | a. Offer from the Licensor -- Licensed Material. Every 206 | recipient of the Licensed Material automatically 207 | receives an offer from the Licensor to exercise the 208 | Licensed Rights under the terms and conditions of this 209 | Public License. 210 | 211 | b. Additional offer from the Licensor -- Adapted Material. 212 | Every recipient of Adapted Material from You 213 | automatically receives an offer from the Licensor to 214 | exercise the Licensed Rights in the Adapted Material 215 | under the conditions of the Adapter's License You apply. 216 | 217 | c. No downstream restrictions. You may not offer or impose 218 | any additional or different terms or conditions on, or 219 | apply any Effective Technological Measures to, the 220 | Licensed Material if doing so restricts exercise of the 221 | Licensed Rights by any recipient of the Licensed 222 | Material. 223 | 224 | 6. No endorsement. Nothing in this Public License constitutes or 225 | may be construed as permission to assert or imply that You 226 | are, or that Your use of the Licensed Material is, connected 227 | with, or sponsored, endorsed, or granted official status by, 228 | the Licensor or others designated to receive attribution as 229 | provided in Section 3(a)(1)(A)(i). 230 | 231 | b. Other rights. 232 | 233 | 1. Moral rights, such as the right of integrity, are not 234 | licensed under this Public License, nor are publicity, 235 | privacy, and/or other similar personality rights; however, to 236 | the extent possible, the Licensor waives and/or agrees not to 237 | assert any such rights held by the Licensor to the limited 238 | extent necessary to allow You to exercise the Licensed 239 | Rights, but not otherwise. 240 | 241 | 2. 
Patent and trademark rights are not licensed under this 242 | Public License. 243 | 244 | 3. To the extent possible, the Licensor waives any right to 245 | collect royalties from You for the exercise of the Licensed 246 | Rights, whether directly or through a collecting society 247 | under any voluntary or waivable statutory or compulsory 248 | licensing scheme. In all other cases the Licensor expressly 249 | reserves any right to collect such royalties, including when 250 | the Licensed Material is used other than for NonCommercial 251 | purposes. 252 | 253 | 254 | Section 3 -- License Conditions. 255 | 256 | Your exercise of the Licensed Rights is expressly made subject to the 257 | following conditions. 258 | 259 | a. Attribution. 260 | 261 | 1. If You Share the Licensed Material (including in modified 262 | form), You must: 263 | 264 | a. retain the following if it is supplied by the Licensor 265 | with the Licensed Material: 266 | 267 | i. identification of the creator(s) of the Licensed 268 | Material and any others designated to receive 269 | attribution, in any reasonable manner requested by 270 | the Licensor (including by pseudonym if 271 | designated); 272 | 273 | ii. a copyright notice; 274 | 275 | iii. a notice that refers to this Public License; 276 | 277 | iv. a notice that refers to the disclaimer of 278 | warranties; 279 | 280 | v. a URI or hyperlink to the Licensed Material to the 281 | extent reasonably practicable; 282 | 283 | b. indicate if You modified the Licensed Material and 284 | retain an indication of any previous modifications; and 285 | 286 | c. indicate the Licensed Material is licensed under this 287 | Public License, and include the text of, or the URI or 288 | hyperlink to, this Public License. 289 | 290 | 2. You may satisfy the conditions in Section 3(a)(1) in any 291 | reasonable manner based on the medium, means, and context in 292 | which You Share the Licensed Material. 
For example, it may be 293 | reasonable to satisfy the conditions by providing a URI or 294 | hyperlink to a resource that includes the required 295 | information. 296 | 3. If requested by the Licensor, You must remove any of the 297 | information required by Section 3(a)(1)(A) to the extent 298 | reasonably practicable. 299 | 300 | b. ShareAlike. 301 | 302 | In addition to the conditions in Section 3(a), if You Share 303 | Adapted Material You produce, the following conditions also apply. 304 | 305 | 1. The Adapter's License You apply must be a Creative Commons 306 | license with the same License Elements, this version or 307 | later, or a BY-NC-SA Compatible License. 308 | 309 | 2. You must include the text of, or the URI or hyperlink to, the 310 | Adapter's License You apply. You may satisfy this condition 311 | in any reasonable manner based on the medium, means, and 312 | context in which You Share Adapted Material. 313 | 314 | 3. You may not offer or impose any additional or different terms 315 | or conditions on, or apply any Effective Technological 316 | Measures to, Adapted Material that restrict exercise of the 317 | rights granted under the Adapter's License You apply. 318 | 319 | 320 | Section 4 -- Sui Generis Database Rights. 321 | 322 | Where the Licensed Rights include Sui Generis Database Rights that 323 | apply to Your use of the Licensed Material: 324 | 325 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 326 | to extract, reuse, reproduce, and Share all or a substantial 327 | portion of the contents of the database for NonCommercial purposes 328 | only; 329 | 330 | b. if You include all or a substantial portion of the database 331 | contents in a database in which You have Sui Generis Database 332 | Rights, then the database in which You have Sui Generis Database 333 | Rights (but not its individual contents) is Adapted Material, 334 | including for purposes of Section 3(b); and 335 | 336 | c. 
You must comply with the conditions in Section 3(a) if You Share 337 | all or a substantial portion of the contents of the database. 338 | 339 | For the avoidance of doubt, this Section 4 supplements and does not 340 | replace Your obligations under this Public License where the Licensed 341 | Rights include other Copyright and Similar Rights. 342 | 343 | 344 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 345 | 346 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 347 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 348 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 349 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 350 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 351 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 352 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 353 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 354 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 355 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 356 | 357 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 358 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 359 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 360 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 361 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 362 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 363 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 364 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 365 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 366 | 367 | c. The disclaimer of warranties and limitation of liability provided 368 | above shall be interpreted in a manner that, to the extent 369 | possible, most closely approximates an absolute disclaimer and 370 | waiver of all liability. 
371 | 372 | 373 | Section 6 -- Term and Termination. 374 | 375 | a. This Public License applies for the term of the Copyright and 376 | Similar Rights licensed here. However, if You fail to comply with 377 | this Public License, then Your rights under this Public License 378 | terminate automatically. 379 | 380 | b. Where Your right to use the Licensed Material has terminated under 381 | Section 6(a), it reinstates: 382 | 383 | 1. automatically as of the date the violation is cured, provided 384 | it is cured within 30 days of Your discovery of the 385 | violation; or 386 | 387 | 2. upon express reinstatement by the Licensor. 388 | 389 | For the avoidance of doubt, this Section 6(b) does not affect any 390 | right the Licensor may have to seek remedies for Your violations 391 | of this Public License. 392 | 393 | c. For the avoidance of doubt, the Licensor may also offer the 394 | Licensed Material under separate terms or conditions or stop 395 | distributing the Licensed Material at any time; however, doing so 396 | will not terminate this Public License. 397 | 398 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 399 | License. 400 | 401 | 402 | Section 7 -- Other Terms and Conditions. 403 | 404 | a. The Licensor shall not be bound by any additional or different 405 | terms or conditions communicated by You unless expressly agreed. 406 | 407 | b. Any arrangements, understandings, or agreements regarding the 408 | Licensed Material not stated herein are separate from and 409 | independent of the terms and conditions of this Public License. 410 | 411 | 412 | Section 8 -- Interpretation. 413 | 414 | a. For the avoidance of doubt, this Public License does not, and 415 | shall not be interpreted to, reduce, limit, restrict, or impose 416 | conditions on any use of the Licensed Material that could lawfully 417 | be made without permission under this Public License. 418 | 419 | b. 
To the extent possible, if any provision of this Public License is 420 | deemed unenforceable, it shall be automatically reformed to the 421 | minimum extent necessary to make it enforceable. If the provision 422 | cannot be reformed, it shall be severed from this Public License 423 | without affecting the enforceability of the remaining terms and 424 | conditions. 425 | 426 | c. No term or condition of this Public License will be waived and no 427 | failure to comply consented to unless expressly agreed to by the 428 | Licensor. 429 | 430 | d. Nothing in this Public License constitutes or may be interpreted 431 | as a limitation upon, or waiver of, any privileges and immunities 432 | that apply to the Licensor or You, including from the legal 433 | processes of any jurisdiction or authority. 434 | 435 | ======================================================================= 436 | 437 | Creative Commons is not a party to its public 438 | licenses. Notwithstanding, Creative Commons may elect to apply one of 439 | its public licenses to material it publishes and in those instances 440 | will be considered the “Licensor.” The text of the Creative Commons 441 | public licenses is dedicated to the public domain under the CC0 Public 442 | Domain Dedication. Except for the limited purpose of indicating that 443 | material is shared under a Creative Commons public license or as 444 | otherwise permitted by the Creative Commons policies published at 445 | creativecommons.org/policies, Creative Commons does not authorize the 446 | use of the trademark "Creative Commons" or any other trademark or logo 447 | of Creative Commons without its prior written consent including, 448 | without limitation, in connection with any unauthorized modifications 449 | to any of its public licenses or any other arrangements, 450 | understandings, or agreements concerning use of licensed material. For 451 | the avoidance of doubt, this paragraph does not form part of the 452 | public licenses. 
453 | 454 | Creative Commons may be contacted at creativecommons.org. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FusionAwareConv 2 | 3 | This project is based on our CVPR 2020 paper,[Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation 4 | ](https://arxiv.org/abs/2003.06233) 5 | 6 | 7 | 8 | 9 | ## Introduction 10 | 11 | We propose a novel fusionaware 3D point convolution which operates directly on the geometric surface being reconstructed and exploits effectively the inter-frame correlation for high quality 3D feature 12 | learning. 13 | ## Installation 14 | This code is based on [PyTorch](https://pytorch.org/) and needs [open3D](http://www.open3d.org/) for convenient visualization 15 | 16 | Our code has been tested with Python 3.7.6, PyTorch 1.1.0, open3d 0.9.0, CUDA 10.0 on Ubuntu 16.04. 17 | 18 | 19 | ## Dataset and Pre-trained weights 20 | We use the ScanNetv2 as our test dataset. If you want to test all the data, you can download the ScanNetV2 dataset from [here](http://www.scan-net.org/). For a quick visulazation test, we provide several pre-proessing scenes of the test set [sequence](https://1drv.ms/u/s!AvuKnc9E9hmqhXJWps9cdc-hDPgA?e=kQ8Bw5). Put the ***scene.h5*** in `path/data`. 21 | 22 | We also provide the pre-trained weights for [ScanNet benchmark](http://kaldir.vc.in.tum.de/scannet_benchmark/), you can download from [here](https://1drv.ms/u/s!AvuKnc9E9hmqhXMuH6MUHIitw4iw?e=e2d3vb). After finishing the download, put the weights in `path/weight`. 
23 | 24 | 25 | ## Test 26 | 27 | ### Online Segmentation Visulization 28 | We have already intergrate the open3d for visulizaiton, you can run the command below: 29 | 30 | ``` 31 | python vis_sequence.py --weight2d_path=weight_path/weight2d_name --weight3d_path=weight_path/weight3d_name --gpu=0 --use_vis=1 --scene_path=scene_path/scene_name 32 | ``` 33 | The complete segmentation result will be generated in `result.ply`. 34 | 35 | 36 | ### Global-local Tree Visualization 37 | We achieve the a test demo for global-local tree visulizaiton only. Run the command below to see the processing of the tree built. 38 | ``` 39 | python vis_sequence.py --use_vis=1 --scene_path=scene_path/scene_name 40 | ``` 41 | The complete result will be generated in `result_GLtree.ply`. 42 | 43 | 44 | 45 | 46 | 47 | ## Citation 48 | If you find our work useful in your research, please consider citing: 49 | ``` 50 | @article{zhang2020fusion, 51 | title={Fusion-Aware Point Convolution for Online Semantic 3D Scene Segmentation}, 52 | author={Zhang, Jiazhao and Zhu, Chenyang and Zheng, Lintao and Xu, Kai}, 53 | journal={arXiv preprint arXiv:2003.06233}, 54 | year={2020} 55 | } 56 | ``` 57 | 58 | ## Acknowledgments 59 | Code is inspired by [Red-Black-Tree](https://github.com/stanislavkozlovski/Red-Black-Tree) and [FuseNet_PyTorch](https://github.com/zanilzanzan/FuseNet_PyTorch). 60 | 61 | ## Contact 62 | If you have any questions, please email Jiazhao Zhang at zhngjizh@gmail.com. 
class FuseNet_feature(nn.Module):
    """FuseNet-style RGB-D feature extractor (encoder/partial decoder).

    Two VGG-16 encoder branches are built from torchvision's vgg16 feature
    layers: a depth branch (first conv collapsed to 1 input channel by
    averaging the pretrained RGB kernels) and an RGB branch. At each encoder
    stage the depth activations are added into the RGB stream before pooling.
    The decoder runs only three unpooling stages, so ``forward`` returns a
    128-channel feature map at 1/4 of the input resolution (CBR2_Dec and
    CBR1_Dec are constructed but never used in ``forward``).

    Args:
        num_labels: channel count of the (unused) final CBR1_Dec conv.
        gpu_device: unused; kept for backward compatibility with callers.
        use_class: if True, also build a scene-classification head
            (its output is computed in ``forward`` but currently discarded).
    """

    def __init__(self, num_labels, gpu_device=3, use_class=False):
        super(FuseNet_feature, self).__init__()

        # Two independent copies of the VGG-16 feature stack: one donates its
        # conv/ReLU layers to the depth encoder, the other to the RGB encoder.
        # NOTE(review): pretrained=False here, so weights come from torch init
        # (and presumably a checkpoint loaded by the caller) — confirm.
        feats = list(models.vgg16(pretrained=False).features.children())
        feats2 = list(models.vgg16(pretrained=False).features.children())

        # Average the 3-channel weights of VGG's first conv to get a
        # 1-channel kernel for the depth input.
        avg = torch.mean(feats[0].weight.data, dim=1)
        avg = avg.unsqueeze(1)

        bn_moment = 0.1
        self.use_class = use_class

        if use_class:
            # Fixed number of scene classes for the optional class head.
            num_classes = 10

        # DEPTH ENCODER
        self.conv11d = nn.Conv2d(1, 64, kernel_size=3, padding=1)
        self.conv11d.weight.data = avg

        self.CBR1_D = nn.Sequential(
            nn.BatchNorm2d(64),
            feats[1],
            feats[2],
            nn.BatchNorm2d(64),
            feats[3],
        )
        self.CBR2_D = nn.Sequential(
            feats[5],
            nn.BatchNorm2d(128),
            feats[6],
            feats[7],
            nn.BatchNorm2d(128),
            feats[8],
        )
        self.CBR3_D = nn.Sequential(
            feats[10],
            nn.BatchNorm2d(256),
            feats[11],
            feats[12],
            nn.BatchNorm2d(256),
            feats[13],
            feats[14],
            nn.BatchNorm2d(256),
            feats[15],
        )
        self.dropout3_d = nn.Dropout(p=0.5)

        self.CBR4_D = nn.Sequential(
            feats[17],
            nn.BatchNorm2d(512),
            feats[18],
            feats[19],
            nn.BatchNorm2d(512),
            feats[20],
            feats[21],
            nn.BatchNorm2d(512),
            feats[22],
        )
        self.dropout4_d = nn.Dropout(p=0.5)

        self.CBR5_D = nn.Sequential(
            feats[24],
            nn.BatchNorm2d(512),
            feats[25],
            feats[26],
            nn.BatchNorm2d(512),
            feats[27],
            feats[28],
            nn.BatchNorm2d(512),
            feats[29],
        )

        # RGB ENCODER
        self.CBR1_RGB = nn.Sequential(
            feats2[0],
            nn.BatchNorm2d(64),
            feats2[1],
            feats2[2],
            nn.BatchNorm2d(64),
            feats2[3],
        )

        self.CBR2_RGB = nn.Sequential(
            feats2[5],
            nn.BatchNorm2d(128),
            feats2[6],
            feats2[7],
            nn.BatchNorm2d(128),
            feats2[8],
        )

        self.CBR3_RGB = nn.Sequential(
            feats2[10],
            nn.BatchNorm2d(256),
            feats2[11],
            feats2[12],
            nn.BatchNorm2d(256),
            feats2[13],
            feats2[14],
            nn.BatchNorm2d(256),
            feats2[15],
        )
        self.dropout3 = nn.Dropout(p=0.5)

        self.CBR4_RGB = nn.Sequential(
            feats2[17],
            nn.BatchNorm2d(512),
            feats2[18],
            feats2[19],
            nn.BatchNorm2d(512),
            feats2[20],
            feats2[21],
            nn.BatchNorm2d(512),
            feats2[22],
        )
        self.dropout4 = nn.Dropout(p=0.5)

        self.CBR5_RGB = nn.Sequential(
            feats2[24],
            nn.BatchNorm2d(512),
            feats2[25],
            feats2[26],
            nn.BatchNorm2d(512),
            feats2[27],
            feats2[28],
            nn.BatchNorm2d(512),
            feats2[29],
        )
        self.dropout5 = nn.Dropout(p=0.5)

        if use_class:
            # Optional scene-classification head over the flattened stage-5
            # features (35840 = 512 channels x spatial size of the expected
            # input resolution — TODO confirm against the training input size).
            self.ClassHead = nn.Sequential(
                nn.Linear(35840, 4096),
                nn.ReLU(),
                nn.Dropout(p=0.5),
                nn.Linear(4096, 4096),
                nn.ReLU(),
                nn.Dropout(p=0.5),
                nn.Linear(4096, num_classes)
            )

        # RGB DECODER
        self.CBR5_Dec = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=bn_moment),
            nn.ReLU(),
            nn.Dropout(p=0.5),
        )

        self.CBR4_Dec = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(512, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256, momentum=bn_moment),
            nn.ReLU(),
            nn.Dropout(p=0.5),
        )

        self.CBR3_Dec = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(256, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128, momentum=bn_moment),
            nn.ReLU(),
            nn.Dropout(p=0.5),
        )

        # NOTE(review): CBR2_Dec and CBR1_Dec are built (and count toward the
        # state_dict) but are never called in forward().
        self.CBR2_Dec = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(128, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=bn_moment),
            nn.ReLU(),
        )

        self.CBR1_Dec = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=bn_moment),
            nn.ReLU(),
            nn.Conv2d(64, num_labels, kernel_size=3, padding=1),
        )

        self.initialize_weights()

    # He Initialization for the linear layers in the classification head
    def initialize_weights(self):
        """Re-initialize every nn.Linear weight with a normal distribution.

        NOTE(review): despite the "He Initialization" label above, the std
        used is sqrt(4/(fan_in+fan_out)), i.e. twice the Glorot-normal std,
        not He's sqrt(2/fan_in) — confirm intent before "fixing".
        """
        for m in self.modules():
            if isinstance(m, nn.Linear):
                size = m.weight.size()
                fan_out = size[0]  # number of rows
                fan_in = size[1]  # number of columns
                variance = np.sqrt(4.0/(fan_in + fan_out))
                m.weight.data.normal_(0.0, variance)

    def forward(self, rgb_inputs, depth_inputs):
        """Extract fused RGB-D features.

        Args:
            rgb_inputs: RGB image batch (B, 3, H, W).
            depth_inputs: depth image batch (B, 1, H, W).

        Returns:
            128-channel feature map at 1/4 of the input resolution
            (only three decoder stages are executed).
        """
        # DEPTH ENCODER
        # Stage 1
        x = self.conv11d(depth_inputs)
        x_1 = self.CBR1_D(x)
        x, id1_d = F.max_pool2d(x_1, kernel_size=2, stride=2, return_indices=True)

        # Stage 2
        x_2 = self.CBR2_D(x)
        x, id2_d = F.max_pool2d(x_2, kernel_size=2, stride=2, return_indices=True)

        # Stage 3
        x_3 = self.CBR3_D(x)
        x, id3_d = F.max_pool2d(x_3, kernel_size=2, stride=2, return_indices=True)
        x = self.dropout3_d(x)

        # Stage 4
        x_4 = self.CBR4_D(x)
        x, id4_d = F.max_pool2d(x_4, kernel_size=2, stride=2, return_indices=True)
        x = self.dropout4_d(x)

        # Stage 5
        x_5 = self.CBR5_D(x)

        # RGB ENCODER (depth activations fused in by element-wise addition)
        # Stage 1
        y = self.CBR1_RGB(rgb_inputs)
        y = torch.add(y, x_1)
        y, id1 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)

        # Stage 2
        y = self.CBR2_RGB(y)
        y = torch.add(y, x_2)
        y, id2 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)

        # Stage 3
        y = self.CBR3_RGB(y)
        y = torch.add(y, x_3)
        y, id3 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout3(y)

        # Stage 4
        y = self.CBR4_RGB(y)
        y = torch.add(y, x_4)
        y, id4 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout4(y)

        # Stage 5
        y = self.CBR5_RGB(y)
        y = torch.add(y, x_5)
        y_size = y.size()

        y, id5 = F.max_pool2d(y, kernel_size=2, stride=2, return_indices=True)
        y = self.dropout5(y)

        if self.use_class:
            # NOTE(review): the class-head output is computed but discarded —
            # it is never returned from this method.
            y_class = y.view(y.size(0), -1)
            y_class = self.ClassHead(y_class)

        # DECODER — unpool with the pooling indices from the fused encoder,
        # stopping after stage 3 (1/4 resolution, 128 channels).
        y = F.max_unpool2d(y, id5, kernel_size=2, stride=2, output_size=y_size)
        y = self.CBR5_Dec(y)

        y = F.max_unpool2d(y, id4, kernel_size=2, stride=2)
        y = self.CBR4_Dec(y)

        y = F.max_unpool2d(y, id3, kernel_size=2, stride=2)
        y = self.CBR3_Dec(y)

        return y
class FusionAwareFuseConv(nn.Module):
    """Fusion-aware point convolution head for online 3D semantic segmentation.

    Lifts per-neighbour xyz offsets with a shared 1x1-conv MLP, correlates
    them with per-point 2D image features via a matrix product, and predicts
    per-point class log-probabilities, fusing with the previous frame's
    feature by an element-wise max.
    """

    def __init__(self, num_class):
        super(FusionAwareFuseConv, self).__init__()
        # cmid: channels produced by the point MLP; feature_dim: channels of
        # the incoming 2D feature (matches the 2D model's 128-ch output).
        self.cmid = 32
        self.feature_dim = 128

        # Shared MLP over neighbour offsets, implemented as 1x1 2D convs.
        self.mlp_conv1 = torch.nn.Conv2d(3, 8, (1, 1))
        self.mlp_conv2 = torch.nn.Conv2d(8, 16, (1, 1))
        self.mlp_conv3 = torch.nn.Conv2d(16, 16, (1, 1))
        self.mlp_conv4 = torch.nn.Conv2d(16, 32, (1, 1))

        # Per-node fully connected stack, implemented as kernel-1 1D convs.
        self.fc_1_256 = torch.nn.Conv1d(self.cmid * self.feature_dim, 1024, 1)
        self.fc_2 = torch.nn.Conv1d(1024, 256, 1)
        self.fc_3 = torch.nn.Conv1d(256, 128, 1)
        self.fc_output_20 = torch.nn.Conv1d(128, num_class, 1)

        self.bn_conv1 = nn.BatchNorm2d(8)
        self.bn_conv2 = nn.BatchNorm2d(16)
        self.bn_conv3 = nn.BatchNorm2d(16)
        self.bn_conv4 = nn.BatchNorm2d(32)

        self.fc1_bn = nn.BatchNorm1d(1024)
        self.fc2_bn = nn.BatchNorm1d(256)
        self.fc3_bn = nn.BatchNorm1d(128)

    def forward(self, feature2d, points, pre_result):
        """Predict per-point class scores.

        Args:
            feature2d: per-point 2D features; permuted to put the last axis
                first below — assumed (B, ?, ?, node_size)-like; TODO confirm
                exact layout against the caller.
            points: neighbour offsets, shape (B, 3, K, node_size) — the
                node count is read from axis 3.
            pre_result: previous frame's 128-d per-node feature, fused by max.

        Returns:
            (log_probs, fused_feature, uncertainty) where uncertainty is the
            ratio of the two largest log-probabilities per node.
        """
        self.batch_size = points.shape[0]
        self.node_size = points.shape[3]

        # Lift 3-d offsets to 32-d point features with the shared MLP.
        points = F.relu(self.bn_conv1(self.mlp_conv1(points)))
        points = F.relu(self.bn_conv2(self.mlp_conv2(points)))
        points = F.relu(self.bn_conv3(self.mlp_conv3(points)))
        points = F.relu(self.bn_conv4(self.mlp_conv4(points)))

        # Correlate image features with geometric features per node.
        feature2d = feature2d.permute(0, 3, 1, 2)
        points = points.permute(0, 3, 2, 1)
        combine = torch.matmul(feature2d, points)

        # Flatten the (feature_dim x cmid) correlation into one vector/node.
        combine = combine.permute(0, 2, 3, 1)
        combine = combine.view(self.batch_size, -1, self.node_size)

        combine = F.relu(self.fc1_bn(self.fc_1_256(combine)))
        combine = F.relu(self.fc2_bn(self.fc_2(combine)))
        combine = F.relu(self.fc3_bn(self.fc_3(combine)))
        # Temporal fusion: element-wise max with the previous frame's feature.
        combine = torch.max(combine, pre_result)

        combine_result = self.fc_output_20(combine)
        result = F.log_softmax(combine_result, dim=1)
        sorted_result, indices = torch.sort(result, dim=1, descending=True)

        # Ratio of top-1 to top-2 log-probability (both are <= 0, so the
        # ratio is in (0, 1]; values near 1 mean the two classes are close,
        # i.e. the prediction is uncertain).
        uncertainty = sorted_result[:, 0, :] / sorted_result[:, 1, :]

        return result, combine, uncertainty


def create_FusionAwareFuseConv(num_class):
    """Factory for FusionAwareFuseConv with `num_class` output classes."""
    return FusionAwareFuseConv(num_class=num_class)
def create_color_palette():
    """Return the 41-entry ScanNet RGB palette (index 0 is black/unused)."""
    return [
        (0, 0, 0),
        (174, 199, 232),  # wall0
        (152, 223, 138),  # floor1
        (31, 119, 180),   # cabinet2
        (255, 187, 120),  # bed3
        (188, 189, 34),   # chair4
        (140, 86, 75),    # sofa5
        (255, 152, 150),  # table6
        (214, 39, 40),    # door7
        (197, 176, 213),  # window8
        (148, 103, 189),  # bookshelf9
        (196, 156, 148),  # picture10
        (23, 190, 207),   # counter11
        (178, 76, 76),
        (247, 182, 210),  # desk13
        (66, 188, 102),
        (219, 219, 141),  # curtain15
        (140, 57, 197),
        (202, 185, 52),
        (51, 176, 203),
        (200, 54, 131),
        (92, 193, 61),
        (78, 71, 183),
        (172, 114, 82),
        (255, 127, 14),   # refrigerator23
        (91, 163, 138),
        (153, 98, 156),
        (140, 153, 101),
        (158, 218, 229),  # shower curtain27
        (100, 125, 154),
        (178, 127, 135),
        (120, 185, 128),
        (146, 111, 194),
        (44, 160, 44),    # toilet32
        (112, 128, 144),  # sink33
        (96, 207, 209),
        (227, 119, 194),  # bathtub35
        (213, 92, 176),
        (94, 106, 211),
        (82, 84, 163),    # otherfurn38
        (100, 85, 144)
    ]

# Human-readable names for the 20 ScanNet benchmark classes.
label_name = [
    "wall",
    "floor",
    "cabinet",
    "bed",
    "chair",
    "sofa",
    "table",
    "door",
    "window",
    "bookshelf",
    "picture",
    "counter",
    "desk",
    "curtain",
    "refrigerator",
    "shower curtain",
    "toilet",
    "sink",
    "bathtub",
    "otherfurn",
]


def write_ply(point_cloud, rgb_cloud=None, label_cloud=None, output_dir="./",
              name="test", hasrgb=False, haslabel=False):
    """Write a point cloud to an ASCII PLY file at ``output_dir + name + ".ply"``.

    Args:
        point_cloud: (N, 3) array of xyz coordinates.
        rgb_cloud: (N, 3) array of 0-255 colors; used when ``hasrgb``.
        label_cloud: (N,) array of label ids, mapped to palette colors and
            appended as a ``label`` property when ``haslabel``. Values are
            assumed to index into the 41-entry palette — TODO confirm callers
            pass palette indices, not raw ScanNet ids.
        output_dir: directory prefix (must end with a path separator).
        name: output file base name (without extension).
        hasrgb: write the raw RGB columns.
        haslabel: write palette-colored RGB plus a label column.

    Fixes over the previous version:
        * The file handle is now closed even on error (``with`` statement).
        * The red/green/blue header properties are only declared when color
          values are actually written, so the header matches the rows and
          the hasrgb=False/haslabel=False case is no longer a malformed PLY.
    """
    point_count = point_cloud.shape[0]
    with open(output_dir + name + ".ply", 'w') as ply_file:
        ply_file.write("ply\n")
        ply_file.write("format ascii 1.0\n")
        ply_file.write("element vertex " + str(point_count) + "\n")
        ply_file.write("property float x\n")
        ply_file.write("property float y\n")
        ply_file.write("property float z\n")

        # Only promise color columns the loop below will actually emit.
        if hasrgb or haslabel:
            ply_file.write("property uchar red\n")
            ply_file.write("property uchar green\n")
            ply_file.write("property uchar blue\n")
        if haslabel:
            ply_file.write("property uchar label\n")

        ply_file.write("end_header\n")
        color = create_color_palette()
        for i in range(point_count):
            ply_file.write(str(point_cloud[i, 0]) + " " +
                           str(point_cloud[i, 1]) + " " +
                           str(point_cloud[i, 2]))
            if hasrgb:
                ply_file.write(" " + str(int(rgb_cloud[i, 0])) + " " +
                               str(int(rgb_cloud[i, 1])) + " " +
                               str(int(rgb_cloud[i, 2])))
            if haslabel:
                ply_file.write(" " + str(color[int(label_cloud[i])][0]) + " " +
                               str(color[int(label_cloud[i])][1]) + " " +
                               str(color[int(label_cloud[i])][2]))
                ply_file.write(" " + str(int(label_cloud[i])))

            ply_file.write("\n")
    print("save result to " + output_dir + name + ".ply")
class vis_pointcloud:
    """Open3D window that incrementally accumulates and renders a point cloud.

    When constructed with ``use_vis == 0`` every method is a no-op, so the
    pipeline can run headless without touching Open3D windowing.
    """

    def __init__(self, use_vis):
        self.use_vis = use_vis
        if self.use_vis == 0:
            return
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window(window_name="scene", width=640, height=480, left=50)
        render_option = self.vis.get_render_option()
        render_option.point_size = 0.5

    def update(self, points, points_color):
        """Add a batch of points (N,3) with 0-255 colors and refresh the view.

        Each call adds a new geometry; previously added points stay visible.
        """
        if self.use_vis == 0:
            return
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        # Open3D expects colors in [0, 1]; inputs are 0-255.
        pcd.colors = o3d.utility.Vector3dVector(points_color / 255)
        self.vis.add_geometry(pcd)
        self.vis.poll_events()
        self.vis.update_renderer()

    def run(self):
        """Block in the Open3D event loop until the window is closed."""
        if self.use_vis == 0:
            return
        self.vis.run()


class Vis_color:
    """Companion Open3D window showing the current RGB frame.

    As with vis_pointcloud, ``use_vis == 0`` disables all rendering.
    """

    def __init__(self, use_vis):
        self.use_vis = use_vis
        if use_vis == 0:
            return
        self.vis_image = o3d.visualization.Visualizer()
        self.vis_image.create_window(window_name="scene", width=320, height=240, left=720)

    def update(self, color_image):
        """Display one RGB frame (H,W,3 uint8) and refresh the window."""
        if self.use_vis == 0:
            return
        geometry_image = o3d.geometry.Image(color_image)
        self.vis_image.add_geometry(geometry_image)
        self.vis_image.poll_events()
        self.vis_image.update_renderer()
        # Release the image so the next frame replaces it.
        geometry_image.clear()
parser.add_argument('--min_octree_threshold', type=float, default=0.04) 8 | parser.add_argument('--max_octree_threshold', type=float, default=0.15) 9 | parser.add_argument('--interval_size', type=float, default=0.035) 10 | parser.add_argument('--weight2d_path', type=str, default="weight/model2d.pth") 11 | parser.add_argument('--weight3d_path', type=str, default="weight/model3d.pth") 12 | parser.add_argument('--scene_path', type=str, default="data/scene_0.h5") 13 | parser.add_argument('--use_vis', type=int, default="1") 14 | 15 | 16 | 17 | opt = parser.parse_args() 18 | os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu) 19 | 20 | 21 | from utils.vis_utils import vis_pointcloud,Vis_color 22 | import torch 23 | import time 24 | from model.model2d import FuseNet_feature 25 | from model.model3d import create_FusionAwareFuseConv 26 | from GLtree.interval_tree import RedBlackTree, Node, BLACK, RED, NIL 27 | from GLtree.octree import point3D 28 | import numpy as np 29 | from utils.ply_utils import write_ply,create_color_palette,label_mapper 30 | import torchvision.transforms as transforms 31 | import random 32 | import h5py 33 | 34 | SCANNET_TYPES = {'scannet': (40, [0.496342, 0.466664, 0.440796], [0.277856, 0.28623, 0.291129])} 35 | transform_image = transforms.Compose([transforms.ToTensor(), 36 | transforms.Normalize(SCANNET_TYPES['scannet'][1], SCANNET_TYPES['scannet'][2])]) 37 | 38 | num_classes = 20 39 | near_node_num = 8 40 | max_node = 8 41 | 42 | point_size = opt.point_size 43 | print("[INFO] load model") 44 | model2d=FuseNet_feature(num_classes) 45 | model3d = create_FusionAwareFuseConv(num_classes) 46 | 47 | 48 | model3d.load_state_dict(torch.load(opt.weight3d_path)) 49 | model2d.load_state_dict(torch.load(opt.weight2d_path)) 50 | 51 | model2d = model2d.cuda() 52 | model3d = model3d.cuda() 53 | 54 | 55 | color_map=create_color_palette() 56 | model2d.eval() 57 | model3d.eval() 58 | print("[INFO] load data") 59 | 60 | data_file=h5py.File(opt.scene_path,"r") 61 | 62 
# ---------------------------------------------------------------------------
# NOTE(review): this region is a line-numbered repository dump (``NN | ``
# prefixes), not directly runnable Python; the four physical lines below are
# kept byte-for-byte and only this comment block is added.
#
# Content of the dump, in order:
#  * vis_sequence.py (dump lines 62-186): reads per-frame color/depth/pose/
#    point/mask arrays from the opened HDF5 file, builds one RedBlackTree per
#    axis (x/y/z) keyed by opt.interval_size, and for every frame with a valid
#    pose: runs the 2D network, inserts each sampled point into the three
#    interval trees, intersects the per-axis candidate sets, and either fuses
#    the 2D feature into an existing nearby point3D via np.maximum (L1 /
#    Manhattan distance < opt.min_octree_threshold) or creates a new point3D
#    and links it into the 8-way branch slots (branch index x*4 + y*2 + z);
#    it then batches each node's neighbour features/coordinates through the
#    3D network (model3d) to get per-point labels and uncertainty.
#  * TRUNCATION: the dump jumps mid-statement after
#    ``if uncertainty_array[0][set_count]`` (vis_sequence.py) into the middle
#    of vis_sequence_GLtree.py (dump line ~94); the tail of the first script
#    and the header of the second are missing and are NOT reconstructed here.
#  * vis_sequence_GLtree.py (dump lines ~94-144): the same GLtree insertion
#    scheme but colour-only (no networks) — new point3D nodes store the RGB
#    sampled from color_image at 4x the mask coordinates, per-frame points and
#    colours are pushed to the visualiser, and the accumulated cloud is
#    written out with write_ply as "result_GLtree".
# ---------------------------------------------------------------------------
| color_image_array=data_file['color_image'] 63 | depth_map_array=data_file['depth_map'] 64 | valid_pose_array=data_file['pose_valid'] 65 | points_array=data_file['points_array'] 66 | mask_array=data_file['mask'] 67 | 68 | x_rb_tree = RedBlackTree(opt.interval_size) 69 | y_rb_tree = RedBlackTree(opt.interval_size) 70 | z_rb_tree = RedBlackTree(opt.interval_size) 71 | 72 | vis_p=vis_pointcloud(opt.use_vis) 73 | vis_c=Vis_color(opt.use_vis) 74 | 75 | frame_index=0 76 | print("[INFO] begin") 77 | 78 | with torch.no_grad(): 79 | for i in range(0,color_image_array.shape[0]): 80 | print("---------------------------") 81 | print("image:",i) 82 | time_s=time.time() 83 | color_image=color_image_array[i,:,:,:].astype(np.uint8) 84 | depth_image=depth_map_array[i,:,:] 85 | points=points_array[i,:,:] 86 | points_mask=mask_array[i,:,:] 87 | valid_pose=valid_pose_array[i] 88 | if valid_pose==0: 89 | continue 90 | 91 | color_image_cuda = transform_image(color_image).cuda() 92 | depth_image=transforms.ToTensor()(depth_image).type(torch.FloatTensor).cuda() 93 | input_color = torch.unsqueeze(color_image_cuda, 0) 94 | depth_image = torch.unsqueeze(depth_image, 0) 95 | imageft=model2d(input_color,depth_image).detach().cpu().numpy() 96 | x_tree_node_list=[] 97 | y_tree_node_list=[] 98 | z_tree_node_list=[] 99 | per_image_node_set=set() 100 | 101 | for p in range(point_size): 102 | 103 | x_temp_node = x_rb_tree.add(points[p,0]) 104 | y_temp_node = y_rb_tree.add(points[p,1]) 105 | z_temp_node = z_rb_tree.add(points[p,2]) 106 | x_tree_node_list.append(x_temp_node) 107 | y_tree_node_list.append(y_temp_node) 108 | z_tree_node_list.append(z_temp_node) 109 | 110 | for p in range(point_size): 111 | 112 | x_set_union = x_tree_node_list[p].set_list 113 | y_set_union = y_tree_node_list[p].set_list 114 | z_set_union = z_tree_node_list[p].set_list 115 | set_intersection = x_set_union[0] & y_set_union[0] & z_set_union[0] 116 | temp_branch = [None, None, None, None, None, None, None, None] 117 | 
temp_branch_distance = np.full((8),opt.max_octree_threshold) 118 | is_find_nearest = False 119 | branch_record = set() 120 | list_intersection=list(set_intersection) 121 | random.shuffle(list_intersection) 122 | 123 | for point_iter in list_intersection: 124 | distance = np.sum(np.absolute(point_iter.point_coor - points[p,:])) 125 | if distance < opt.min_octree_threshold: 126 | is_find_nearest = True 127 | if frame_index!=point_iter.frame_id: 128 | point_iter.feature_fuse = np.maximum(imageft[0, :, int(points_mask[p, 0]), 129 | int(points_mask[p, 1])].copy() , point_iter.feature_fuse) 130 | point_iter.frame_id=frame_index 131 | per_image_node_set.add(point_iter) 132 | break 133 | x = int(point_iter.point_coor[0] >= points[p, 0]) 134 | y = int(point_iter.point_coor[1] >= points[p, 1]) 135 | z = int(point_iter.point_coor[2] >= points[p, 2]) 136 | branch_num= x * 4 + y * 2 + z 137 | if distance < point_iter.branch_distance[7-branch_num]: 138 | branch_record.add((point_iter, 7 - branch_num, distance)) 139 | if distance < temp_branch_distance[branch_num]: 140 | temp_branch[branch_num] = point_iter 141 | temp_branch_distance[branch_num] = distance 142 | 143 | if not is_find_nearest: 144 | new_3dpoint = point3D(points[p, :].T, imageft[0, :, int(points_mask[p, 0]), 145 | int(points_mask[p, 1])].copy(),opt.max_octree_threshold) 146 | for point_branch in branch_record: 147 | point_branch[0].branch_array[point_branch[1]] = new_3dpoint 148 | point_branch[0].branch_distance[point_branch[1]] = point_branch[2] 149 | 150 | new_3dpoint.branch_array = temp_branch 151 | new_3dpoint.branch_distance = temp_branch_distance 152 | per_image_node_set.add(new_3dpoint) 153 | 154 | for x_set in x_set_union: 155 | x_set.add(new_3dpoint) 156 | for y_set in y_set_union: 157 | y_set.add(new_3dpoint) 158 | for z_set in z_set_union: 159 | z_set.add(new_3dpoint) 160 | 161 | node_lengths=len(per_image_node_set) 162 | input_feature = np.zeros([1, 128, near_node_num, node_lengths]) 163 | input_coor = 
np.zeros([1, 3, near_node_num, node_lengths]) 164 | result_feature = np.zeros([1, 128, node_lengths]) 165 | points = np.zeros([node_lengths, 3]) 166 | points_color = np.zeros([node_lengths,3]) 167 | 168 | set_count=0 169 | for set_point in per_image_node_set: 170 | neighbor_2dfeature, neighbor_coor,_ =set_point.findNearPoint(near_node_num,max_node) 171 | input_feature[0, :, :, set_count] = neighbor_2dfeature 172 | input_coor[0, :, :, set_count] = neighbor_coor 173 | result_feature[0,:,set_count]=set_point.result_feature 174 | points[set_count,:]=set_point.point_coor 175 | set_count+=1 176 | 177 | input_feature=torch.from_numpy(input_feature).cuda() 178 | input_coor=torch.from_numpy(input_coor).cuda() 179 | result_feature=torch.from_numpy(result_feature).cuda() 180 | output,combine_result,uncertainty = model3d(input_feature.float(), input_coor.float(),result_feature.float()) 181 | result_array = combine_result.detach().cpu().numpy() 182 | uncertainty_array= uncertainty.detach().cpu().numpy() 183 | point_pred_label=label_mapper[torch.argmax(output, 1).long().squeeze().cpu().numpy()] 184 | set_count=0 185 | for set_point in per_image_node_set: 186 | if uncertainty_array[0][set_count]= points[p, 0]) 95 | y = int(point_iter.point_coor[1] >= points[p, 1]) 96 | z = int(point_iter.point_coor[2] >= points[p, 2]) 97 | branch_num= x * 4 + y * 2 + z 98 | if distance < point_iter.branch_distance[7-branch_num]: 99 | branch_record.add((point_iter, 7 - branch_num, distance)) 100 | if distance < temp_branch_distance[branch_num]: 101 | temp_branch[branch_num] = point_iter 102 | temp_branch_distance[branch_num] = distance 103 | 104 | if not is_find_nearest: 105 | new_3dpoint = point3D(points[p, :].T,color_image[int(points_mask[p, 0])*4, 106 | int(points_mask[p, 1])*4,:]) 107 | for point_branch in branch_record: 108 | point_branch[0].branch_array[point_branch[1]] = new_3dpoint 109 | point_branch[0].branch_distance[point_branch[1]] = point_branch[2] 110 | 111 | new_3dpoint.branch_array 
= temp_branch 112 | new_3dpoint.branch_distance = temp_branch_distance 113 | per_image_node_set.add(new_3dpoint) 114 | 115 | for x_set in x_set_union: 116 | x_set.add(new_3dpoint) 117 | for y_set in y_set_union: 118 | y_set.add(new_3dpoint) 119 | for z_set in z_set_union: 120 | z_set.add(new_3dpoint) 121 | 122 | node_lengths=len(per_image_node_set) 123 | 124 | points = np.zeros([node_lengths, 3]) 125 | points_color = np.zeros([node_lengths,3]) 126 | 127 | set_count=0 128 | for set_point in per_image_node_set: 129 | points[set_count,:]=set_point.point_coor 130 | points_color[set_count,:]=set_point.point_color 131 | set_count+=1 132 | 133 | frame_index+=1 134 | print("time per frame",time.time()-time_s) 135 | vis_p.update(points,points_color) 136 | vis_c.update(color_image) 137 | 138 | point_result=x_rb_tree.all_points_from_tree(return_color=True) 139 | write_ply(point_result[:,:3],hasrgb=True,rgb_cloud=point_result[:,3:],output_dir="./",name="result_GLtree") 140 | 141 | del x_rb_tree 142 | del y_rb_tree 143 | del z_rb_tree 144 | vis_p.run() --------------------------------------------------------------------------------