├── .DS_Store
├── .gitattributes
├── .idea
│   ├── .gitignore
│   ├── DualAfford.iml
│   ├── inspectionProfiles
│   │   └── profiles_settings.xml
│   ├── misc.xml
│   └── modules.xml
├── README.md
├── code
│   ├── .DS_Store
│   ├── blender_utils
│   │   ├── camera.blend
│   │   ├── cube.obj
│   │   ├── geometry_utils.py
│   │   ├── quaternion.py
│   │   ├── render_blender.py
│   │   └── render_using_blender.py
│   ├── camera.py
│   ├── camera_rl.py
│   ├── cate_setting.json
│   ├── collect_data.py
│   ├── collect_data_SAC.py
│   ├── collect_data_checkDual.py
│   ├── collect_data_main.py
│   ├── data.py
│   ├── env.py
│   ├── env_rl.py
│   ├── eval_sampleSucc.py
│   ├── eval_sampleSucc_main.py
│   ├── gen_cate_setting.py
│   ├── logs
│   │   ├── .gitignore
│   │   ├── README.md
│   │   └── w2a
│   │       └── finalexp-model_all_final-pulling-None-train_all_v1
│   │           ├── ckpts
│   │           │   ├── 81-lr_scheduler.pth
│   │           │   ├── 81-network.pth
│   │           │   ├── 81-optimizer.pth
│   │           │   ├── 81-sample_succ_list.pth
│   │           │   └── 81-train_dataset.pth
│   │           ├── conf.pth
│   │           ├── datagen_v1.py
│   │           ├── dataset_v1.py
│   │           ├── model_all_final.py
│   │           ├── train_all_v1.py
│   │           └── train_log.txt
│   ├── models
│   │   ├── .DS_Store
│   │   ├── SAC.py
│   │   ├── model_3d_legacy.py
│   │   ├── model_actor_fir.py
│   │   ├── model_actor_sec.py
│   │   ├── model_aff_fir.py
│   │   ├── model_aff_sec.py
│   │   ├── model_critic_fir.py
│   │   ├── model_critic_sec.py
│   │   ├── pointnet2
│   │   │   ├── __init__.py
│   │   │   ├── _version.py
│   │   │   ├── config
│   │   │   │   ├── config.yaml
│   │   │   │   ├── model
│   │   │   │   │   ├── msg.yaml
│   │   │   │   │   └── ssg.yaml
│   │   │   │   ├── task
│   │   │   │   │   ├── cls.yaml
│   │   │   │   │   └── semseg.yaml
│   │   │   │   └── task_model
│   │   │   │       ├── cls-msg.yaml
│   │   │   │       ├── cls-ssg.yaml
│   │   │   │       ├── semseg-msg.yaml
│   │   │   │       └── semseg-ssg.yaml
│   │   │   ├── data
│   │   │   │   ├── Indoor3DSemSegLoader.py
│   │   │   │   ├── ModelNet40Loader.py
│   │   │   │   ├── __init__.py
│   │   │   │   └── data_utils.py
│   │   │   ├── models
│   │   │   │   ├── __init__.py
│   │   │   │   ├── pointnet2_msg_cls.py
│   │   │   │   ├── pointnet2_msg_sem.py
│   │   │   │   ├── pointnet2_ssg_cls.py
│   │   │   │   └── pointnet2_ssg_sem.py
│   │   │   └── train.py
│   │   └── pointnet2_ops
│   │       ├── __init__.py
│   │       ├── _ext-src
│   │       │   ├── include
│   │       │   │   ├── ball_query.h
│   │       │   │   ├── cuda_utils.h
│   │       │   │   ├── group_points.h
│   │       │   │   ├── interpolate.h
│   │       │   │   ├── sampling.h
│   │       │   │   └── utils.h
│   │       │   └── src
│   │       │       ├── ball_query.cpp
│   │       │       ├── ball_query_gpu.cu
│   │       │       ├── bindings.cpp
│   │       │       ├── group_points.cpp
│   │       │       ├── group_points_gpu.cu
│   │       │       ├── interpolate.cpp
│   │       │       ├── interpolate_gpu.cu
│   │       │       ├── sampling.cpp
│   │       │       └── sampling_gpu.cu
│   │       ├── _version.py
│   │       ├── pointnet2_modules.py
│   │       └── pointnet2_utils.py
│   ├── requirements.txt
│   ├── robots
│   │   ├── .DS_Store
│   │   ├── franka_description
│   │   │   └── meshes
│   │   │       ├── collision
│   │   │       │   ├── finger.stl
│   │   │       │   ├── finger.stl.convex.stl
│   │   │       │   ├── hand.stl
│   │   │       │   ├── hand.stl.convex.stl
│   │   │       │   ├── link0.stl
│   │   │       │   ├── link0.stl.convex.stl
│   │   │       │   ├── link1.stl
│   │   │       │   ├── link1.stl.convex.stl
│   │   │       │   ├── link2.stl
│   │   │       │   ├── link2.stl.convex.stl
│   │   │       │   ├── link3.stl
│   │   │       │   ├── link3.stl.convex.stl
│   │   │       │   ├── link4.stl
│   │   │       │   ├── link4.stl.convex.stl
│   │   │       │   ├── link5.stl
│   │   │       │   ├── link5.stl.convex.stl
│   │   │       │   ├── link6.stl
│   │   │       │   ├── link6.stl.convex.stl
│   │   │       │   ├── link7.stl
│   │   │       │   └── link7.stl.convex.stl
│   │   │       └── visual
│   │   │           ├── finger.dae
│   │   │           ├── hand.dae
│   │   │           ├── link0.dae
│   │   │           ├── link1.dae
│   │   │           ├── link2.dae
│   │   │           ├── link3.dae
│   │   │           ├── link4.dae
│   │   │           ├── link5.dae
│   │   │           ├── link6.dae
│   │   │           └── link7.dae
│   │   ├── panda.urdf
│   │   ├── panda_gripper.urdf
│   │   ├── panda_robot.py
│   │   └── panda_robot_rl.py
│   ├── scripts
│   │   ├── .DS_Store
│   │   ├── run_collect_SACVAL_pickup_TRAIN_display.sh
│   │   ├── run_collect_SAC_pickup_TRAIN_display.sh
│   │   ├── run_collect_random_push_TRAIN.sh
│   │   ├── run_eval_push_afterCA.sh
│   │   ├── run_eval_push_beforeCA.sh
│   │   ├── run_train_CA_push.sh
│   │   ├── run_train_actor_push_fir.sh
│   │   ├── run_train_actor_push_sec.sh
│   │   ├── run_train_affordance_push_fir.sh
│   │   ├── run_train_affordance_push_sec.sh
│   │   ├── run_train_critic_push_fir.sh
│   │   └── run_train_critic_push_sec.sh
│   ├── train_actor.py
│   ├── train_affordance.py
│   ├── train_collaborative_adaptation.py
│   ├── train_critic.py
│   └── utils.py
├── images
│   └── teaser.png
└── stats
    ├── .DS_Store
    ├── train_where2actPP_test_data_list.txt
    ├── train_where2actPP_test_data_list_pickup.txt
    ├── train_where2actPP_train_data_list.txt
    ├── train_where2actPP_train_data_list_pickup.txt
    └── train_where2actPP_val_data_list_pickup.txt
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/.DS_Store
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/DualAfford.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DualAfford: Learning Collaborative Visual Affordance for Dual-gripper Manipulation
2 |
3 |
4 |
5 | 
6 |
7 | **The Proposed DualAfford Task.** Given different shapes and manipulation tasks (*e.g.*, pushing the keyboard in the direction indicated by the red arrow), our proposed *DualAfford* framework predicts dual collaborative visual actionable affordance and gripper orientations. The prediction for the second gripper (b) is dependent on the first (a). We can directly apply our network to real-world data.
8 |
9 | ## About the paper
10 |
11 | DualAfford is accepted to ICLR 2023.
12 |
13 | Our team: [Yan Zhao](https://sxy7147.github.io)\*, [Ruihai Wu](https://warshallrho.github.io)\*, [Zhehuan Chen](https://acmlczh.github.io), [Yourong Zhang](https://www.linkedin.com/in/yourong-zhang-2b1aab23a/), [Qingnan Fan](https://fqnchina.github.io), [Kaichun Mo](https://kaichun-mo.github.io) and [Hao Dong](https://zsdonghao.github.io).
14 |
15 | (* indicates joint first authors)
16 |
17 | arXiv Version: https://arxiv.org/pdf/2207.01971.pdf
18 |
19 | Project Page: https://hyperplane-lab.github.io/DualAfford/
20 |
21 |
22 |
23 | ## Before you start
24 | To train the models, please first go to the `data` folder and download the pre-processed SAPIEN dataset for DualAfford.
25 | To test the pretrained models, please go to the `logs` folder and download the pretrained checkpoints.
26 |
27 | You can click [here](https://mirrors.pku.edu.cn/dl-release/DualAfford_ICLR2023/) to download all resources.
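
For example, a sketch of grabbing everything from that mirror with `wget` (assuming the directory listing is crawlable; you may prefer to download only the parts you need):

    # mirror the release directory into ./DualAfford_ICLR2023
    wget -r -np -nH --cut-dirs=1 -R "index.html*" https://mirrors.pku.edu.cn/dl-release/DualAfford_ICLR2023/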
28 |
29 | ## Dependencies
30 | This code has been tested on Ubuntu 18.04 with CUDA 10.1, Python 3.6, and PyTorch 1.7.0.
31 |
32 | First, install SAPIEN
33 |
34 | pip install http://storage1.ucsd.edu/wheels/sapien-dev/sapien-0.8.0.dev0-cp36-cp36m-manylinux2014_x86_64.whl
35 |
36 |
37 | Then, if you want to run the 3D experiments, install PointNet++, which they depend on:
38 |
39 | git clone --recursive https://github.com/erikwijmans/Pointnet2_PyTorch
40 | cd Pointnet2_PyTorch
41 | # [IMPORTANT] comment these two lines of code:
42 | # https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling_gpu.cu#L100-L101
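    # for example, with GNU sed (an optional, illustrative shortcut -- double-check the file afterwards):
    #   sed -i '100,101s|^|// |' pointnet2_ops_lib/pointnet2_ops/_ext-src/src/sampling_gpu.cu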
43 | pip install -r requirements.txt
44 | pip install -e .
45 |
46 | Finally, run the following command to install the other dependencies.
47 |
48 | # make sure you are at the repository root directory
49 | pip install -r requirements.txt
50 |
52 |
53 | For visualization, please install Blender v2.79 and put the executable in your environment PATH.
54 | Also, the prediction results can be visualized using MeshLab or the *RenderShape* tool in [Thea](https://github.com/sidch/thea).
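
A minimal setup sketch (assuming the official Blender 2.79b Linux build; adjust the install location to your machine):

    # download Blender 2.79b, unpack it, and expose the executable on PATH
    wget https://download.blender.org/release/Blender2.79/blender-2.79b-linux-glibc219-x86_64.tar.bz2
    tar -xjf blender-2.79b-linux-glibc219-x86_64.tar.bz2 -C $HOME
    export PATH=$HOME/blender-2.79b-linux-glibc219-x86_64:$PATH

Note that `code/blender_utils/render_using_blender.py` hardcodes the blender path as `/home/blender-2.79-linux-glibc219-x86_64/blender`, so either place the unpacked folder there or edit that path to match your installation.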
55 |
56 | ## Generate Offline Training Data
57 | Before training the network, we need to collect a large set of interaction trials via random or RL exploration.
58 |
59 | To generate offline training data for the `pushing` task, run the following command:
60 |
61 | sh scripts/run_collect_random_push_TRAIN.sh
62 |
63 | Note that the scripts for other tasks (`rotate`, `topple`) are similar. You can modify the content of the script file to generate data for different settings. Also, please modify the `num_processes` parameter to specify the number of CPU cores to use.
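
Under the hood, collection is driven by `code/collect_data_main.py` (see its argparse block for the full flag list); a hypothetical direct invocation from the `code` directory, with purely illustrative values, might look like:

    python collect_data_main.py \
        --category Box,Display,Bench,Keyboard2 \
        --primact_type pushing \
        --mode train \
        --out_dir ../data/pushing_train \
        --num_processes 8 \
        --no_gui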
64 |
65 | To generate offline training data for the `pickup` task, you need to train an RL model for each object category. For example, run the following commands:
66 |
67 | sh scripts/run_collect_SAC_pickup_TRAIN_display.sh
68 | sh scripts/run_collect_SACVAL_pickup_TRAIN_display.sh
69 |
70 | You can refer to `gen_cate_setting.py` to see the default settings for each category. Modify the file as needed, then run the following command to generate the JSON file used for loading arguments:
71 |
72 | python gen_cate_setting.py
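
The generated file presumably follows the same layout as the `code/cate_setting.json` shipped with the repo; for example, you can list the per-category settings of a task with a one-liner like:

    python -c "import json; print(sorted(json.load(open('cate_setting.json'))['pushing']['cate_setting']))"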
73 |
74 | ## Training Pipeline for the DualAfford Framework
75 |
76 | Here we again use the `pushing` task as an example; the scripts for other tasks are similar.
77 |
78 | #### Train the Second Gripper Module (M2)
79 |
80 | As mentioned in our paper, we start by training the Second Gripper Module (M2). To train M2, we first train its Critic Module (C2) and Proposal Module (P2) simultaneously (the Proposal Module is referred to as the actor in our code).
81 |
82 | To train C2 and P2, run:
83 |
84 | sh scripts/run_train_critic_push_sec.sh
85 | sh scripts/run_train_actor_push_sec.sh
86 |
87 | Then, after pretraining C2 and P2, we can train the Affordance Module (A2) for the Second Gripper Module. Please specify the checkpoints of the pretrained C2 and P2.
88 |
89 | sh scripts/run_train_affordance_push_sec.sh
90 |
91 | #### Train the First Gripper Module (M1)
92 |
93 | The First Gripper Module (M1) is trained with awareness of the trained M2. For M1, we likewise first train its Critic Module (C1) and Proposal Module (P1). Note that to train C1, we need to specify the pretrained checkpoint of the Second Gripper Module (M2).
94 |
95 | To train C1 and P1, run:
96 |
97 | sh scripts/run_train_critic_push_fir.sh
98 | sh scripts/run_train_actor_push_fir.sh
99 |
100 | Then, after pretraining C1 and P1, we can train the Affordance Module (A1) for the First Gripper Module. Please specify the checkpoints of the pretrained C1 and P1.
101 |
102 | sh scripts/run_train_affordance_push_fir.sh
103 |
104 | #### The Collaborative Adaptation Procedure
105 |
106 | To further enhance the collaboration between the two gripper modules, we introduce the Collaborative Adaptation procedure. In this procedure, the proposed dual-gripper actions are executed simultaneously in the simulator, and the interaction outcomes are used to update both gripper modules.
107 |
108 | To run the Collaborative Adaptation procedure, use the following command:
109 |
110 | sh scripts/run_train_CA_push.sh
111 |
112 | ## Evaluation
113 |
114 | To evaluate and visualize the results, run the following command:
115 |
116 | sh scripts/run_eval_push_afterCA.sh
117 |
118 | This uses the pretrained networks to propose interactions in the simulation, and the manipulation results are saved in `gif` format. The script also visualizes the affordance maps proposed by the two Affordance Modules (A1 and A2).
119 |
120 |
121 |
122 | ## Citations
123 |
124 | ```
125 | @inproceedings{zhao2023dualafford,
126 | title={DualAfford: Learning Collaborative Visual Affordance for Dual-gripper Manipulation},
127 | author={Yan Zhao and Ruihai Wu and Zhehuan Chen and Yourong Zhang and Qingnan Fan and Kaichun Mo and Hao Dong},
128 | booktitle={International Conference on Learning Representations},
129 | year={2023},
130 | }
131 | ```
132 |
133 | ## Questions
134 |
135 | Please post questions and requests for help as issues on this GitHub repo page. We encourage using GitHub issues instead of emailing us, since your questions may benefit others.
136 |
137 | ## License
138 |
139 | MIT License
140 |
141 |
--------------------------------------------------------------------------------
/code/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/.DS_Store
--------------------------------------------------------------------------------
/code/blender_utils/camera.blend:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/blender_utils/camera.blend
--------------------------------------------------------------------------------
/code/blender_utils/cube.obj:
--------------------------------------------------------------------------------
1 | v -0.500000 -0.500000 0.500000
2 | v 0.500000 -0.500000 0.500000
3 | v -0.500000 0.500000 0.500000
4 | v 0.500000 0.500000 0.500000
5 | v -0.500000 0.500000 -0.500000
6 | v 0.500000 0.500000 -0.500000
7 | v -0.500000 -0.500000 -0.500000
8 | v 0.500000 -0.500000 -0.500000
9 | f 1 2 3
10 | f 3 2 4
11 | f 3 4 5
12 | f 5 4 6
13 | f 5 6 7
14 | f 7 6 8
15 | f 7 8 1
16 | f 1 8 2
17 | f 2 8 4
18 | f 4 8 6
19 | f 7 1 5
20 | f 5 1 3
21 |
--------------------------------------------------------------------------------
/code/blender_utils/quaternion.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018-present, Facebook, Inc.
2 | # All rights reserved.
3 | #
4 | # This source code is licensed under the license found in the
5 | # LICENSE file in the root directory of this source tree.
6 | #
7 |
8 | import torch
9 | import numpy as np
10 |
11 | # PyTorch-backed implementations
12 |
13 | def qmul(q, r):
14 | """
15 | Multiply quaternion(s) q with quaternion(s) r.
16 | Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
17 | Returns q*r as a tensor of shape (*, 4).
18 | """
19 | assert q.shape[-1] == 4
20 | assert r.shape[-1] == 4
21 |
22 | original_shape = q.shape
23 |
24 | # Compute outer product
25 | terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))
26 |
27 | w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
28 | x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
29 | y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
30 | z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
31 | return torch.stack((w, x, y, z), dim=1).view(original_shape)
32 |
33 | def qrot(q, v):
34 | """
35 | Rotate vector(s) v about the rotation described by quaternion(s) q.
36 | Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
37 | where * denotes any number of dimensions.
38 | Returns a tensor of shape (*, 3).
39 | """
40 | assert q.shape[-1] == 4
41 | assert v.shape[-1] == 3
42 | assert q.shape[:-1] == v.shape[:-1]
43 |
44 | original_shape = list(v.shape)
45 | q = q.view(-1, 4)
46 | v = v.view(-1, 3)
47 |
48 | qvec = q[:, 1:]
49 | uv = torch.cross(qvec, v, dim=1)
50 | uuv = torch.cross(qvec, uv, dim=1)
51 | return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)
52 |
53 | def qeuler(q, order, epsilon=0):
54 | """
55 | Convert quaternion(s) q to Euler angles.
56 | Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
57 | Returns a tensor of shape (*, 3).
58 | """
59 | assert q.shape[-1] == 4
60 |
61 | original_shape = list(q.shape)
62 | original_shape[-1] = 3
63 | q = q.view(-1, 4)
64 |
65 | q0 = q[:, 0]
66 | q1 = q[:, 1]
67 | q2 = q[:, 2]
68 | q3 = q[:, 3]
69 |
70 | if order == 'xyz':
71 | x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
72 | y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1+epsilon, 1-epsilon))
73 | z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
74 | elif order == 'yzx':
75 | x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
76 | y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
77 | z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1+epsilon, 1-epsilon))
78 | elif order == 'zxy':
79 | x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1+epsilon, 1-epsilon))
80 | y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2*(q1 * q1 + q2 * q2))
81 | z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2*(q1 * q1 + q3 * q3))
82 | elif order == 'xzy':
83 | x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q3 * q3))
84 | y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2*(q2 * q2 + q3 * q3))
85 | z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1+epsilon, 1-epsilon))
86 | elif order == 'yxz':
87 | x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1+epsilon, 1-epsilon))
88 | y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2*(q1 * q1 + q2 * q2))
89 | z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2*(q1 * q1 + q3 * q3))
90 | elif order == 'zyx':
91 | x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2*(q1 * q1 + q2 * q2))
92 | y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1+epsilon, 1-epsilon))
93 | z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2*(q2 * q2 + q3 * q3))
94 | else:
95 | raise
96 |
97 | return torch.stack((x, y, z), dim=1).view(original_shape)
98 |
99 | # Numpy-backed implementations
100 |
101 | def qmul_np(q, r):
102 | q = torch.from_numpy(q).contiguous()
103 | r = torch.from_numpy(r).contiguous()
104 | return qmul(q, r).numpy()
105 |
106 | def qrot_np(q, v):
107 | q = torch.from_numpy(q).contiguous()
108 | v = torch.from_numpy(v).contiguous()
109 | return qrot(q, v).numpy()
110 |
111 | def qeuler_np(q, order, epsilon=0, use_gpu=False):
112 | if use_gpu:
113 | q = torch.from_numpy(q).cuda()
114 | return qeuler(q, order, epsilon).cpu().numpy()
115 | else:
116 | q = torch.from_numpy(q).contiguous()
117 | return qeuler(q, order, epsilon).numpy()
118 |
119 | def qfix(q):
120 | """
121 | Enforce quaternion continuity across the time dimension by selecting
122 | the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
123 | between two consecutive frames.
124 |
125 | Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints.
126 | Returns a tensor of the same shape.
127 | """
128 | assert len(q.shape) == 3
129 | assert q.shape[-1] == 4
130 |
131 | result = q.copy()
132 | dot_products = np.sum(q[1:]*q[:-1], axis=2)
133 | mask = dot_products < 0
134 | mask = (np.cumsum(mask, axis=0)%2).astype(bool)
135 | result[1:][mask] *= -1
136 | return result
137 |
138 | def expmap_to_quaternion(e):
139 | """
140 | Convert axis-angle rotations (aka exponential maps) to quaternions.
141 | Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
142 | Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
143 | Returns a tensor of shape (*, 4).
144 | """
145 | assert e.shape[-1] == 3
146 |
147 | original_shape = list(e.shape)
148 | original_shape[-1] = 4
149 | e = e.reshape(-1, 3)
150 |
151 | theta = np.linalg.norm(e, axis=1).reshape(-1, 1)
152 | w = np.cos(0.5*theta).reshape(-1, 1)
153 | xyz = 0.5*np.sinc(0.5*theta/np.pi)*e
154 | return np.concatenate((w, xyz), axis=1).reshape(original_shape)
155 |
156 | def euler_to_quaternion(e, order):
157 | """
158 | Convert Euler angles to quaternions.
159 | """
160 | assert e.shape[-1] == 3
161 |
162 | original_shape = list(e.shape)
163 | original_shape[-1] = 4
164 |
165 | e = e.reshape(-1, 3)
166 |
167 | x = e[:, 0]
168 | y = e[:, 1]
169 | z = e[:, 2]
170 |
171 | rx = np.stack((np.cos(x/2), np.sin(x/2), np.zeros_like(x), np.zeros_like(x)), axis=1)
172 | ry = np.stack((np.cos(y/2), np.zeros_like(y), np.sin(y/2), np.zeros_like(y)), axis=1)
173 | rz = np.stack((np.cos(z/2), np.zeros_like(z), np.zeros_like(z), np.sin(z/2)), axis=1)
174 |
175 | result = None
176 | for coord in order:
177 | if coord == 'x':
178 | r = rx
179 | elif coord == 'y':
180 | r = ry
181 | elif coord == 'z':
182 | r = rz
183 | else:
184 | raise
185 | if result is None:
186 | result = r
187 | else:
188 | result = qmul_np(result, r)
189 |
190 | # Reverse antipodal representation to have a non-negative "w"
191 | if order in ['xyz', 'yzx', 'zxy']:
192 | result *= -1
193 |
194 | return result.reshape(original_shape)
195 |
196 |
--------------------------------------------------------------------------------
/code/blender_utils/render_blender.py:
--------------------------------------------------------------------------------
1 | # dataset distribution is specified by a distribution file containing samples
2 | import bpy
3 | import math
4 | import sys
5 | import os
6 | import numpy as np
7 | import random
8 | import struct
9 | from numpy.linalg import inv
10 | from math import *
11 | import mathutils
12 |
13 | def camPosToQuaternion(cx, cy, cz):
14 | camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
15 | cx = cx / camDist
16 | cy = cy / camDist
17 | cz = cz / camDist
18 | axis = (-cz, 0, cx)
19 | angle = math.acos(cy)
20 | a = math.sqrt(2) / 2
21 | b = math.sqrt(2) / 2
22 | w1 = axis[0]
23 | w2 = axis[1]
24 | w3 = axis[2]
25 | c = math.cos(angle / 2)
26 | d = math.sin(angle / 2)
27 | q1 = a * c - b * d * w1
28 | q2 = b * c + a * d * w1
29 | q3 = a * d * w2 + b * d * w3
30 | q4 = -b * d * w2 + a * d * w3
31 | return (q1, q2, q3, q4)
32 |
33 | def quaternionFromYawPitchRoll(yaw, pitch, roll):
34 | c1 = math.cos(yaw / 2.0)
35 | c2 = math.cos(pitch / 2.0)
36 | c3 = math.cos(roll / 2.0)
37 | s1 = math.sin(yaw / 2.0)
38 | s2 = math.sin(pitch / 2.0)
39 | s3 = math.sin(roll / 2.0)
40 | q1 = c1 * c2 * c3 + s1 * s2 * s3
41 | q2 = c1 * c2 * s3 - s1 * s2 * c3
42 | q3 = c1 * s2 * c3 + s1 * c2 * s3
43 | q4 = s1 * c2 * c3 - c1 * s2 * s3
44 | return (q1, q2, q3, q4)
45 |
46 |
47 | def camPosToQuaternion(cx, cy, cz):
48 | q1a = 0
49 | q1b = 0
50 | q1c = math.sqrt(2) / 2
51 | q1d = math.sqrt(2) / 2
52 | camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
53 | cx = cx / camDist
54 | cy = cy / camDist
55 | cz = cz / camDist
56 | t = math.sqrt(cx * cx + cy * cy)
57 | tx = cx / t
58 | ty = cy / t
59 | yaw = math.acos(ty)
60 | if tx > 0:
61 | yaw = 2 * math.pi - yaw
62 | pitch = 0
63 | tmp = min(max(tx*cx + ty*cy, -1),1)
64 | #roll = math.acos(tx * cx + ty * cy)
65 | roll = math.acos(tmp)
66 | if cz < 0:
67 | roll = -roll
68 | print("%f %f %f" % (yaw, pitch, roll))
69 | q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll)
70 | q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d
71 | q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d
72 | q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d
73 | q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d
74 | return (q1, q2, q3, q4)
75 |
76 | def camRotQuaternion(cx, cy, cz, theta):
77 | theta = theta / 180.0 * math.pi
78 | camDist = math.sqrt(cx * cx + cy * cy + cz * cz)
79 | cx = -cx / camDist
80 | cy = -cy / camDist
81 | cz = -cz / camDist
82 | q1 = math.cos(theta * 0.5)
83 | q2 = -cx * math.sin(theta * 0.5)
84 | q3 = -cy * math.sin(theta * 0.5)
85 | q4 = -cz * math.sin(theta * 0.5)
86 | return (q1, q2, q3, q4)
87 |
88 | def quaternionProduct(qx, qy):
89 | a = qx[0]
90 | b = qx[1]
91 | c = qx[2]
92 | d = qx[3]
93 | e = qy[0]
94 | f = qy[1]
95 | g = qy[2]
96 | h = qy[3]
97 | q1 = a * e - b * f - c * g - d * h
98 | q2 = a * f + b * e + c * h - d * g
99 | q3 = a * g - b * h + c * e + d * f
100 | q4 = a * h + b * g - c * f + d * e
101 | return (q1, q2, q3, q4)
102 |
103 | def obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):
104 | phi = float(elevation_deg) / 180 * math.pi
105 | theta = float(azimuth_deg) / 180 * math.pi
106 | x = (dist * math.cos(theta) * math.cos(phi))
107 | y = (dist * math.sin(theta) * math.cos(phi))
108 | z = (dist * math.sin(phi))
109 | return (x, y, z)
110 |
111 | def makeMaterial(name):
112 | mat = bpy.data.materials.new(name)
113 | mat.subsurface_scattering.use = True
114 | return mat
115 |
116 | def setMaterial(ob, mat):
117 | me = ob.data
118 | me.materials.append(mat)
119 |
120 | def importParamBIN(origin_list, lookat_list, upvec_list):
121 | paramRotList = list()
122 | paramTransList = list()
123 | cutList = list()
124 |
125 | x0 = -10000
126 | y0 = -10000
127 | x1 = 10000
128 | y1 = 10000
129 |
130 | origin = np.array([eval(i) for i in origin_list.split(',')])
131 | lookat = np.array([eval(i) for i in lookat_list.split(',')])
132 | viewUp = np.array([eval(i) for i in upvec_list.split(',')])
133 |
134 | viewDir = origin - lookat
135 | viewDir = viewDir / np.linalg.norm(viewDir)
136 | viewRight = np.cross(viewUp, viewDir)
137 | viewRight= viewRight / np.linalg.norm(viewRight)
138 | viewUp = np.cross(viewDir, viewRight)
139 | viewUp = viewUp / np.linalg.norm(viewUp)
140 |
141 | R = np.ndarray((3, 3))
142 | R[0, 0] = viewRight[0]
143 | R[1, 0] = viewRight[1]
144 | R[2, 0] = viewRight[2]
145 | R[0, 1] = viewUp[0]
146 | R[1, 1] = viewUp[1]
147 | R[2, 1] = viewUp[2]
148 | R[0, 2] = viewDir[0]
149 | R[1, 2] = viewDir[1]
150 | R[2, 2] = viewDir[2]
151 | R = inv(R);
152 |
153 | paramRotList.append(R)
154 |
155 | T = np.ndarray((3, 1))
156 | T[0, 0] = origin[0]
157 | T[1, 0] = origin[1]
158 | T[2, 0] = origin[2]
159 | T = np.dot(-R, T)
160 |
161 | paramTransList.append(T)
162 |
163 | cutList.append([x0, y0, x1, y1]);
164 |
165 | return (paramRotList, paramTransList, cutList)
166 |
167 | """---------- main -----------"""
168 | modelPath = sys.argv[6]
169 | outPath = sys.argv[7]
170 |
171 | modelId = os.path.basename(modelPath)[:-4]
172 |
173 | bpy.ops.import_scene.obj(filepath=modelPath)
174 |
175 | bpy.context.scene.render.alpha_mode = 'TRANSPARENT'
176 | #bpy.context.scene.render.use_shadows = False
177 | bpy.context.scene.render.use_raytrace = True
178 | bpy.context.scene.render.resolution_x = 224
179 | bpy.context.scene.render.resolution_y = 224
180 | bpy.context.scene.render.resolution_percentage = 100
181 | bpy.context.scene.world.light_settings.use_environment_light = True
182 | bpy.context.scene.world.light_settings.environment_energy = 0.2
183 | bpy.context.scene.render.use_freestyle = False
184 | bpy.context.scene.render.line_thickness = 0
185 | bpy.context.scene.render.edge_threshold = 0
186 | bpy.context.scene.render.edge_color = (1, 1, 1)
187 | bpy.context.scene.render.use_edge_enhance = False
188 |
189 | #bpy.context.mesh.show_normal_vertex = True;
190 |
191 |
192 | # YOUR CODE START HERE
193 |
194 | # fix mesh
195 | scene = bpy.context.scene
196 | for obj in scene.objects:
197 | if obj.type == 'MESH':
198 | scene.objects.active = obj
199 | bpy.ops.object.mode_set(mode='EDIT', toggle=False)
200 | bpy.ops.mesh.reveal()
201 | bpy.ops.mesh.select_all(action='SELECT')
202 | bpy.ops.mesh.normals_make_consistent()
203 | bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
204 |
205 | # clear default lights
206 | bpy.ops.object.select_by_type(type='LAMP')
207 | bpy.ops.object.delete(use_global=False)
208 |
209 | # set area lights
210 | light_azimuth_deg = 0
211 | light_elevation_deg = 90
212 | lightDist = 10
213 | lx, ly, lz = obj_centened_camera_pos(lightDist, light_azimuth_deg, light_elevation_deg)
214 | bpy.ops.object.lamp_add(type='AREA', view_align = False, location=(lx, ly, lz))
215 | data = bpy.data.objects['Area'].data
216 | data.energy = 1
217 | data.distance = 5
218 | #data.shape = 'SQUARE'
219 | #data.shadow_ray_samples_x = 8
220 |
221 | light_azimuth_deg = 0
222 | light_elevation_deg = 45
223 | lightDist = 10
224 | lx, ly, lz = obj_centened_camera_pos(lightDist, light_azimuth_deg, light_elevation_deg)
225 | bpy.ops.object.lamp_add(type='AREA', view_align = False, location=(lx, ly, lz))
226 | data = bpy.data.objects['Area.001'].data
227 | data.energy = 1
228 | data.distance = 5
229 |
230 | #camObj.rotation_mode = 'XYZ'
231 | #camObj.rotation_euler[0] = 0
232 | #camObj.rotation_euler[1] = 0
233 | #camObj.rotation_euler[2] = 0
234 |
235 | outFileView = outPath;
236 | bpy.data.objects['Area'].data.energy = 1;
237 | bpy.data.objects['Area.001'].data.energy = 1;
238 | bpy.context.scene.world.light_settings.environment_energy = 0.2
239 | bpy.data.scenes['Scene'].render.filepath = outFileView;
240 | bpy.ops.render.render( write_still=True )
241 |
242 |
--------------------------------------------------------------------------------
/code/blender_utils/render_using_blender.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import numpy as np
4 | from subprocess import call
5 | from geometry_utils import load_obj, export_obj
6 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
7 | from quaternion import qrot
8 | from colors import colors
9 |
10 | cube_mesh = load_obj(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cube.obj'), no_normal=True)
11 | cube_v_torch = torch.from_numpy(cube_mesh['vertices'])
12 | cube_v = cube_mesh['vertices'] / 100
13 | cube_f = cube_mesh['faces']
14 |
15 | def render_pts(out_fn, pts, blender_fn='camera.blend', highlight_id=None):
16 | all_v = [np.zeros((0, 3), dtype=np.float32)];
17 | all_f = [np.zeros((0, 3), dtype=np.int32)];
18 | t = 0
19 | for i in range(pts.shape[0]):
20 | if (highlight_id is None) or (i != highlight_id):
21 | all_v.append(cube_v + pts[i])
22 | all_f.append(cube_f + 8 * t)
23 | t += 1
24 | all_v = np.vstack(all_v)
25 | all_f = np.vstack(all_f)
26 | with open(out_fn+'.obj', 'w') as fout:
27 | fout.write('mtllib %s\n' % (out_fn.split('/')[-1]+'.mtl'))
28 | for i in range(all_v.shape[0]):
29 | fout.write('v %f %f %f\n' % (all_v[i, 0], all_v[i, 1], all_v[i, 2]))
30 | fout.write('usemtl f0\n')
31 | for i in range(all_f.shape[0]):
32 | fout.write('f %d %d %d\n' % (all_f[i, 0], all_f[i, 1], all_f[i, 2]))
33 | if highlight_id is not None:
34 | vs = cube_v * 5 + pts[highlight_id]
35 | fs = cube_f + all_v.shape[0]
36 | for i in range(vs.shape[0]):
37 | fout.write('v %f %f %f\n' % (vs[i, 0], vs[i, 1], vs[i, 2]))
38 | fout.write('usemtl f1\n')
39 | for i in range(fs.shape[0]):
40 | fout.write('f %d %d %d\n' % (fs[i, 0], fs[i, 1], fs[i, 2]))
41 | with open(out_fn+'.mtl', 'w') as fout:
42 | fout.write('newmtl f0\nKd 0 0 1\n')
43 | fout.write('newmtl f1\nKd 1 0 0\n')
44 | cmd = 'cd %s && /home/blender-2.79-linux-glibc219-x86_64/blender -noaudio --background %s --python render_blender.py %s %s > /dev/null' \
45 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
46 | blender_fn, out_fn+'.obj', out_fn)
47 | # print("cmd:", cmd)
48 | call(cmd, shell=True)
49 |
50 | def render_pts_highlight_points(out_fn, pts, blender_fn='camera.blend', highlight_points=[]):
51 | all_v = [np.zeros((0, 3), dtype=np.float32)];
52 | all_f = [np.zeros((0, 3), dtype=np.int32)];
53 | t = 0
54 | for i in range(pts.shape[0]):
55 | all_v.append(cube_v + pts[i])
56 | all_f.append(cube_f + 8 * t)
57 | t += 1
58 | all_v = np.vstack(all_v)
59 | all_f = np.vstack(all_f)
60 | with open(out_fn+'.obj', 'w') as fout:
61 | fout.write('mtllib %s\n' % (out_fn.split('/')[-1]+'.mtl'))
62 | for i in range(all_v.shape[0]):
63 | fout.write('v %f %f %f\n' % (all_v[i, 0], all_v[i, 1], all_v[i, 2]))
64 | fout.write('usemtl f0\n')
65 | for i in range(all_f.shape[0]):
66 | fout.write('f %d %d %d\n' % (all_f[i, 0], all_f[i, 1], all_f[i, 2]))
67 |         if len(highlight_points) != 0:
68 |             for point_idx, highlight_point in enumerate(highlight_points):
69 |                 vs = cube_v * 5 + highlight_point
70 |                 fs = cube_f + all_v.shape[0] + 8 * point_idx  # offset face indices past the vertices already written
71 | for i in range(vs.shape[0]):
72 | fout.write('v %f %f %f\n' % (vs[i, 0], vs[i, 1], vs[i, 2]))
73 | fout.write('usemtl f1\n')
74 | for i in range(fs.shape[0]):
75 | fout.write('f %d %d %d\n' % (fs[i, 0], fs[i, 1], fs[i, 2]))
76 | with open(out_fn+'.mtl', 'w') as fout:
77 | fout.write('newmtl f0\nKd 0 0 1\n')
78 | fout.write('newmtl f1\nKd 1 0 0\n')
79 |     cmd = 'cd %s && /home/blender-2.79-linux-glibc219-x86_64/blender -noaudio --background %s --python render_blender.py %s %s > /dev/null' \
80 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
81 | blender_fn, out_fn+'.obj', out_fn)
82 | call(cmd, shell=True)
83 |
84 | """
85 | pts: P x N x 3 (P <= 20)
86 | """
87 | def render_part_pts(out_fn, pts, blender_fn='camera.blend'):
88 | fobj = open(out_fn+'.obj', 'w')
89 | fobj.write('mtllib %s\n' % (out_fn.split('/')[-1]+'.mtl'))
90 | fmtl = open(out_fn+'.mtl', 'w')
91 | num_part = pts.shape[0]
92 | num_point = pts.shape[1]
93 | for pid in range(num_part):
94 | all_v = [np.zeros((0, 3), dtype=np.float32)];
95 | all_f = [np.zeros((0, 3), dtype=np.int32)];
96 | for i in range(num_point):
97 | all_v.append(cube_v + pts[pid, i])
98 | all_f.append(cube_f + 8 * (pid*num_point+i))
99 | all_v = np.vstack(all_v)
100 | all_f = np.vstack(all_f)
101 | for i in range(all_v.shape[0]):
102 | fobj.write('v %f %f %f\n' % (all_v[i, 0], all_v[i, 1], all_v[i, 2]))
103 | fobj.write('usemtl f%d\n' % pid)
104 | for i in range(all_f.shape[0]):
105 | fobj.write('f %d %d %d\n' % (all_f[i, 0], all_f[i, 1], all_f[i, 2]))
106 | fmtl.write('newmtl f%d\nKd %f %f %f\n' % (pid, colors[pid][0], colors[pid][1], colors[pid][2]))
107 | fobj.close()
108 | fmtl.close()
109 | cmd = 'cd %s && blender -noaudio --background %s --python render_blender.py %s %s > /dev/null' \
110 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
111 | blender_fn, out_fn+'.obj', out_fn)
112 | call(cmd, shell=True)
113 |
114 | def render_box(out_fn, box):
115 | box = torch.from_numpy(box)
116 | cmd = 'cp %s %s' % (os.path.join(BASE_DIR, 'cube.mtl'), out_fn + '.mtl')
117 | call(cmd, shell=True)
118 | with open(out_fn + '.obj', 'w') as fout:
119 | fout.write('mtllib %s\n' % (out_fn.split('/')[-1] + '.mtl'))
120 | v = (qrot(box[6:].unsqueeze(dim=0).repeat(8, 1), cube_v_torch * box[3:6].unsqueeze(dim=0)) + box[:3].unsqueeze(dim=0)).numpy()
121 | for i in range(8):
122 | fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
123 | for i in range(6):
124 | fout.write('usemtl f%d\n' % i)
125 | fout.write('f %d %d %d\n' % (cube_f[2*i, 0], cube_f[2*i, 1], cube_f[2*i, 2]))
126 | fout.write('f %d %d %d\n' % (cube_f[2*i+1, 0], cube_f[2*i+1, 1], cube_f[2*i+1, 2]))
127 | cmd = 'cd %s && blender -noaudio --background camera_centered.blend --python render_blender.py %s %s > /dev/null' \
128 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
129 | out_fn+'.obj', out_fn)
130 | call(cmd, shell=True)
131 |
132 | def render_boxes(out_fn, boxes):
133 | boxes = torch.from_numpy(boxes)
134 | cmd = 'cp %s %s' % (os.path.join(BASE_DIR, 'cube.mtl'), out_fn + '.mtl')
135 | call(cmd, shell=True)
136 | with open(out_fn + '.obj', 'w') as fout:
137 | fout.write('mtllib %s\n' % (out_fn.split('/')[-1] + '.mtl'))
138 | for j in range(boxes.shape[0]):
139 | v = (qrot(boxes[j, 6:].unsqueeze(dim=0).repeat(8, 1), cube_v_torch * boxes[j, 3:6].unsqueeze(dim=0)) + boxes[j, :3].unsqueeze(dim=0)).numpy()
140 | for i in range(8):
141 | fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
142 | for i in range(6):
143 | fout.write('usemtl f%d\n' % i)
144 | fout.write('f %d %d %d\n' % (cube_f[2*i, 0]+8*j, cube_f[2*i, 1]+8*j, cube_f[2*i, 2]+8*j))
145 | fout.write('f %d %d %d\n' % (cube_f[2*i+1, 0]+8*j, cube_f[2*i+1, 1]+8*j, cube_f[2*i+1, 2]+8*j))
146 | cmd = 'cd %s && blender -noaudio --background camera_centered.blend --python render_blender.py %s %s > /dev/null' \
147 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
148 | out_fn+'.obj', out_fn)
149 | call(cmd, shell=True)
150 |
151 | def render_box_with_rot_mat(out_fn, box):
152 | box = torch.from_numpy(box)
153 | cmd = 'cp %s %s' % (os.path.join(BASE_DIR, 'cube.mtl'), out_fn + '.mtl')
154 | call(cmd, shell=True)
155 | with open(out_fn + '.obj', 'w') as fout:
156 | fout.write('mtllib %s\n' % (out_fn.split('/')[-1] + '.mtl'))
157 | v = (torch.matmul(cube_v_torch * box[3:6].unsqueeze(dim=0), box[6:].reshape(3, 3).permute(1, 0)) + box[:3].unsqueeze(dim=0)).numpy()
158 | for i in range(8):
159 | fout.write('v %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
160 | for i in range(6):
161 | fout.write('usemtl f%d\n' % i)
162 | fout.write('f %d %d %d\n' % (cube_f[2*i, 0], cube_f[2*i, 1], cube_f[2*i, 2]))
163 | fout.write('f %d %d %d\n' % (cube_f[2*i+1, 0], cube_f[2*i+1, 1], cube_f[2*i+1, 2]))
164 | cmd = 'cd %s && blender -noaudio --background camera_centered.blend --python render_blender.py %s %s > /dev/null' \
165 | % (os.path.join(os.path.dirname(os.path.abspath(__file__))), \
166 | out_fn+'.obj', out_fn)
167 | call(cmd, shell=True)
168 |
169 |
--------------------------------------------------------------------------------
/code/camera_rl.py:
--------------------------------------------------------------------------------
1 | """
2 | an RGB-D camera
3 | """
4 | import numpy as np
5 | from sapien.core import Pose
6 |
7 |
8 | class Camera(object):
9 |
10 | def __init__(self, env, image_size1=448, image_size2=448, fov=35, near=0.1, far=100.0,
11 | dist=5.0, phi=np.pi / 10, theta=np.pi, raise_camera=0.0, base_theta=1.0,
12 | random_initialize=False, fixed_position=False, restrict_dir=False, exact_dir=False,
13 | low_view=False, real_data=False):
14 | # set camera intrinsics
15 | self.env = env
16 | builder = env.scene.create_actor_builder()
17 | # camera_mount_actor = builder.build_kinematic()
18 | camera_mount_actor = builder.build(is_kinematic=True)
19 | self.camera_mount_actor = camera_mount_actor
20 | self.camera = env.scene.add_mounted_camera(
21 | 'camera', camera_mount_actor, Pose(), image_size1, image_size2, 0, np.deg2rad(fov), near, far
22 | )
23 |
24 | # log parameters
25 | self.restrict_dir = restrict_dir
26 | self.exact_dir = exact_dir
27 | self.low_view = low_view
28 | self.fixed_position = fixed_position
29 | self.real_data = real_data
30 | self.near = near
31 | self.far = far
32 | self.dist = dist
33 | # self.pos = pos
34 | self.raise_camera = raise_camera
35 | self.base_theta = base_theta
36 |
37 | if random_initialize:
38 | self.pick_view()
39 | else:
40 | self.theta = theta
41 | self.phi = phi
42 |
43 | self.pos = None
44 | self.mat44 = None
45 | self.base_mat44 = None
46 | self.cam2cambase = None
47 |
48 | def change_pose(self, phi=None, theta=None):
49 | # set camera extrinsics
50 | if not self.fixed_position:
51 | self.pick_view()
52 | if phi is not None:
53 | self.phi = phi
54 | self.theta = theta
55 |
56 | self.pos = np.array([self.dist * np.cos(self.phi) * np.cos(self.theta),
57 | self.dist * np.cos(self.phi) * np.sin(self.theta),
58 | self.dist * np.sin(self.phi)])
59 |
60 | # world base
61 | forward = -self.pos / np.linalg.norm(self.pos)
62 | left = np.cross([0, 0, 1], forward)
63 | left = left / np.linalg.norm(left)
64 | up = np.cross(forward, left)
65 | self.mat44 = np.eye(4)
66 | self.mat44[:3, :3] = np.vstack([forward, left, up]).T
67 | self.mat44[:3, 3] = self.pos # mat44 is cam2world
68 | self.mat44[0, 3] += self.env.object_position_offset
69 | self.mat44[2, 3] += self.raise_camera # to object's center
70 | self.camera_mount_actor.set_pose(Pose.from_transformation_matrix(self.mat44))
71 |
72 | # compute camera-base frame (camera-center, world-up-z, camera-front-x)
73 | cb_up = np.array([0, 0, 1], dtype=np.float32)
74 | cb_left = np.cross(cb_up, forward)
75 | cb_left /= np.linalg.norm(cb_left)
76 | cb_forward = np.cross(cb_left, cb_up)
77 | cb_forward /= np.linalg.norm(cb_forward)
78 | self.base_mat44 = np.eye(4)
79 | self.base_mat44[:3, :3] = np.vstack([cb_forward, cb_left, cb_up]).T
80 | self.base_mat44[:3, 3] = self.pos # cambase2world
81 | self.base_mat44[2, 3] += self.raise_camera # to object's center
82 | self.cam2cambase = np.linalg.inv(self.base_mat44) @ self.mat44 # cam2cambase
83 | self.cam2cambase = self.cam2cambase[:3, :3]
84 |
85 | self.env.step()
86 | self.env.render()
87 |
88 | def pick_view(self):
89 | if self.exact_dir:
90 | self.theta = self.base_theta * np.pi
91 | elif self.restrict_dir:
92 | self.theta = (self.base_theta + (np.random.random() - 0.5)) * np.pi
93 | # self.theta = np.pi / 2
94 | else:
95 | self.theta = np.random.random() * np.pi * 2
96 | print(">>> THETA =", self.theta)
97 | if self.low_view:
98 | self.phi = (np.random.random() + 0.4) * np.pi / 6
99 | else:
100 | self.phi = (np.random.random() + 1) * np.pi / 6
101 | if self.real_data:
102 | self.theta = np.random.random() * np.pi * 2
103 | self.phi = np.random.random() * np.pi * 2
104 |
105 | def get_observation(self):
106 | self.camera.take_picture()
107 | rgba = self.camera.get_color_rgba()
108 | rgba = (rgba * 255).clip(0, 255).astype(np.float32) / 255
109 | white = np.ones((rgba.shape[0], rgba.shape[1], 3), dtype=np.float32)
110 | mask = np.tile(rgba[:, :, 3:4], [1, 1, 3])
111 | rgb = rgba[:, :, :3] * mask + white * (1 - mask)
112 | depth = self.camera.get_depth().astype(np.float32)
113 | return rgb, depth
114 |
115 | def compute_camera_XYZA(self, depth):
116 | camera_matrix = self.camera.get_camera_matrix()[:3, :3]
117 | y, x = np.where(depth < 1)
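        # map normalized depth-buffer values in [0, 1] back to metric depth between the near and far planes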
118 | z = self.near * self.far / (self.far + depth * (self.near - self.far))
119 | permutation = np.array([[0, 0, 1], [-1, 0, 0], [0, -1, 0]])
120 | points = (permutation @ np.dot(np.linalg.inv(camera_matrix), np.stack([x, y, np.ones_like(x)] * z[y, x], 0))).T
121 | return y, x, points
122 |
123 | @staticmethod
124 | def compute_XYZA_matrix(id1, id2, pts, size1, size2):
125 | out = np.zeros((size1, size2, 4), dtype=np.float32)
126 | out[id1, id2, :3] = pts
127 | out[id1, id2, 3] = 1
128 | return out
129 |
130 | def get_normal_map(self):
131 | nor = self.camera.get_normal_rgba()
132 | # convert from PartNet-space (x-right, y-up, z-backward) to SAPIEN-space (x-front, y-left, z-up)
133 | new_nor = np.array(nor, dtype=np.float32)
134 | new_nor[:, :, 0] = -nor[:, :, 2]
135 | new_nor[:, :, 1] = -nor[:, :, 0]
136 | new_nor[:, :, 2] = nor[:, :, 1]
137 | return new_nor
138 |
139 | def get_movable_link_mask(self, link_ids):
140 | link_seg = self.camera.get_segmentation()
141 | link_mask = np.zeros((link_seg.shape[0], link_seg.shape[1])).astype(np.uint8)
142 | for idx, lid in enumerate(link_ids):
143 | cur_link_pixels = int(np.sum(link_seg == lid))
144 | if cur_link_pixels > 0:
145 | link_mask[link_seg == lid] = idx + 1
146 | return link_mask
147 |
148 | def get_id_link_mask(self, link_ids):
149 | link_seg = self.camera.get_segmentation()
150 |
151 | # TODO: Debug
152 | print("NUMTOTLINK:", len(np.where(link_seg > 0)[0]))
153 |
154 | link_mask = np.zeros((link_seg.shape[0], link_seg.shape[1])).astype(np.uint8)
155 | for idx, lid in enumerate(link_ids):
156 | cur_link_pixels = int(np.sum(link_seg == lid))
157 | if cur_link_pixels > 0:
158 | link_mask[link_seg == lid] = idx + 1
159 | return link_mask
160 |
161 | def get_name_seg_mask(self, keyword=['handle']):
162 | # read part seg partid2renderids
163 | partid2renderids = dict() # {leg1:[id1,id2], leg2:[id_x, id_y]}
164 | for k in self.env.scene.render_id_to_visual_name:
165 | # print(k, self.env.scene.render_id_to_visual_name[k])
166 | if self.env.scene.render_id_to_visual_name[k].split('-')[0] in keyword: # e.g. leg-1 / base_body-3
167 | part_id = int(self.env.scene.render_id_to_visual_name[k].split('-')[-1])
168 | if part_id not in partid2renderids:
169 | partid2renderids[part_id] = []
170 | partid2renderids[part_id].append(k)
171 | # generate 0/1 target mask
172 | part_seg = self.camera.get_obj_segmentation() # (448, 448) render_id
173 | # print('part_seg: ', part_seg, part_seg.shape)
174 |
175 | dest_mask = np.zeros((part_seg.shape[0], part_seg.shape[1])).astype(np.uint8)
176 | for partid in partid2renderids:
177 | cur_part_mask = np.isin(part_seg, partid2renderids[partid])
178 | cur_part_mask_pixels = int(np.sum(cur_part_mask))
179 | if cur_part_mask_pixels > 0:
180 | dest_mask[cur_part_mask] = partid
181 | print("dest part get!")
182 | return dest_mask # (448, 448) part_id
183 |
184 | def get_object_mask(self):
185 | rgba = self.camera.get_albedo_rgba()
186 | return rgba[:, :, 3] > 0.5
187 |
188 | # return camera parameters
189 | def get_metadata(self):
190 | return {
191 | 'pose': self.camera.get_pose(),
192 | 'near': self.camera.get_near(),
193 | 'far': self.camera.get_far(),
194 | 'width': self.camera.get_width(),
195 | 'height': self.camera.get_height(),
196 | 'fov': self.camera.get_fovy(),
197 | 'camera_matrix': self.camera.get_camera_matrix(),
198 | 'projection_matrix': self.camera.get_projection_matrix(),
199 | 'model_matrix': self.camera.get_model_matrix(),
200 | 'mat44': self.mat44,
201 | 'cam2cambase': self.cam2cambase,
202 | 'basemat44': self.base_mat44,
203 | }
204 |
205 | # return camera parameters
206 | def get_metadata_json(self):
207 | return {
208 | 'dist': self.dist,
209 | 'theta': self.theta,
210 | 'phi': self.phi,
211 | 'near': self.camera.get_near(),
212 | 'far': self.camera.get_far(),
213 | 'width': self.camera.get_width(),
214 | 'height': self.camera.get_height(),
215 | 'fov': self.camera.get_fovy(),
216 | 'camera_matrix': self.camera.get_camera_matrix().tolist(),
217 | 'projection_matrix': self.camera.get_projection_matrix().tolist(),
218 | 'model_matrix': self.camera.get_model_matrix().tolist(),
219 | 'mat44': self.mat44.tolist(),
220 | 'cam2cambase': self.cam2cambase.tolist(),
221 | 'basemat44': self.base_mat44.tolist(),
222 | }
223 |
--------------------------------------------------------------------------------
/code/cate_setting.json:
--------------------------------------------------------------------------------
1 | {"shape_id": {"Eyeglasses": "01", "Bucket": "02", "Box": "03", "Cart": "04", "Chair": "05", "TrashCan": "06", "Pliers": "07", "FoldingChair": "08", "Basket": "09", "Display": "10", "Table": "11", "Oven": "12", "Dishwasher": "13", "KitchenPot": "14", "Microwave": "15", "Scissors": "16", "Laptop": "17", "BirdHouse": "18", "Skateboard": "19", "Bookshelf": "20", "Safe": "21", "Toaster": "22", "Printer": "23", "Bench": "24", "Keyboard2": "25"}, "pickup": {"cate_setting": {"Eyeglasses": {"obj_scale": 0.9, "urdf": "processed", "joint_range": [0, 0.3], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "Bucket": {"obj_scale": 0.85, "urdf": "removed", "joint_range": [0.99, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "TrashCan": {"obj_scale": 0.8, "urdf": "removed", "joint_range": [0.45, 0.55], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "Pliers": {"obj_scale": 0.75, "urdf": "processed", "joint_range": [0.3, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "Basket": {"obj_scale": 0.75, "urdf": "shapenet", "joint_range": [0.0, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "Display": {"obj_scale": 0.8, "urdf": "origin", "joint_range": [0, 0.03], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "24000"}, "KitchenPot": {"obj_scale": 0.68, "urdf": "removed", "joint_range": [0.0, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "35000"}, "Box": {"obj_scale": 0.8, "urdf": "removed", "joint_range": [0.0, 0.05], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "52000"}, "Scissors": {"obj_scale": 0.75, "urdf": "fixbase", "joint_range": [0.3, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "36000"}, "Laptop": {"obj_scale": 0.8, "urdf": "origin", "joint_range": [0.0, 0.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "39000"}}, "material_static_friction": 8.0, "material_dynamic_friction": 8.0, "scene_static_friction": 0.6, "scene_dynamic_friction": 0.6, "stiffness": 1000, "damping": 10, "density": 9.0, "gripper_scale": 1.65, "start_dist": 0.4, "final_dist": 0.1, "move_steps": 4000, "wait_steps": 2000, "RL_load_date": "default", "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-pickup-Eyeglasses,Bucket,TrashCan,Pliers,Basket,Display-050980", "finetune_eval_epoch": "2-300"}, "pulling": {"cate_setting": {"Eyeglasses": {"obj_scale": 0.8, "urdf": "processed", "joint_range": [0, 0.3], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "120000"}, "FoldingChair": {"obj_scale": 0.8, "urdf": "processed", "joint_range": [0.0, 0.1], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "27000"}, "Oven": {"obj_scale": 0.75, "urdf": "processed", "joint_range": [0.95, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Dishwasher": {"obj_scale": 0.75, "urdf": "processed", "joint_range": [0.95, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "83000"}, "Skateboard": {"obj_scale": 0.8, 
"urdf": "shapenet", "joint_range": [0.95, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": true, "lie_down": false, "RL_exp_num": "113300"}, "Bookshelf": {"obj_scale": 0.75, "urdf": "shapenet", "joint_range": [0.95, 1.0], "base_theta": 0.5, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "161000"}, "Table": {"obj_scale": 0.8, "urdf": "removed", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": true, "lie_down": false, "RL_exp_num": "86000"}, "Box": {"obj_scale": 0.8, "urdf": "removed", "joint_range": [0.0, 0.05], "base_theta": 1.0, "restrict_dir": true, "low_view": true, "lie_down": true, "RL_exp_num": "136000"}, "Safe": {"obj_scale": 0.75, "urdf": "processed", "joint_range": [0.95, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Microwave": {"obj_scale": 0.75, "urdf": "processed", "joint_range": [0.95, 1.0], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "BirdHouse": {"obj_scale": 0.75, "urdf": "shapenet", "joint_range": [0.95, 1.0], "base_theta": -0.5, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}}, "material_static_friction": 8.0, "material_dynamic_friction": 8.0, "scene_static_friction": 0.25, "scene_dynamic_friction": 0.25, "stiffness": 1000, "damping": 10, "density": 8.0, "gripper_scale": 1.5, "start_dist": 0.4, "final_dist": 0.1, "move_steps": 4000, "wait_steps": 1500, "RL_load_date": "default"}, "pushing": {"cate_setting": {"Toaster": {"obj_scale": 0.75, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "KitchenPot": {"obj_scale": 0.75, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Basket": {"obj_scale": 0.75, "urdf": "shapenet", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Box": {"obj_scale": 1.0, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "19500"}, "Dishwasher": {"obj_scale": 0.75, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "52400"}, "Display": {"obj_scale": 1.0, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "58300"}, "Microwave": {"obj_scale": 1.0, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Printer": {"obj_scale": 1.0, "urdf": "origin", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "XXXXX"}, "Bench": {"obj_scale": 1.0, "urdf": "shapenet", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "65700"}, "Keyboard2": {"obj_scale": 1.0, "urdf": "shapenet", "joint_range": [0.0, 0.01], "base_theta": 1.0, "restrict_dir": true, "low_view": false, "lie_down": false, "RL_exp_num": "29600"}}, "material_static_friction": 4.0, "material_dynamic_friction": 4.0, "scene_static_friction": 0.3, "scene_dynamic_friction": 0.3, "stiffness": 1, "damping": 10, "density": 2.0, 
"gripper_scale": 2, "start_dist": 0.45, "final_dist": 0.1, "move_steps": 3500, "wait_steps": 2000, "RL_load_date": "default", "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-pushing-Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2-042881", "finetune_eval_epoch": "8-0"}, "rotating": {"finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-rotating-Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2-051480", "finetune_eval_epoch": "16-0", "scene_static_friction": 0.15, "scene_dynamic_friction": 0.15}, "topple": {"finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-topple-Box,Bucket,Dishwasher,Display,Bench,Bowl-050580", "finetune_eval_epoch": "9-0", "scene_static_friction": 0.15, "scene_dynamic_friction": 0.15}, "fetch": {"material_static_friction": 8.0, "material_dynamic_friction": 8.0, "scene_static_friction": 0.6, "scene_dynamic_friction": 0.6, "stiffness": 1, "damping": 10, "density": 80, "gripper_scale": 1.0, "start_dist": 0.5, "final_dist": 0.1, "move_steps": 5000, "wait_steps": 2000}}
--------------------------------------------------------------------------------
/code/collect_data_main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import numpy as np
4 | import utils
5 | from argparse import ArgumentParser
6 | import time
7 | import random
8 | import multiprocessing as mp
9 | from subprocess import call
10 | import datetime
11 |
12 |
13 | parser = ArgumentParser()
14 | parser.add_argument('--category', type=str)
15 | parser.add_argument('--primact_type', type=str)
16 | parser.add_argument('--out_dir', type=str)
17 | parser.add_argument('--mode', type=str, default='train/val/all')
18 |
19 | parser.add_argument('--density', type=float, default=2.0)
20 | parser.add_argument('--damping', type=int, default=10)
21 | parser.add_argument('--target_part_state', type=str, default='random-middle')
22 | parser.add_argument('--start_dist', type=float, default=0.30)
23 | parser.add_argument('--final_dist', type=float, default=0.10)
24 | parser.add_argument('--move_steps', type=int, default=2000)
25 | parser.add_argument('--wait_steps', type=int, default=2000)
26 |
27 | parser.add_argument('--num_processes', type=int, default=1)
28 | parser.add_argument('--start_epoch', type=int, default=0)
29 | parser.add_argument('--save_interval', type=int, default=5)
30 | parser.add_argument('--no_gui', action='store_true', default=False, help='no_gui [default: False]')
31 | parser.add_argument('--not_check_dual', action='store_true', default=False)
32 |
33 | args = parser.parse_args()
34 |
35 |
36 | def setup_seed(seed):
37 | np.random.seed(seed)
38 | random.seed(seed)
39 |
40 | def copy_file(trail_id, category, shape_id, succ, out_dir):
41 | tmp_file_dir = os.path.join(out_dir, 'tmp_succ_files')
42 | tmp_gif_dir = os.path.join(out_dir, 'tmp_succ_gif')
43 | if succ:
44 | target_file_dir = os.path.join(out_dir, 'succ_files')
45 | target_gif_dir = os.path.join(out_dir, 'succ_gif')
46 | else:
47 | target_file_dir = os.path.join(out_dir, 'fail_files')
48 | target_gif_dir = os.path.join(out_dir, 'fail_gif')
49 |
50 | cmd = 'cp %s %s/' % (os.path.join(tmp_file_dir, 'result_%d.json' % trail_id), target_file_dir)
51 | call(cmd, shell=True)
52 | cmd = 'cp %s %s/' % (os.path.join(tmp_file_dir, 'cam_XYZA_%d.h5' % trail_id), target_file_dir)
53 | call(cmd, shell=True)
54 | cmd = 'cp %s %s/' % (os.path.join(tmp_file_dir, 'interaction_mask_%d.png' % trail_id), target_file_dir)
55 | call(cmd, shell=True)
56 | cmd = 'cp %s %s/' % (os.path.join(tmp_gif_dir, '%d_%s_%s.gif' % (trail_id, category, shape_id)), target_gif_dir)
57 | call(cmd, shell=True)
58 |
59 |
60 |
61 | def run_jobs(idx_process, args, transition_Q):
62 |     random.seed(datetime.datetime.now().timestamp())
63 | setup_seed(random.randint(1, 1000) + idx_process)
64 | sum_trial = 100000
65 |
66 | cat_list, shape_list, shape2cat_dict, cat2shape_dict = utils.get_shape_list(all_categories=args.category, mode=args.mode)
67 |
68 | for trial in range(args.start_epoch, sum_trial):
69 | cur_trial = sum_trial * idx_process + trial
70 | cur_random_seed = np.random.randint(10000000)
71 |
72 | # load object
73 | selected_cat = cat_list[random.randint(0, len(cat_list) - 1)]
74 | shape_id = cat2shape_dict[selected_cat][random.randint(0, len(cat2shape_dict[selected_cat]) - 1)]
75 | print('shape_id: ', shape_id, selected_cat, cur_trial)
76 |
77 | cmd = 'python collect_data.py --trial_id %d --shape_id %s --category %s --primact_type %s --random_seed %d ' \
78 | '--density %f --damping %d --target_part_state %s --start_dist %f --final_dist %f ' \
79 | '--move_steps %d --wait_steps %d --out_dir %s --no_gui ' \
80 | % (cur_trial, shape_id, selected_cat, args.primact_type, cur_random_seed,
81 | args.density, args.damping, args.target_part_state, args.start_dist, args.final_dist,
82 | args.move_steps, args.wait_steps, args.out_dir)
83 | if trial % args.save_interval == 0:
84 | cmd += '--save_data '
85 | cmd += '> /dev/null 2>&1'
86 |
87 | ret = call(cmd, shell=True)
88 | if ret == 1:
89 | transition_Q.put(['fail', trial])
90 | elif ret == 2:
91 | transition_Q.put(['invalid', trial])
92 |
93 |
94 | if ret == 0:
95 | # check dual
96 | ret0, ret1 = 1, 1
97 | if not args.not_check_dual:
98 | cmd = 'python collect_data_checkDual.py --trial_id %d --random_seed %d --gripper_id %d ' \
99 | '--density %f --damping %d --target_part_state %s --start_dist %f --final_dist %f ' \
100 | '--move_steps %d --wait_steps %d --out_dir %s --no_gui ' \
101 | % (cur_trial, cur_random_seed, 0,
102 | args.density, args.damping, args.target_part_state, args.start_dist, args.final_dist,
103 | args.move_steps, args.wait_steps, args.out_dir)
104 | ret0 = call(cmd, shell=True)
105 |
106 | cmd = 'python collect_data_checkDual.py --trial_id %d --random_seed %d --gripper_id %d ' \
107 | '--density %f --damping %d --target_part_state %s --start_dist %f --final_dist %f ' \
108 | '--move_steps %d --wait_steps %d --out_dir %s --no_gui ' \
109 | % (cur_trial, cur_random_seed, 1,
110 | args.density, args.damping, args.target_part_state, args.start_dist, args.final_dist,
111 | args.move_steps, args.wait_steps, args.out_dir)
112 | ret1 = call(cmd, shell=True)
113 |
114 | if ret0 == 0 or ret1 == 0:
115 | transition_Q.put(['fail', trial]) # single succ
116 | copy_file(cur_trial, selected_cat, shape_id, succ=False, out_dir=args.out_dir)
117 | else:
118 | transition_Q.put(['succ', trial]) # dual succ
119 | copy_file(cur_trial, selected_cat, shape_id, succ=True, out_dir=args.out_dir)
120 |
121 |
122 |
123 |
124 | if __name__ == '__main__':
125 | out_dir = args.out_dir
126 | print('out_dir: ', out_dir)
127 | if os.path.exists(out_dir):
128 | response = input('Out directory "%s" already exists, continue? (y/n) ' % out_dir)
129 | if response != 'y' and response != 'Y':
130 | sys.exit()
131 |
132 |
133 | if not os.path.exists(out_dir):
134 | os.makedirs(os.path.join(out_dir, 'succ_gif'))
135 | os.makedirs(os.path.join(out_dir, 'fail_gif'))
136 | os.makedirs(os.path.join(out_dir, 'invalid_gif'))
137 | os.makedirs(os.path.join(out_dir, 'tmp_succ_gif'))
138 |
139 | os.makedirs(os.path.join(out_dir, 'succ_files'))
140 | os.makedirs(os.path.join(out_dir, 'fail_files'))
141 | os.makedirs(os.path.join(out_dir, 'invalid_files'))
142 | os.makedirs(os.path.join(out_dir, 'tmp_succ_files'))
143 |
144 |
145 |     trans_q = mp.Queue()
146 |     processes = []
147 |     for idx_process in range(args.num_processes):
148 |         p = mp.Process(target=run_jobs, args=(idx_process, args, trans_q))
149 |         p.start()
150 |         processes.append(p)
151 | total, max_trial = 0, 0
152 | cnt_dict = {'succ': 0, 'fail': 0, 'invalid': 0}
153 |
154 | t0 = time.time()
155 | t_begin = datetime.datetime.now()
156 | while True:
157 | if not trans_q.empty():
158 | results = trans_q.get()
159 | result, trial_id = results
160 |
161 | cnt_dict[result] += 1
162 | total += 1
163 | max_trial = max(max_trial, trial_id)
164 |
165 |             print(
166 |                 'Episode: {} | trial_id: {} | Valid_portion: {:.4f} | Succ_portion: {:.4f} | Running Time: {:.4f} | Total Time: {}'.format(
167 |                     total, max_trial, (cnt_dict['succ'] + cnt_dict['fail']) / total, cnt_dict['succ'] / total, time.time() - t0, datetime.datetime.now() - t_begin)
168 |             )
169 | t0 = time.time()
170 |
171 |             if total >= 10000:
172 |                 for p in processes:
173 |                     p.terminate()
174 |                     p.join()
175 | break
176 |
177 |
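
A note on the labeling logic in run_jobs above: collect_data.py is called first for the interaction and then, unless --not_check_dual is given, the same trial is re-run through collect_data_checkDual.py with each gripper_id alone; a trial is stored as a success only when the original attempt succeeds (exit code 0) and neither single-gripper re-check does. A minimal sketch of that rule, mirroring the return codes used above (an illustrative helper, not part of the script):

    def label_trial(ret, ret0, ret1):
        # ret: exit code of the original attempt; ret0 / ret1: single-gripper re-checks
        if ret == 1:
            return 'fail'       # the attempt itself failed
        if ret == 2:
            return 'invalid'    # the trial was rejected as invalid
        if ret == 0:
            # keep as a dual success only if neither gripper succeeds alone
            return 'fail' if (ret0 == 0 or ret1 == 0) else 'succ'
        return None
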
--------------------------------------------------------------------------------
/code/gen_cate_setting.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | cs_file = open("./cate_setting.json", "w")
4 | cate_field_name = ["obj_scale", "urdf", "joint_range", "base_theta", "restrict_dir", "low_view", "lie_down", "RL_exp_num"]
5 | cate_setting = {
6 | "shape_id": {
7 | "Eyeglasses": "01", "Bucket": "02", "Box": "03", "Cart": "04", "Chair": "05",
8 | "TrashCan": "06", "Pliers": "07", "FoldingChair": "08", "Basket": "09", "Display": "10",
9 | "Table": "11", "Oven": "12", "Dishwasher": "13", "KitchenPot": "14", "Microwave": "15",
10 | "Scissors": "16", "Laptop": "17", "BirdHouse": "18", "Skateboard": "19", "Bookshelf": "20",
11 | "Safe": "21", "Toaster": "22", "Printer": "23", "Bench": "24", "Keyboard2": "25"
12 | },
13 | "pickup": {
14 | "cate_setting": {
15 | "Eyeglasses": [0.90, "processed", (0, 0.3), 1.0, True, False, False, "24000"],
16 | "Bucket": [0.85, "removed", (0.99, 1.0), 1.0, True, False, False, "24000"],
17 | "TrashCan": [0.80, "removed", (0.45, 0.55), 1.0, True, False, False, "24000"],
18 | "Pliers": [0.75, "processed", (0.3, 1.0), 1.0, True, False, False, "24000"],
19 | "Basket": [0.75, "shapenet", (0.0, 1.0), 1.0, True, False, False, "24000"],
20 | "Display": [0.80, "origin", (0, 0.03), 1.0, True, False, False, "24000"],
21 |
22 | "KitchenPot": [0.68, "removed", (0.0, 1.0), 1.0, True, False, False, "35000"],
23 | "Box": [0.80, "removed", (0.0, 0.05), 1.0, True, False, False, "52000"],
24 | "Scissors": [0.75, "fixbase", (0.3, 1.0), 1.0, True, False, False, "36000"],
25 | "Laptop": [0.80, "origin", (0.0, 0.0), 1.0, True, False, False, "39000"]
26 | },
27 | "material_static_friction": 8.0,
28 | "material_dynamic_friction": 8.0,
29 | "scene_static_friction": 0.60,
30 | "scene_dynamic_friction": 0.60,
31 | "stiffness": 1000,
32 | "damping": 10,
33 | "density": 9.0,
34 | "gripper_scale": 1.65,
35 | "start_dist": 0.40,
36 | "final_dist": 0.10,
37 | "move_steps": 4000,
38 | "wait_steps": 2000,
39 | "RL_load_date": "default",
40 | "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-pickup-Eyeglasses,Bucket,TrashCan,Pliers,Basket,Display-050980",
41 | "finetune_eval_epoch": "2-300"
42 | },
43 | "pulling": {
44 | "cate_setting": {
45 | "Eyeglasses": [0.80, "processed", (0, 0.3), 1.0, True, False, False, "120000"],
46 | "FoldingChair": [0.80, "processed", (0.0, 0.1), 1.0, True, False, False, "27000"],
47 | "Oven": [0.75, "processed", (0.95, 1.0), 1.0, True, False, False, "XXXXX"],
48 | "Dishwasher": [0.75, "processed", (0.95, 1.0), 1.0, True, False, False, "83000"],
49 | "Skateboard": [0.80, "shapenet", (0.95, 1.0), 1.0, True, True, False, "113300"],
50 | "Bookshelf": [0.75, "shapenet", (0.95, 1.0), 0.5, True, False, False, "161000"],
51 |
52 | "Table": [0.80, "removed", (0.0, 0.01), 1.0, True, True, False, "86000"],
53 | "Box": [0.80, "removed", (0.0, 0.05), 1.0, True, True, True, "136000"],
54 | "Safe": [0.75, "processed", (0.95, 1.0), 1.0, True, False, False, "XXXXX"],
55 | "Microwave": [0.75, "processed", (0.95, 1.0), 1.0, True, False, False, "XXXXX"],
56 | "BirdHouse": [0.75, "shapenet", (0.95, 1.0), -0.5, True, False, False, "XXXXX"]
57 | },
58 | "material_static_friction": 8.0,
59 | "material_dynamic_friction": 8.0,
60 | "scene_static_friction": 0.25,
61 | "scene_dynamic_friction": 0.25,
62 | "stiffness": 1000,
63 | "damping": 10,
64 | "density": 8.0,
65 | "gripper_scale": 1.50,
66 | "start_dist": 0.40,
67 | "final_dist": 0.10,
68 | "move_steps": 4000,
69 | "wait_steps": 1500,
70 | "RL_load_date": "default"
71 | },
72 | "pushing": {
73 | "cate_setting": {
74 | "Toaster": [0.75, "origin", (0.0, 0.01), 1.0, True, False, False, "XXXXX"],
75 | "KitchenPot": [0.75, "origin", (0.0, 0.01), 1.0, True, False, False, "XXXXX"],
76 | "Basket": [0.75, "shapenet", (0.0, 0.01), 1.0, True, False, False, "XXXXX"],
77 |
78 | "Box": [1.00, "origin", (0.0, 0.01), 1.0, True, False, False, "19500"],
79 | "Dishwasher": [0.75, "origin", (0.0, 0.01), 1.0, True, False, False, "52400"],
80 | "Display": [1.00, "origin", (0.0, 0.01), 1.0, True, False, False, "58300"],
81 | "Microwave": [1.00, "origin", (0.0, 0.01), 1.0, True, False, False, "XXXXX"],
82 | "Printer": [1.00, "origin", (0.0, 0.01), 1.0, True, False, False, "XXXXX"],
83 | "Bench": [1.00, "shapenet", (0.0, 0.01), 1.0, True, False, False, "65700"],
84 | "Keyboard2": [1.00, "shapenet", (0.0, 0.01), 1.0, True, False, False, "29600"],
85 | },
86 | "material_static_friction": 4.0,
87 | "material_dynamic_friction": 4.0,
88 | "scene_static_friction": 0.30,
89 | "scene_dynamic_friction": 0.30,
90 | "stiffness": 1,
91 | "damping": 10,
92 | "density": 2.0,
93 | "gripper_scale": 2,
94 | "start_dist": 0.45,
95 | "final_dist": 0.10,
96 | "move_steps": 3500,
97 | "wait_steps": 2000,
98 | "RL_load_date": "default",
99 | "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-pushing-Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2-042881",
100 | "finetune_eval_epoch": "8-0"
101 | },
102 | "rotating": {
103 | "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-rotating-Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2-051480",
104 | "finetune_eval_epoch": "16-0",
105 | "scene_static_friction": 0.15,
106 | "scene_dynamic_friction": 0.15,
107 | },
108 | "topple": {
109 | "finetune_path": "../2gripper_logs/finetune/exp-finetune-CVPR_baseline-topple-Box,Bucket,Dishwasher,Display,Bench,Bowl-050580",
110 | "finetune_eval_epoch": "9-0",
111 | "scene_static_friction": 0.15,
112 | "scene_dynamic_friction": 0.15,
113 | },
114 | "fetch": {
115 | "material_static_friction": 8.0,
116 | "material_dynamic_friction": 8.0,
117 | "scene_static_friction": 0.60,
118 | "scene_dynamic_friction": 0.60,
119 | "stiffness": 1,
120 | "damping": 10,
121 | "density": 80,
122 | "gripper_scale": 1.0,
123 | "start_dist": 0.50,
124 | "final_dist": 0.10,
125 | "move_steps": 5000,
126 | "wait_steps": 2000,
127 | }
128 | }
129 |
130 | for primact in cate_setting:
131 | if primact != "shape_id":
132 | if "cate_setting" in cate_setting[primact]:
133 | for cate in cate_setting[primact]["cate_setting"]:
134 | cate_setting[primact]["cate_setting"][cate] = dict(zip(cate_field_name, cate_setting[primact]["cate_setting"][cate]))
135 |
136 | json.dump(cate_setting, cs_file)
137 | cs_file.close()
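
For reference, the loop above zips each positional list with cate_field_name, so the dumped JSON stores one named record per category. The "Box" entry under "pushing", for example, becomes the following (values copied from the list above; the joint_range tuple is serialized as a JSON array):

    # cate_setting["pushing"]["cate_setting"]["Box"] after the zip, before json.dump
    {
        "obj_scale": 1.0,
        "urdf": "origin",
        "joint_range": (0.0, 0.01),
        "base_theta": 1.0,
        "restrict_dir": True,
        "low_view": False,
        "lie_down": False,
        "RL_exp_num": "19500",
    }
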
--------------------------------------------------------------------------------
/code/logs/.gitignore:
--------------------------------------------------------------------------------
1 | !README.md
2 | !.gitignore
3 | !w2a
4 |
--------------------------------------------------------------------------------
/code/logs/README.md:
--------------------------------------------------------------------------------
1 | This folder stores the training logs.
2 |
3 | Download the pre-trained models to this folder. The pre-trained `where2act` model used for RL data collection is already provided here; the other required models should be downloaded separately.
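
The bundled where2act checkpoints under `w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/` can be inspected with standard PyTorch tooling; a minimal sketch, assuming the `.pth` files are ordinary `torch.save` outputs:

    import torch

    # Load the where2act network weights on CPU just to inspect the contents.
    ckpt = torch.load(
        'w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-network.pth',
        map_location='cpu')
    print(type(ckpt))
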
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-lr_scheduler.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-lr_scheduler.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-network.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-network.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-optimizer.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-optimizer.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-sample_succ_list.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-sample_succ_list.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-train_dataset.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/ckpts/81-train_dataset.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/conf.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/conf.pth
--------------------------------------------------------------------------------
/code/logs/w2a/finalexp-model_all_final-pulling-None-train_all_v1/datagen_v1.py:
--------------------------------------------------------------------------------
1 | """
2 | Batch-generate data
3 | Commands can be
4 | - New Data: collect_data.py
5 | - Old Data: recollect_data.py
6 | """
7 |
8 | import os
9 | import numpy as np
10 | import multiprocessing as mp
11 | from subprocess import call
12 | from utils import printout
13 | import time
14 |
15 |
16 | class DataGen(object):
17 |
18 | def __init__(self, num_processes, flog=None):
19 | self.num_processes = num_processes
20 | self.flog = flog
21 |
22 | self.todos = []
23 | self.processes = []
24 | self.is_running = False
25 | self.Q = mp.Queue()
26 |
27 | def __len__(self):
28 | return len(self.todos)
29 |
30 | def add_one_collect_job(self, data_dir, shape_id, category, cnt_id, primact_type, trial_id):
31 | if self.is_running:
32 | printout(self.flog, 'ERROR: cannot add a new job while DataGen is running!')
33 | exit(1)
34 |
35 | todo = ('COLLECT', shape_id, category, cnt_id, primact_type, data_dir, trial_id, np.random.randint(10000000))
36 | self.todos.append(todo)
37 |
38 | def add_one_recollect_job(self, src_data_dir, dir1, dir2, recollect_record_name, tar_data_dir, x, y):
39 | if self.is_running:
40 | printout(self.flog, 'ERROR: cannot add a new job while DataGen is running!')
41 | exit(1)
42 |
43 | todo = ('RECOLLECT', src_data_dir, recollect_record_name, tar_data_dir, np.random.randint(10000000), x, y, dir1, dir2)
44 | self.todos.append(todo)
45 |
46 | @staticmethod
47 | def job_func(pid, todos, Q):
48 | succ_todos = []
49 | for todo in todos:
50 | if todo[0] == 'COLLECT':
51 | cmd = 'xvfb-run -a python collect_data.py %s %s %d %s %s --trial_id %d --random_seed %d --no_gui > /dev/null 2>&1' \
52 | % (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7])
53 | folder_name = todo[5]
54 | job_name = '%s_%s_%d_%s_%s' % (todo[1], todo[2], todo[3], todo[4], todo[6])
55 | elif todo[0] == 'RECOLLECT':
56 | cmd = 'xvfb-run -a python recollect_data.py %s %s %s --random_seed %d --no_gui --x %d --y %d --dir1 %s --dir2 %s > /dev/null 2>&1' \
57 | % (todo[1], todo[2], todo[3], todo[4], todo[5], todo[6], todo[7], todo[8])
58 | folder_name = todo[3]
59 | job_name = todo[2]
60 | ret = call(cmd, shell=True)
61 | if ret == 0:
62 | succ_todos.append(os.path.join(folder_name, job_name))
63 | Q.put(succ_todos)
64 |
65 | def start_all(self):
66 | if self.is_running:
67 | printout(self.flog, 'ERROR: cannot start all while DataGen is running!')
68 | exit(1)
69 |
70 | total_todos = len(self)
71 | num_todos_per_process = int(np.ceil(total_todos / self.num_processes))
72 | np.random.shuffle(self.todos)
73 | for i in range(self.num_processes):
74 | todos = self.todos[i*num_todos_per_process: min(total_todos, (i+1)*num_todos_per_process)]
75 | p = mp.Process(target=self.job_func, args=(i, todos, self.Q))
76 | p.start()
77 | self.processes.append(p)
78 |
79 | self.is_running = True
80 |
81 | def join_all(self):
82 | if not self.is_running:
83 | printout(self.flog, 'ERROR: cannot join all while DataGen is idle!')
84 | exit(1)
85 |
86 | ret = []
87 | for p in self.processes:
88 | ret += self.Q.get()
89 |
90 | for p in self.processes:
91 | p.join()
92 |
93 | self.todos = []
94 | self.processes = []
95 | self.Q = mp.Queue()
96 |         self.is_running = False
97 | return ret
98 |
99 |
100 |
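
DataGen batches headless (xvfb-run) invocations of collect_data.py / recollect_data.py across worker processes: jobs are queued while idle, start_all() shuffles and forks them, and join_all() returns the folder/job identifiers of the commands that exited with code 0. A minimal usage sketch; the argument values are illustrative and must match the CLI of the collect_data.py this logged script was paired with:

    gen = DataGen(num_processes=4)
    gen.add_one_collect_job(data_dir='../data/gt_pulling', shape_id='45162',
                            category='Dishwasher', cnt_id=0,
                            primact_type='pulling', trial_id=0)
    gen.start_all()
    succ_jobs = gen.join_all()   # e.g. ['../data/gt_pulling/45162_Dishwasher_0_pulling_0']
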
--------------------------------------------------------------------------------
/code/models/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/models/.DS_Store
--------------------------------------------------------------------------------
/code/models/model_aff_fir.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.utils.data import DataLoader
5 |
6 | # https://github.com/erikwijmans/Pointnet2_PyTorch
7 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
8 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
9 |
10 |
11 | class PointNet2SemSegSSG(PointNet2ClassificationSSG):
12 | def _build_model(self):
13 | self.SA_modules = nn.ModuleList()
14 | self.SA_modules.append(
15 | PointnetSAModule(
16 | npoint=1024,
17 | radius=0.1,
18 | nsample=32,
19 | mlp=[3, 32, 32, 64],
20 | use_xyz=True,
21 | )
22 | )
23 | self.SA_modules.append(
24 | PointnetSAModule(
25 | npoint=256,
26 | radius=0.2,
27 | nsample=32,
28 | mlp=[64, 64, 64, 128],
29 | use_xyz=True,
30 | )
31 | )
32 | self.SA_modules.append(
33 | PointnetSAModule(
34 | npoint=64,
35 | radius=0.4,
36 | nsample=32,
37 | mlp=[128, 128, 128, 256],
38 | use_xyz=True,
39 | )
40 | )
41 | self.SA_modules.append(
42 | PointnetSAModule(
43 | npoint=16,
44 | radius=0.8,
45 | nsample=32,
46 | mlp=[256, 256, 256, 512],
47 | use_xyz=True,
48 | )
49 | )
50 |
51 | self.FP_modules = nn.ModuleList()
52 | self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
53 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
54 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
55 | self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
56 |
57 | self.fc_layer = nn.Sequential(
58 | nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
59 | nn.BatchNorm1d(self.hparams['feat_dim']),
60 | nn.ReLU(True),
61 | )
62 |
63 | def forward(self, pointcloud):
64 | r"""
65 | Forward pass of the network
66 | Parameters
67 | ----------
68 | pointcloud: Variable(torch.cuda.FloatTensor)
69 | (B, N, 3 + input_channels) tensor
70 |             Point cloud to run predictions on
71 |             Each point in the point-cloud MUST
72 |             be formatted as (x, y, z, features...)
73 | """
74 | xyz, features = self._break_up_pc(pointcloud)
75 |
76 | l_xyz, l_features = [xyz], [features]
77 | for i in range(len(self.SA_modules)):
78 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
79 | l_xyz.append(li_xyz)
80 | l_features.append(li_features)
81 |
82 | for i in range(-1, -(len(self.FP_modules) + 1), -1):
83 | l_features[i - 1] = self.FP_modules[i](
84 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
85 | )
86 |
87 | return self.fc_layer(l_features[0])
88 |
89 |
90 | class ActionScore(nn.Module):
91 | def __init__(self, input_dim, output_dim=1):
92 | super(ActionScore, self).__init__()
93 |
94 | self.hidden_dim = 128
95 | self.mlp1 = nn.Linear(input_dim, self.hidden_dim)
96 | self.mlp2 = nn.Linear(self.hidden_dim, output_dim)
97 |
98 | # feats B x F
99 | # output: B
100 | def forward(self, inputs):
101 | feats = torch.cat(inputs, dim=-1)
102 | net = F.leaky_relu(self.mlp1(feats))
103 | net = self.mlp2(net)
104 | return net
105 |
106 |
107 | class Network(nn.Module):
108 | def __init__(self, feat_dim, task_feat_dim, cp_feat_dim, dir_feat_dim, topk=1, task_input_dim=3):
109 | super(Network, self).__init__()
110 |
111 | self.topk = topk
112 |
113 | self.pointnet2 = PointNet2SemSegSSG({'feat_dim': feat_dim})
114 |
115 | self.mlp_task = nn.Linear(task_input_dim, task_feat_dim)
116 | self.mlp_dir = nn.Linear(3 + 3, dir_feat_dim)
117 | self.mlp_cp = nn.Linear(3, cp_feat_dim) # contact point
118 |
119 | self.BCELoss = nn.BCEWithLogitsLoss(reduction='none')
120 | self.L1Loss = nn.L1Loss(reduction='none')
121 |
122 | self.action_score = ActionScore(feat_dim + task_feat_dim + cp_feat_dim)
123 |
124 |
125 | def forward(self, pcs, task, cp1):
126 | pcs[:, 0] = cp1
127 | pcs = pcs.repeat(1, 1, 2)
128 | whole_feats = self.pointnet2(pcs)
129 | net1 = whole_feats[:, :, 0]
130 |
131 | task_feats = self.mlp_task(task)
132 | cp1_feats = self.mlp_cp(cp1)
133 |
134 | pred_result_logits = self.action_score([net1, task_feats, cp1_feats])
135 | pred_score = torch.sigmoid(pred_result_logits)
136 | return pred_score
137 |
138 | def get_loss(self, pred_score, gt_score):
139 | loss = self.L1Loss(pred_score, gt_score).mean()
140 | return loss
141 |
142 |
143 | def inference_whole_pc(self, pcs, task):
144 | batch_size = pcs.shape[0]
145 | num_pts = pcs.shape[1]
146 |
147 | cp1 = pcs.view(batch_size * num_pts, -1)
148 | cp1_feats = self.mlp_cp(cp1)
149 |
150 | pcs = pcs.repeat(1, 1, 2)
151 | whole_feats = self.pointnet2(pcs)
152 | net1 = whole_feats.permute(0, 2, 1).reshape(batch_size * num_pts, -1)
153 |
154 | task_feats = self.mlp_task(task)
155 |
156 | expanded_task_feats = task_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * num_pts, -1)
157 |
158 | pred_result_logits = self.action_score([net1, expanded_task_feats, cp1_feats])
159 | pred_score = torch.sigmoid(pred_result_logits).reshape(batch_size, num_pts)
160 | return pred_score
161 |
162 |
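
This first affordance module scores where the first gripper should act: forward() rates a single candidate contact point, while inference_whole_pc() rates every point of the cloud in one pass (the cloud is duplicated along the channel axis so the first SA module receives the 3 feature channels its mlp expects). A shape-check sketch, assuming the compiled pointnet2_ops CUDA extension, a GPU, the pinned dependency versions from requirements.txt, and that code/models is on the import path; the feature dimensions are illustrative:

    import torch
    from model_aff_fir import Network

    aff1 = Network(feat_dim=128, task_feat_dim=32, cp_feat_dim=32, dir_feat_dim=32).cuda()
    pcs = torch.rand(2, 10000, 3).cuda()           # B point clouds of N points each
    task = torch.rand(2, 3).cuda()                 # 3-dim task vector (task_input_dim=3)
    aff1_map = aff1.inference_whole_pc(pcs, task)  # per-point scores, shape (2, 10000)
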
--------------------------------------------------------------------------------
/code/models/model_aff_sec.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.utils.data import DataLoader
5 |
6 | # https://github.com/erikwijmans/Pointnet2_PyTorch
7 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
8 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
9 |
10 |
11 | class PointNet2SemSegSSG(PointNet2ClassificationSSG):
12 | def _build_model(self):
13 | self.SA_modules = nn.ModuleList()
14 | self.SA_modules.append(
15 | PointnetSAModule(
16 | npoint=1024,
17 | radius=0.1,
18 | nsample=32,
19 | mlp=[3, 32, 32, 64],
20 | use_xyz=True,
21 | )
22 | )
23 | self.SA_modules.append(
24 | PointnetSAModule(
25 | npoint=256,
26 | radius=0.2,
27 | nsample=32,
28 | mlp=[64, 64, 64, 128],
29 | use_xyz=True,
30 | )
31 | )
32 | self.SA_modules.append(
33 | PointnetSAModule(
34 | npoint=64,
35 | radius=0.4,
36 | nsample=32,
37 | mlp=[128, 128, 128, 256],
38 | use_xyz=True,
39 | )
40 | )
41 | self.SA_modules.append(
42 | PointnetSAModule(
43 | npoint=16,
44 | radius=0.8,
45 | nsample=32,
46 | mlp=[256, 256, 256, 512],
47 | use_xyz=True,
48 | )
49 | )
50 |
51 | self.FP_modules = nn.ModuleList()
52 | self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
53 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
54 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
55 | self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
56 |
57 | self.fc_layer = nn.Sequential(
58 | nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
59 | nn.BatchNorm1d(self.hparams['feat_dim']),
60 | nn.ReLU(True),
61 | )
62 |
63 | def forward(self, pointcloud):
64 | r"""
65 | Forward pass of the network
66 | Parameters
67 | ----------
68 | pointcloud: Variable(torch.cuda.FloatTensor)
69 | (B, N, 3 + input_channels) tensor
70 |             Point cloud to run predictions on
71 |             Each point in the point-cloud MUST
72 |             be formatted as (x, y, z, features...)
73 | """
74 | xyz, features = self._break_up_pc(pointcloud)
75 |
76 | l_xyz, l_features = [xyz], [features]
77 | for i in range(len(self.SA_modules)):
78 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
79 | l_xyz.append(li_xyz)
80 | l_features.append(li_features)
81 |
82 | for i in range(-1, -(len(self.FP_modules) + 1), -1):
83 | l_features[i - 1] = self.FP_modules[i](
84 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
85 | )
86 |
87 | return self.fc_layer(l_features[0])
88 |
89 |
90 | class ActionScore(nn.Module):
91 | def __init__(self, input_dim, output_dim=1):
92 | super(ActionScore, self).__init__()
93 |
94 | self.hidden_dim = 128
95 | self.mlp1 = nn.Linear(input_dim, self.hidden_dim)
96 | self.mlp2 = nn.Linear(self.hidden_dim, output_dim)
97 |
98 | # feats B x F
99 | # output: B
100 | def forward(self, inputs):
101 | feats = torch.cat(inputs, dim=-1)
102 | net = F.leaky_relu(self.mlp1(feats))
103 | net = self.mlp2(net)
104 | return net
105 |
106 |
107 | class Network(nn.Module):
108 | def __init__(self, feat_dim, task_feat_dim, cp_feat_dim, dir_feat_dim, topk=1, task_input_dim=3):
109 | super(Network, self).__init__()
110 |
111 | self.topk = topk
112 |
113 | self.pointnet2 = PointNet2SemSegSSG({'feat_dim': feat_dim})
114 |
115 | self.mlp_task = nn.Linear(task_input_dim, task_feat_dim)
116 | self.mlp_dir = nn.Linear(3 + 3, dir_feat_dim)
117 | self.mlp_cp = nn.Linear(3, cp_feat_dim) # contact point
118 |
119 | self.BCELoss = nn.BCEWithLogitsLoss(reduction='none')
120 | self.L1Loss = nn.L1Loss(reduction='none')
121 |
122 | self.action_score = ActionScore(feat_dim * 2 + task_feat_dim + cp_feat_dim * 2 + dir_feat_dim)
123 |
124 |
125 | def forward(self, pcs, task, cp1, cp2, dir1):
126 | pcs[:, 0] = cp1
127 | pcs[:, 1] = cp2
128 | pcs = pcs.repeat(1, 1, 2)
129 | whole_feats = self.pointnet2(pcs)
130 | net1 = whole_feats[:, :, 0]
131 | net2 = whole_feats[:, :, 1]
132 |
133 | task_feats = self.mlp_task(task)
134 | cp1_feats = self.mlp_cp(cp1)
135 | cp2_feats = self.mlp_cp(cp2)
136 | dir1_feats = self.mlp_dir(dir1)
137 |
138 | pred_result_logits = self.action_score([net1, net2, task_feats, cp1_feats, cp2_feats, dir1_feats])
139 | pred_score = torch.sigmoid(pred_result_logits)
140 | return pred_score
141 |
142 |
143 | def get_loss(self, pred_score, gt_score):
144 | loss = self.L1Loss(pred_score, gt_score).mean()
145 | return loss
146 |
147 |
148 | def inference_whole_pc(self, pcs, task, cp1, dir1):
149 | batch_size = pcs.shape[0]
150 | num_pts = pcs.shape[1]
151 |
152 | pcs[:, 0] = cp1
153 | cp2 = pcs.view(batch_size * num_pts, -1)
154 | cp2_feats = self.mlp_cp(cp2)
155 |
156 | pcs = pcs.repeat(1, 1, 2)
157 | whole_feats = self.pointnet2(pcs)
158 | net1 = whole_feats[:, :, 0]
159 | net2 = whole_feats.permute(0, 2, 1).reshape(batch_size * num_pts, -1)
160 |
161 | task_feats = self.mlp_task(task)
162 | cp1_feats = self.mlp_cp(cp1)
163 | dir1_feats = self.mlp_dir(dir1)
164 |
165 | expanded_net1 = net1.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * num_pts, -1)
166 | expanded_task_feats = task_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * num_pts, -1)
167 | expanded_cp1_feats = cp1_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * num_pts, -1)
168 | expanded_dir1_feats = dir1_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * num_pts, -1)
169 |
170 | pred_result_logits = self.action_score([expanded_net1, net2, expanded_task_feats,
171 | expanded_cp1_feats, cp2_feats, expanded_dir1_feats])
172 | pred_score = torch.sigmoid(pred_result_logits).reshape(batch_size, num_pts)
173 | return pred_score
174 |
175 |
176 | def inference_whole_pc_finetune(self, pcs, task, cp1, dir1, rvs_ctpt=10):
177 | batch_size = pcs.shape[0]
178 | num_pts = pcs.shape[1]
179 |
180 | task_feats = self.mlp_task(task) # (B, -1)
181 | cp1_feats = self.mlp_cp(cp1) # (B * rvs_ctpt, -1)
182 | dir1_feats = self.mlp_dir(dir1) # (B * rvs_ctpt, -1)
183 |
184 | cp1 = cp1.reshape(batch_size, rvs_ctpt, 3)
185 | pcs[:, 0: rvs_ctpt] = cp1
186 | cp2 = pcs.view(batch_size * num_pts, -1)
187 | cp2_feats = self.mlp_cp(cp2) # (B * num_pts, -1)
188 |
189 | pcs = pcs.repeat(1, 1, 2)
190 | whole_feats = self.pointnet2(pcs)
191 | net1 = whole_feats[:, :, 0: rvs_ctpt].permute(0, 2, 1).reshape(batch_size * rvs_ctpt, -1)
192 | net2 = whole_feats.permute(0, 2, 1).reshape(batch_size * num_pts, -1)
193 |
194 | expanded_task_feats = task_feats.unsqueeze(dim=1).repeat(1, rvs_ctpt * num_pts, 1).reshape(batch_size * rvs_ctpt * num_pts, -1)
195 | expanded_net1 = net1.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * rvs_ctpt * num_pts, -1)
196 | expanded_cp1_feats = cp1_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * rvs_ctpt * num_pts, -1)
197 | expanded_dir1_feats = dir1_feats.unsqueeze(dim=1).repeat(1, num_pts, 1).reshape(batch_size * rvs_ctpt * num_pts, -1)
198 |
199 | expanded_net2 = net2.reshape(batch_size, num_pts, -1).repeat(1, rvs_ctpt, 1).reshape(batch_size * num_pts * rvs_ctpt, -1)
200 | expanded_cp2_feats = cp2_feats.reshape(batch_size, num_pts, -1).repeat(1, rvs_ctpt, 1).reshape(batch_size * num_pts * rvs_ctpt, -1)
201 |
202 | pred_result_logits = self.action_score([expanded_net1, expanded_net2, expanded_task_feats,
203 | expanded_cp1_feats, expanded_cp2_feats, expanded_dir1_feats])
204 | pred_score = torch.sigmoid(pred_result_logits).reshape(batch_size * rvs_ctpt, num_pts)
205 | return pred_score
206 |
207 |
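
The second affordance module mirrors the first but is conditioned on the first gripper's choice: it additionally takes the first contact point cp1 and a 6-dim direction dir1 (mlp_dir consumes 3 + 3, i.e. two concatenated 3-vectors), and inference_whole_pc() scores every point as a candidate second contact. Continuing the sketch above under the same assumptions:

    from model_aff_sec import Network as NetworkSec

    aff2 = NetworkSec(feat_dim=128, task_feat_dim=32, cp_feat_dim=32, dir_feat_dim=32).cuda()
    cp1 = pcs[:, 0, :]                 # an illustrative first contact point per cloud, (2, 3)
    dir1 = torch.rand(2, 6).cuda()     # first gripper direction: two concatenated 3-vectors
    aff2_map = aff2.inference_whole_pc(pcs, task, cp1, dir1)   # shape (2, 10000)
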
--------------------------------------------------------------------------------
/code/models/model_critic_fir.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import torch.nn.functional as F
4 | from torch.utils.data import DataLoader
5 | import ipdb
6 |
7 | # https://github.com/erikwijmans/Pointnet2_PyTorch
8 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
9 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
10 |
11 |
12 | class PointNet2SemSegSSG(PointNet2ClassificationSSG):
13 | def _build_model(self):
14 | self.SA_modules = nn.ModuleList()
15 | self.SA_modules.append(
16 | PointnetSAModule(
17 | npoint=1024,
18 | radius=0.1,
19 | nsample=32,
20 | mlp=[3, 32, 32, 64],
21 | use_xyz=True,
22 | )
23 | )
24 | self.SA_modules.append(
25 | PointnetSAModule(
26 | npoint=256,
27 | radius=0.2,
28 | nsample=32,
29 | mlp=[64, 64, 64, 128],
30 | use_xyz=True,
31 | )
32 | )
33 | self.SA_modules.append(
34 | PointnetSAModule(
35 | npoint=64,
36 | radius=0.4,
37 | nsample=32,
38 | mlp=[128, 128, 128, 256],
39 | use_xyz=True,
40 | )
41 | )
42 | self.SA_modules.append(
43 | PointnetSAModule(
44 | npoint=16,
45 | radius=0.8,
46 | nsample=32,
47 | mlp=[256, 256, 256, 512],
48 | use_xyz=True,
49 | )
50 | )
51 | # self.SA_modules.append(
52 | # PointnetSAModule(
53 | # mlp=[512, 512],
54 | # )
55 | # )
56 |
57 | self.FP_modules = nn.ModuleList()
58 | self.FP_modules.append(PointnetFPModule(mlp=[128 + 3, 128, 128, 128]))
59 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
60 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
61 | self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
62 |
63 | self.fc_layer = nn.Sequential(
64 | nn.Conv1d(128, self.hparams['feat_dim'], kernel_size=1, bias=False),
65 | nn.BatchNorm1d(self.hparams['feat_dim']),
66 | nn.ReLU(True),
67 | )
68 |
69 | def forward(self, pointcloud):
70 | r"""
71 | Forward pass of the network
72 |
73 | Parameters
74 | ----------
75 | pointcloud: Variable(torch.cuda.FloatTensor)
76 | (B, N, 3 + input_channels) tensor
77 |             Point cloud to run predictions on
78 |             Each point in the point-cloud MUST
79 |             be formatted as (x, y, z, features...)
80 | """
81 | xyz, features = self._break_up_pc(pointcloud)
82 |
83 | l_xyz, l_features = [xyz], [features]
84 | # print(pointcloud.shape)
85 | # print(xyz.shape)
86 | for i in range(len(self.SA_modules)):
87 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
88 | l_xyz.append(li_xyz)
89 | l_features.append(li_features)
90 | # print(li_features.shape)
91 |
92 | for i in range(-1, -(len(self.FP_modules) + 1), -1):
93 | l_features[i - 1] = self.FP_modules[i](
94 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
95 | )
96 |
97 | return self.fc_layer(l_features[0])
98 |
99 |
100 | class Critic(nn.Module):
101 |
102 | def __init__(self, input_dim, output_dim=1, hidden_dim=128):
103 | super(Critic, self).__init__()
104 |
105 | self.hidden_dim = hidden_dim
106 | self.mlp1 = nn.Linear(input_dim, self.hidden_dim)
107 | self.mlp2 = nn.Linear(self.hidden_dim, output_dim)
108 |
109 | # pixel_feats B x F, query_fats: B x 6
110 | # output: B
111 | def forward(self, inputs):
112 | input_net = torch.cat(inputs, dim=-1)
113 | hidden_net = F.leaky_relu(self.mlp1(input_net))
114 | net = self.mlp2(hidden_net)
115 | return net
116 |
117 |
118 | class Network(nn.Module):
119 | def __init__(self, feat_dim, task_feat_dim, cp_feat_dim, dir_feat_dim, hidden_feat_dim=128, task_input_dim=3):
120 | super(Network, self).__init__()
121 |
122 | self.pointnet2 = PointNet2SemSegSSG({'feat_dim': feat_dim})
123 |
124 | self.critic = Critic(input_dim=feat_dim + task_feat_dim + cp_feat_dim + dir_feat_dim, hidden_dim=hidden_feat_dim)
125 |
126 | self.mlp_task = nn.Linear(task_input_dim, task_feat_dim)
127 | self.mlp_dir = nn.Linear(3 + 3, dir_feat_dim)
128 | self.mlp_cp = nn.Linear(3, cp_feat_dim) # contact point
129 |
130 | self.BCELoss = nn.BCEWithLogitsLoss(reduction='none')
131 | self.sigmoid = nn.Sigmoid()
132 | self.BCELoss_withoutSigmoid = nn.BCELoss(reduction='none')
133 | self.L1Loss = nn.L1Loss(reduction='none')
134 |
135 | def forward(self, pcs, task, cp1, dir1):
136 | pcs[:, 0] = cp1
137 | pcs = pcs.repeat(1, 1, 2)
138 | whole_feats = self.pointnet2(pcs)
139 | net1 = whole_feats[:, :, 0]
140 |
141 | task_feats = self.mlp_task(task)
142 | cp1_feats = self.mlp_cp(cp1)
143 | dir1_feats = self.mlp_dir(dir1)
144 |
145 | pred_result_logits = self.critic([net1, task_feats, cp1_feats, dir1_feats])
146 | pred_scores = torch.sigmoid(pred_result_logits)
147 | return pred_scores
148 |
149 | def forward_n(self, pcs, task, cp1, dir1, rvs=100):
150 | batch_size = pcs.shape[0]
151 | pcs[:, 0] = cp1
152 | pcs = pcs.repeat(1, 1, 2)
153 | whole_feats = self.pointnet2(pcs)
154 | net1 = whole_feats[:, :, 0]
155 |
156 | task_feats = self.mlp_task(task) # B * 3
157 | cp1_feats = self.mlp_cp(cp1)
158 | dir1_feats = self.mlp_dir(dir1) # B * 6
159 |
160 | expanded_net1 = net1.unsqueeze(dim=1).repeat(1, rvs, 1).reshape(batch_size * rvs, -1)
161 | expanded_task_feats = task_feats.unsqueeze(dim=1).repeat(1, rvs, 1).reshape(batch_size * rvs, -1)
162 | expanded_cp1_feats = cp1_feats.unsqueeze(dim=1).repeat(1, rvs, 1).reshape(batch_size * rvs, -1)
163 |
164 | pred_result_logits = self.critic([expanded_net1, expanded_task_feats,
165 | expanded_cp1_feats, dir1_feats])
166 | pred_scores = torch.sigmoid(pred_result_logits)
167 |
168 | return pred_scores
169 |
170 | def forward_n_finetune(self, pcs, task, cp1, dir1, rvs_ctpt=10, rvs=100):
171 | batch_size = pcs.shape[0]
172 |
173 | task_feats = self.mlp_task(task) # (B, -1)
174 | cp1_feats = self.mlp_cp(cp1) # (B * rvs_ctpt, -1)
175 | dir1_feats = self.mlp_dir(dir1) # (B * rvs_ctpt * rvs, -1)
176 |
177 | cp1 = cp1.reshape(batch_size, rvs_ctpt, 3)
178 | pcs[:, 0: rvs_ctpt] = cp1
179 | pcs = pcs.repeat(1, 1, 2)
180 | whole_feats = self.pointnet2(pcs)
181 | net1 = whole_feats[:, :, 0: rvs_ctpt].permute(0, 2, 1).reshape(batch_size * rvs_ctpt, -1)
182 |
183 | expanded_task_feats = task_feats.unsqueeze(dim=1).repeat(1, rvs_ctpt * rvs, 1).reshape(batch_size * rvs_ctpt * rvs, -1)
184 | expanded_net1 = net1.unsqueeze(dim=1).repeat(1, rvs, 1).reshape(batch_size * rvs_ctpt * rvs, -1)
185 | expanded_cp1_feats = cp1_feats.unsqueeze(dim=1).repeat(1, rvs, 1).reshape(batch_size * rvs_ctpt * rvs, -1)
186 |
187 | pred_result_logits = self.critic([expanded_net1, expanded_task_feats, expanded_cp1_feats, dir1_feats])
188 | pred_scores = torch.sigmoid(pred_result_logits)
189 |
190 | return pred_scores
191 |
192 |
193 | def get_ce_loss_total(self, pred_logits, gt_labels):
194 | loss = self.BCELoss_withoutSigmoid(pred_logits, gt_labels.float())
195 | return loss
196 |
197 | def get_L1_loss(self, pred_score, gt_score):
198 | loss = self.L1Loss(pred_score, gt_score)
199 | return loss
200 |
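
The first critic scores a complete first-gripper proposal (contact point plus 6-dim direction) against the task, while forward_n / forward_n_finetune reuse one point-cloud encoding to rate rvs sampled directions (and rvs_ctpt sampled contact points) in a single batch. Continuing the same sketch:

    from model_critic_fir import Network as CriticFir

    critic1 = CriticFir(feat_dim=128, task_feat_dim=32, cp_feat_dim=32, dir_feat_dim=32).cuda()
    q1 = critic1(pcs, task, cp1, dir1)   # predicted success score in (0, 1), shape (2, 1)
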
--------------------------------------------------------------------------------
/code/models/pointnet2/__init__.py:
--------------------------------------------------------------------------------
1 | from pointnet2 import data, models
2 | from pointnet2._version import __version__
3 |
--------------------------------------------------------------------------------
/code/models/pointnet2/_version.py:
--------------------------------------------------------------------------------
1 | __version__ = "3.0.0"
2 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/config.yaml:
--------------------------------------------------------------------------------
1 | defaults:
2 | - task: cls
3 | - model: ssg
4 | - task_model: ${defaults.0.task}-${defaults.1.model}
5 |
6 | hydra:
7 | run:
8 | dir: outputs
9 |
10 | gpus:
11 | - 0
12 |
13 | optimizer: ???
14 |
15 | task_model: ???
16 |
17 | model: ???
18 |
19 | distrib_backend: dp
20 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/model/msg.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | use_xyz: True
3 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/model/ssg.yaml:
--------------------------------------------------------------------------------
1 | model:
2 | use_xyz: True
3 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task/cls.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | weight_decay: 0.0
3 | lr: 1e-3
4 | lr_decay: 0.7
5 | bn_momentum: 0.5
6 | bnm_decay: 0.5
7 | decay_step: 2e4
8 |
9 |
10 | num_points: 4096
11 | epochs: 200
12 | batch_size: 32
13 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task/semseg.yaml:
--------------------------------------------------------------------------------
1 | optimizer:
2 | weight_decay: 0.0
3 | lr: 1e-3
4 | lr_decay: 0.5
5 | bn_momentum: 0.5
6 | bnm_decay: 0.5
7 | decay_step: 3e5
8 |
9 | num_points: 4096
10 | epochs: 50
11 | batch_size: 24
12 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task_model/cls-msg.yaml:
--------------------------------------------------------------------------------
1 | task_model:
2 | class: pointnet2.models.PointNet2ClassificationMSG
3 | name: cls-msg
4 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task_model/cls-ssg.yaml:
--------------------------------------------------------------------------------
1 | task_model:
2 | class: pointnet2.models.PointNet2ClassificationSSG
3 | name: cls-ssg
4 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task_model/semseg-msg.yaml:
--------------------------------------------------------------------------------
1 | task_model:
2 | class: pointnet2.models.PointNet2SemSegMSG
3 | name: sem-msg
4 |
--------------------------------------------------------------------------------
/code/models/pointnet2/config/task_model/semseg-ssg.yaml:
--------------------------------------------------------------------------------
1 | task_model:
2 | class: pointnet2.models.PointNet2SemSegSSG
3 | name: sem-ssg
4 |
--------------------------------------------------------------------------------
/code/models/pointnet2/data/Indoor3DSemSegLoader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shlex
3 | import subprocess
4 |
5 | import h5py
6 | import numpy as np
7 | import torch
8 | import torch.utils.data as data
9 |
10 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
11 |
12 |
13 | def _get_data_files(list_filename):
14 | with open(list_filename) as f:
15 | return [line.rstrip() for line in f]
16 |
17 |
18 | def _load_data_file(name):
19 | f = h5py.File(name, "r")
20 | data = f["data"][:]
21 | label = f["label"][:]
22 | return data, label
23 |
24 |
25 | class Indoor3DSemSeg(data.Dataset):
26 | def __init__(self, num_points, train=True, download=True, data_precent=1.0):
27 | super().__init__()
28 | self.data_precent = data_precent
29 | self.folder = "indoor3d_sem_seg_hdf5_data"
30 | self.data_dir = os.path.join(BASE_DIR, self.folder)
31 | self.url = (
32 | "https://shapenet.cs.stanford.edu/media/indoor3d_sem_seg_hdf5_data.zip"
33 | )
34 |
35 | if download and not os.path.exists(self.data_dir):
36 | zipfile = os.path.join(BASE_DIR, os.path.basename(self.url))
37 | subprocess.check_call(
38 | shlex.split("curl {} -o {}".format(self.url, zipfile))
39 | )
40 |
41 | subprocess.check_call(
42 | shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR))
43 | )
44 |
45 | subprocess.check_call(shlex.split("rm {}".format(zipfile)))
46 |
47 | self.train, self.num_points = train, num_points
48 |
49 | all_files = _get_data_files(os.path.join(self.data_dir, "all_files.txt"))
50 | room_filelist = _get_data_files(
51 | os.path.join(self.data_dir, "room_filelist.txt")
52 | )
53 |
54 | data_batchlist, label_batchlist = [], []
55 | for f in all_files:
56 | data, label = _load_data_file(os.path.join(BASE_DIR, f))
57 | data_batchlist.append(data)
58 | label_batchlist.append(label)
59 |
60 | data_batches = np.concatenate(data_batchlist, 0)
61 | labels_batches = np.concatenate(label_batchlist, 0)
62 |
63 | test_area = "Area_5"
64 | train_idxs, test_idxs = [], []
65 | for i, room_name in enumerate(room_filelist):
66 | if test_area in room_name:
67 | test_idxs.append(i)
68 | else:
69 | train_idxs.append(i)
70 |
71 | if self.train:
72 | self.points = data_batches[train_idxs, ...]
73 | self.labels = labels_batches[train_idxs, ...]
74 | else:
75 | self.points = data_batches[test_idxs, ...]
76 | self.labels = labels_batches[test_idxs, ...]
77 |
78 | def __getitem__(self, idx):
79 | pt_idxs = np.arange(0, self.num_points)
80 | np.random.shuffle(pt_idxs)
81 |
82 | current_points = torch.from_numpy(self.points[idx, pt_idxs].copy()).float()
83 | current_labels = torch.from_numpy(self.labels[idx, pt_idxs].copy()).long()
84 |
85 | return current_points, current_labels
86 |
87 | def __len__(self):
88 | return int(self.points.shape[0] * self.data_precent)
89 |
90 | def set_num_points(self, pts):
91 | self.num_points = pts
92 |
93 | def randomize(self):
94 | pass
95 |
96 |
97 | if __name__ == "__main__":
98 |     dset = Indoor3DSemSeg(16, train=True)
99 | print(dset[0])
100 | print(len(dset))
101 | dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)
102 | for i, data in enumerate(dloader, 0):
103 | inputs, labels = data
104 | if i == len(dloader) - 1:
105 | print(inputs.size())
106 |
--------------------------------------------------------------------------------
/code/models/pointnet2/data/ModelNet40Loader.py:
--------------------------------------------------------------------------------
1 | import os
2 | import os.path as osp
3 | import shlex
4 | import shutil
5 | import subprocess
6 |
7 | import lmdb
8 | import msgpack_numpy
9 | import numpy as np
10 | import torch
11 | import torch.utils.data as data
12 | import tqdm
13 |
14 | BASE_DIR = os.path.dirname(os.path.abspath(__file__))
15 |
16 |
17 | def pc_normalize(pc):
18 | l = pc.shape[0]
19 | centroid = np.mean(pc, axis=0)
20 | pc = pc - centroid
21 | m = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
22 | pc = pc / m
23 | return pc
24 |
25 |
26 | class ModelNet40Cls(data.Dataset):
27 | def __init__(self, num_points, transforms=None, train=True, download=True):
28 | super().__init__()
29 |
30 | self.transforms = transforms
31 |
32 | self.set_num_points(num_points)
33 | self._cache = os.path.join(BASE_DIR, "modelnet40_normal_resampled_cache")
34 |
35 | if not osp.exists(self._cache):
36 | self.folder = "modelnet40_normal_resampled"
37 | self.data_dir = os.path.join(BASE_DIR, self.folder)
38 | self.url = (
39 | "https://shapenet.cs.stanford.edu/media/modelnet40_normal_resampled.zip"
40 | )
41 |
42 | if download and not os.path.exists(self.data_dir):
43 | zipfile = os.path.join(BASE_DIR, os.path.basename(self.url))
44 | subprocess.check_call(
45 | shlex.split("curl {} -o {}".format(self.url, zipfile))
46 | )
47 |
48 | subprocess.check_call(
49 | shlex.split("unzip {} -d {}".format(zipfile, BASE_DIR))
50 | )
51 |
52 | subprocess.check_call(shlex.split("rm {}".format(zipfile)))
53 |
54 | self.train = train
55 | self.set_num_points(num_points)
56 |
57 | self.catfile = os.path.join(self.data_dir, "modelnet40_shape_names.txt")
58 | self.cat = [line.rstrip() for line in open(self.catfile)]
59 | self.classes = dict(zip(self.cat, range(len(self.cat))))
60 |
61 | os.makedirs(self._cache)
62 |
63 |             print("Converting to LMDB for faster dataloading while training")
64 | for split in ["train", "test"]:
65 | if split == "train":
66 | shape_ids = [
67 | line.rstrip()
68 | for line in open(
69 | os.path.join(self.data_dir, "modelnet40_train.txt")
70 | )
71 | ]
72 | else:
73 | shape_ids = [
74 | line.rstrip()
75 | for line in open(
76 | os.path.join(self.data_dir, "modelnet40_test.txt")
77 | )
78 | ]
79 |
80 | shape_names = ["_".join(x.split("_")[0:-1]) for x in shape_ids]
81 | # list of (shape_name, shape_txt_file_path) tuple
82 | self.datapath = [
83 | (
84 | shape_names[i],
85 | os.path.join(self.data_dir, shape_names[i], shape_ids[i])
86 | + ".txt",
87 | )
88 | for i in range(len(shape_ids))
89 | ]
90 |
91 | with lmdb.open(
92 | osp.join(self._cache, split), map_size=1 << 36
93 | ) as lmdb_env, lmdb_env.begin(write=True) as txn:
94 | for i in tqdm.trange(len(self.datapath)):
95 | fn = self.datapath[i]
96 | point_set = np.loadtxt(fn[1], delimiter=",").astype(np.float32)
97 | cls = self.classes[self.datapath[i][0]]
98 | cls = int(cls)
99 |
100 | txn.put(
101 | str(i).encode(),
102 | msgpack_numpy.packb(
103 | dict(pc=point_set, lbl=cls), use_bin_type=True
104 | ),
105 | )
106 |
107 | shutil.rmtree(self.data_dir)
108 |
109 | self._lmdb_file = osp.join(self._cache, "train" if train else "test")
110 | with lmdb.open(self._lmdb_file, map_size=1 << 36) as lmdb_env:
111 | self._len = lmdb_env.stat()["entries"]
112 |
113 | self._lmdb_env = None
114 |
115 | def __getitem__(self, idx):
116 | if self._lmdb_env is None:
117 | self._lmdb_env = lmdb.open(
118 | self._lmdb_file, map_size=1 << 36, readonly=True, lock=False
119 | )
120 |
121 | with self._lmdb_env.begin(buffers=True) as txn:
122 | ele = msgpack_numpy.unpackb(txn.get(str(idx).encode()), raw=False)
123 |
124 | point_set = ele["pc"]
125 |
126 | pt_idxs = np.arange(0, self.num_points)
127 | np.random.shuffle(pt_idxs)
128 |
129 | point_set = point_set[pt_idxs, :]
130 | point_set[:, 0:3] = pc_normalize(point_set[:, 0:3])
131 |
132 | if self.transforms is not None:
133 | point_set = self.transforms(point_set)
134 |
135 | return point_set, ele["lbl"]
136 |
137 | def __len__(self):
138 | return self._len
139 |
140 | def set_num_points(self, pts):
141 | self.num_points = min(int(1e4), pts)
142 |
143 |
144 | if __name__ == "__main__":
145 | from torchvision import transforms
146 | import data_utils as d_utils
147 |
148 | transforms = transforms.Compose(
149 | [
150 | d_utils.PointcloudToTensor(),
151 | d_utils.PointcloudRotate(axis=np.array([1, 0, 0])),
152 | d_utils.PointcloudScale(),
153 | d_utils.PointcloudTranslate(),
154 | d_utils.PointcloudJitter(),
155 | ]
156 | )
157 | dset = ModelNet40Cls(16, train=True, transforms=transforms)
158 | print(dset[0][0])
159 | print(dset[0][1])
160 | print(len(dset))
161 | dloader = torch.utils.data.DataLoader(dset, batch_size=32, shuffle=True)
162 |
--------------------------------------------------------------------------------
/code/models/pointnet2/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .Indoor3DSemSegLoader import Indoor3DSemSeg
2 | from .ModelNet40Loader import ModelNet40Cls
3 |
--------------------------------------------------------------------------------
/code/models/pointnet2/data/data_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
4 |
5 | def angle_axis(angle, axis):
6 |     # type: (float, np.ndarray) -> torch.Tensor
7 |     r"""Returns a 3x3 rotation matrix that performs a rotation around axis by angle
8 |
9 | Parameters
10 | ----------
11 | angle : float
12 | Angle to rotate by
13 | axis: np.ndarray
14 | Axis to rotate about
15 |
16 | Returns
17 | -------
18 | torch.Tensor
19 | 3x3 rotation matrix
20 | """
21 | u = axis / np.linalg.norm(axis)
22 | cosval, sinval = np.cos(angle), np.sin(angle)
23 |
24 | # yapf: disable
25 | cross_prod_mat = np.array([[0.0, -u[2], u[1]],
26 | [u[2], 0.0, -u[0]],
27 | [-u[1], u[0], 0.0]])
28 |
29 | R = torch.from_numpy(
30 | cosval * np.eye(3)
31 | + sinval * cross_prod_mat
32 | + (1.0 - cosval) * np.outer(u, u)
33 | )
34 | # yapf: enable
35 | return R.float()
36 |
37 |
38 | class PointcloudScale(object):
39 | def __init__(self, lo=0.8, hi=1.25):
40 | self.lo, self.hi = lo, hi
41 |
42 | def __call__(self, points):
43 | scaler = np.random.uniform(self.lo, self.hi)
44 | points[:, 0:3] *= scaler
45 | return points
46 |
47 |
48 | class PointcloudRotate(object):
49 | def __init__(self, axis=np.array([0.0, 1.0, 0.0])):
50 | self.axis = axis
51 |
52 | def __call__(self, points):
53 | rotation_angle = np.random.uniform() * 2 * np.pi
54 | rotation_matrix = angle_axis(rotation_angle, self.axis)
55 |
56 | normals = points.size(1) > 3
57 | if not normals:
58 | return torch.matmul(points, rotation_matrix.t())
59 | else:
60 | pc_xyz = points[:, 0:3]
61 | pc_normals = points[:, 3:]
62 | points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
63 | points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
64 |
65 | return points
66 |
67 |
68 | class PointcloudRotatePerturbation(object):
69 | def __init__(self, angle_sigma=0.06, angle_clip=0.18):
70 | self.angle_sigma, self.angle_clip = angle_sigma, angle_clip
71 |
72 | def _get_angles(self):
73 | angles = np.clip(
74 | self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip
75 | )
76 |
77 | return angles
78 |
79 | def __call__(self, points):
80 | angles = self._get_angles()
81 | Rx = angle_axis(angles[0], np.array([1.0, 0.0, 0.0]))
82 | Ry = angle_axis(angles[1], np.array([0.0, 1.0, 0.0]))
83 | Rz = angle_axis(angles[2], np.array([0.0, 0.0, 1.0]))
84 |
85 | rotation_matrix = torch.matmul(torch.matmul(Rz, Ry), Rx)
86 |
87 | normals = points.size(1) > 3
88 | if not normals:
89 | return torch.matmul(points, rotation_matrix.t())
90 | else:
91 | pc_xyz = points[:, 0:3]
92 | pc_normals = points[:, 3:]
93 | points[:, 0:3] = torch.matmul(pc_xyz, rotation_matrix.t())
94 | points[:, 3:] = torch.matmul(pc_normals, rotation_matrix.t())
95 |
96 | return points
97 |
98 |
99 | class PointcloudJitter(object):
100 | def __init__(self, std=0.01, clip=0.05):
101 | self.std, self.clip = std, clip
102 |
103 | def __call__(self, points):
104 | jittered_data = (
105 | points.new(points.size(0), 3)
106 | .normal_(mean=0.0, std=self.std)
107 | .clamp_(-self.clip, self.clip)
108 | )
109 | points[:, 0:3] += jittered_data
110 | return points
111 |
112 |
113 | class PointcloudTranslate(object):
114 | def __init__(self, translate_range=0.1):
115 | self.translate_range = translate_range
116 |
117 | def __call__(self, points):
118 | translation = np.random.uniform(-self.translate_range, self.translate_range)
119 | points[:, 0:3] += translation
120 | return points
121 |
122 |
123 | class PointcloudToTensor(object):
124 | def __call__(self, points):
125 | return torch.from_numpy(points).float()
126 |
127 |
128 | class PointcloudRandomInputDropout(object):
129 | def __init__(self, max_dropout_ratio=0.875):
130 | assert max_dropout_ratio >= 0 and max_dropout_ratio < 1
131 | self.max_dropout_ratio = max_dropout_ratio
132 |
133 | def __call__(self, points):
134 | pc = points.numpy()
135 |
136 | dropout_ratio = np.random.random() * self.max_dropout_ratio # 0~0.875
137 | drop_idx = np.where(np.random.random((pc.shape[0])) <= dropout_ratio)[0]
138 | if len(drop_idx) > 0:
139 | pc[drop_idx] = pc[0] # set to the first point
140 |
141 | return torch.from_numpy(pc).float()
142 |
--------------------------------------------------------------------------------
/code/models/pointnet2/models/__init__.py:
--------------------------------------------------------------------------------
1 | from pointnet2.models.pointnet2_msg_cls import PointNet2ClassificationMSG
2 | from pointnet2.models.pointnet2_msg_sem import PointNet2SemSegMSG
3 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
4 | from pointnet2.models.pointnet2_ssg_sem import PointNet2SemSegSSG
5 |
--------------------------------------------------------------------------------
/code/models/pointnet2/models/pointnet2_msg_cls.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | from pointnet2_ops.pointnet2_modules import PointnetSAModule, PointnetSAModuleMSG
6 |
7 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
8 |
9 |
10 | class PointNet2ClassificationMSG(PointNet2ClassificationSSG):
11 | def _build_model(self):
12 | super()._build_model()
13 |
14 | self.SA_modules = nn.ModuleList()
15 | self.SA_modules.append(
16 | PointnetSAModuleMSG(
17 | npoint=512,
18 | radii=[0.1, 0.2, 0.4],
19 | nsamples=[16, 32, 128],
20 | mlps=[[3, 32, 32, 64], [3, 64, 64, 128], [3, 64, 96, 128]],
21 | use_xyz=self.hparams["model.use_xyz"],
22 | )
23 | )
24 |
25 | input_channels = 64 + 128 + 128
26 | self.SA_modules.append(
27 | PointnetSAModuleMSG(
28 | npoint=128,
29 | radii=[0.2, 0.4, 0.8],
30 | nsamples=[32, 64, 128],
31 | mlps=[
32 | [input_channels, 64, 64, 128],
33 | [input_channels, 128, 128, 256],
34 | [input_channels, 128, 128, 256],
35 | ],
36 | use_xyz=self.hparams["model.use_xyz"],
37 | )
38 | )
39 | self.SA_modules.append(
40 | PointnetSAModule(
41 | mlp=[128 + 256 + 256, 256, 512, 1024],
42 | use_xyz=self.hparams["model.use_xyz"],
43 | )
44 | )
45 |
--------------------------------------------------------------------------------
/code/models/pointnet2/models/pointnet2_msg_sem.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | import pytorch_lightning as pl
4 | import torch
5 | import torch.nn as nn
6 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModuleMSG
7 |
8 | from pointnet2.models.pointnet2_ssg_sem import PointNet2SemSegSSG
9 |
10 |
11 | class PointNet2SemSegMSG(PointNet2SemSegSSG):
12 | def _build_model(self):
13 | self.SA_modules = nn.ModuleList()
14 | c_in = 6
15 | self.SA_modules.append(
16 | PointnetSAModuleMSG(
17 | npoint=1024,
18 | radii=[0.05, 0.1],
19 | nsamples=[16, 32],
20 | mlps=[[c_in, 16, 16, 32], [c_in, 32, 32, 64]],
21 | use_xyz=self.hparams["model.use_xyz"],
22 | )
23 | )
24 | c_out_0 = 32 + 64
25 |
26 | c_in = c_out_0
27 | self.SA_modules.append(
28 | PointnetSAModuleMSG(
29 | npoint=256,
30 | radii=[0.1, 0.2],
31 | nsamples=[16, 32],
32 | mlps=[[c_in, 64, 64, 128], [c_in, 64, 96, 128]],
33 | use_xyz=self.hparams["model.use_xyz"],
34 | )
35 | )
36 | c_out_1 = 128 + 128
37 |
38 | c_in = c_out_1
39 | self.SA_modules.append(
40 | PointnetSAModuleMSG(
41 | npoint=64,
42 | radii=[0.2, 0.4],
43 | nsamples=[16, 32],
44 | mlps=[[c_in, 128, 196, 256], [c_in, 128, 196, 256]],
45 | use_xyz=self.hparams["model.use_xyz"],
46 | )
47 | )
48 | c_out_2 = 256 + 256
49 |
50 | c_in = c_out_2
51 | self.SA_modules.append(
52 | PointnetSAModuleMSG(
53 | npoint=16,
54 | radii=[0.4, 0.8],
55 | nsamples=[16, 32],
56 | mlps=[[c_in, 256, 256, 512], [c_in, 256, 384, 512]],
57 | use_xyz=self.hparams["model.use_xyz"],
58 | )
59 | )
60 | c_out_3 = 512 + 512
61 |
62 | self.FP_modules = nn.ModuleList()
63 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 6, 128, 128]))
64 | self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_0, 256, 256]))
65 | self.FP_modules.append(PointnetFPModule(mlp=[512 + c_out_1, 512, 512]))
66 | self.FP_modules.append(PointnetFPModule(mlp=[c_out_3 + c_out_2, 512, 512]))
67 |
68 | self.fc_lyaer = nn.Sequential(
69 | nn.Conv1d(128, 128, kernel_size=1, bias=False),
70 | nn.BatchNorm1d(128),
71 | nn.ReLU(True),
72 | nn.Dropout(0.5),
73 | nn.Conv1d(128, 13, kernel_size=1),
74 | )
75 |
--------------------------------------------------------------------------------
/code/models/pointnet2/models/pointnet2_ssg_cls.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import torch.optim.lr_scheduler as lr_sched
6 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
7 | from torch.utils.data import DataLoader, DistributedSampler
8 | from torchvision import transforms
9 |
10 | import pointnet2.data.data_utils as d_utils
11 | from pointnet2.data.ModelNet40Loader import ModelNet40Cls
12 |
13 |
14 | def set_bn_momentum_default(bn_momentum):
15 | def fn(m):
16 | if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
17 | m.momentum = bn_momentum
18 |
19 | return fn
20 |
21 |
22 | class BNMomentumScheduler(lr_sched.LambdaLR):
23 | def __init__(self, model, bn_lambda, last_epoch=-1, setter=set_bn_momentum_default):
24 | if not isinstance(model, nn.Module):
25 | raise RuntimeError(
26 | "Class '{}' is not a PyTorch nn Module".format(type(model)._name_)
27 | )
28 |
29 | self.model = model
30 | self.setter = setter
31 | self.lmbd = bn_lambda
32 |
33 | self.step(last_epoch + 1)
34 | self.last_epoch = last_epoch
35 |
36 | def step(self, epoch=None):
37 | if epoch is None:
38 | epoch = self.last_epoch + 1
39 |
40 | self.last_epoch = epoch
41 | self.model.apply(self.setter(self.lmbd(epoch)))
42 |
43 | def state_dict(self):
44 | return dict(last_epoch=self.last_epoch)
45 |
46 | def load_state_dict(self, state):
47 | self.last_epoch = state["last_epoch"]
48 | self.step(self.last_epoch)
49 |
50 |
51 | lr_clip = 1e-5
52 | bnm_clip = 1e-2
53 |
54 |
55 | class PointNet2ClassificationSSG(pl.LightningModule):
56 | def __init__(self, hparams):
57 | super().__init__()
58 |
59 | self.hparams = hparams
60 |
61 | self._build_model()
62 |
63 | def _build_model(self):
64 | self.SA_modules = nn.ModuleList()
65 | self.SA_modules.append(
66 | PointnetSAModule(
67 | npoint=512,
68 | radius=0.2,
69 | nsample=64,
70 | mlp=[3, 64, 64, 128],
71 | use_xyz=self.hparams["model.use_xyz"],
72 | )
73 | )
74 | self.SA_modules.append(
75 | PointnetSAModule(
76 | npoint=128,
77 | radius=0.4,
78 | nsample=64,
79 | mlp=[128, 128, 128, 256],
80 | use_xyz=self.hparams["model.use_xyz"],
81 | )
82 | )
83 | self.SA_modules.append(
84 | PointnetSAModule(
85 | mlp=[256, 256, 512, 1024], use_xyz=self.hparams["model.use_xyz"]
86 | )
87 | )
88 |
89 | self.fc_layer = nn.Sequential(
90 | nn.Linear(1024, 512, bias=False),
91 | nn.BatchNorm1d(512),
92 | nn.ReLU(True),
93 | nn.Linear(512, 256, bias=False),
94 | nn.BatchNorm1d(256),
95 | nn.ReLU(True),
96 | nn.Dropout(0.5),
97 | nn.Linear(256, 40),
98 | )
99 |
100 | def _break_up_pc(self, pc):
101 | xyz = pc[..., 0:3].contiguous()
102 | features = pc[..., 3:].transpose(1, 2).contiguous() if pc.size(-1) > 3 else None
103 |
104 | return xyz, features
105 |
106 | def forward(self, pointcloud):
107 | r"""
108 | Forward pass of the network
109 |
110 | Parameters
111 | ----------
112 | pointcloud: Variable(torch.cuda.FloatTensor)
113 | (B, N, 3 + input_channels) tensor
114 | Point cloud to run predictions on
115 | Each point in the point-cloud MUST
116 | be formatted as (x, y, z, features...)
117 | """
118 | xyz, features = self._break_up_pc(pointcloud)
119 |
120 | for module in self.SA_modules:
121 | xyz, features = module(xyz, features)
122 |
123 | return self.fc_layer(features.squeeze(-1))
124 |
125 | def training_step(self, batch, batch_idx):
126 | pc, labels = batch
127 |
128 | logits = self.forward(pc)
129 | loss = F.cross_entropy(logits, labels)
130 | with torch.no_grad():
131 | acc = (torch.argmax(logits, dim=1) == labels).float().mean()
132 |
133 | log = dict(train_loss=loss, train_acc=acc)
134 |
135 | return dict(loss=loss, log=log, progress_bar=dict(train_acc=acc))
136 |
137 | def validation_step(self, batch, batch_idx):
138 | pc, labels = batch
139 |
140 | logits = self.forward(pc)
141 | loss = F.cross_entropy(logits, labels)
142 | acc = (torch.argmax(logits, dim=1) == labels).float().mean()
143 |
144 | return dict(val_loss=loss, val_acc=acc)
145 |
146 | def validation_end(self, outputs):
147 | reduced_outputs = {}
148 | for k in outputs[0]:
149 | for o in outputs:
150 | reduced_outputs[k] = reduced_outputs.get(k, []) + [o[k]]
151 |
152 | for k in reduced_outputs:
153 | reduced_outputs[k] = torch.stack(reduced_outputs[k]).mean()
154 |
155 | reduced_outputs.update(
156 | dict(log=reduced_outputs.copy(), progress_bar=reduced_outputs.copy())
157 | )
158 |
159 | return reduced_outputs
160 |
161 | def configure_optimizers(self):
162 | lr_lbmd = lambda _: max(
163 | self.hparams["optimizer.lr_decay"]
164 | ** (
165 | int(
166 | self.global_step
167 | * self.hparams["batch_size"]
168 | / self.hparams["optimizer.decay_step"]
169 | )
170 | ),
171 | lr_clip / self.hparams["optimizer.lr"],
172 | )
173 | bn_lbmd = lambda _: max(
174 | self.hparams["optimizer.bn_momentum"]
175 | * self.hparams["optimizer.bnm_decay"]
176 | ** (
177 | int(
178 | self.global_step
179 | * self.hparams["batch_size"]
180 | / self.hparams["optimizer.decay_step"]
181 | )
182 | ),
183 | bnm_clip,
184 | )
185 |
186 | optimizer = torch.optim.Adam(
187 | self.parameters(),
188 | lr=self.hparams["optimizer.lr"],
189 | weight_decay=self.hparams["optimizer.weight_decay"],
190 | )
191 | lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd)
192 | bnm_scheduler = BNMomentumScheduler(self, bn_lambda=bn_lbmd)
193 |
194 | return [optimizer], [lr_scheduler, bnm_scheduler]
195 |
196 | def prepare_data(self):
197 | train_transforms = transforms.Compose(
198 | [
199 | d_utils.PointcloudToTensor(),
200 | d_utils.PointcloudScale(),
201 | d_utils.PointcloudRotate(),
202 | d_utils.PointcloudRotatePerturbation(),
203 | d_utils.PointcloudTranslate(),
204 | d_utils.PointcloudJitter(),
205 | d_utils.PointcloudRandomInputDropout(),
206 | ]
207 | )
208 |
209 | self.train_dset = ModelNet40Cls(
210 | self.hparams["num_points"], transforms=train_transforms, train=True
211 | )
212 | self.val_dset = ModelNet40Cls(
213 | self.hparams["num_points"], transforms=None, train=False
214 | )
215 |
216 | def _build_dataloader(self, dset, mode):
217 | return DataLoader(
218 | dset,
219 | batch_size=self.hparams["batch_size"],
220 | shuffle=mode == "train",
221 | num_workers=4,
222 | pin_memory=True,
223 | drop_last=mode == "train",
224 | )
225 |
226 | def train_dataloader(self):
227 | return self._build_dataloader(self.train_dset, mode="train")
228 |
229 | def val_dataloader(self):
230 | return self._build_dataloader(self.val_dset, mode="val")
231 |
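This model is normally constructed by hydra (see train.py below), which flattens the YAML config into the dot-keyed dict the class reads from self.hparams. A minimal forward-pass sketch with hand-written, purely illustrative values for those keys:

    import torch

    hparams = {
        "model.use_xyz": True,       # keys match the lookups in this file; values are illustrative
        "num_points": 4096,
        "batch_size": 32,
        "optimizer.lr": 1e-3,
        "optimizer.lr_decay": 0.7,
        "optimizer.decay_step": 2e4,
        "optimizer.weight_decay": 0.0,
        "optimizer.bn_momentum": 0.5,
        "optimizer.bnm_decay": 0.5,
    }
    model = PointNet2ClassificationSSG(hparams).cuda()  # the pointnet2_ops kernels are CUDA-only
    cloud = torch.randn(2, 4096, 6).cuda()              # (B, N, 3 xyz + 3 extra feature channels)
    logits = model(cloud)                               # (B, 40) class scores
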
--------------------------------------------------------------------------------
/code/models/pointnet2/models/pointnet2_ssg_sem.py:
--------------------------------------------------------------------------------
1 | import pytorch_lightning as pl
2 | import torch
3 | import torch.nn as nn
4 | from pointnet2_ops.pointnet2_modules import PointnetFPModule, PointnetSAModule
5 | from torch.utils.data import DataLoader
6 |
7 | from pointnet2.data import Indoor3DSemSeg
8 | from pointnet2.models.pointnet2_ssg_cls import PointNet2ClassificationSSG
9 |
10 |
11 | class PointNet2SemSegSSG(PointNet2ClassificationSSG):
12 | def _build_model(self):
13 | self.SA_modules = nn.ModuleList()
14 | self.SA_modules.append(
15 | PointnetSAModule(
16 | npoint=1024,
17 | radius=0.1,
18 | nsample=32,
19 | mlp=[6, 32, 32, 64],
20 | use_xyz=self.hparams["model.use_xyz"],
21 | )
22 | )
23 | self.SA_modules.append(
24 | PointnetSAModule(
25 | npoint=256,
26 | radius=0.2,
27 | nsample=32,
28 | mlp=[64, 64, 64, 128],
29 | use_xyz=self.hparams["model.use_xyz"],
30 | )
31 | )
32 | self.SA_modules.append(
33 | PointnetSAModule(
34 | npoint=64,
35 | radius=0.4,
36 | nsample=32,
37 | mlp=[128, 128, 128, 256],
38 | use_xyz=self.hparams["model.use_xyz"],
39 | )
40 | )
41 | self.SA_modules.append(
42 | PointnetSAModule(
43 | npoint=16,
44 | radius=0.8,
45 | nsample=32,
46 | mlp=[256, 256, 256, 512],
47 | use_xyz=self.hparams["model.use_xyz"],
48 | )
49 | )
50 |
51 | self.FP_modules = nn.ModuleList()
52 | self.FP_modules.append(PointnetFPModule(mlp=[128 + 6, 128, 128, 128]))
53 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 64, 256, 128]))
54 | self.FP_modules.append(PointnetFPModule(mlp=[256 + 128, 256, 256]))
55 | self.FP_modules.append(PointnetFPModule(mlp=[512 + 256, 256, 256]))
56 |
57 | self.fc_lyaer = nn.Sequential(
58 | nn.Conv1d(128, 128, kernel_size=1, bias=False),
59 | nn.BatchNorm1d(128),
60 | nn.ReLU(True),
61 | nn.Dropout(0.5),
62 | nn.Conv1d(128, 13, kernel_size=1),
63 | )
64 |
65 | def forward(self, pointcloud):
66 | r"""
67 | Forward pass of the network
68 |
69 | Parameters
70 | ----------
71 | pointcloud: Variable(torch.cuda.FloatTensor)
72 | (B, N, 3 + input_channels) tensor
73 | Point cloud to run predictions on
74 | Each point in the point-cloud MUST
75 | be formatted as (x, y, z, features...)
76 | """
77 | xyz, features = self._break_up_pc(pointcloud)
78 |
79 | l_xyz, l_features = [xyz], [features]
80 | for i in range(len(self.SA_modules)):
81 | li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
82 | l_xyz.append(li_xyz)
83 | l_features.append(li_features)
84 |
85 | for i in range(-1, -(len(self.FP_modules) + 1), -1):
86 | l_features[i - 1] = self.FP_modules[i](
87 | l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
88 | )
89 |
90 | return self.fc_lyaer(l_features[0])
91 |
92 | def prepare_data(self):
93 | self.train_dset = Indoor3DSemSeg(self.hparams["num_points"], train=True)
94 | self.val_dset = Indoor3DSemSeg(self.hparams["num_points"], train=False)
95 |
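A forward-pass shape sketch for this segmentation variant. Only "model.use_xyz" is read during construction here; the real hparams come from hydra as in train.py, and the batch below is illustrative:

    import torch

    model = PointNet2SemSegSSG({"model.use_xyz": True}).cuda()  # only key read by _build_model
    cloud = torch.randn(2, 4096, 9).cuda()   # (B, N, 3 xyz + 6 extra channels)
    logits = model(cloud)                    # (B, 13, N) per-point class logits
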
--------------------------------------------------------------------------------
/code/models/pointnet2/train.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import hydra
4 | import omegaconf
5 | import pytorch_lightning as pl
6 | import torch
7 | from pytorch_lightning.loggers import TensorBoardLogger
8 |
9 | torch.backends.cudnn.enabled = True
10 | torch.backends.cudnn.benchmark = True
11 |
12 |
13 | def hydra_params_to_dotdict(hparams):
14 | def _to_dot_dict(cfg):
15 | res = {}
16 | for k, v in cfg.items():
17 | if isinstance(v, omegaconf.DictConfig):
18 | res.update(
19 | {k + "." + subk: subv for subk, subv in _to_dot_dict(v).items()}
20 | )
21 | elif isinstance(v, (str, int, float, bool)):
22 | res[k] = v
23 |
24 | return res
25 |
26 | return _to_dot_dict(hparams)
27 |
28 |
29 | @hydra.main("config/config.yaml")
30 | def main(cfg):
31 | model = hydra.utils.instantiate(cfg.task_model, hydra_params_to_dotdict(cfg))
32 |
33 | early_stop_callback = pl.callbacks.EarlyStopping(patience=5)
34 | checkpoint_callback = pl.callbacks.ModelCheckpoint(
35 | monitor="val_acc",
36 | mode="max",
37 | save_top_k=2,
38 | filepath=os.path.join(
39 | cfg.task_model.name, "{epoch}-{val_loss:.2f}-{val_acc:.3f}"
40 | ),
41 | verbose=True,
42 | )
43 | trainer = pl.Trainer(
44 | gpus=list(cfg.gpus),
45 | max_epochs=cfg.epochs,
46 | early_stop_callback=early_stop_callback,
47 | checkpoint_callback=checkpoint_callback,
48 | distributed_backend=cfg.distrib_backend,
49 | )
50 |
51 | trainer.fit(model)
52 |
53 |
54 | if __name__ == "__main__":
55 | main()
56 |
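train.py is driven by hydra: hydra_params_to_dotdict() flattens the nested config into the dot-keyed hparams dict consumed by the models above, and cfg.task_model selects which class to instantiate. Assuming the config groups keep their upstream pointnet2 names (config/task/*.yaml, config/model/*.yaml), a launch would look roughly like

    python train.py task=cls model=ssg gpus=[0]

but the exact group names and defaults are set in config/config.yaml, so this command is a sketch rather than a verified invocation.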
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/__init__.py:
--------------------------------------------------------------------------------
1 | import pointnet2_ops.pointnet2_modules
2 | import pointnet2_ops.pointnet2_utils
3 | from pointnet2_ops._version import __version__
4 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/ball_query.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
5 | const int nsample);
6 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/cuda_utils.h:
--------------------------------------------------------------------------------
1 | #ifndef _CUDA_UTILS_H
2 | #define _CUDA_UTILS_H
3 |
4 | #include <ATen/ATen.h>
5 | #include <ATen/cuda/CUDAContext.h>
6 | #include <cmath>
7 |
8 | #include <cuda.h>
9 | #include <cuda_runtime.h>
10 |
11 | #include <vector>
12 |
13 | #define TOTAL_THREADS 512
14 |
15 | inline int opt_n_threads(int work_size) {
16 | const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
17 |
18 | return max(min(1 << pow_2, TOTAL_THREADS), 1);
19 | }
20 |
21 | inline dim3 opt_block_config(int x, int y) {
22 | const int x_threads = opt_n_threads(x);
23 | const int y_threads =
24 | max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
25 | dim3 block_config(x_threads, y_threads, 1);
26 |
27 | return block_config;
28 | }
29 |
30 | #define CUDA_CHECK_ERRORS() \
31 | do { \
32 | cudaError_t err = cudaGetLastError(); \
33 | if (cudaSuccess != err) { \
34 | fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n", \
35 | cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
36 | __FILE__); \
37 | exit(-1); \
38 | } \
39 | } while (0)
40 |
41 | #endif
42 |
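A worked example of the heuristic above: opt_n_threads(1000) computes pow_2 = int(log(1000) / log(2)) = 9 and returns max(min(1 << 9, 512), 1) = 512, while opt_n_threads(100) gives pow_2 = 6 and returns 64. In other words, the launch uses the largest power-of-two thread count not exceeding the work size, clamped to the range [1, TOTAL_THREADS].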
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/group_points.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor group_points(at::Tensor points, at::Tensor idx);
5 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
6 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/interpolate.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 |
3 | #include <torch/extension.h>
4 | #include <vector>
5 |
6 | std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
7 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
8 | at::Tensor weight);
9 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
10 | at::Tensor weight, const int m);
11 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/sampling.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <torch/extension.h>
3 |
4 | at::Tensor gather_points(at::Tensor points, at::Tensor idx);
5 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
6 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
7 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/include/utils.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include <ATen/cuda/CUDAContext.h>
3 | #include <torch/extension.h>
4 |
5 | #define CHECK_CUDA(x) \
6 | do { \
7 | AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \
8 | } while (0)
9 |
10 | #define CHECK_CONTIGUOUS(x) \
11 | do { \
12 | AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
13 | } while (0)
14 |
15 | #define CHECK_IS_INT(x) \
16 | do { \
17 | AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \
18 | #x " must be an int tensor"); \
19 | } while (0)
20 |
21 | #define CHECK_IS_FLOAT(x) \
22 | do { \
23 | AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
24 | #x " must be a float tensor"); \
25 | } while (0)
26 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/ball_query.cpp:
--------------------------------------------------------------------------------
1 | #include "ball_query.h"
2 | #include "utils.h"
3 |
4 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
5 | int nsample, const float *new_xyz,
6 | const float *xyz, int *idx);
7 |
8 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
9 | const int nsample) {
10 | CHECK_CONTIGUOUS(new_xyz);
11 | CHECK_CONTIGUOUS(xyz);
12 | CHECK_IS_FLOAT(new_xyz);
13 | CHECK_IS_FLOAT(xyz);
14 |
15 | if (new_xyz.is_cuda()) {
16 | CHECK_CUDA(xyz);
17 | }
18 |
19 | at::Tensor idx =
20 | torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
21 | at::device(new_xyz.device()).dtype(at::ScalarType::Int));
22 |
23 | if (new_xyz.is_cuda()) {
24 | query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
25 | radius, nsample, new_xyz.data_ptr<float>(),
26 | xyz.data_ptr<float>(), idx.data_ptr<int>());
27 | } else {
28 | AT_ASSERT(false, "CPU not supported");
29 | }
30 |
31 | return idx;
32 | }
33 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/ball_query_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 |
5 | #include "cuda_utils.h"
6 |
7 | // input: new_xyz(b, m, 3) xyz(b, n, 3)
8 | // output: idx(b, m, nsample)
9 | __global__ void query_ball_point_kernel(int b, int n, int m, float radius,
10 | int nsample,
11 | const float *__restrict__ new_xyz,
12 | const float *__restrict__ xyz,
13 | int *__restrict__ idx) {
14 | int batch_index = blockIdx.x;
15 | xyz += batch_index * n * 3;
16 | new_xyz += batch_index * m * 3;
17 | idx += m * nsample * batch_index;
18 |
19 | int index = threadIdx.x;
20 | int stride = blockDim.x;
21 |
22 | float radius2 = radius * radius;
23 | for (int j = index; j < m; j += stride) {
24 | float new_x = new_xyz[j * 3 + 0];
25 | float new_y = new_xyz[j * 3 + 1];
26 | float new_z = new_xyz[j * 3 + 2];
27 | for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
28 | float x = xyz[k * 3 + 0];
29 | float y = xyz[k * 3 + 1];
30 | float z = xyz[k * 3 + 2];
31 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
32 | (new_z - z) * (new_z - z);
33 | if (d2 < radius2) {
34 | if (cnt == 0) {
35 | for (int l = 0; l < nsample; ++l) {
36 | idx[j * nsample + l] = k;
37 | }
38 | }
39 | idx[j * nsample + cnt] = k;
40 | ++cnt;
41 | }
42 | }
43 | }
44 | }
45 |
46 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
47 | int nsample, const float *new_xyz,
48 | const float *xyz, int *idx) {
49 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
50 | query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
51 | b, n, m, radius, nsample, new_xyz, xyz, idx);
52 |
53 | CUDA_CHECK_ERRORS();
54 | }
55 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/bindings.cpp:
--------------------------------------------------------------------------------
1 | #include "ball_query.h"
2 | #include "group_points.h"
3 | #include "interpolate.h"
4 | #include "sampling.h"
5 |
6 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
7 | m.def("gather_points", &gather_points);
8 | m.def("gather_points_grad", &gather_points_grad);
9 | m.def("furthest_point_sampling", &furthest_point_sampling);
10 |
11 | m.def("three_nn", &three_nn);
12 | m.def("three_interpolate", &three_interpolate);
13 | m.def("three_interpolate_grad", &three_interpolate_grad);
14 |
15 | m.def("ball_query", &ball_query);
16 |
17 | m.def("group_points", &group_points);
18 | m.def("group_points_grad", &group_points_grad);
19 | }
20 |
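These bindings compile into a single torch extension; the Python model code does not import them directly but goes through the wrappers in pointnet2_ops (e.g. pointnet2_utils.furthest_point_sample, gather_operation, three_nn and three_interpolate, as used by pointnet2_modules.py below), which presumably pair each forward function with its *_grad counterpart for autograd.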
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/group_points.cpp:
--------------------------------------------------------------------------------
1 | #include "group_points.h"
2 | #include "utils.h"
3 |
4 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
5 | const float *points, const int *idx,
6 | float *out);
7 |
8 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
9 | int nsample, const float *grad_out,
10 | const int *idx, float *grad_points);
11 |
12 | at::Tensor group_points(at::Tensor points, at::Tensor idx) {
13 | CHECK_CONTIGUOUS(points);
14 | CHECK_CONTIGUOUS(idx);
15 | CHECK_IS_FLOAT(points);
16 | CHECK_IS_INT(idx);
17 |
18 | if (points.is_cuda()) {
19 | CHECK_CUDA(idx);
20 | }
21 |
22 | at::Tensor output =
23 | torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
24 | at::device(points.device()).dtype(at::ScalarType::Float));
25 |
26 | if (points.is_cuda()) {
27 | group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
28 | idx.size(1), idx.size(2),
29 | points.data_ptr<float>(), idx.data_ptr<int>(),
30 | output.data_ptr<float>());
31 | } else {
32 | AT_ASSERT(false, "CPU not supported");
33 | }
34 |
35 | return output;
36 | }
37 |
38 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
39 | CHECK_CONTIGUOUS(grad_out);
40 | CHECK_CONTIGUOUS(idx);
41 | CHECK_IS_FLOAT(grad_out);
42 | CHECK_IS_INT(idx);
43 |
44 | if (grad_out.is_cuda()) {
45 | CHECK_CUDA(idx);
46 | }
47 |
48 | at::Tensor output =
49 | torch::zeros({grad_out.size(0), grad_out.size(1), n},
50 | at::device(grad_out.device()).dtype(at::ScalarType::Float));
51 |
52 | if (grad_out.is_cuda()) {
53 | group_points_grad_kernel_wrapper(
54 | grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
55 | grad_out.data_ptr<float>(), idx.data_ptr<int>(),
56 | output.data_ptr<float>());
57 | } else {
58 | AT_ASSERT(false, "CPU not supported");
59 | }
60 |
61 | return output;
62 | }
63 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/group_points_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 |
4 | #include "cuda_utils.h"
5 |
6 | // input: points(b, c, n) idx(b, npoints, nsample)
7 | // output: out(b, c, npoints, nsample)
8 | __global__ void group_points_kernel(int b, int c, int n, int npoints,
9 | int nsample,
10 | const float *__restrict__ points,
11 | const int *__restrict__ idx,
12 | float *__restrict__ out) {
13 | int batch_index = blockIdx.x;
14 | points += batch_index * n * c;
15 | idx += batch_index * npoints * nsample;
16 | out += batch_index * npoints * nsample * c;
17 |
18 | const int index = threadIdx.y * blockDim.x + threadIdx.x;
19 | const int stride = blockDim.y * blockDim.x;
20 | for (int i = index; i < c * npoints; i += stride) {
21 | const int l = i / npoints;
22 | const int j = i % npoints;
23 | for (int k = 0; k < nsample; ++k) {
24 | int ii = idx[j * nsample + k];
25 | out[(l * npoints + j) * nsample + k] = points[l * n + ii];
26 | }
27 | }
28 | }
29 |
30 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
31 | const float *points, const int *idx,
32 | float *out) {
33 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
34 |
35 | group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
36 | b, c, n, npoints, nsample, points, idx, out);
37 |
38 | CUDA_CHECK_ERRORS();
39 | }
40 |
41 | // input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
42 | // output: grad_points(b, c, n)
43 | __global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
44 | int nsample,
45 | const float *__restrict__ grad_out,
46 | const int *__restrict__ idx,
47 | float *__restrict__ grad_points) {
48 | int batch_index = blockIdx.x;
49 | grad_out += batch_index * npoints * nsample * c;
50 | idx += batch_index * npoints * nsample;
51 | grad_points += batch_index * n * c;
52 |
53 | const int index = threadIdx.y * blockDim.x + threadIdx.x;
54 | const int stride = blockDim.y * blockDim.x;
55 | for (int i = index; i < c * npoints; i += stride) {
56 | const int l = i / npoints;
57 | const int j = i % npoints;
58 | for (int k = 0; k < nsample; ++k) {
59 | int ii = idx[j * nsample + k];
60 | atomicAdd(grad_points + l * n + ii,
61 | grad_out[(l * npoints + j) * nsample + k]);
62 | }
63 | }
64 | }
65 |
66 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
67 | int nsample, const float *grad_out,
68 | const int *idx, float *grad_points) {
69 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
70 |
71 | group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
72 | b, c, n, npoints, nsample, grad_out, idx, grad_points);
73 |
74 | CUDA_CHECK_ERRORS();
75 | }
76 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/interpolate.cpp:
--------------------------------------------------------------------------------
1 | #include "interpolate.h"
2 | #include "utils.h"
3 |
4 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
5 | const float *known, float *dist2, int *idx);
6 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
7 | const float *points, const int *idx,
8 | const float *weight, float *out);
9 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
10 | const float *grad_out,
11 | const int *idx, const float *weight,
12 | float *grad_points);
13 |
14 | std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
15 | CHECK_CONTIGUOUS(unknowns);
16 | CHECK_CONTIGUOUS(knows);
17 | CHECK_IS_FLOAT(unknowns);
18 | CHECK_IS_FLOAT(knows);
19 |
20 | if (unknowns.is_cuda()) {
21 | CHECK_CUDA(knows);
22 | }
23 |
24 | at::Tensor idx =
25 | torch::zeros({unknowns.size(0), unknowns.size(1), 3},
26 | at::device(unknowns.device()).dtype(at::ScalarType::Int));
27 | at::Tensor dist2 =
28 | torch::zeros({unknowns.size(0), unknowns.size(1), 3},
29 | at::device(unknowns.device()).dtype(at::ScalarType::Float));
30 |
31 | if (unknowns.is_cuda()) {
32 | three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
33 | unknowns.data_ptr<float>(), knows.data_ptr<float>(),
34 | dist2.data_ptr<float>(), idx.data_ptr<int>());
35 | } else {
36 | AT_ASSERT(false, "CPU not supported");
37 | }
38 |
39 | return {dist2, idx};
40 | }
41 |
42 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
43 | at::Tensor weight) {
44 | CHECK_CONTIGUOUS(points);
45 | CHECK_CONTIGUOUS(idx);
46 | CHECK_CONTIGUOUS(weight);
47 | CHECK_IS_FLOAT(points);
48 | CHECK_IS_INT(idx);
49 | CHECK_IS_FLOAT(weight);
50 |
51 | if (points.is_cuda()) {
52 | CHECK_CUDA(idx);
53 | CHECK_CUDA(weight);
54 | }
55 |
56 | at::Tensor output =
57 | torch::zeros({points.size(0), points.size(1), idx.size(1)},
58 | at::device(points.device()).dtype(at::ScalarType::Float));
59 |
60 | if (points.is_cuda()) {
61 | three_interpolate_kernel_wrapper(
62 | points.size(0), points.size(1), points.size(2), idx.size(1),
63 | points.data_ptr<float>(), idx.data_ptr<int>(), weight.data_ptr<float>(),
64 | output.data_ptr<float>());
65 | } else {
66 | AT_ASSERT(false, "CPU not supported");
67 | }
68 |
69 | return output;
70 | }
71 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
72 | at::Tensor weight, const int m) {
73 | CHECK_CONTIGUOUS(grad_out);
74 | CHECK_CONTIGUOUS(idx);
75 | CHECK_CONTIGUOUS(weight);
76 | CHECK_IS_FLOAT(grad_out);
77 | CHECK_IS_INT(idx);
78 | CHECK_IS_FLOAT(weight);
79 |
80 | if (grad_out.is_cuda()) {
81 | CHECK_CUDA(idx);
82 | CHECK_CUDA(weight);
83 | }
84 |
85 | at::Tensor output =
86 | torch::zeros({grad_out.size(0), grad_out.size(1), m},
87 | at::device(grad_out.device()).dtype(at::ScalarType::Float));
88 |
89 | if (grad_out.is_cuda()) {
90 | three_interpolate_grad_kernel_wrapper(
91 | grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
92 | grad_out.data_ptr<float>(), idx.data_ptr<int>(),
93 | weight.data_ptr<float>(), output.data_ptr<float>());
94 | } else {
95 | AT_ASSERT(false, "CPU not supported");
96 | }
97 |
98 | return output;
99 | }
100 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/interpolate_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <math.h>
2 | #include <stdio.h>
3 | #include <stdlib.h>
4 |
5 | #include "cuda_utils.h"
6 |
7 | // input: unknown(b, n, 3) known(b, m, 3)
8 | // output: dist2(b, n, 3), idx(b, n, 3)
9 | __global__ void three_nn_kernel(int b, int n, int m,
10 | const float *__restrict__ unknown,
11 | const float *__restrict__ known,
12 | float *__restrict__ dist2,
13 | int *__restrict__ idx) {
14 | int batch_index = blockIdx.x;
15 | unknown += batch_index * n * 3;
16 | known += batch_index * m * 3;
17 | dist2 += batch_index * n * 3;
18 | idx += batch_index * n * 3;
19 |
20 | int index = threadIdx.x;
21 | int stride = blockDim.x;
22 | for (int j = index; j < n; j += stride) {
23 | float ux = unknown[j * 3 + 0];
24 | float uy = unknown[j * 3 + 1];
25 | float uz = unknown[j * 3 + 2];
26 |
27 | double best1 = 1e40, best2 = 1e40, best3 = 1e40;
28 | int besti1 = 0, besti2 = 0, besti3 = 0;
29 | for (int k = 0; k < m; ++k) {
30 | float x = known[k * 3 + 0];
31 | float y = known[k * 3 + 1];
32 | float z = known[k * 3 + 2];
33 | float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
34 | if (d < best1) {
35 | best3 = best2;
36 | besti3 = besti2;
37 | best2 = best1;
38 | besti2 = besti1;
39 | best1 = d;
40 | besti1 = k;
41 | } else if (d < best2) {
42 | best3 = best2;
43 | besti3 = besti2;
44 | best2 = d;
45 | besti2 = k;
46 | } else if (d < best3) {
47 | best3 = d;
48 | besti3 = k;
49 | }
50 | }
51 | dist2[j * 3 + 0] = best1;
52 | dist2[j * 3 + 1] = best2;
53 | dist2[j * 3 + 2] = best3;
54 |
55 | idx[j * 3 + 0] = besti1;
56 | idx[j * 3 + 1] = besti2;
57 | idx[j * 3 + 2] = besti3;
58 | }
59 | }
60 |
61 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
62 | const float *known, float *dist2, int *idx) {
63 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
64 | three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
65 | dist2, idx);
66 |
67 | CUDA_CHECK_ERRORS();
68 | }
69 |
70 | // input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
71 | // output: out(b, c, n)
72 | __global__ void three_interpolate_kernel(int b, int c, int m, int n,
73 | const float *__restrict__ points,
74 | const int *__restrict__ idx,
75 | const float *__restrict__ weight,
76 | float *__restrict__ out) {
77 | int batch_index = blockIdx.x;
78 | points += batch_index * m * c;
79 |
80 | idx += batch_index * n * 3;
81 | weight += batch_index * n * 3;
82 |
83 | out += batch_index * n * c;
84 |
85 | const int index = threadIdx.y * blockDim.x + threadIdx.x;
86 | const int stride = blockDim.y * blockDim.x;
87 | for (int i = index; i < c * n; i += stride) {
88 | const int l = i / n;
89 | const int j = i % n;
90 | float w1 = weight[j * 3 + 0];
91 | float w2 = weight[j * 3 + 1];
92 | float w3 = weight[j * 3 + 2];
93 |
94 | int i1 = idx[j * 3 + 0];
95 | int i2 = idx[j * 3 + 1];
96 | int i3 = idx[j * 3 + 2];
97 |
98 | out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
99 | points[l * m + i3] * w3;
100 | }
101 | }
102 |
103 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
104 | const float *points, const int *idx,
105 | const float *weight, float *out) {
106 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
107 | three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
108 | b, c, m, n, points, idx, weight, out);
109 |
110 | CUDA_CHECK_ERRORS();
111 | }
112 |
113 | // input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
114 | // output: grad_points(b, c, m)
115 |
116 | __global__ void three_interpolate_grad_kernel(
117 | int b, int c, int n, int m, const float *__restrict__ grad_out,
118 | const int *__restrict__ idx, const float *__restrict__ weight,
119 | float *__restrict__ grad_points) {
120 | int batch_index = blockIdx.x;
121 | grad_out += batch_index * n * c;
122 | idx += batch_index * n * 3;
123 | weight += batch_index * n * 3;
124 | grad_points += batch_index * m * c;
125 |
126 | const int index = threadIdx.y * blockDim.x + threadIdx.x;
127 | const int stride = blockDim.y * blockDim.x;
128 | for (int i = index; i < c * n; i += stride) {
129 | const int l = i / n;
130 | const int j = i % n;
131 | float w1 = weight[j * 3 + 0];
132 | float w2 = weight[j * 3 + 1];
133 | float w3 = weight[j * 3 + 2];
134 |
135 | int i1 = idx[j * 3 + 0];
136 | int i2 = idx[j * 3 + 1];
137 | int i3 = idx[j * 3 + 2];
138 |
139 | atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
140 | atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
141 | atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
142 | }
143 | }
144 |
145 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
146 | const float *grad_out,
147 | const int *idx, const float *weight,
148 | float *grad_points) {
149 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
150 | three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
151 | b, c, n, m, grad_out, idx, weight, grad_points);
152 |
153 | CUDA_CHECK_ERRORS();
154 | }
155 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/sampling.cpp:
--------------------------------------------------------------------------------
1 | #include "sampling.h"
2 | #include "utils.h"
3 |
4 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
5 | const float *points, const int *idx,
6 | float *out);
7 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
8 | const float *grad_out, const int *idx,
9 | float *grad_points);
10 |
11 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
12 | const float *dataset, float *temp,
13 | int *idxs);
14 |
15 | at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
16 | CHECK_CONTIGUOUS(points);
17 | CHECK_CONTIGUOUS(idx);
18 | CHECK_IS_FLOAT(points);
19 | CHECK_IS_INT(idx);
20 |
21 | if (points.is_cuda()) {
22 | CHECK_CUDA(idx);
23 | }
24 |
25 | at::Tensor output =
26 | torch::zeros({points.size(0), points.size(1), idx.size(1)},
27 | at::device(points.device()).dtype(at::ScalarType::Float));
28 |
29 | if (points.is_cuda()) {
30 | gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
31 | idx.size(1), points.data_ptr<float>(),
32 | idx.data_ptr<int>(), output.data_ptr<float>());
33 | } else {
34 | AT_ASSERT(false, "CPU not supported");
35 | }
36 |
37 | return output;
38 | }
39 |
40 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
41 | const int n) {
42 | CHECK_CONTIGUOUS(grad_out);
43 | CHECK_CONTIGUOUS(idx);
44 | CHECK_IS_FLOAT(grad_out);
45 | CHECK_IS_INT(idx);
46 |
47 | if (grad_out.is_cuda()) {
48 | CHECK_CUDA(idx);
49 | }
50 |
51 | at::Tensor output =
52 | torch::zeros({grad_out.size(0), grad_out.size(1), n},
53 | at::device(grad_out.device()).dtype(at::ScalarType::Float));
54 |
55 | if (grad_out.is_cuda()) {
56 | gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
57 | idx.size(1), grad_out.data_ptr<float>(),
58 | idx.data_ptr<int>(),
59 | output.data_ptr<float>());
60 | } else {
61 | AT_ASSERT(false, "CPU not supported");
62 | }
63 |
64 | return output;
65 | }
66 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
67 | CHECK_CONTIGUOUS(points);
68 | CHECK_IS_FLOAT(points);
69 |
70 | at::Tensor output =
71 | torch::zeros({points.size(0), nsamples},
72 | at::device(points.device()).dtype(at::ScalarType::Int));
73 |
74 | at::Tensor tmp =
75 | torch::full({points.size(0), points.size(1)}, 1e10,
76 | at::device(points.device()).dtype(at::ScalarType::Float));
77 |
78 | if (points.is_cuda()) {
79 | furthest_point_sampling_kernel_wrapper(
80 | points.size(0), points.size(1), nsamples, points.data_ptr<float>(),
81 | tmp.data_ptr<float>(), output.data_ptr<int>());
82 | } else {
83 | AT_ASSERT(false, "CPU not supported");
84 | }
85 |
86 | return output;
87 | }
88 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_ext-src/src/sampling_gpu.cu:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 | #include <stdlib.h>
3 |
4 | #include "cuda_utils.h"
5 |
6 | // input: points(b, c, n) idx(b, m)
7 | // output: out(b, c, m)
8 | __global__ void gather_points_kernel(int b, int c, int n, int m,
9 | const float *__restrict__ points,
10 | const int *__restrict__ idx,
11 | float *__restrict__ out) {
12 | for (int i = blockIdx.x; i < b; i += gridDim.x) {
13 | for (int l = blockIdx.y; l < c; l += gridDim.y) {
14 | for (int j = threadIdx.x; j < m; j += blockDim.x) {
15 | int a = idx[i * m + j];
16 | out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
17 | }
18 | }
19 | }
20 | }
21 |
22 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
23 | const float *points, const int *idx,
24 | float *out) {
25 | gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
26 | at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
27 | points, idx, out);
28 |
29 | CUDA_CHECK_ERRORS();
30 | }
31 |
32 | // input: grad_out(b, c, m) idx(b, m)
33 | // output: grad_points(b, c, n)
34 | __global__ void gather_points_grad_kernel(int b, int c, int n, int m,
35 | const float *__restrict__ grad_out,
36 | const int *__restrict__ idx,
37 | float *__restrict__ grad_points) {
38 | for (int i = blockIdx.x; i < b; i += gridDim.x) {
39 | for (int l = blockIdx.y; l < c; l += gridDim.y) {
40 | for (int j = threadIdx.x; j < m; j += blockDim.x) {
41 | int a = idx[i * m + j];
42 | atomicAdd(grad_points + (i * c + l) * n + a,
43 | grad_out[(i * c + l) * m + j]);
44 | }
45 | }
46 | }
47 | }
48 |
49 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
50 | const float *grad_out, const int *idx,
51 | float *grad_points) {
52 | gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
53 | at::cuda::getCurrentCUDAStream()>>>(
54 | b, c, n, npoints, grad_out, idx, grad_points);
55 |
56 | CUDA_CHECK_ERRORS();
57 | }
58 |
59 | __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
60 | int idx1, int idx2) {
61 | const float v1 = dists[idx1], v2 = dists[idx2];
62 | const int i1 = dists_i[idx1], i2 = dists_i[idx2];
63 | dists[idx1] = max(v1, v2);
64 | dists_i[idx1] = v2 > v1 ? i2 : i1;
65 | }
66 |
67 | // Input dataset: (b, n, 3), tmp: (b, n)
68 | // Output idxs (b, m)
69 | template <unsigned int block_size>
70 | __global__ void furthest_point_sampling_kernel(
71 | int b, int n, int m, const float *__restrict__ dataset,
72 | float *__restrict__ temp, int *__restrict__ idxs) {
73 | if (m <= 0) return;
74 | __shared__ float dists[block_size];
75 | __shared__ int dists_i[block_size];
76 |
77 | int batch_index = blockIdx.x;
78 | dataset += batch_index * n * 3;
79 | temp += batch_index * n;
80 | idxs += batch_index * m;
81 |
82 | int tid = threadIdx.x;
83 | const int stride = block_size;
84 |
85 | int old = 0;
86 | if (threadIdx.x == 0) idxs[0] = old;
87 |
88 | __syncthreads();
89 | for (int j = 1; j < m; j++) {
90 | int besti = 0;
91 | float best = -1;
92 | float x1 = dataset[old * 3 + 0];
93 | float y1 = dataset[old * 3 + 1];
94 | float z1 = dataset[old * 3 + 2];
95 | for (int k = tid; k < n; k += stride) {
96 | float x2, y2, z2;
97 | x2 = dataset[k * 3 + 0];
98 | y2 = dataset[k * 3 + 1];
99 | z2 = dataset[k * 3 + 2];
100 | float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
101 | if (mag <= 1e-3) continue;
102 |
103 | float d =
104 | (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
105 |
106 | float d2 = min(d, temp[k]);
107 | temp[k] = d2;
108 | besti = d2 > best ? k : besti;
109 | best = d2 > best ? d2 : best;
110 | }
111 | dists[tid] = best;
112 | dists_i[tid] = besti;
113 | __syncthreads();
114 |
115 | if (block_size >= 512) {
116 | if (tid < 256) {
117 | __update(dists, dists_i, tid, tid + 256);
118 | }
119 | __syncthreads();
120 | }
121 | if (block_size >= 256) {
122 | if (tid < 128) {
123 | __update(dists, dists_i, tid, tid + 128);
124 | }
125 | __syncthreads();
126 | }
127 | if (block_size >= 128) {
128 | if (tid < 64) {
129 | __update(dists, dists_i, tid, tid + 64);
130 | }
131 | __syncthreads();
132 | }
133 | if (block_size >= 64) {
134 | if (tid < 32) {
135 | __update(dists, dists_i, tid, tid + 32);
136 | }
137 | __syncthreads();
138 | }
139 | if (block_size >= 32) {
140 | if (tid < 16) {
141 | __update(dists, dists_i, tid, tid + 16);
142 | }
143 | __syncthreads();
144 | }
145 | if (block_size >= 16) {
146 | if (tid < 8) {
147 | __update(dists, dists_i, tid, tid + 8);
148 | }
149 | __syncthreads();
150 | }
151 | if (block_size >= 8) {
152 | if (tid < 4) {
153 | __update(dists, dists_i, tid, tid + 4);
154 | }
155 | __syncthreads();
156 | }
157 | if (block_size >= 4) {
158 | if (tid < 2) {
159 | __update(dists, dists_i, tid, tid + 2);
160 | }
161 | __syncthreads();
162 | }
163 | if (block_size >= 2) {
164 | if (tid < 1) {
165 | __update(dists, dists_i, tid, tid + 1);
166 | }
167 | __syncthreads();
168 | }
169 |
170 | old = dists_i[0];
171 | if (tid == 0) idxs[j] = old;
172 | }
173 | }
174 |
175 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
176 | const float *dataset, float *temp,
177 | int *idxs) {
178 | unsigned int n_threads = opt_n_threads(n);
179 |
180 | cudaStream_t stream = at::cuda::getCurrentCUDAStream();
181 |
182 | switch (n_threads) {
183 | case 512:
184 | furthest_point_sampling_kernel<512>
185 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
186 | break;
187 | case 256:
188 | furthest_point_sampling_kernel<256>
189 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
190 | break;
191 | case 128:
192 | furthest_point_sampling_kernel<128>
193 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
194 | break;
195 | case 64:
196 | furthest_point_sampling_kernel<64>
197 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
198 | break;
199 | case 32:
200 | furthest_point_sampling_kernel<32>
201 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
202 | break;
203 | case 16:
204 | furthest_point_sampling_kernel<16>
205 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
206 | break;
207 | case 8:
208 | furthest_point_sampling_kernel<8>
209 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
210 | break;
211 | case 4:
212 | furthest_point_sampling_kernel<4>
213 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
214 | break;
215 | case 2:
216 | furthest_point_sampling_kernel<2>
217 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
218 | break;
219 | case 1:
220 | furthest_point_sampling_kernel<1>
221 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
222 | break;
223 | default:
224 | furthest_point_sampling_kernel<512>
225 | <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
226 | }
227 |
228 | CUDA_CHECK_ERRORS();
229 | }
230 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/_version.py:
--------------------------------------------------------------------------------
1 | __version__ = "3.0.0"
2 |
--------------------------------------------------------------------------------
/code/models/pointnet2_ops/pointnet2_modules.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Tuple
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | from pointnet2_ops import pointnet2_utils
7 |
8 |
9 | def build_shared_mlp(mlp_spec: List[int], bn: bool = True):
10 | layers = []
11 | for i in range(1, len(mlp_spec)):
12 | layers.append(
13 | nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1, bias=not bn)
14 | )
15 | if bn:
16 | layers.append(nn.BatchNorm2d(mlp_spec[i]))
17 | layers.append(nn.ReLU(True))
18 |
19 | return nn.Sequential(*layers)
20 |
21 |
22 | class _PointnetSAModuleBase(nn.Module):
23 | def __init__(self):
24 | super(_PointnetSAModuleBase, self).__init__()
25 | self.npoint = None
26 | self.groupers = None
27 | self.mlps = None
28 |
29 | def forward(
30 | self, xyz: torch.Tensor, features: Optional[torch.Tensor]
31 | ) -> Tuple[torch.Tensor, torch.Tensor]:
32 | r"""
33 | Parameters
34 | ----------
35 | xyz : torch.Tensor
36 | (B, N, 3) tensor of the xyz coordinates of the features
37 | features : torch.Tensor
38 | (B, C, N) tensor of the descriptors of the features
39 |
40 | Returns
41 | -------
42 | new_xyz : torch.Tensor
43 | (B, npoint, 3) tensor of the new features' xyz
44 | new_features : torch.Tensor
45 | (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
46 | """
47 |
48 | new_features_list = []
49 |
50 | xyz_flipped = xyz.transpose(1, 2).contiguous()
51 | new_xyz = (
52 | pointnet2_utils.gather_operation(
53 | xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint)
54 | )
55 | .transpose(1, 2)
56 | .contiguous()
57 | if self.npoint is not None
58 | else None
59 | )
60 |
61 | for i in range(len(self.groupers)):
62 | new_features = self.groupers[i](
63 | xyz, new_xyz, features
64 | ) # (B, C, npoint, nsample)
65 |
66 | new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
67 | new_features = F.max_pool2d(
68 | new_features, kernel_size=[1, new_features.size(3)]
69 | ) # (B, mlp[-1], npoint, 1)
70 | new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
71 |
72 | new_features_list.append(new_features)
73 |
74 | return new_xyz, torch.cat(new_features_list, dim=1)
75 |
76 |
77 | class PointnetSAModuleMSG(_PointnetSAModuleBase):
78 | r"""Pointnet set abstrction layer with multiscale grouping
79 |
80 | Parameters
81 | ----------
82 | npoint : int
83 | Number of features
84 | radii : list of float32
85 | list of radii to group with
86 | nsamples : list of int32
87 | Number of samples in each ball query
88 | mlps : list of list of int32
89 | Spec of the pointnet before the global max_pool for each scale
90 | bn : bool
91 | Use batchnorm
92 | """
93 |
94 | def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True):
95 | # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None
96 | super(PointnetSAModuleMSG, self).__init__()
97 |
98 | assert len(radii) == len(nsamples) == len(mlps)
99 |
100 | self.npoint = npoint
101 | self.groupers = nn.ModuleList()
102 | self.mlps = nn.ModuleList()
103 | for i in range(len(radii)):
104 | radius = radii[i]
105 | nsample = nsamples[i]
106 | self.groupers.append(
107 | pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
108 | if npoint is not None
109 | else pointnet2_utils.GroupAll(use_xyz)
110 | )
111 | mlp_spec = mlps[i]
112 | if use_xyz:
113 | mlp_spec[0] += 3
114 |
115 | self.mlps.append(build_shared_mlp(mlp_spec, bn))
116 |
117 |
118 | class PointnetSAModule(PointnetSAModuleMSG):
119 | r"""Pointnet set abstrction layer
120 |
121 | Parameters
122 | ----------
123 | npoint : int
124 | Number of features
125 | radius : float
126 | Radius of ball
127 | nsample : int
128 | Number of samples in the ball query
129 | mlp : list
130 | Spec of the pointnet before the global max_pool
131 | bn : bool
132 | Use batchnorm
133 | """
134 |
135 | def __init__(
136 | self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True
137 | ):
138 | # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None
139 | super(PointnetSAModule, self).__init__(
140 | mlps=[mlp],
141 | npoint=npoint,
142 | radii=[radius],
143 | nsamples=[nsample],
144 | bn=bn,
145 | use_xyz=use_xyz,
146 | )
147 |
148 |
149 | class PointnetFPModule(nn.Module):
150 | r"""Propigates the features of one set to another
151 |
152 | Parameters
153 | ----------
154 | mlp : list
155 | Pointnet module parameters
156 | bn : bool
157 | Use batchnorm
158 | """
159 |
160 | def __init__(self, mlp, bn=True):
161 | # type: (PointnetFPModule, List[int], bool) -> None
162 | super(PointnetFPModule, self).__init__()
163 | self.mlp = build_shared_mlp(mlp, bn=bn)
164 |
165 | def forward(self, unknown, known, unknow_feats, known_feats):
166 | # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
167 | r"""
168 | Parameters
169 | ----------
170 | unknown : torch.Tensor
171 | (B, n, 3) tensor of the xyz positions of the unknown features
172 | known : torch.Tensor
173 | (B, m, 3) tensor of the xyz positions of the known features
174 | unknow_feats : torch.Tensor
175 | (B, C1, n) tensor of the features to be propagated to
176 | known_feats : torch.Tensor
177 | (B, C2, m) tensor of features to be propagated
178 |
179 | Returns
180 | -------
181 | new_features : torch.Tensor
182 | (B, mlp[-1], n) tensor of the features of the unknown features
183 | """
184 |
185 | if known is not None:
186 | dist, idx = pointnet2_utils.three_nn(unknown, known)
187 | dist_recip = 1.0 / (dist + 1e-8)
188 | norm = torch.sum(dist_recip, dim=2, keepdim=True)
189 | weight = dist_recip / norm
190 |
191 | interpolated_feats = pointnet2_utils.three_interpolate(
192 | known_feats, idx, weight
193 | )
194 | else:
195 | interpolated_feats = known_feats.expand(
196 | *(known_feats.size()[0:2] + [unknown.size(1)])
197 | )
198 |
199 | if unknow_feats is not None:
200 | new_features = torch.cat(
201 | [interpolated_feats, unknow_feats], dim=1
202 | ) # (B, C2 + C1, n)
203 | else:
204 | new_features = interpolated_feats
205 |
206 | new_features = new_features.unsqueeze(-1)
207 | new_features = self.mlp(new_features)
208 |
209 | return new_features.squeeze(-1)
210 |
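A minimal sketch of the set-abstraction module on random data, with shapes following the docstrings above (the underlying ops require a CUDA device; the sizes are illustrative):

    import torch
    from pointnet2_ops.pointnet2_modules import PointnetSAModule

    sa = PointnetSAModule(npoint=512, radius=0.2, nsample=64, mlp=[6, 64, 128]).cuda()
    xyz = torch.rand(2, 4096, 3).cuda()     # (B, N, 3) coordinates
    feats = torch.rand(2, 6, 4096).cuda()   # (B, C, N) per-point descriptors
    new_xyz, new_feats = sa(xyz, feats)     # (2, 512, 3), (2, 128, 512)
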
--------------------------------------------------------------------------------
/code/requirements.txt:
--------------------------------------------------------------------------------
1 | h5py
2 | imageio
3 | matplotlib
4 | opencv-python
5 | Pillow
6 | progressbar2
7 | requests
8 | scipy
9 | transforms3d
10 | torch
11 | torchvision
12 | tensorboardX
13 |
--------------------------------------------------------------------------------
/code/robots/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/.DS_Store
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/finger.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/finger.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/finger.stl.convex.stl:
--------------------------------------------------------------------------------
1 | solid AssimpScene
2 | facet normal -nan -nan -nan
3 | outer loop
4 | vertex 0.00869277 -0.000132643 0.0501662
5 | vertex 0.0104486 0.00258331 0.000146801
6 | vertex 0.01036 0.0264034 0.000154629
7 | endloop
8 | endfacet
9 |
10 | facet normal -nan -nan -nan
11 | outer loop
12 | vertex 0.00869277 -0.000132643 0.0501662
13 | vertex 0.01036 0.0264034 0.000154629
14 | vertex 0.0104005 0.0252534 0.0190366
15 | endloop
16 | endfacet
17 |
18 | facet normal -nan -nan -nan
19 | outer loop
20 | vertex 0.0104005 0.0252534 0.0190366
21 | vertex 0.00861608 0.0139887 0.0513279
22 | vertex 0.00869277 -0.000132643 0.0501662
23 | endloop
24 | endfacet
25 |
26 | facet normal -nan -nan -nan
27 | outer loop
28 | vertex 0.0104486 0.00258331 0.000146801
29 | vertex -0.0103872 0.00253418 0.000131696
30 | vertex -0.0104013 0.0263094 0.00016651
31 | endloop
32 | endfacet
33 |
34 | facet normal -nan -nan -nan
35 | outer loop
36 | vertex 0.0104486 0.00258331 0.000146801
37 | vertex -0.0104013 0.0263094 0.00016651
38 | vertex 0.01036 0.0264034 0.000154629
39 | endloop
40 | endfacet
41 |
42 | facet normal -nan -nan -nan
43 | outer loop
44 | vertex -0.0103889 0.0252203 0.0191876
45 | vertex -0.00527792 0.0142931 0.053849
46 | vertex 0.00583983 0.0142743 0.0538034
47 | endloop
48 | endfacet
49 |
50 | facet normal -nan -nan -nan
51 | outer loop
52 | vertex -0.0103889 0.0252203 0.0191876
53 | vertex 0.00583983 0.0142743 0.0538034
54 | vertex 0.0104005 0.0252534 0.0190366
55 | endloop
56 | endfacet
57 |
58 | facet normal -nan -nan -nan
59 | outer loop
60 | vertex -0.0103889 0.0252203 0.0191876
61 | vertex 0.0104005 0.0252534 0.0190366
62 | vertex 0.01036 0.0264034 0.000154629
63 | endloop
64 | endfacet
65 |
66 | facet normal -nan -nan -nan
67 | outer loop
68 | vertex -0.0103889 0.0252203 0.0191876
69 | vertex 0.01036 0.0264034 0.000154629
70 | vertex -0.0104013 0.0263094 0.00016651
71 | endloop
72 | endfacet
73 |
74 | facet normal -nan -nan -nan
75 | outer loop
76 | vertex -0.0103872 0.00253418 0.000131696
77 | vertex -0.00862294 -5.68019e-05 0.0509528
78 | vertex -0.00884117 0.0139176 0.0505894
79 | endloop
80 | endfacet
81 |
82 | facet normal -nan -nan -nan
83 | outer loop
84 | vertex -0.0103872 0.00253418 0.000131696
85 | vertex -0.00884117 0.0139176 0.0505894
86 | vertex -0.0103889 0.0252203 0.0191876
87 | endloop
88 | endfacet
89 |
90 | facet normal -nan -nan -nan
91 | outer loop
92 | vertex -0.0103889 0.0252203 0.0191876
93 | vertex -0.0104013 0.0263094 0.00016651
94 | vertex -0.0103872 0.00253418 0.000131696
95 | endloop
96 | endfacet
97 |
98 | facet normal -nan -nan -nan
99 | outer loop
100 | vertex 0.00613802 -2.06026e-05 0.0535776
101 | vertex 0.00869277 -0.000132643 0.0501662
102 | vertex 0.00861608 0.0139887 0.0513279
103 | endloop
104 | endfacet
105 |
106 | facet normal -nan -nan -nan
107 | outer loop
108 | vertex -0.00884117 0.0139176 0.0505894
109 | vertex -0.00527792 0.0142931 0.053849
110 | vertex -0.0103889 0.0252203 0.0191876
111 | endloop
112 | endfacet
113 |
114 | facet normal -nan -nan -nan
115 | outer loop
116 | vertex -0.00884117 0.0139176 0.0505894
117 | vertex -0.00862294 -5.68019e-05 0.0509528
118 | vertex -0.00548142 -9.11208e-05 0.0537247
119 | endloop
120 | endfacet
121 |
122 | facet normal -nan -nan -nan
123 | outer loop
124 | vertex -0.00884117 0.0139176 0.0505894
125 | vertex -0.00548142 -9.11208e-05 0.0537247
126 | vertex -0.00527792 0.0142931 0.053849
127 | endloop
128 | endfacet
129 |
130 | facet normal -nan -nan -nan
131 | outer loop
132 | vertex 0.00583983 0.0142743 0.0538034
133 | vertex -0.00527792 0.0142931 0.053849
134 | vertex -0.00548142 -9.11208e-05 0.0537247
135 | endloop
136 | endfacet
137 |
138 | facet normal -nan -nan -nan
139 | outer loop
140 | vertex 0.00583983 0.0142743 0.0538034
141 | vertex -0.00548142 -9.11208e-05 0.0537247
142 | vertex 0.00613802 -2.06026e-05 0.0535776
143 | endloop
144 | endfacet
145 |
146 | facet normal -nan -nan -nan
147 | outer loop
148 | vertex 0.00583983 0.0142743 0.0538034
149 | vertex 0.00613802 -2.06026e-05 0.0535776
150 | vertex 0.00861608 0.0139887 0.0513279
151 | endloop
152 | endfacet
153 |
154 | facet normal -nan -nan -nan
155 | outer loop
156 | vertex 0.00583983 0.0142743 0.0538034
157 | vertex 0.00861608 0.0139887 0.0513279
158 | vertex 0.0104005 0.0252534 0.0190366
159 | endloop
160 | endfacet
161 |
162 | facet normal -nan -nan -nan
163 | outer loop
164 | vertex -0.00873039 -2.35252e-05 0.0361648
165 | vertex 0.00869277 -0.000132643 0.0501662
166 | vertex 0.00613802 -2.06026e-05 0.0535776
167 | endloop
168 | endfacet
169 |
170 | facet normal -nan -nan -nan
171 | outer loop
172 | vertex -0.00873039 -2.35252e-05 0.0361648
173 | vertex 0.00613802 -2.06026e-05 0.0535776
174 | vertex -0.00548142 -9.11208e-05 0.0537247
175 | endloop
176 | endfacet
177 |
178 | facet normal -nan -nan -nan
179 | outer loop
180 | vertex -0.00548142 -9.11208e-05 0.0537247
181 | vertex -0.00862294 -5.68019e-05 0.0509528
182 | vertex -0.00873039 -2.35252e-05 0.0361648
183 | endloop
184 | endfacet
185 |
186 | facet normal -nan -nan -nan
187 | outer loop
188 | vertex -0.00873039 -2.35252e-05 0.0361648
189 | vertex -0.00862294 -5.68019e-05 0.0509528
190 | vertex -0.0103872 0.00253418 0.000131696
191 | endloop
192 | endfacet
193 |
194 | facet normal -nan -nan -nan
195 | outer loop
196 | vertex -0.00873039 -2.35252e-05 0.0361648
197 | vertex -0.0103872 0.00253418 0.000131696
198 | vertex 0.0104486 0.00258331 0.000146801
199 | endloop
200 | endfacet
201 |
202 | facet normal -nan -nan -nan
203 | outer loop
204 | vertex -0.00873039 -2.35252e-05 0.0361648
205 | vertex 0.0104486 0.00258331 0.000146801
206 | vertex 0.00869277 -0.000132643 0.0501662
207 | endloop
208 | endfacet
209 |
210 | endsolid AssimpScene
211 |
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/hand.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link0.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link0.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link1.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link2.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link3.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link4.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link4.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link5.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link5.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link6.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link6.stl
--------------------------------------------------------------------------------
/code/robots/franka_description/meshes/collision/link7.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/franka_description/meshes/collision/link7.stl
--------------------------------------------------------------------------------
/code/robots/panda.urdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/panda.urdf
--------------------------------------------------------------------------------
/code/robots/panda_gripper.urdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/robots/panda_gripper.urdf
--------------------------------------------------------------------------------
/code/scripts/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/code/scripts/.DS_Store
--------------------------------------------------------------------------------
/code/scripts/run_collect_SACVAL_pickup_TRAIN_display.sh:
--------------------------------------------------------------------------------
1 | xvfb-run -a python collect_data_SAC.py \
2 | --category Display \
3 | --primact_type pickup \
4 | --date "XXXX" \
5 | --out_dir ../data/2gripper_data/collect_data_RL_partialPC \
6 | --RL_mode test \
7 | --RL_exp_name SAC_XXXXX \
8 | --num_workers 5 \
9 | --state_add_pose \
10 | --use_RL_pred \
11 | --vis_gif \
12 | --fix_joint \
13 | --use_pam \
14 | --pam_rate 0.75 \
15 | --pam_exp_name finalexp-model_all_final-pulling-None-train_all_v1 \
16 | --pam_model_epoch 81 \
17 | --pam_model_version model_3d_legacy
18 |
19 |
20 | # This is the RL testing script (used for collecting data). Feel free to modify:
21 | # category, primact_type, collect_mode, date, out_dir: as introduced in the training script
22 | # RL_exp_name: to the RL experiment whose checkpoint you consider suitable
23 | # The default RL_load_date and model epoch for each category are listed in gen_cate_setting.py
24 | # Alternatively, set the RL_ckpt argument to specify the checkpoint files directly; this overrides RL_load_date and RL_exp_name (see the example below)
25 |
26 |
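27 | # Example (the checkpoint path below is hypothetical and only illustrates the idea): point RL_ckpt
28 | # at a specific checkpoint file to bypass RL_load_date and RL_exp_name:
29 | #
30 | # xvfb-run -a python collect_data_SAC.py \
31 | #     --category Display --primact_type pickup --RL_mode test \
32 | #     --RL_ckpt ../logs/SAC_XXXXX/ckpts/sac_checkpoint.pth \
33 | #     --num_workers 5 --state_add_pose --use_RL_pred --fix_joint --vis_gif
34 |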
--------------------------------------------------------------------------------
/code/scripts/run_collect_SAC_pickup_TRAIN_display.sh:
--------------------------------------------------------------------------------
1 | xvfb-run -a python collect_data_SAC.py \
2 | --category Display \
3 | --primact_type pickup \
4 | --date "default" \
5 | --out_dir ../data/2gripper_data/collect_data_RL_partialPC \
6 | --batch_size 512 \
7 | --num_workers 12 \
8 | --soft_q_lr 2e-4 \
9 | --policy_lr 2e-4 \
10 | --alpha_lr 2e-4 \
11 | --update_itr 1 \
12 | --state_add_pose \
13 | --vis_gif \
14 | --use_pam \
15 | --use_RL_pred \
16 | --fix_joint \
17 | --pam_exp_name finalexp-model_all_final-pulling-None-train_all_v1 \
18 | --pam_model_epoch 81 \
19 | --pam_model_version model_3d_legacy
20 |
21 | # This is the RL training script. Feel free to modify:
22 | # category: to other existing categories in the dataset (see the example below)
23 | # primact_type: to other primact types (this script is normally only used for pickup)
24 | # collect_mode: to other train/test/val splits; note that each category belongs to either the train/val or the test split, not both
25 | # date: to identify your experiment (or just leave it as the default)
26 | # out_dir: to other saving paths
27 |
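28 | # Example: the same command with only the category swapped (TrashCan also appears in the pickup
29 | # training split under stats/train_where2actPP_train_data_list_pickup.txt):
30 | #
31 | # xvfb-run -a python collect_data_SAC.py \
32 | #     --category TrashCan --primact_type pickup --date "default" \
33 | #     --out_dir ../data/2gripper_data/collect_data_RL_partialPC \
34 | #     --batch_size 512 --num_workers 12 --state_add_pose --use_pam --use_RL_pred --fix_joint \
35 | #     --pam_exp_name finalexp-model_all_final-pulling-None-train_all_v1 --pam_model_epoch 81 \
36 | #     --pam_model_version model_3d_legacy
37 |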
--------------------------------------------------------------------------------
/code/scripts/run_collect_random_push_TRAIN.sh:
--------------------------------------------------------------------------------
1 | xvfb-run -a python collect_data_main.py \
2 | --category Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
3 | --primact_type pushing \
4 | --out_dir ../data/AllTrainCat_push_TRAIN \
5 | --mode train \
6 | --target_part_state closed \
7 | --start_dist 0.45 \
8 | --final_dist 0.10 \
9 | --move_steps 3500 \
10 | --wait_steps 2000 \
11 | --num_processes 20 \
12 | --start_epoch 0 \
13 | --save_interval 10 \
14 | --no_gui \
15 |
--------------------------------------------------------------------------------
/code/scripts/run_eval_push_afterCA.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=3 xvfb-run -a python eval_sampleSucc_main.py \
2 | --categories Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2 \
3 | --primact_type pushing \
4 | --aff1_version model_aff_fir \
5 | --actor1_version model_actor_fir \
6 | --critic1_version model_critic_fir \
7 | --aff2_version model_aff_sec \
8 | --actor2_version model_actor_sec \
9 | --critic2_version model_critic_sec \
10 | --CA_path ../logs/finetune/exp-FINETUNE \
11 | --CA_eval_epoch 8-0 \
12 | --out_folder sampleSucc_CA_results \
13 | --val_data_dir ../data/PUSH_TEST_DATA \
14 | --val_buffer_max_num 500 \
15 | --coordinate_system cambase \
16 | --target_part_state closed \
17 | --start_dist 0.45 \
18 | --final_dist 0.10 \
19 | --move_steps 3500 \
20 | --wait_steps 2000 \
21 | --num_processes 10 \
22 | --z_dim 32 \
23 | --repeat_num 3 \
24 | --use_CA \
25 | --euler_threshold 10 \
26 | --task_threshold 30 \
27 | --num_ctpt1 10 \
28 | --num_ctpt2 10 \
29 | --rv1 100 \
30 | --rv2 100 \
31 | --num_pair1 10 \
32 | --aff_topk 0.1 \
33 | --critic_topk1 0.01 \
34 | --critic_topk 0.001 \
35 | --no_gui
36 |
37 |
38 |
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/code/scripts/run_eval_push_beforeCA.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=1 xvfb-run -a python eval_sampleSucc_main.py \
2 | --categories Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2 \
3 | --primact_type pushing \
4 | --aff1_version model_aff_fir \
5 | --aff1_path ../logs/affordance/exp-AFF1 \
6 | --aff1_eval_epoch 40 \
7 | --actor1_version model_actor_fir \
8 | --actor1_path ../logs/actor/exp-ACTOR1 \
9 | --actor1_eval_epoch 80 \
10 | --critic1_version model_critic_fir \
11 | --critic1_path ../logs/critic/exp-CRITIC1 \
12 | --critic1_eval_epoch 40 \
13 | --aff2_version model_aff_sec \
14 | --aff2_path ../logs/affordance/exp-AFF2 \
15 | --aff2_eval_epoch 40 \
16 | --actor2_version model_actor_sec \
17 | --actor2_path ../logs/actor/exp-ACTOR2 \
18 | --actor2_eval_epoch 80 \
19 | --critic2_version model_critic_sec \
20 | --critic2_path ../logs/critic/exp-CRITIC2 \
21 | --critic2_eval_epoch 20 \
22 | --out_folder sampleSucc_results \
23 | --val_data_dir ../data/PUSH_TEST_DATA \
24 | --val_buffer_max_num 500 \
25 | --coordinate_system cambase \
26 | --target_part_state closed \
27 | --start_dist 0.45 \
28 | --final_dist 0.10 \
29 | --move_steps 3500 \
30 | --wait_steps 2000 \
31 | --num_processes 10 \
32 | --z_dim 32 \
33 | --repeat_num 3 \
34 | --euler_threshold 10 \
35 | --task_threshold 30 \
36 | --num_ctpt1 10 \
37 | --num_ctpt2 10 \
38 | --rv1 100 \
39 | --rv2 100 \
40 | --num_pair1 10 \
41 | --aff_topk 0.1 \
42 | --critic_topk1 0.01 \
43 | --critic_topk 0.001 \
44 | --no_gui
45 |
46 |
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/code/scripts/run_train_CA_push.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 xvfb-run -a python train_collaborative_adaptation.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Bucket,Dishwasher,Display,Microwave,Bench,Bowl,Keyboard2 \
4 | --primact_type pushing \
5 | --aff1_version model_aff_fir \
6 | --aff1_path ../logs/affordance/exp-AFF1 \
7 | --aff1_eval_epoch 40 \
8 | --actor1_version model_actor_fir \
9 | --actor1_path ../logs/actor/exp-ACTOR1 \
10 | --actor1_eval_epoch 80 \
11 | --critic1_version model_critic_fir \
12 | --critic1_path ../logs/critic/exp-CRITIC1 \
13 | --critic1_eval_epoch 40 \
14 | --aff2_version model_aff_sec \
15 | --aff2_path ../logs/affordance/exp-AFF2 \
16 | --aff2_eval_epoch 40 \
17 | --actor2_version model_actor_sec \
18 | --actor2_path ../logs/actor/exp-ACTOR2 \
19 | --actor2_eval_epoch 80 \
20 | --critic2_version model_critic_sec \
21 | --critic2_path ../logs/critic/exp-CRITIC2 \
22 | --critic2_eval_epoch 20 \
23 | --out_folder collaborative_adaptation \
24 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
25 | --val_data_dir ../data/PUSH_TEST_DATA \
26 | --train_buffer_max_num 12000 \
27 | --val_buffer_max_num 500 \
28 | --feat_dim 128 \
29 | --batch_size 32 \
30 | --lr 0.001 \
31 | --lr_decay_every 50 \
32 | --critic1_lr 0.0001 \
33 | --critic1_lr_decay_every 300 \
34 | --aff_lr 0.0001 \
35 | --aff_lr_decay_every 300 \
36 | --loss1_weight 1.0 \
37 | --loss2_weight 0.0 \
38 | --num_ctpt1 10 \
39 | --num_ctpt2 10 \
40 | --num_pair1 10 \
41 | --rv1 100 \
42 | --rv2 100 \
43 | --z_dim 32 \
44 | --target_part_state closed \
45 | --start_dist 0.45 \
46 | --final_dist 0.10 \
47 | --move_steps 3500 \
48 | --wait_steps 2000 \
49 | --coordinate_system cambase \
50 | --euler_threshold 3 \
51 | --task_threshold 30 \
52 | --aff_topk 0.1 \
53 | --critic_topk 0.001 \
54 | --critic_topk1 0.01 \
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/code/scripts/run_train_actor_push_fir.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=4 python train_actor.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --model_version model_actor_fir \
5 | --primact_type pushing \
6 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
7 | --val_data_dir ../data/PUSH_TEST_DATA \
8 | --train_buffer_max_num 24000 \
9 | --val_buffer_max_num 1000 \
10 | --feat_dim 128 \
11 | --batch_size 32 \
12 | --lr 0.001 \
13 | --lr_decay_every 500 \
14 | --lbd_kl 0.02 \
15 | --lbd_dir 5.0 \
16 | --z_dim 32 \
17 | --coordinate_system cambase \
18 | --exchange_ctpts \
19 |
20 |
21 |
--------------------------------------------------------------------------------
/code/scripts/run_train_actor_push_sec.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=1 python train_actor.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --model_version model_actor_sec \
5 | --primact_type pushing \
6 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
7 | --val_data_dir ../data/PUSH_TEST_DATA \
8 | --train_buffer_max_num 24000 \
9 | --val_buffer_max_num 1000 \
10 | --feat_dim 128 \
11 | --batch_size 32 \
12 | --lr 0.001 \
13 | --lr_decay_every 500 \
14 | --lbd_kl 0.02 \
15 | --lbd_dir 5.0 \
16 | --z_dim 32 \
17 | --coordinate_system cambase \
18 | --exchange_ctpts \
19 |
20 |
21 |
--------------------------------------------------------------------------------
/code/scripts/run_train_affordance_push_fir.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python train_affordance.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --primact_type pushing \
5 | --model_version model_aff_fir \
6 | --actor_version model_actor_fir \
7 | --actor_path ../logs/actor/exp-ACTOR2_MODEL \
8 | --actor_eval_epoch 80 \
9 | --critic_version model_critic_fir \
10 | --critic_path ../logs/critic/exp-CRITIC2_MODEL \
11 | --critic_eval_epoch 40 \
12 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
13 | --val_data_dir ../data/PUSH_TEST_DATA \
14 | --train_buffer_max_num 24000 \
15 | --val_buffer_max_num 1000 \
16 | --feat_dim 128 \
17 | --batch_size 32 \
18 | --lr 0.001 \
19 | --lr_decay_every 500 \
20 | --z_dim 32 \
21 | --topk 100 \
22 | --coordinate_system cambase \
23 | --exchange_ctpts \
24 |
25 |
26 |
--------------------------------------------------------------------------------
/code/scripts/run_train_affordance_push_sec.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python train_affordance.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --primact_type pushing \
5 | --model_version model_aff_sec \
6 | --actor_version model_actor_sec \
7 | --actor_path ../logs/actor/exp-ACTOR1_MODEL \
8 | --actor_eval_epoch 80 \
9 | --critic_version model_critic_sec \
10 | --critic_path ../logs/critic/exp-CRITIC1_MODEL \
11 | --critic_eval_epoch 20 \
12 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
13 | --val_data_dir ../data/PUSH_TEST_DATA \
14 | --train_buffer_max_num 24000 \
15 | --val_buffer_max_num 1000 \
16 | --feat_dim 128 \
17 | --batch_size 32 \
18 | --lr 0.001 \
19 | --lr_decay_every 500 \
20 | --z_dim 32 \
21 | --topk 100 \
22 | --coordinate_system cambase \
23 | --exchange_ctpts \
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/code/scripts/run_train_critic_push_fir.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python train_critic.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --primact_type pushing \
5 | --model_version model_critic_fir \
6 | --aff_version model_aff_sec \
7 | --aff_path ../logs/affordance/exp-AFFORDANCE2_MODEL \
8 | --aff_eval_epoch 40 \
9 | --actor_version model_actor_sec \
10 | --actor_path ../logs/actor/exp-ACTOR2_MODEL \
11 | --actor_eval_epoch 80 \
12 | --critic_version model_critic_sec \
13 | --critic_path ../logs/critic/exp-CRITIC2_MODEL \
14 | --critic_eval_epoch 20 \
15 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
16 | --val_data_dir ../data/PUSH_TEST_DATA \
17 | --train_buffer_max_num 24000 \
18 | --val_buffer_max_num 1000 \
19 | --feat_dim 128 \
20 | --batch_size 32 \
21 | --lr 0.001 \
22 | --lr_decay_every 500 \
23 | --topk2 1000 \
24 | --succ_proportion 0.4 \
25 | --fail_proportion 0.8 \
26 | --coordinate_system cambase \
27 | --loss_type L1Loss \
28 | --exchange_ctpts \
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
--------------------------------------------------------------------------------
/code/scripts/run_train_critic_push_sec.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python train_critic.py \
2 | --exp_suffix xxx \
3 | --category_types Box,Dishwasher,Display,Microwave,Printer,Bench,Keyboard2 \
4 | --model_version model_critic_sec \
5 | --primact_type pushing \
6 | --offline_data_dir ../data/PUSH_TRAIN_DATA \
7 | --val_data_dir ../data/PUSH_TEST_DATA \
8 | --train_buffer_max_num 24000 \
9 | --val_buffer_max_num 1000 \
10 | --feat_dim 128 \
11 | --batch_size 32 \
12 | --lr 0.001 \
13 | --lr_decay_every 500 \
14 | --succ_proportion 0.4 \
15 | --fail_proportion 0.8 \
16 | --coordinate_system cambase \
17 | --exchange_ctpts \
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/images/teaser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/images/teaser.png
--------------------------------------------------------------------------------
/stats/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sxy7147/DualAfford/069434aef0a925708b4d2c978397120721088c5d/stats/.DS_Store
--------------------------------------------------------------------------------
/stats/train_where2actPP_test_data_list_pickup.txt:
--------------------------------------------------------------------------------
1 | 10895 Scissors
2 | 10902 Scissors
3 | 10907 Scissors
4 | 10960 Scissors
5 | 10962 Scissors
6 | 10968 Scissors
7 | 10973 Scissors
8 | 11020 Scissors
9 | 11021 Scissors
10 | 11028 Scissors
11 | 11029 Scissors
12 | 11036 Scissors
13 | 11040 Scissors
14 | 11047 Scissors
15 | 11052 Scissors
16 | 11077 Scissors
17 | 9748 Laptop
18 | 9912 Laptop
19 | 9960 Laptop
20 | 9968 Laptop
21 | 9992 Laptop
22 | 9996 Laptop
23 | 10040 Laptop
24 | 10090 Laptop
25 | 10098 Laptop
26 | 10101 Laptop
27 | 10108 Laptop
28 | 10125 Laptop
29 | 10211 Laptop
30 | 10213 Laptop
31 | 10239 Laptop
32 | 10243 Laptop
33 | 10248 Laptop
34 | 10270 Laptop
35 | 10280 Laptop
36 | 10289 Laptop
37 | 11888 Laptop
38 | 12115 Laptop
39 | 47645 Box
40 | 100129 Box
41 | 100141 Box
42 | 100154 Box
43 | 100162 Box
44 | 100174 Box
45 | 100189 Box
46 | 100191 Box
47 | 100194 Box
48 | 100197 Box
49 | 100202 Box
50 | 100214 Box
51 | 100221 Box
52 | 100224 Box
53 | 100234 Box
54 | 100243 Box
55 | 100247 Box
56 | 100426 Box
57 | 100658 Box
58 | 100664 Box
59 | 100671 Box
60 | 100676 Box
61 | 100685 Box
62 | 102373 Box
63 | 102377 Box
64 | 102379 Box
65 | 102456 Box
66 | 100015 KitchenPot
67 | 100017 KitchenPot
68 | 100021 KitchenPot
69 | 100023 KitchenPot
70 | 100025 KitchenPot
71 | 100032 KitchenPot
72 | 100040 KitchenPot
73 | 100045 KitchenPot
74 | 100054 KitchenPot
75 | 100055 KitchenPot
76 | 100056 KitchenPot
77 | 100057 KitchenPot
78 | 100058 KitchenPot
79 | 100060 KitchenPot
80 | 100613 KitchenPot
81 | 100623 KitchenPot
82 | 100693 KitchenPot
83 | 102080 KitchenPot
84 | 102085 KitchenPot
--------------------------------------------------------------------------------
/stats/train_where2actPP_train_data_list_pickup.txt:
--------------------------------------------------------------------------------
1 | 101377 TrashCan
2 | 101378 TrashCan
3 | 101384 TrashCan
4 | 102153 TrashCan
5 | 102155 TrashCan
6 | 102156 TrashCan
7 | 102158 TrashCan
8 | 102160 TrashCan
9 | 102163 TrashCan
10 | 102171 TrashCan
11 | 102181 TrashCan
12 | 102182 TrashCan
13 | 102186 TrashCan
14 | 102189 TrashCan
15 | 102192 TrashCan
16 | 102193 TrashCan
17 | 102194 TrashCan
18 | 102200 TrashCan
19 | 102201 TrashCan
20 | 102202 TrashCan
21 | 102210 TrashCan
22 | 102229 TrashCan
23 | 102254 TrashCan
24 | 102256 TrashCan
25 | 102257 TrashCan
26 | 102992 TrashCan
27 | 103007 TrashCan
28 | 103010 TrashCan
29 | 103634 TrashCan
30 | 103635 TrashCan
31 | 103639 TrashCan
32 | 103646 TrashCan
33 | 103647 TrashCan
34 | 11124 TrashCan
35 | 11229 TrashCan
36 | 11259 TrashCan
37 | 11279 TrashCan
38 | 11361 TrashCan
39 | 11818 TrashCan
40 | 12289 TrashCan
41 | 12477 TrashCan
42 | 12483 TrashCan
43 | 3392 Display
44 | 4094 Display
45 | 4529 Display
46 | 4533 Display
47 | 4541 Display
48 | 4552 Display
49 | 4563 Display
50 | 4566 Display
51 | 4574 Display
52 | 4576 Display
53 | 4586 Display
54 | 4589 Display
55 | 4590 Display
56 | 4594 Display
57 | 4628 Display
58 | 4681 Display
59 | 4853 Display
60 | 5050 Display
61 | 5088 Display
62 | 5103 Display
63 | 5306 Display
64 | 5477 Display
65 | 100431 Bucket
66 | 100435 Bucket
67 | 100438 Bucket
68 | 100439 Bucket
69 | 100443 Bucket
70 | 100448 Bucket
71 | 100461 Bucket
72 | 100464 Bucket
73 | 100465 Bucket
74 | 100466 Bucket
75 | 100468 Bucket
76 | 100486 Bucket
77 | 102352 Bucket
78 | 102358 Bucket
79 | 102359 Bucket
80 | 102367 Bucket
81 | 102369 Bucket
82 | 100075 Cart
83 | 100491 Cart
84 | 100498 Cart
85 | 100501 Cart
86 | 100502 Cart
87 | 100508 Cart
88 | 100852 Cart
89 | 100853 Cart
90 | 100860 Cart
91 | 101065 Cart
92 | 101066 Cart
93 | 101075 Cart
94 | 101081 Cart
95 | 101086 Cart
96 | 101097 Cart
97 | 101097 Cart
98 | 101176 Cart
99 | 101892 Cart
100 | 102551 Cart
101 | 102552 Cart
102 | 101284 Eyeglasses
103 | 101285 Eyeglasses
104 | 101287 Eyeglasses
105 | 101288 Eyeglasses
106 | 101291 Eyeglasses
107 | 101293 Eyeglasses
108 | 101297 Eyeglasses
109 | 101300 Eyeglasses
110 | 101303 Eyeglasses
111 | 101328 Eyeglasses
112 | 101332 Eyeglasses
113 | 101335 Eyeglasses
114 | 101838 Eyeglasses
115 | 101840 Eyeglasses
116 | 101842 Eyeglasses
117 | 101843 Eyeglasses
118 | 101844 Eyeglasses
119 | 101845 Eyeglasses
120 | 101848 Eyeglasses
121 | 101859 Eyeglasses
122 | 101860 Eyeglasses
123 | 101866 Eyeglasses
124 | 101868 Eyeglasses
125 | 101869 Eyeglasses
126 | 101871 Eyeglasses
127 | 101874 Eyeglasses
128 | 102567 Eyeglasses
129 | 102569 Eyeglasses
130 | 102571 Eyeglasses
131 | 102572 Eyeglasses
132 | 102573 Eyeglasses
133 | 102578 Eyeglasses
134 | 102587 Eyeglasses
135 | 102588 Eyeglasses
136 | 102590 Eyeglasses
137 | 102599 Eyeglasses
138 | 102601 Eyeglasses
139 | 102603 Eyeglasses
140 | 102608 Eyeglasses
141 | 102611 Eyeglasses
142 | 102612 Eyeglasses
143 | 103028 Eyeglasses
144 | 103029 Eyeglasses
145 | 103177 Eyeglasses
146 | 103178 Eyeglasses
147 | 103184 Eyeglasses
148 | 103186 Eyeglasses
149 | 103189 Eyeglasses
150 | 103194 Eyeglasses
151 | 100520 FoldingChair
152 | 100521 FoldingChair
153 | 100526 FoldingChair
154 | 100531 FoldingChair
155 | 100532 FoldingChair
156 | 100557 FoldingChair
157 | 100561 FoldingChair
158 | 100562 FoldingChair
159 | 100568 FoldingChair
160 | 100579 FoldingChair
161 | 100586 FoldingChair
162 | 100590 FoldingChair
163 | 100599 FoldingChair
164 | 100600 FoldingChair
165 | 100609 FoldingChair
166 | 100611 FoldingChair
167 | 100616 FoldingChair
168 | 102263 FoldingChair
169 | 102269 FoldingChair
170 | 102314 FoldingChair
171 | 750001 Basket
172 | 750002 Basket
173 | 750003 Basket
174 | 750004 Basket
175 | 750007 Basket
176 | 750008 Basket
177 | 750009 Basket
178 | 750010 Basket
179 | 750013 Basket
180 | 750014 Basket
181 | 750015 Basket
182 | 750016 Basket
183 | 750019 Basket
184 | 750020 Basket
185 | 750021 Basket
186 | 750022 Basket
187 | 750023 Basket
188 | 750024 Basket
189 | 750026 Basket
190 | 750029 Basket
191 | 750034 Basket
192 | 750035 Basket
193 | 750036 Basket
194 | 750037 Basket
195 | 750038 Basket
196 | 750039 Basket
197 | 750041 Basket
198 | 750042 Basket
199 | 750043 Basket
200 | 750044 Basket
201 | 750045 Basket
202 | 750048 Basket
203 | 750049 Basket
204 | 750051 Basket
205 | 750052 Basket
206 | 750053 Basket
207 | 750054 Basket
208 | 750055 Basket
209 | 750062 Basket
210 | 750065 Basket
211 | 750067 Basket
212 | 750071 Basket
213 | 750072 Basket
214 | 750075 Basket
215 | 750076 Basket
216 | 750080 Basket
217 | 750081 Basket
218 | 750082 Basket
219 | 750083 Basket
220 | 750086 Basket
221 | 750087 Basket
222 | 750088 Basket
223 | 750089 Basket
224 | 750091 Basket
225 | 750093 Basket
226 | 750096 Basket
227 | 750097 Basket
228 | 750098 Basket
229 | 750101 Basket
230 | 750102 Basket
231 | 750103 Basket
232 | 750104 Basket
233 | 750106 Basket
234 | 750109 Basket
235 | 750111 Basket
236 | 750112 Basket
237 | 100142 Pliers
238 | 100144 Pliers
239 | 100178 Pliers
240 | 100179 Pliers
241 | 100182 Pliers
242 | 100188 Pliers
243 | 100705 Pliers
244 | 102073 Pliers
245 | 102074 Pliers
246 | 102075 Pliers
247 | 102221 Pliers
248 | 102242 Pliers
249 | 102251 Pliers
250 | 102253 Pliers
251 | 102258 Pliers
252 | 102260 Pliers
253 | 102285 Pliers
254 | 102288 Pliers
255 | 102292 Pliers
--------------------------------------------------------------------------------
/stats/train_where2actPP_val_data_list_pickup.txt:
--------------------------------------------------------------------------------
1 | 101326 Eyeglasses
2 | 101831 Eyeglasses
3 | 101833 Eyeglasses
4 | 101836 Eyeglasses
5 | 101839 Eyeglasses
6 | 101861 Eyeglasses
7 | 101863 Eyeglasses
8 | 101864 Eyeglasses
9 | 101870 Eyeglasses
10 | 102568 Eyeglasses
11 | 102570 Eyeglasses
12 | 102586 Eyeglasses
13 | 102589 Eyeglasses
14 | 102591 Eyeglasses
15 | 102596 Eyeglasses
16 | 102617 Eyeglasses
17 | 750005 Basket
18 | 750006 Basket
19 | 750017 Basket
20 | 750018 Basket
21 | 750025 Basket
22 | 750028 Basket
23 | 750031 Basket
24 | 750032 Basket
25 | 750033 Basket
26 | 750040 Basket
27 | 750056 Basket
28 | 750057 Basket
29 | 750059 Basket
30 | 750061 Basket
31 | 750063 Basket
32 | 750064 Basket
33 | 750066 Basket
34 | 750068 Basket
35 | 750069 Basket
36 | 750070 Basket
37 | 750073 Basket
38 | 750074 Basket
39 | 750078 Basket
40 | 750084 Basket
41 | 750094 Basket
42 | 750095 Basket
43 | 750100 Basket
44 | 750105 Basket
45 | 750107 Basket
46 | 750110 Basket
47 | 750113 Basket
48 | 102154 TrashCan
49 | 102165 TrashCan
50 | 102177 TrashCan
51 | 102187 TrashCan
52 | 102209 TrashCan
53 | 102218 TrashCan
54 | 102219 TrashCan
55 | 102244 TrashCan
56 | 102259 TrashCan
57 | 103012 TrashCan
58 | 10357 TrashCan
59 | 103633 TrashCan
60 | 10584 TrashCan
61 | 12445 TrashCan
62 | 12447 TrashCan
63 | 4108 TrashCan
64 | 100446 Bucket
65 | 100452 Bucket
66 | 100454 Bucket
67 | 100469 Bucket
68 | 100470 Bucket
69 | 100472 Bucket
70 | 100473 Bucket
71 | 100484 Bucket
72 | 3386 Display
73 | 3393 Display
74 | 4542 Display
75 | 4562 Display
76 | 4564 Display
77 | 4586 Display
78 | 4592 Display
79 | 4627 Display
80 | 100146 Pliers
81 | 100150 Pliers
82 | 100172 Pliers
83 | 100180 Pliers
84 | 102243 Pliers
85 | 102266 Pliers
86 |
--------------------------------------------------------------------------------