├── .gitattributes
├── Data
│ ├── ellipsoid
│ │ ├── face1.obj
│ │ ├── face2.obj
│ │ ├── face3.obj
│ │ └── info_ellipsoid.dat
│ ├── examples
│ │ ├── car.png
│ │ ├── chair.png
│ │ ├── gun.png
│ │ ├── lamp.png
│ │ ├── plane.png
│ │ └── table.png
│ ├── test_list.txt
│ └── train_list.txt
├── Docs
│ └── images
│   ├── car_example.gif
│   ├── car_example.png
│   ├── plane.gif
│   └── plane.png
├── GenerateData
│ ├── 1_sample_points.txt
│ ├── 1a0bc9ab92c915167ae33d942430658c
│ │ ├── model.mtl
│ │ ├── model.obj
│ │ ├── model.xyz
│ │ ├── model_normal.xyz
│ │ └── rendering
│ │   ├── 00.png
│ │   ├── 01.png
│ │   ├── 02.png
│ │   ├── 03.png
│ │   ├── 04.png
│ │   ├── 05.png
│ │   ├── 06.png
│ │   ├── 07.png
│ │   ├── 08.png
│ │   ├── 09.png
│ │   ├── 10.png
│ │   ├── 11.png
│ │   ├── 12.png
│ │   ├── 13.png
│ │   ├── 14.png
│ │   ├── 15.png
│ │   ├── 16.png
│ │   ├── 17.png
│ │   ├── 18.png
│ │   ├── 19.png
│ │   ├── 20.png
│ │   ├── 21.png
│ │   ├── 22.png
│ │   ├── 23.png
│ │   ├── rendering_metadata.txt
│ │   └── renderings.txt
│ ├── 2_generate_normal.py
│ ├── 3_camera_transform.py
│ ├── 4_make_auxiliary_dat_file.ipynb
│ ├── generate_data.py
│ ├── init_obj
│ │ ├── init1.obj
│ │ ├── init1.png
│ │ ├── init2.obj
│ │ ├── init2.png
│ │ ├── init3.obj
│ │ └── init3.png
│ └── upsample.mlx
├── LICENSE
├── README.md
├── demo.py
├── eval_testset.py
├── external
│ ├── approxmatch.cpp
│ ├── approxmatch.cu
│ ├── makefile
│ ├── tf_approxmatch.cpp
│ ├── tf_approxmatch.py
│ ├── tf_approxmatch_g.cu
│ ├── tf_approxmatch_g.cu.o
│ ├── tf_approxmatch_so.so
│ ├── tf_nndistance.cpp
│ ├── tf_nndistance.py
│ ├── tf_nndistance_g.cu
│ ├── tf_nndistance_g.cu.o
│ └── tf_nndistance_so.so
├── p2m
│ ├── __init__.py
│ ├── api.py
│ ├── chamfer.py
│ ├── fetcher.py
│ ├── inits.py
│ ├── layers.py
│ ├── losses.py
│ ├── models.py
│ └── utils.py
└── train.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | GenerateData/*.ipynb linguist-documentation
2 |
--------------------------------------------------------------------------------
/Data/ellipsoid/face1.obj:
--------------------------------------------------------------------------------
1 | f 1 2 3
2 | f 2 4 5
3 | f 3 2 5
4 | f 4 6 5
5 | f 7 8 9
6 | f 1 9 8
7 | f 1 8 2
8 | f 2 8 10
9 | f 10 11 2
10 | f 2 11 4
11 | f 12 6 4
12 | f 12 4 13
13 | f 13 4 11
14 | f 13 14 12
15 | f 7 15 16
16 | f 8 7 16
17 | f 16 17 8
18 | f 8 17 10
19 | f 17 18 10
20 | f 10 18 11
21 | f 18 19 11
22 | f 11 19 13
23 | f 13 19 20
24 | f 14 13 20
25 | f 16 15 21
26 | f 21 22 23
27 | f 21 23 16
28 | f 16 23 17
29 | f 23 24 17
30 | f 17 24 18
31 | f 24 25 18
32 | f 18 25 19
33 | f 19 25 26
34 | f 20 19 26
35 | f 22 27 28
36 | f 23 22 28
37 | f 28 29 23
38 | f 23 29 24
39 | f 30 31 25
40 | f 30 25 29
41 | f 29 25 24
42 | f 25 31 26
43 | f 28 27 32
44 | f 29 28 32
45 | f 33 29 32
46 | f 29 33 30
47 | f 34 35 36
48 | f 35 37 38
49 | f 36 35 38
50 | f 37 39 38
51 | f 40 41 42
52 | f 3 35 34
53 | f 1 3 34
54 | f 1 34 42
55 | f 1 42 41
56 | f 5 35 3
57 | f 37 35 5
58 | f 37 43 39
59 | f 5 43 37
60 | f 5 44 43
61 | f 5 6 44
62 | f 44 45 43
63 | f 9 41 40
64 | f 9 40 7
65 | f 7 40 46
66 | f 9 1 41
67 | f 12 44 6
68 | f 44 12 14
69 | f 44 14 45
70 | f 45 14 47
71 | f 15 7 48
72 | f 48 7 46
73 | f 20 49 14
74 | f 49 47 14
75 | f 50 21 15
76 | f 50 15 51
77 | f 51 15 48
78 | f 21 50 22
79 | f 26 52 20
80 | f 52 49 20
81 | f 51 53 54
82 | f 50 51 54
83 | f 54 22 50
84 | f 27 22 54
85 | f 30 55 31
86 | f 31 55 56
87 | f 31 56 26
88 | f 26 56 52
89 | f 54 53 57
90 | f 57 58 59
91 | f 54 57 59
92 | f 54 59 32
93 | f 54 32 27
94 | f 32 60 33
95 | f 59 60 32
96 | f 61 62 55
97 | f 60 61 55
98 | f 60 55 30
99 | f 60 30 33
100 | f 55 62 56
101 | f 59 58 63
102 | f 60 59 63
103 | f 64 60 63
104 | f 60 64 61
105 | f 65 34 36
106 | f 66 65 36
107 | f 36 38 66
108 | f 38 67 66
109 | f 39 68 69
110 | f 39 69 38
111 | f 38 69 67
112 | f 68 70 69
113 | f 71 40 42
114 | f 72 71 42
115 | f 42 65 72
116 | f 34 65 42
117 | f 43 68 39
118 | f 68 43 45
119 | f 68 45 70
120 | f 70 45 73
121 | f 46 40 74
122 | f 74 40 71
123 | f 75 47 76
124 | f 75 73 47
125 | f 73 45 47
126 | f 76 77 75
127 | f 48 46 78
128 | f 78 46 74
129 | f 49 76 47
130 | f 79 76 49
131 | f 76 79 80
132 | f 77 76 80
133 | f 51 48 81
134 | f 81 48 78
135 | f 52 79 49
136 | f 82 79 52
137 | f 79 82 83
138 | f 80 79 83
139 | f 53 51 84
140 | f 84 51 81
141 | f 82 85 86
142 | f 82 52 85
143 | f 52 56 85
144 | f 82 86 83
145 | f 87 57 53
146 | f 84 87 53
147 | f 58 87 88
148 | f 57 87 58
149 | f 89 62 61
150 | f 90 62 89
151 | f 62 90 85
152 | f 56 62 85
153 | f 91 63 58
154 | f 88 91 58
155 | f 63 91 64
156 | f 64 91 92
157 | f 64 92 89
158 | f 61 64 89
159 | f 93 65 66
160 | f 94 93 66
161 | f 66 67 94
162 | f 67 95 94
163 | f 96 97 95
164 | f 96 95 69
165 | f 69 95 67
166 | f 69 70 96
167 | f 98 71 72
168 | f 99 98 72
169 | f 72 93 99
170 | f 65 93 72
171 | f 96 100 97
172 | f 100 96 70
173 | f 100 70 101
174 | f 101 70 73
175 | f 74 71 102
176 | f 102 71 98
177 | f 103 73 75
178 | f 103 104 73
179 | f 104 101 73
180 | f 75 77 103
181 | f 78 74 105
182 | f 105 74 102
183 | f 104 106 107
184 | f 103 106 104
185 | f 106 103 77
186 | f 80 106 77
187 | f 81 78 108
188 | f 108 78 105
189 | f 107 109 110
190 | f 106 109 107
191 | f 109 106 80
192 | f 83 109 80
193 | f 84 81 111
194 | f 111 81 108
195 | f 86 110 109
196 | f 86 85 110
197 | f 85 112 110
198 | f 86 109 83
199 | f 113 87 84
200 | f 111 113 84
201 | f 88 113 114
202 | f 87 113 88
203 | f 115 90 89
204 | f 116 90 115
205 | f 90 116 112
206 | f 85 90 112
207 | f 117 91 88
208 | f 114 117 88
209 | f 91 117 92
210 | f 92 117 118
211 | f 92 118 115
212 | f 89 92 115
213 | f 93 94 119
214 | f 94 95 120
215 | f 119 94 120
216 | f 95 97 120
217 | f 98 99 121
218 | f 122 119 123
219 | f 121 119 122
220 | f 121 93 119
221 | f 121 99 93
222 | f 120 123 119
223 | f 124 123 120
224 | f 124 125 126
225 | f 120 125 124
226 | f 120 100 125
227 | f 120 97 100
228 | f 100 101 125
229 | f 102 98 121
230 | f 127 102 121
231 | f 121 128 127
232 | f 122 128 121
233 | f 126 129 130
234 | f 125 129 126
235 | f 129 125 101
236 | f 104 129 101
237 | f 105 102 127
238 | f 131 105 127
239 | f 127 132 131
240 | f 128 132 127
241 | f 129 133 130
242 | f 133 129 104
243 | f 133 104 134
244 | f 134 104 107
245 | f 108 105 131
246 | f 135 108 131
247 | f 131 136 135
248 | f 132 136 131
249 | f 137 138 139
250 | f 139 138 134
251 | f 139 134 110
252 | f 110 134 107
253 | f 111 108 135
254 | f 140 111 135
255 | f 141 135 136
256 | f 141 142 135
257 | f 142 140 135
258 | f 142 143 144
259 | f 141 143 142
260 | f 137 144 143
261 | f 137 139 144
262 | f 139 145 144
263 | f 145 139 110
264 | f 112 145 110
265 | f 113 111 140
266 | f 114 113 140
267 | f 114 140 146
268 | f 146 140 142
269 | f 146 142 147
270 | f 142 144 147
271 | f 116 115 147
272 | f 116 147 145
273 | f 145 147 144
274 | f 116 145 112
275 | f 117 114 146
276 | f 118 117 146
277 | f 147 118 146
278 | f 118 147 115
279 | f 122 123 148
280 | f 123 124 149
281 | f 148 123 149
282 | f 124 126 149
283 | f 128 122 148
284 | f 150 128 148
285 | f 150 148 151
286 | f 148 149 151
287 | f 151 149 126
288 | f 130 151 126
289 | f 132 128 150
290 | f 152 132 150
291 | f 152 150 153
292 | f 150 151 153
293 | f 130 133 154
294 | f 130 154 151
295 | f 151 154 153
296 | f 133 134 154
297 | f 136 132 152
298 | f 155 136 152
299 | f 155 152 156
300 | f 152 153 156
301 | f 138 137 156
302 | f 138 156 154
303 | f 154 156 153
304 | f 138 154 134
305 | f 141 136 155
306 | f 143 141 155
307 | f 156 143 155
308 | f 143 156 137
309 |
--------------------------------------------------------------------------------
/Data/ellipsoid/info_ellipsoid.dat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/ellipsoid/info_ellipsoid.dat
--------------------------------------------------------------------------------
/Data/examples/car.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/car.png
--------------------------------------------------------------------------------
/Data/examples/chair.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/chair.png
--------------------------------------------------------------------------------
/Data/examples/gun.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/gun.png
--------------------------------------------------------------------------------
/Data/examples/lamp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/lamp.png
--------------------------------------------------------------------------------
/Data/examples/plane.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/plane.png
--------------------------------------------------------------------------------
/Data/examples/table.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Data/examples/table.png
--------------------------------------------------------------------------------
/Docs/images/car_example.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Docs/images/car_example.gif
--------------------------------------------------------------------------------
/Docs/images/car_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Docs/images/car_example.png
--------------------------------------------------------------------------------
/Docs/images/plane.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Docs/images/plane.gif
--------------------------------------------------------------------------------
/Docs/images/plane.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/Docs/images/plane.png
--------------------------------------------------------------------------------
/GenerateData/1_sample_points.txt:
--------------------------------------------------------------------------------
1 | Sampling points on the ShapeNet CAD model:
2 |
3 | meshlabserver -i 1a0bc9ab92c915167ae33d942430658c/model.obj -o 1a0bc9ab92c915167ae33d942430658c/model.xyz -s upsample.mlx
4 |
--------------------------------------------------------------------------------
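For reference, a minimal Python sketch of the same sampling step (a sketch only, assuming trimesh is installed and that model.obj loads as a single mesh; it is roughly, not exactly, equivalent to the MeshLab filter, and generate_data.py below implements the full multi-mesh version):

# Sketch: area-weighted surface sampling with trimesh, roughly
# equivalent to the meshlabserver + upsample.mlx command above.
import numpy as np
import trimesh

mesh = trimesh.load_mesh('1a0bc9ab92c915167ae33d942430658c/model.obj')
points, face_idx = trimesh.sample.sample_surface(mesh, 16384)
np.savetxt('1a0bc9ab92c915167ae33d942430658c/model.xyz', points)
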
/GenerateData/1a0bc9ab92c915167ae33d942430658c/model.mtl:
--------------------------------------------------------------------------------
1 | #
2 | ## Alias OBJ Material File
3 | # Exported from SketchUp, (c) 2000-2012 Trimble Navigation Limited
4 |
5 | newmtl BLACKNESS1
6 | Ka 0.000000 0.000000 0.000000
7 | Kd 0.000000 0.000000 0.000000
8 | Ks 0.330000 0.330000 0.330000
9 |
10 | newmtl BODY_COLOR1
11 | Ka 0.000000 0.000000 0.000000
12 | Kd 0.588235 0.588235 0.588235
13 | Ks 0.330000 0.330000 0.330000
14 |
15 | newmtl ANODIZED1
16 | Ka 0.000000 0.000000 0.000000
17 | Kd 0.803922 0.803922 0.803922
18 | Ks 0.330000 0.330000 0.330000
19 |
20 | newmtl CHROME1
21 | Ka 0.000000 0.000000 0.000000
22 | Kd 0.803922 0.803922 0.803922
23 | Ks 0.330000 0.330000 0.330000
24 |
25 | newmtl RIMS_TX1
26 | Ka 0.000000 0.000000 0.000000
27 | Kd 0.803922 0.803922 0.803922
28 | Ks 0.330000 0.330000 0.330000
29 |
30 | newmtl LINES1
31 | Ka 0.000000 0.000000 0.000000
32 | Kd 0.000000 0.000000 0.000000
33 | Ks 0.330000 0.330000 0.330000
34 |
35 | newmtl BLACK_PLASTIC1
36 | Ka 0.000000 0.000000 0.000000
37 | Kd 0.000000 0.000000 0.000000
38 | Ks 0.330000 0.330000 0.330000
39 |
40 | newmtl GLASS_HD1
41 | Ka 0.000000 0.000000 0.000000
42 | Kd 0.784314 0.784314 0.784314
43 | Ks 0.330000 0.330000 0.330000
44 | d 0.500000
45 |
46 | newmtl BLINKERS1
47 | Ka 0.000000 0.000000 0.000000
48 | Kd 1.000000 0.423529 0.141176
49 | Ks 0.330000 0.330000 0.330000
50 |
51 | newmtl BLINKERS2
52 | Ka 0.000000 0.000000 0.000000
53 | Kd 0.756863 0.141176 0.141176
54 | Ks 0.330000 0.330000 0.330000
55 |
56 | newmtl TIRES_BUMPTREAD1
57 | Ka 0.000000 0.000000 0.000000
58 | Kd 0.050980 0.050980 0.050980
59 | Ks 0.330000 0.330000 0.330000
60 |
61 | newmtl DASH1
62 | Ka 0.000000 0.000000 0.000000
63 | Kd 0.584314 0.584314 0.584314
64 | Ks 0.330000 0.330000 0.330000
65 |
66 | newmtl IINTERIOR1
67 | Ka 0.000000 0.000000 0.000000
68 | Kd 0.184314 0.184314 0.184314
69 | Ks 0.330000 0.330000 0.330000
70 |
71 | newmtl RIMS1
72 | Ka 0.000000 0.000000 0.000000
73 | Kd 0.803922 0.803922 0.803922
74 | Ks 0.330000 0.330000 0.330000
75 |
76 | newmtl WINDOWS1
77 | Ka 0.000000 0.000000 0.000000
78 | Kd 0.784314 0.792157 0.815686
79 | Ks 0.330000 0.330000 0.330000
80 | d 0.500000
81 |
82 | newmtl STW1
83 | Ka 0.000000 0.000000 0.000000
84 | Kd 0.588235 0.588235 0.588235
85 | Ks 0.330000 0.330000 0.330000
86 |
87 | newmtl GLASS_BLACK1
88 | Ka 0.000000 0.000000 0.000000
89 | Kd 0.000000 0.000000 0.000000
90 | Ks 0.330000 0.330000 0.330000
91 |
92 | newmtl TAILS1
93 | Ka 0.000000 0.000000 0.000000
94 | Kd 0.584314 0.584314 0.584314
95 | Ks 0.330000 0.330000 0.330000
96 |
97 | newmtl FrontColor
98 | Ka 0.000000 0.000000 0.000000
99 | Kd 1.000000 1.000000 1.000000
100 | Ks 0.330000 0.330000 0.330000
101 |
102 | newmtl Color_A01
103 | Ka 0.000000 0.000000 0.000000
104 | Kd 1.000000 0.000000 0.000000
105 | Ks 0.330000 0.330000 0.330000
106 |
107 | newmtl Color_I01
108 | Ka 0.000000 0.000000 0.000000
109 | Kd 0.000000 0.000000 1.000000
110 | Ks 0.330000 0.330000 0.330000
111 |
112 | newmtl Color_009
113 | Ka 0.000000 0.000000 0.000000
114 | Kd 0.000000 0.000000 0.000000
115 | Ks 0.330000 0.330000 0.330000
116 |
117 | newmtl Color_007
118 | Ka 0.000000 0.000000 0.000000
119 | Kd 0.227451 0.227451 0.227451
120 | Ks 0.330000 0.330000 0.330000
121 |
122 |
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/00.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/00.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/01.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/02.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/03.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/04.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/05.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/06.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/07.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/08.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/09.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/10.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/11.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/12.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/13.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/14.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/15.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/16.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/17.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/18.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/19.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/20.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/21.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/22.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/23.png
--------------------------------------------------------------------------------
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/rendering_metadata.txt:
--------------------------------------------------------------------------------
1 | 53.4148856456 25.1270260505 0 0.850870070084 25
2 | 143.273644062 28.0180932672 0 0.899126425664 25
3 | 85.1124149638 27.9514800197 0 0.667085868869 25
4 | 247.383238512 28.3639421468 0 0.743294441578 25
5 | 244.57692584 25.1993747581 0 0.794193547188 25
6 | 291.11136697 29.1055574784 0 0.903424034923 25
7 | 182.493014657 26.5098159178 0 0.893605288721 25
8 | 272.50458903 29.0368532713 0 0.721246732217 25
9 | 314.30165154 28.3378456481 0 0.798682827392 25
10 | 181.675245762 29.5369170391 0 0.828909921947 25
11 | 73.5046764314 26.6665525716 0 0.702062460852 25
12 | 124.569110764 27.0786896863 0 0.934393309518 25
13 | 320.446702213 25.5733484989 0 0.753609249579 25
14 | 3.92074527833 28.1978808552 0 0.894365080218 25
15 | 50.8758203847 27.3605273948 0 0.938366585857 25
16 | 355.068852163 27.7909587322 0 0.804541148674 25
17 | 287.179371263 27.2740450002 0 0.872237713407 25
18 | 199.448626239 26.6753895394 0 0.66276927858 25
19 | 317.23220414 25.5358004294 0 0.889966934007 25
20 | 69.2950421657 28.079733059 0 0.73048324288 25
21 | 217.871354708 26.7145969552 0 0.831260061849 25
22 | 293.555160703 26.8555855843 0 0.837581798871 25
23 | 137.306814863 28.901418334 0 0.739500866469 25
24 | 166.50918954 29.4857061903 0 0.8627209493 25
25 |
--------------------------------------------------------------------------------
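Each row above holds the camera parameters for the corresponding rendered view. As read by 3_camera_transform.py and generate_data.py, the columns are: azimuth (degrees), elevation (degrees), an unused third value (0 for every view), camera distance, and a constant 25 (presumably the renderer's field of view; it is not used by the scripts here). A minimal parsing sketch:

# Sketch: read the metadata and unpack the columns used downstream.
import numpy as np

params = np.loadtxt('1a0bc9ab92c915167ae33d942430658c/rendering/rendering_metadata.txt')
azimuth_deg, elevation_deg, _, distance, _ = params[0]
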
/GenerateData/1a0bc9ab92c915167ae33d942430658c/rendering/renderings.txt:
--------------------------------------------------------------------------------
1 | 00.png
2 | 01.png
3 | 02.png
4 | 03.png
5 | 04.png
6 | 05.png
7 | 06.png
8 | 07.png
9 | 08.png
10 | 09.png
11 | 10.png
12 | 11.png
13 | 12.png
14 | 13.png
15 | 14.png
16 | 15.png
17 | 16.png
18 | 17.png
19 | 18.png
20 | 19.png
21 | 20.png
22 | 21.png
23 | 22.png
24 | 23.png
25 |
--------------------------------------------------------------------------------
/GenerateData/2_generate_normal.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Oct 26 16:22:12 2018
4 |
5 | @author: wnylol
6 | """
7 |
8 | import numpy as np
9 | from scipy.spatial import ConvexHull
10 |
11 | def unit(v):
12 | norm = np.linalg.norm(v)
13 | if norm == 0:
14 | return v
15 | return v / norm
16 |
17 | def readFaceInfo(obj_path):
18 |     vert_list = np.zeros((1,3), dtype='float32') # all vertex coords (row 0 is a dummy so OBJ's 1-based indices work directly)
19 |     face_pts = np.zeros((1,3,3), dtype='float32') # the 3 vertices of each triangle face
20 |     face_axis = np.zeros((1,3,3), dtype='float32') # new x/y/z axes on each face plane
21 | with open(obj_path, 'r') as f:
22 | while(True):
23 | line = f.readline()
24 | if not line:
25 | break
26 | if line[0:2] == 'v ':
27 | t = line.split('v')[1] # ' 0.1 0.2 0.3'
28 | vertex = np.fromstring(t, sep=' ').reshape((1,3))
29 | vert_list = np.append(vert_list, vertex, axis=0)
30 | elif line[0:2] == 'f ':
31 | t = line.split() # ['f', '1//2', '1/2/3', '1']
32 | p1,p2,p3 = [int(t[i].split('/')[0]) for i in range(1,4)]
33 |
34 | points = np.array([vert_list[p1], vert_list[p2], vert_list[p3]])
35 | face_pts = np.append(face_pts, points.reshape(1,3,3), axis=0)
36 |
37 |                 # build a per-face frame: x along edge v1, z along the face normal
38 | v1 = vert_list[p2] - vert_list[p1] # x axis
39 | v2 = vert_list[p3] - vert_list[p1]
40 | f_n = np.cross(v1, v2) # z axis, face normal
41 | f_y = np.cross(v1, f_n) # y axis
42 | new_axis = np.array([unit(v1), unit(f_y), unit(f_n)])
43 | face_axis = np.append(face_axis, new_axis.reshape((1,3,3)), axis=0)
44 |
45 | face_pts = np.delete(face_pts, 0, 0)
46 | face_axis = np.delete(face_axis, 0, 0)
47 | return face_pts, face_axis
48 |
49 | def generate_normal(pt_position, face_pts, face_axis):
50 | pt_normal = np.zeros_like(pt_position, dtype='float32')
51 |
52 | for points, axis in zip(face_pts, face_axis):
53 | f_org = points[0] # new axis system origin point
54 | f_n = axis[2]
55 |
56 | face_vertex_2d = np.dot(points - f_org, axis.T)[:,:2]
57 |
58 |         # skip degenerate faces: the current face's axis vectors must all be unit length
59 |         n1,n2,n3 = [np.linalg.norm(axis[i]) for i in range(3)]
60 | if n1<0.99 or n2<0.99 or n3<0.99:
61 | continue
62 |         # skip faces whose 3 vertices are collinear
63 | t = np.sum(np.square(face_vertex_2d), 0)
64 | if t[0]==0 or t[1]==0:
65 | continue
66 |
67 | transform_verts = np.dot(pt_position - f_org, axis.transpose())
68 | vert_idx = np.where(np.abs(transform_verts[:,2]) < 6e-7)[0]
69 |
70 | for idx in vert_idx:
71 | if np.linalg.norm(pt_normal[idx]) == 0:
72 | p4 = transform_verts[idx][:2].reshape(1,2)
73 | pt_4 = np.append(face_vertex_2d, p4, axis=0)
74 | hull = ConvexHull(pt_4)
75 | if len(hull.vertices) == 3:
76 | pt_normal[idx] = f_n * (-1)
77 |
78 | return np.hstack((pt_position, pt_normal))
79 |
80 | if __name__ == '__main__':
81 | vert_path = '1a0bc9ab92c915167ae33d942430658c/model.xyz'
82 | mesh_path = '1a0bc9ab92c915167ae33d942430658c/model.obj'
83 |
84 | face_pts, face_axis = readFaceInfo(mesh_path)
85 | vert = np.loadtxt(vert_path)
86 | vert_with_normal = generate_normal(vert, face_pts, face_axis)
87 | np.savetxt(vert_path.replace('.xyz', '_normal.xyz'), vert_with_normal)
88 |
--------------------------------------------------------------------------------
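A quick sanity check for the generated normals, as a sketch (assuming trimesh is available and model.obj loads as a single mesh; this is not part of the original pipeline):

# Sketch: compare each generated point normal with the face normal of
# the nearest triangle; |cos| near 1 means they agree up to sign.
import numpy as np
import trimesh

mesh = trimesh.load_mesh('1a0bc9ab92c915167ae33d942430658c/model.obj')
data = np.loadtxt('1a0bc9ab92c915167ae33d942430658c/model_normal.xyz')
points, normals = data[:, :3], data[:, 3:]

# closest_point returns (closest points, distances, triangle ids)
_, _, tri_id = trimesh.proximity.closest_point(mesh, points)
valid = np.linalg.norm(normals, axis=1) > 0  # points the script left as zeros never got a normal
cos = np.abs(np.sum(normals * mesh.face_normals[tri_id], axis=1))
print('mean |cos| between normals:', cos[valid].mean())
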
/GenerateData/3_camera_transform.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Oct 26 12:55:31 2018
4 |
5 | @author: wnylol
6 | """
7 |
8 | import numpy as np
9 | import cPickle as pickle
10 | import cv2
11 | import os
12 |
13 | def unit(v):
14 | norm = np.linalg.norm(v)
15 | if norm == 0:
16 | return v
17 | return v / norm
18 |
19 | def camera_info(param):
20 | theta = np.deg2rad(param[0])
21 | phi = np.deg2rad(param[1])
22 |
23 | camY = param[3]*np.sin(phi)
24 | temp = param[3]*np.cos(phi)
25 | camX = temp * np.cos(theta)
26 | camZ = temp * np.sin(theta)
27 | cam_pos = np.array([camX, camY, camZ])
28 |
29 | axisZ = cam_pos.copy()
30 | axisY = np.array([0,1,0])
31 | axisX = np.cross(axisY, axisZ)
32 | axisY = np.cross(axisZ, axisX)
33 |
34 | cam_mat = np.array([unit(axisX), unit(axisY), unit(axisZ)])
35 | return cam_mat, cam_pos
36 |
37 | if __name__ == '__main__':
38 |
39 | vert_path = '1a0bc9ab92c915167ae33d942430658c/model_normal.xyz'
40 | vert = np.loadtxt(vert_path)
41 | position = vert[:, : 3] * 0.57
42 | normal = vert[:, 3:]
43 |
44 | view_path = '1a0bc9ab92c915167ae33d942430658c/rendering/rendering_metadata.txt'
45 | cam_params = np.loadtxt(view_path)
46 | for index, param in enumerate(cam_params):
47 |         # camera transform
48 | cam_mat, cam_pos = camera_info(param)
49 |
50 | pt_trans = np.dot(position-cam_pos, cam_mat.transpose())
51 | nom_trans = np.dot(normal, cam_mat.transpose())
52 | train_data = np.hstack((pt_trans, nom_trans))
53 |
54 |         # project the points onto the image to visually verify the camera transform
55 | img_path = os.path.join(os.path.split(view_path)[0], '%02d.png'%index)
56 | np.savetxt(img_path.replace('png','xyz'), train_data)
57 |
58 | img = cv2.imread(img_path)
59 | img = cv2.resize(img, (224,224))
60 |
61 | X,Y,Z = pt_trans.T
62 |         F = 248  # focal length in pixels for the 224x224 image (principal point at 112)
63 | h = (-Y)/(-Z)*F + 224/2.0
64 | w = X/(-Z)*F + 224/2.0
65 | h = np.minimum(np.maximum(h, 0), 223)
66 | w = np.minimum(np.maximum(w, 0), 223)
67 | img[np.round(h).astype(int), np.round(w).astype(int), 2] = 0
68 | img[np.round(h).astype(int), np.round(w).astype(int), 1] = 255
69 | cv2.imwrite(img_path.replace('.png','_prj.png'), img)
70 |
--------------------------------------------------------------------------------
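The pinhole projection used in the verification step above, factored out as a short reference sketch (the project() helper is hypothetical, introduced here only to mirror lines 61-66 of the script; F is the focal length in pixels and size the image side length):

# Sketch: project camera-space points to pixel coordinates,
# exactly as the loop above does before drawing them.
import numpy as np

def project(pt_trans, F=248, size=224):
    X, Y, Z = pt_trans.T
    h = (-Y) / (-Z) * F + size / 2.0  # image row
    w = X / (-Z) * F + size / 2.0     # image column
    return np.clip(h, 0, size - 1), np.clip(w, 0, size - 1)
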
/GenerateData/4_make_auxiliary_dat_file.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# NOTE\n",
8 | "\n",
9 | "Please note that this script uses python3, which should still work in the python2 version, but be careful to adjust the usage of the library to match compatibility.\n",
10 | "\n",
11 | "The original version of Pixel2Mesh's `.dat` file has pkl[5] and pkl[6], but they are not used by other code, so only this part is padded to 0 here."
12 | ]
13 | },
14 | {
15 | "cell_type": "code",
16 | "execution_count": 1,
17 | "metadata": {},
18 | "outputs": [],
19 | "source": [
20 | "import numpy as np\n",
21 | "import networkx as nx\n",
22 | "import scipy.sparse as sp\n",
23 | "import sys\n",
24 | "import os\n",
25 | "import pickle\n",
26 | "import trimesh\n",
27 | "\n",
28 | "from IPython.display import Image\n",
29 | "from scipy.sparse.linalg.eigen.arpack import eigsh"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 2,
35 | "metadata": {},
36 | "outputs": [],
37 | "source": [
38 | "def load_obj(fn, no_normal=False):\n",
39 | " fin = open(fn, 'r')\n",
40 | " lines = [line.rstrip() for line in fin]\n",
41 | " fin.close()\n",
42 | "\n",
43 | " vertices = []; normals = []; faces = [];\n",
44 | " for line in lines:\n",
45 | " if line.startswith('v '):\n",
46 | " vertices.append(np.float32(line.split()[1:4]))\n",
47 | " elif line.startswith('vn '):\n",
48 | " normals.append(np.float32(line.split()[1:4]))\n",
49 | " elif line.startswith('f '):\n",
50 | " faces.append(np.int32([item.split('/')[0] for item in line.split()[1:4]]))\n",
51 | "\n",
52 | " mesh = dict()\n",
53 | " mesh['faces'] = np.vstack(faces)\n",
54 | " mesh['vertices'] = np.vstack(vertices)\n",
55 | "\n",
56 | " if (not no_normal) and (len(normals) > 0):\n",
57 | " assert len(normals) == len(vertices), 'ERROR: #vertices != #normals'\n",
58 | " mesh['normals'] = np.vstack(normals)\n",
59 | "\n",
60 | " return mesh\n",
61 | "\n",
62 | "def sparse_to_tuple(sparse_mx):\n",
63 | " \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n",
64 | " def to_tuple(mx):\n",
65 | " if not sp.isspmatrix_coo(mx):\n",
66 | " mx = mx.tocoo()\n",
67 | " coords = np.vstack((mx.row, mx.col)).transpose()\n",
68 | " values = mx.data\n",
69 | " shape = mx.shape\n",
70 | " return coords, values, shape\n",
71 | "\n",
72 | " if isinstance(sparse_mx, list):\n",
73 | " for i in range(len(sparse_mx)):\n",
74 | " sparse_mx[i] = to_tuple(sparse_mx[i])\n",
75 | " else:\n",
76 | " sparse_mx = to_tuple(sparse_mx)\n",
77 | "\n",
78 | " return sparse_mx\n",
79 | "\n",
80 | "\n",
81 | "def normalize_adj(adj):\n",
82 | " \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n",
83 | " adj = sp.coo_matrix(adj)\n",
84 | " rowsum = np.array(adj.sum(1))\n",
85 | " d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n",
86 | " d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n",
87 | " d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n",
88 | " return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n",
89 | "\n",
90 | "\n",
91 | "def preprocess_adj(adj):\n",
92 | " \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n",
93 | " adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n",
94 | " return sparse_to_tuple(adj_normalized)\n",
95 | "\n",
96 | "\n",
97 | "def construct_feed_dict(features, support, labels, labels_mask, placeholders):\n",
98 | " \"\"\"Construct feed dictionary.\"\"\"\n",
99 | " feed_dict = dict()\n",
100 | " feed_dict.update({placeholders['labels']: labels})\n",
101 | " feed_dict.update({placeholders['labels_mask']: labels_mask})\n",
102 | " feed_dict.update({placeholders['features']: features})\n",
103 | " feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})\n",
104 | " feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})\n",
105 | " return feed_dict\n",
106 | "\n",
107 | "\n",
108 | "def chebyshev_polynomials(adj, k):\n",
109 | " \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).\"\"\"\n",
110 | " print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n",
111 | "\n",
112 | " adj_normalized = normalize_adj(adj)\n",
113 | " laplacian = sp.eye(adj.shape[0]) - adj_normalized\n",
114 | " largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n",
115 | " scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n",
116 | "\n",
117 | " t_k = list()\n",
118 | " t_k.append(sp.eye(adj.shape[0]))\n",
119 | " t_k.append(scaled_laplacian)\n",
120 | "\n",
121 | " def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):\n",
122 | " s_lap = sp.csr_matrix(scaled_lap, copy=True)\n",
123 | " return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two\n",
124 | "\n",
125 | " for i in range(2, k+1):\n",
126 | " t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))\n",
127 | "\n",
128 | " return sparse_to_tuple(t_k)\n",
129 | "\n",
130 | "\n",
131 | "def dense_cheb(adj, k):\n",
132 | " \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).\"\"\"\n",
133 | " print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n",
134 | "\n",
135 | " adj_normalized = normalize_adj(adj)\n",
136 | " laplacian = sp.eye(adj.shape[0]) - adj_normalized\n",
137 | " largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n",
138 | " scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n",
139 | "\n",
140 | " t_k = list()\n",
141 | " t_k.append(sp.eye(adj.shape[0]))\n",
142 | " t_k.append(scaled_laplacian)\n",
143 | "\n",
144 | " def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):\n",
145 | " s_lap = sp.csr_matrix(scaled_lap, copy=True)\n",
146 | " return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two\n",
147 | "\n",
148 | " for i in range(2, k+1):\n",
149 | " t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))\n",
150 | "\n",
151 | " return t_k\n",
152 | "\n",
153 | "def unpool_face(old_faces, old_unique_edges, old_vertices):\n",
154 | " old_faces = np.array(old_faces)\n",
155 | " N = old_vertices.shape[0]\n",
156 | " mid_table = np.zeros((N,N), dtype=np.int32)\n",
157 | " new_edges = []\n",
158 | " new_faces = []\n",
159 | " for i, u in enumerate(old_unique_edges):\n",
160 | " mid_table[u[0], u[1]] = N+i\n",
161 | " mid_table[u[1], u[0]] = N+i\n",
162 | " new_edges.append([u[0], N+i])\n",
163 | " new_edges.append([N+i, u[1]])\n",
164 | " \n",
165 | " for i, f in enumerate(old_faces):\n",
166 | " f = np.sort(f)\n",
167 | " mid1 = mid_table[f[0], f[1]]\n",
168 | " mid2 = mid_table[f[0], f[2]]\n",
169 | " mid3 = mid_table[f[1], f[2]]\n",
170 | " \n",
171 | " new_faces.append([f[0], mid1, mid2])\n",
172 | " new_faces.append([f[1], mid1, mid3])\n",
173 | " new_faces.append([f[2], mid2, mid3])\n",
174 | " new_faces.append([mid1, mid2, mid3])\n",
175 | " \n",
176 | " new_edges.append([mid1, mid2])\n",
177 | " new_edges.append([mid2, mid3])\n",
178 | " new_edges.append([mid3, mid1])\n",
179 | " \n",
180 | " new_faces = np.array(new_faces, dtype=np.int32)\n",
181 | " new_edges = np.array(new_edges, dtype=np.int32)\n",
182 | " return new_edges, new_faces\n",
183 | "\n",
184 | "\n",
185 | "def write_obj(path, vertices, faces):\n",
186 | " with open(path, 'w') as o:\n",
187 | " for v in vertices:\n",
188 | " o.write('v {} {} {}\\n'.format(v[0], v[1], v[2]))\n",
189 | " for f in faces:\n",
190 | " o.write('f {} {} {}\\n'.format(f[0]+1, f[1]+1, f[2]+1))\n",
191 | " \n",
192 | "\n",
193 | "def cal_lap_index(mesh_neighbor):\n",
194 | " lap_index = np.zeros([mesh_neighbor.shape[0], 2 + 8]).astype(np.int32)\n",
195 | " for i, j in enumerate(mesh_neighbor):\n",
196 | " lenj = len(j)\n",
197 | " lap_index[i][0:lenj] = j\n",
198 | " lap_index[i][lenj:-2] = -1\n",
199 | " lap_index[i][-2] = i\n",
200 | " lap_index[i][-1] = lenj\n",
201 | " return lap_index"
202 | ]
203 | },
204 | {
205 | "cell_type": "code",
206 | "execution_count": 3,
207 | "metadata": {},
208 | "outputs": [],
209 | "source": [
210 | "# pkl = pickle.load(open('../Data/ellipsoid/info_ellipsoid.dat', 'rb'), encoding='bytes')"
211 | ]
212 | },
213 | {
214 | "cell_type": "code",
215 | "execution_count": 4,
216 | "metadata": {},
217 | "outputs": [],
218 | "source": [
219 | "info = {}\n",
220 | "info['coords'] = None\n",
221 | "info['support'] = {'stage1':None,'stage2':None,'stage3':None, 'stage4':None}\n",
222 | "info['unpool_idx'] = {'stage1_2':None,'stage2_3':None, 'stage3_4':None}\n",
223 | "info['lap_idx'] = {'stage1':None,'stage2':None,'stage3':None,'stage4':None}"
224 | ]
225 | },
226 | {
227 | "cell_type": "markdown",
228 | "metadata": {},
229 | "source": [
230 | "Simply load obj file created by Meshlab"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": 5,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": [
239 | "raw_mesh = load_obj('./init_obj/init1.obj',no_normal=True)"
240 | ]
241 | },
242 | {
243 | "cell_type": "markdown",
244 | "metadata": {},
245 | "source": [
246 | "Reload mesh using trimesh to get adjacent matrix, set `process=Flase` to preserve mesh vertices order"
247 | ]
248 | },
249 | {
250 | "cell_type": "code",
251 | "execution_count": 6,
252 | "metadata": {},
253 | "outputs": [],
254 | "source": [
255 | "mesh = trimesh.Trimesh(vertices=raw_mesh['vertices'], faces=(raw_mesh['faces']-1), process=False)"
256 | ]
257 | },
258 | {
259 | "cell_type": "code",
260 | "execution_count": 7,
261 | "metadata": {},
262 | "outputs": [],
263 | "source": [
264 | "assert np.all(raw_mesh['faces'] == mesh.faces+1)"
265 | ]
266 | },
267 | {
268 | "cell_type": "code",
269 | "execution_count": 8,
270 | "metadata": {},
271 | "outputs": [],
272 | "source": [
273 | "coords_1 = np.array(mesh.vertices, dtype=np.float32)\n",
274 | "info['coords'] = coords_1"
275 | ]
276 | },
277 | {
278 | "cell_type": "markdown",
279 | "metadata": {},
280 | "source": [
281 | "## Stage 1 auxiliary matrix"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": 9,
287 | "metadata": {},
288 | "outputs": [
289 | {
290 | "name": "stdout",
291 | "output_type": "stream",
292 | "text": [
293 | "Calculating Chebyshev polynomials up to order 1...\n"
294 | ]
295 | }
296 | ],
297 | "source": [
298 | "adj_1 = nx.adjacency_matrix(mesh.vertex_adjacency_graph, nodelist=range(len(coords_1)))\n",
299 | "cheb_1 = chebyshev_polynomials(adj_1,1)\n",
300 | "info['support']['stage1'] = cheb_1\n",
301 | "\n",
302 | "edges_1 = mesh.edges_unique\n",
303 | "edges_1 = edges_1[edges_1[:,1].argsort(kind='mergesort')]\n",
304 | "edges_1 = edges_1[edges_1[:,0].argsort(kind='mergesort')]\n",
305 | "info['unpool_idx']['stage1_2'] = edges_1\n",
306 | "\n",
307 | "lap_1 = cal_lap_index(mesh.vertex_neighbors)\n",
308 | "info['lap_idx']['stage1'] = lap_1"
309 | ]
310 | },
311 | {
312 | "cell_type": "markdown",
313 | "metadata": {},
314 | "source": [
315 | "## Stage 2 auxiliary matrix"
316 | ]
317 | },
318 | {
319 | "cell_type": "code",
320 | "execution_count": 10,
321 | "metadata": {},
322 | "outputs": [
323 | {
324 | "name": "stdout",
325 | "output_type": "stream",
326 | "text": [
327 | "Calculating Chebyshev polynomials up to order 1...\n"
328 | ]
329 | }
330 | ],
331 | "source": [
332 | "faces_1 = np.array(mesh.faces)\n",
333 | "\n",
334 | "edges_2, faces_2 = unpool_face(faces_1, edges_1, coords_1)\n",
335 | "\n",
336 | "tmp_1_2 = 0.5*(coords_1[info['unpool_idx']['stage1_2'][:,0]] + coords_1[info['unpool_idx']['stage1_2'][:,1]])\n",
337 | "coords_2 = np.vstack([coords_1, tmp_1_2])\n",
338 | "\n",
339 | "mesh2 = trimesh.Trimesh(vertices=coords_2, faces=faces_2, process=False)\n",
340 | "\n",
341 | "adj_2 = nx.adjacency_matrix(mesh2.vertex_adjacency_graph, nodelist=range(len(coords_2)))\n",
342 | "cheb_2 = chebyshev_polynomials(adj_2,1)\n",
343 | "info['support']['stage2'] = cheb_2\n",
344 | "\n",
345 | "edges_2 = edges_2[edges_2[:,1].argsort(kind='mergesort')]\n",
346 | "edges_2 = edges_2[edges_2[:,0].argsort(kind='mergesort')]\n",
347 | "info['unpool_idx']['stage2_3'] = edges_2\n",
348 | "\n",
349 | "lap_2 = cal_lap_index(mesh2.vertex_neighbors)\n",
350 | "info['lap_idx']['stage2'] = lap_2"
351 | ]
352 | },
353 | {
354 | "cell_type": "markdown",
355 | "metadata": {},
356 | "source": [
357 | "Save init2.obj, you can only save faces to get face2.obj"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": 11,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "write_obj('./init_obj/init2.obj', coords_2, faces_2)"
367 | ]
368 | },
369 | {
370 | "cell_type": "markdown",
371 | "metadata": {},
372 | "source": [
373 | "## Stage 3 auxiliary matrix"
374 | ]
375 | },
376 | {
377 | "cell_type": "code",
378 | "execution_count": 12,
379 | "metadata": {},
380 | "outputs": [
381 | {
382 | "name": "stdout",
383 | "output_type": "stream",
384 | "text": [
385 | "Calculating Chebyshev polynomials up to order 1...\n"
386 | ]
387 | }
388 | ],
389 | "source": [
390 | "edges_3, faces_3 = unpool_face(faces_2, edges_2, coords_2)\n",
391 | "\n",
392 | "tmp_2_3 = 0.5*(coords_2[info['unpool_idx']['stage2_3'][:,0]] + coords_2[info['unpool_idx']['stage2_3'][:,1]])\n",
393 | "coords_3 = np.vstack([coords_2, tmp_2_3])\n",
394 | "\n",
395 | "mesh3 = trimesh.Trimesh(vertices=coords_3, faces=faces_3, process=False)\n",
396 | "\n",
397 | "adj_3 = nx.adjacency_matrix(mesh3.vertex_adjacency_graph, nodelist=range(len(coords_3)))\n",
398 | "cheb_3 = chebyshev_polynomials(adj_3,1)\n",
399 | "info['support']['stage3'] = cheb_3\n",
400 | "\n",
401 | "edges_3 = edges_3[edges_3[:,1].argsort(kind='mergesort')]\n",
402 | "edges_3 = edges_3[edges_3[:,0].argsort(kind='mergesort')]\n",
403 | "info['unpool_idx']['stage3_4'] = edges_3\n",
404 | "\n",
405 | "lap_3 = cal_lap_index(mesh3.vertex_neighbors)\n",
406 | "info['lap_idx']['stage3'] = lap_3"
407 | ]
408 | },
409 | {
410 | "cell_type": "markdown",
411 | "metadata": {},
412 | "source": [
413 | "Save init3.obj, you can only save faces to get face3.obj"
414 | ]
415 | },
416 | {
417 | "cell_type": "code",
418 | "execution_count": 13,
419 | "metadata": {},
420 | "outputs": [],
421 | "source": [
422 | "write_obj('./init_obj/init3.obj', coords_3, faces_3)"
423 | ]
424 | },
425 | {
426 | "cell_type": "markdown",
427 | "metadata": {},
428 | "source": [
429 | "## Stage 4 auxiliary matrix"
430 | ]
431 | },
432 | {
433 | "cell_type": "code",
434 | "execution_count": 14,
435 | "metadata": {},
436 | "outputs": [
437 | {
438 | "name": "stdout",
439 | "output_type": "stream",
440 | "text": [
441 | "Calculating Chebyshev polynomials up to order 1...\n"
442 | ]
443 | }
444 | ],
445 | "source": [
446 | "edges_4, faces_4 = unpool_face(faces_3, edges_3, coords_3)\n",
447 | "\n",
448 | "tmp_3_4 = 0.5*(coords_3[info['unpool_idx']['stage3_4'][:,0]] + coords_3[info['unpool_idx']['stage3_4'][:,1]])\n",
449 | "coords_4 = np.vstack([coords_3, tmp_3_4])\n",
450 | "\n",
451 | "mesh4 = trimesh.Trimesh(vertices=coords_4, faces=faces_4, process=False)\n",
452 | "\n",
453 | "adj_4 = nx.adjacency_matrix(mesh4.vertex_adjacency_graph, nodelist=range(len(coords_4)))\n",
454 | "cheb_4 = chebyshev_polynomials(adj_4,1)\n",
455 | "info['support']['stage4'] = cheb_4\n",
456 | "\n",
457 | "edges_4 = edges_4[edges_4[:,1].argsort(kind='mergesort')]\n",
458 | "edges_4 = edges_4[edges_4[:,0].argsort(kind='mergesort')]\n",
459 | "info['unpool_idx']['stage4_5'] = edges_4\n",
460 | "\n",
461 | "lap_4 = cal_lap_index(mesh4.vertex_neighbors)\n",
462 | "info['lap_idx']['stage4'] = lap_4"
463 | ]
464 | },
465 | {
466 | "cell_type": "code",
467 | "execution_count": 15,
468 | "metadata": {},
469 | "outputs": [],
470 | "source": [
471 | "write_obj('./init_obj/init4.obj', coords_4, faces_4)"
472 | ]
473 | },
474 | {
475 | "cell_type": "markdown",
476 | "metadata": {},
477 | "source": [
478 | "## Dump .dat file"
479 | ]
480 | },
481 | {
482 | "cell_type": "code",
483 | "execution_count": 16,
484 | "metadata": {},
485 | "outputs": [],
486 | "source": [
487 | "dat = [info['coords'],\n",
488 | " info['support']['stage1'],\n",
489 | " info['support']['stage2'],\n",
490 | " info['support']['stage3'],\n",
491 | " info['support']['stage4'],\n",
492 | " [info['unpool_idx']['stage1_2'],\n",
493 | " info['unpool_idx']['stage2_3'],\n",
494 | " info['unpool_idx']['stage3_4']\n",
495 | " ],\n",
496 | " [np.zeros((1,4), dtype=np.int32)]*4,\n",
497 | " [np.zeros((1,4))]*4,\n",
498 | " [info['lap_idx']['stage1'],\n",
499 | " info['lap_idx']['stage2'],\n",
500 | " info['lap_idx']['stage3'],\n",
501 | " info['lap_idx']['stage4']\n",
502 | " ],\n",
503 | " ]\n",
504 | "pickle.dump(dat, open(\"./init_obj/pixel2mesh_aux_4stages.dat\",\"wb\"), protocol=2)"
505 | ]
506 | },
507 | {
508 | "cell_type": "code",
509 | "execution_count": null,
510 | "metadata": {},
511 | "outputs": [],
512 | "source": []
513 | }
514 | ],
515 | "metadata": {
516 | "kernelspec": {
517 | "display_name": "Python 3",
518 | "language": "python",
519 | "name": "python3"
520 | },
521 | "language_info": {
522 | "codemirror_mode": {
523 | "name": "ipython",
524 | "version": 3
525 | },
526 | "file_extension": ".py",
527 | "mimetype": "text/x-python",
528 | "name": "python",
529 | "nbconvert_exporter": "python",
530 | "pygments_lexer": "ipython3",
531 | "version": "3.7.6"
532 | }
533 | },
534 | "nbformat": 4,
535 | "nbformat_minor": 2
536 | }
537 |
--------------------------------------------------------------------------------
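A minimal sketch of loading the file dumped above (it is written with pickle protocol 2, so it also opens under Python 2; the indices follow the 4-stage layout built in this notebook):

# Sketch: load and inspect the auxiliary .dat file produced above.
import pickle

with open('./init_obj/pixel2mesh_aux_4stages.dat', 'rb') as f:
    dat = pickle.load(f)

coords = dat[0]        # initial mesh vertices, shape (N, 3)
supports = dat[1:5]    # Chebyshev supports for stages 1-4 (tuple representation)
unpool_idx = dat[5]    # edge lists for unpooling stage 1->2, 2->3, 3->4
# dat[6] and dat[7] are the zero pads mentioned at the top of the notebook
lap_idx = dat[8]       # Laplacian neighbor index tables for stages 1-4
print(coords.shape, len(unpool_idx), len(lap_idx))
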
/GenerateData/generate_data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Fri Oct 26 12:55:31 2018
4 |
5 | @author: nanyang wang
6 | """
7 |
8 | import os,sys
9 | import numpy as np
10 | import cv2
11 | import trimesh
12 | import sklearn.preprocessing
13 |
14 | def camera_info(param):
15 | theta = np.deg2rad(param[0])
16 | phi = np.deg2rad(param[1])
17 |
18 | camY = param[3]*np.sin(phi)
19 | temp = param[3]*np.cos(phi)
20 | camX = temp * np.cos(theta)
21 | camZ = temp * np.sin(theta)
22 | cam_pos = np.array([camX, camY, camZ])
23 |
24 | axisZ = cam_pos.copy()
25 | axisY = np.array([0,1,0])
26 | axisX = np.cross(axisY, axisZ)
27 | axisY = np.cross(axisZ, axisX)
28 |
29 | cam_mat = np.array([axisX, axisY, axisZ])
30 | cam_mat = sklearn.preprocessing.normalize(cam_mat, axis=1)
31 | return cam_mat, cam_pos
32 |
33 | if __name__ == '__main__':
34 |
35 | # 1 sampling
36 | obj_path = '1a0bc9ab92c915167ae33d942430658c/model.obj'
37 | mesh_list = trimesh.load_mesh(obj_path)
38 | if not isinstance(mesh_list, list):
39 | mesh_list = [mesh_list]
40 | area_sum = 0
41 | for mesh in mesh_list:
42 | area_sum += np.sum(mesh.area_faces)
43 |
44 | sample = np.zeros((0,3), dtype=np.float32)
45 | normal = np.zeros((0,3), dtype=np.float32)
46 | for mesh in mesh_list:
47 | number = int(round(16384*np.sum(mesh.area_faces)/area_sum))
48 | if number < 1:
49 | continue
50 | points, index = trimesh.sample.sample_surface_even(mesh, number)
51 | sample = np.append(sample, points, axis=0)
52 |
53 | triangles = mesh.triangles[index]
54 | pt1 = triangles[:,0,:]
55 | pt2 = triangles[:,1,:]
56 | pt3 = triangles[:,2,:]
57 | norm = np.cross(pt3-pt1, pt2-pt1)
58 | norm = sklearn.preprocessing.normalize(norm, axis=1)
59 | normal = np.append(normal, norm, axis=0)
60 |
61 |     # 2 transform to camera view
62 | position = sample * 0.57
63 |
64 | view_path = '1a0bc9ab92c915167ae33d942430658c/rendering/rendering_metadata.txt'
65 | cam_params = np.loadtxt(view_path)
66 | for index, param in enumerate(cam_params):
67 |         # camera transform
68 | cam_mat, cam_pos = camera_info(param)
69 |
70 | pt_trans = np.dot(position-cam_pos, cam_mat.transpose())
71 | nom_trans = np.dot(normal, cam_mat.transpose())
72 | train_data = np.hstack((pt_trans, nom_trans))
73 |
74 | img_path = os.path.join(os.path.split(view_path)[0], '%02d.png'%index)
75 | np.savetxt(img_path.replace('png','xyz'), train_data)
76 |
77 |         # project the points onto the image to visually verify the camera transform
78 | img = cv2.imread(img_path)
79 | img = cv2.resize(img, (224,224))
80 |
81 | X,Y,Z = pt_trans.T
82 |         F = 250  # focal length in pixels for the 224x224 image
83 | h = (-Y)/(-Z)*F + 224/2.0
84 | w = X/(-Z)*F + 224/2.0
85 | h = np.minimum(np.maximum(h, 0), 223)
86 | w = np.minimum(np.maximum(w, 0), 223)
87 | img[np.round(h).astype(int), np.round(w).astype(int), 2] = 0
88 | img[np.round(h).astype(int), np.round(w).astype(int), 1] = 255
89 | cv2.imwrite(img_path.replace('.png','_prj.png'), img)
90 |
--------------------------------------------------------------------------------
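One caveat for the sampling step above: trimesh's sample_surface_even uses rejection sampling, so it can return fewer than the requested number of points. A sketch (with a hypothetical sample_exact helper) of topping up with plain area-weighted sampling when exact counts matter:

# Sketch: guarantee exactly `number` samples per mesh by topping up
# with plain area-weighted sampling when rejection sampling falls short.
import numpy as np
import trimesh

def sample_exact(mesh, number):
    points, index = trimesh.sample.sample_surface_even(mesh, number)
    if len(points) < number:
        extra, extra_idx = trimesh.sample.sample_surface(mesh, number - len(points))
        points = np.vstack([points, extra])
        index = np.concatenate([index, extra_idx])
    return points, index
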
/GenerateData/init_obj/init1.obj:
--------------------------------------------------------------------------------
1 | v -0.1475 -0.1475 -0.876884
2 | v -0.1475 -0.161837 -0.845
3 | v -0.176174 -0.1475 -0.845
4 | v -0.1475 -0.164438 -0.7675
5 | v -0.181376 -0.1475 -0.7675
6 | v -0.1475 -0.1475 -0.723224
7 | v -0.07 -0.1475 -0.924942
8 | v -0.07 -0.149614 -0.9225
9 | v -0.077241 -0.1475 -0.9225
10 | v -0.07 -0.184463 -0.845
11 | v -0.07 -0.187064 -0.7675
12 | v -0.103966 -0.1475 -0.69
13 | v -0.07 -0.157416 -0.69
14 | v -0.07 -0.1475 -0.677584
15 | v 0.0075 -0.1475 -0.932452
16 | v 0.0075 -0.156116 -0.9225
17 | v 0.0075 -0.190965 -0.845
18 | v 0.0075 -0.193565 -0.7675
19 | v 0.0075 -0.163918 -0.69
20 | v 0.0075 -0.1475 -0.669443
21 | v 0.076892 -0.1475 -0.9225
22 | v 0.085 -0.1475 -0.920261
23 | v 0.085 -0.181342 -0.845
24 | v 0.085 -0.183943 -0.7675
25 | v 0.085 -0.154295 -0.69
26 | v 0.085 -0.1475 -0.681492
27 | v 0.1625 -0.1475 -0.863004
28 | v 0.1625 -0.155596 -0.845
29 | v 0.1625 -0.158196 -0.7675
30 | v 0.1625 -0.1475 -0.73954
31 | v 0.105455 -0.1475 -0.69
32 | v 0.177485 -0.1475 -0.845
33 | v 0.182298 -0.1475 -0.7675
34 | v -0.225 -0.07 -0.891343
35 | v -0.225 -0.10569 -0.845
36 | v -0.254431 -0.07 -0.845
37 | v -0.225 -0.110144 -0.7675
38 | v -0.258104 -0.07 -0.7675
39 | v -0.225 -0.07 -0.706228
40 | v -0.1475 -0.07 -0.951076
41 | v -0.1475 -0.112371 -0.9225
42 | v -0.19698 -0.07 -0.9225
43 | v -0.212584 -0.07 -0.69
44 | v -0.1475 -0.125733 -0.69
45 | v -0.1475 -0.07 -0.649254
46 | v -0.07 -0.07 -0.977209
47 | v -0.07 -0.07 -0.620924
48 | v 0.0075 -0.07 -0.984719
49 | v 0.0075 -0.07 -0.612784
50 | v 0.085 -0.145776 -0.9225
51 | v 0.085 -0.07 -0.973605
52 | v 0.085 -0.07 -0.624832
53 | v 0.1625 -0.07 -0.943866
54 | v 0.1625 -0.101681 -0.9225
55 | v 0.1625 -0.115043 -0.69
56 | v 0.1625 -0.07 -0.657069
57 | v 0.196739 -0.07 -0.9225
58 | v 0.24 -0.07 -0.870522
59 | v 0.24 -0.089655 -0.845
60 | v 0.24 -0.094109 -0.7675
61 | v 0.24 -0.07 -0.730702
62 | v 0.21118 -0.07 -0.69
63 | v 0.255336 -0.07 -0.845
64 | v 0.258812 -0.07 -0.7675
65 | v -0.225 0.0075 -0.920261
66 | v -0.272796 0.0075 -0.845
67 | v -0.276469 0.0075 -0.7675
68 | v -0.225 -0.033 -0.69
69 | v -0.234597 0.0075 -0.69
70 | v -0.225 0.0075 -0.681492
71 | v -0.1475 0.0075 -0.966095
72 | v -0.222987 0.0075 -0.9225
73 | v -0.1475 0.0075 -0.632973
74 | v -0.07 0.0075 -0.992229
75 | v -0.091494 0.0075 -0.6125
76 | v -0.07 -0.0299 -0.6125
77 | v -0.07 0.0075 -0.607334
78 | v 0.0075 0.0075 -0.999738
79 | v 0.0075 -0.06865 -0.6125
80 | v 0.0075 0.0075 -0.601982
81 | v 0.085 0.0075 -0.988624
82 | v 0.085 -0.0113 -0.6125
83 | v 0.085 0.0075 -0.609903
84 | v 0.1625 0.0075 -0.958886
85 | v 0.1625 0.0075 -0.640788
86 | v 0.094495 0.0075 -0.6125
87 | v 0.220807 0.0075 -0.9225
88 | v 0.24 0.0075 -0.89944
89 | v 0.24 0.0075 -0.696711
90 | v 0.235248 0.0075 -0.69
91 | v 0.272713 0.0075 -0.845
92 | v 0.276188 0.0075 -0.7675
93 | v -0.225 0.085 -0.877463
94 | v -0.245616 0.085 -0.845
95 | v -0.249289 0.085 -0.7675
96 | v -0.225 0.034865 -0.69
97 | v -0.225 0.085 -0.722544
98 | v -0.1475 0.085 -0.943866
99 | v -0.184497 0.085 -0.9225
100 | v -0.200101 0.085 -0.69
101 | v -0.1475 0.085 -0.657069
102 | v -0.07 0.085 -0.97
103 | v -0.07 0.03277 -0.6125
104 | v -0.07 0.085 -0.62874
105 | v 0.0075 0.085 -0.97751
106 | v 0.0075 0.058953 -0.6125
107 | v 0.0075 0.085 -0.620599
108 | v 0.085 0.085 -0.966395
109 | v 0.085 0.020203 -0.6125
110 | v 0.085 0.085 -0.632647
111 | v 0.1625 0.085 -0.936657
112 | v 0.1625 0.085 -0.664884
113 | v 0.185186 0.085 -0.9225
114 | v 0.24 0.085 -0.856642
115 | v 0.24 0.085 -0.747018
116 | v 0.199627 0.085 -0.69
117 | v 0.246996 0.085 -0.845
118 | v 0.250471 0.085 -0.7675
119 | v -0.225 0.10697 -0.845
120 | v -0.225 0.110884 -0.7675
121 | v -0.1475 0.112841 -0.9225
122 | v -0.1475 0.1625 -0.849123
123 | v -0.151208 0.1625 -0.845
124 | v -0.156409 0.1625 -0.7675
125 | v -0.1475 0.124583 -0.69
126 | v -0.1475 0.1625 -0.755855
127 | v -0.07 0.146894 -0.9225
128 | v -0.07 0.1625 -0.89944
129 | v -0.07 0.158636 -0.69
130 | v -0.07 0.1625 -0.696711
131 | v 0.0075 0.156679 -0.9225
132 | v 0.0075 0.1625 -0.913899
133 | v -0.0394 0.1625 -0.69
134 | v 0.0075 0.1625 -0.685074
135 | v 0.085 0.142197 -0.9225
136 | v 0.085 0.1625 -0.8925
137 | v 0.085 0.1625 -0.704868
138 | v 0.039189 0.1625 -0.69
139 | v 0.085 0.153939 -0.69
140 | v 0.1625 0.103447 -0.9225
141 | v 0.149293 0.1625 -0.845
142 | v 0.1625 0.155896 -0.845
143 | v 0.157121 0.1625 -0.7675
144 | v 0.1625 0.159811 -0.7675
145 | v 0.1625 0.115189 -0.69
146 | v 0.24 0.092879 -0.845
147 | v 0.24 0.096793 -0.7675
148 | v -0.1475 0.164216 -0.845
149 | v -0.1475 0.166623 -0.7675
150 | v -0.07 0.185155 -0.845
151 | v -0.07 0.187562 -0.7675
152 | v 0.0075 0.191172 -0.845
153 | v 0.0075 0.193579 -0.7675
154 | v 0.0075 0.166141 -0.69
155 | v 0.085 0.182267 -0.845
156 | v 0.085 0.184674 -0.7675
157 | f 1 2 3
158 | f 2 4 5
159 | f 3 2 5
160 | f 4 6 5
161 | f 7 8 9
162 | f 1 9 8
163 | f 1 8 2
164 | f 2 8 10
165 | f 10 11 2
166 | f 2 11 4
167 | f 12 6 4
168 | f 12 4 13
169 | f 13 4 11
170 | f 13 14 12
171 | f 7 15 16
172 | f 8 7 16
173 | f 16 17 8
174 | f 8 17 10
175 | f 17 18 10
176 | f 10 18 11
177 | f 18 19 11
178 | f 11 19 13
179 | f 13 19 20
180 | f 14 13 20
181 | f 16 15 21
182 | f 21 22 23
183 | f 21 23 16
184 | f 16 23 17
185 | f 23 24 17
186 | f 17 24 18
187 | f 24 25 18
188 | f 18 25 19
189 | f 19 25 26
190 | f 20 19 26
191 | f 22 27 28
192 | f 23 22 28
193 | f 28 29 23
194 | f 23 29 24
195 | f 30 31 25
196 | f 30 25 29
197 | f 29 25 24
198 | f 25 31 26
199 | f 28 27 32
200 | f 29 28 32
201 | f 33 29 32
202 | f 29 33 30
203 | f 34 35 36
204 | f 35 37 38
205 | f 36 35 38
206 | f 37 39 38
207 | f 40 41 42
208 | f 3 35 34
209 | f 1 3 34
210 | f 1 34 42
211 | f 1 42 41
212 | f 5 35 3
213 | f 37 35 5
214 | f 37 43 39
215 | f 5 43 37
216 | f 5 44 43
217 | f 5 6 44
218 | f 44 45 43
219 | f 9 41 40
220 | f 9 40 7
221 | f 7 40 46
222 | f 9 1 41
223 | f 12 44 6
224 | f 44 12 14
225 | f 44 14 45
226 | f 45 14 47
227 | f 15 7 48
228 | f 48 7 46
229 | f 20 49 14
230 | f 49 47 14
231 | f 50 21 15
232 | f 50 15 51
233 | f 51 15 48
234 | f 21 50 22
235 | f 26 52 20
236 | f 52 49 20
237 | f 51 53 54
238 | f 50 51 54
239 | f 54 22 50
240 | f 27 22 54
241 | f 30 55 31
242 | f 31 55 56
243 | f 31 56 26
244 | f 26 56 52
245 | f 54 53 57
246 | f 57 58 59
247 | f 54 57 59
248 | f 54 59 32
249 | f 54 32 27
250 | f 32 60 33
251 | f 59 60 32
252 | f 61 62 55
253 | f 60 61 55
254 | f 60 55 30
255 | f 60 30 33
256 | f 55 62 56
257 | f 59 58 63
258 | f 60 59 63
259 | f 64 60 63
260 | f 60 64 61
261 | f 65 34 36
262 | f 66 65 36
263 | f 36 38 66
264 | f 38 67 66
265 | f 39 68 69
266 | f 39 69 38
267 | f 38 69 67
268 | f 68 70 69
269 | f 71 40 42
270 | f 72 71 42
271 | f 42 65 72
272 | f 34 65 42
273 | f 43 68 39
274 | f 68 43 45
275 | f 68 45 70
276 | f 70 45 73
277 | f 46 40 74
278 | f 74 40 71
279 | f 75 47 76
280 | f 75 73 47
281 | f 73 45 47
282 | f 76 77 75
283 | f 48 46 78
284 | f 78 46 74
285 | f 49 76 47
286 | f 79 76 49
287 | f 76 79 80
288 | f 77 76 80
289 | f 51 48 81
290 | f 81 48 78
291 | f 52 79 49
292 | f 82 79 52
293 | f 79 82 83
294 | f 80 79 83
295 | f 53 51 84
296 | f 84 51 81
297 | f 82 85 86
298 | f 82 52 85
299 | f 52 56 85
300 | f 82 86 83
301 | f 87 57 53
302 | f 84 87 53
303 | f 58 87 88
304 | f 57 87 58
305 | f 89 62 61
306 | f 90 62 89
307 | f 62 90 85
308 | f 56 62 85
309 | f 91 63 58
310 | f 88 91 58
311 | f 63 91 64
312 | f 64 91 92
313 | f 64 92 89
314 | f 61 64 89
315 | f 93 65 66
316 | f 94 93 66
317 | f 66 67 94
318 | f 67 95 94
319 | f 96 97 95
320 | f 96 95 69
321 | f 69 95 67
322 | f 69 70 96
323 | f 98 71 72
324 | f 99 98 72
325 | f 72 93 99
326 | f 65 93 72
327 | f 96 100 97
328 | f 100 96 70
329 | f 100 70 101
330 | f 101 70 73
331 | f 74 71 102
332 | f 102 71 98
333 | f 103 73 75
334 | f 103 104 73
335 | f 104 101 73
336 | f 75 77 103
337 | f 78 74 105
338 | f 105 74 102
339 | f 104 106 107
340 | f 103 106 104
341 | f 106 103 77
342 | f 80 106 77
343 | f 81 78 108
344 | f 108 78 105
345 | f 107 109 110
346 | f 106 109 107
347 | f 109 106 80
348 | f 83 109 80
349 | f 84 81 111
350 | f 111 81 108
351 | f 86 110 109
352 | f 86 85 110
353 | f 85 112 110
354 | f 86 109 83
355 | f 113 87 84
356 | f 111 113 84
357 | f 88 113 114
358 | f 87 113 88
359 | f 115 90 89
360 | f 116 90 115
361 | f 90 116 112
362 | f 85 90 112
363 | f 117 91 88
364 | f 114 117 88
365 | f 91 117 92
366 | f 92 117 118
367 | f 92 118 115
368 | f 89 92 115
369 | f 93 94 119
370 | f 94 95 120
371 | f 119 94 120
372 | f 95 97 120
373 | f 98 99 121
374 | f 122 119 123
375 | f 121 119 122
376 | f 121 93 119
377 | f 121 99 93
378 | f 120 123 119
379 | f 124 123 120
380 | f 124 125 126
381 | f 120 125 124
382 | f 120 100 125
383 | f 120 97 100
384 | f 100 101 125
385 | f 102 98 121
386 | f 127 102 121
387 | f 121 128 127
388 | f 122 128 121
389 | f 126 129 130
390 | f 125 129 126
391 | f 129 125 101
392 | f 104 129 101
393 | f 105 102 127
394 | f 131 105 127
395 | f 127 132 131
396 | f 128 132 127
397 | f 129 133 130
398 | f 133 129 104
399 | f 133 104 134
400 | f 134 104 107
401 | f 108 105 131
402 | f 135 108 131
403 | f 131 136 135
404 | f 132 136 131
405 | f 137 138 139
406 | f 139 138 134
407 | f 139 134 110
408 | f 110 134 107
409 | f 111 108 135
410 | f 140 111 135
411 | f 141 135 136
412 | f 141 142 135
413 | f 142 140 135
414 | f 142 143 144
415 | f 141 143 142
416 | f 137 144 143
417 | f 137 139 144
418 | f 139 145 144
419 | f 145 139 110
420 | f 112 145 110
421 | f 113 111 140
422 | f 114 113 140
423 | f 114 140 146
424 | f 146 140 142
425 | f 146 142 147
426 | f 142 144 147
427 | f 116 115 147
428 | f 116 147 145
429 | f 145 147 144
430 | f 116 145 112
431 | f 117 114 146
432 | f 118 117 146
433 | f 147 118 146
434 | f 118 147 115
435 | f 122 123 148
436 | f 123 124 149
437 | f 148 123 149
438 | f 124 126 149
439 | f 128 122 148
440 | f 150 128 148
441 | f 150 148 151
442 | f 148 149 151
443 | f 151 149 126
444 | f 130 151 126
445 | f 132 128 150
446 | f 152 132 150
447 | f 152 150 153
448 | f 150 151 153
449 | f 130 133 154
450 | f 130 154 151
451 | f 151 154 153
452 | f 133 134 154
453 | f 136 132 152
454 | f 155 136 152
455 | f 155 152 156
456 | f 152 153 156
457 | f 138 137 156
458 | f 138 156 154
459 | f 154 156 153
460 | f 138 154 134
461 | f 141 136 155
462 | f 143 141 155
463 | f 156 143 155
464 | f 143 156 137
465 |
--------------------------------------------------------------------------------
/GenerateData/init_obj/init1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/init_obj/init1.png
--------------------------------------------------------------------------------
/GenerateData/init_obj/init2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/init_obj/init2.png
--------------------------------------------------------------------------------
/GenerateData/init_obj/init3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/GenerateData/init_obj/init3.png
--------------------------------------------------------------------------------
/GenerateData/upsample.mlx:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
204 | =======================================================================
205 | Apache Spark Subcomponents:
206 |
207 | The Apache Spark project contains subcomponents with separate copyright
208 | notices and license terms. Your use of the source code for the these
209 | subcomponents is subject to the terms and conditions of the following
210 | licenses.
211 |
212 |
213 | ========================================================================
214 | For heapq (pyspark/heapq3.py):
215 | ========================================================================
216 |
217 | See license/LICENSE-heapq.txt
218 |
219 | ========================================================================
220 | For SnapTree:
221 | ========================================================================
222 |
223 | See license/LICENSE-SnapTree.txt
224 |
225 | ========================================================================
226 | For jbcrypt:
227 | ========================================================================
228 |
229 | See license/LICENSE-jbcrypt.txt
230 |
231 | ========================================================================
232 | BSD-style licenses
233 | ========================================================================
234 |
235 | The following components are provided under a BSD-style license. See project link for details.
236 | The text of each license is also included at licenses/LICENSE-[project].txt.
237 |
238 | (BSD 3 Clause) netlib core (com.github.fommil.netlib:core:1.1.2 - https://github.com/fommil/netlib-java/core)
239 | (BSD 3 Clause) JPMML-Model (org.jpmml:pmml-model:1.2.7 - https://github.com/jpmml/jpmml-model)
240 | (BSD License) AntLR Parser Generator (antlr:antlr:2.7.7 - http://www.antlr.org/)
241 | (BSD License) ANTLR 4.5.2-1 (org.antlr:antlr4:4.5.2-1 - http://wwww.antlr.org/)
242 | (BSD licence) ANTLR ST4 4.0.4 (org.antlr:ST4:4.0.4 - http://www.stringtemplate.org)
243 | (BSD licence) ANTLR StringTemplate (org.antlr:stringtemplate:3.2.1 - http://www.stringtemplate.org)
244 | (BSD License) Javolution (javolution:javolution:5.5.1 - http://javolution.org)
245 | (BSD) JLine (jline:jline:0.9.94 - http://jline.sourceforge.net)
246 | (BSD) ParaNamer Core (com.thoughtworks.paranamer:paranamer:2.3 - http://paranamer.codehaus.org/paranamer)
247 | (BSD) ParaNamer Core (com.thoughtworks.paranamer:paranamer:2.6 - http://paranamer.codehaus.org/paranamer)
248 | (BSD 3 Clause) Scala (http://www.scala-lang.org/download/#License)
249 | (Interpreter classes (all .scala files in repl/src/main/scala
250 | except for Main.Scala, SparkHelper.scala and ExecutorClassLoader.scala),
251 | and for SerializableMapWrapper in JavaUtils.scala)
252 | (BSD-like) Scala Actors library (org.scala-lang:scala-actors:2.11.7 - http://www.scala-lang.org/)
253 | (BSD-like) Scala Compiler (org.scala-lang:scala-compiler:2.11.7 - http://www.scala-lang.org/)
254 | (BSD-like) Scala Compiler (org.scala-lang:scala-reflect:2.11.7 - http://www.scala-lang.org/)
255 | (BSD-like) Scala Library (org.scala-lang:scala-library:2.11.7 - http://www.scala-lang.org/)
256 | (BSD-like) Scalap (org.scala-lang:scalap:2.11.7 - http://www.scala-lang.org/)
257 | (BSD-style) scalacheck (org.scalacheck:scalacheck_2.11:1.10.0 - http://www.scalacheck.org)
258 | (BSD-style) spire (org.spire-math:spire_2.11:0.7.1 - http://spire-math.org)
259 | (BSD-style) spire-macros (org.spire-math:spire-macros_2.11:0.7.1 - http://spire-math.org)
260 | (New BSD License) Kryo (com.esotericsoftware:kryo:3.0.3 - https://github.com/EsotericSoftware/kryo)
261 | (New BSD License) MinLog (com.esotericsoftware:minlog:1.3.0 - https://github.com/EsotericSoftware/minlog)
262 | (New BSD license) Protocol Buffer Java API (com.google.protobuf:protobuf-java:2.5.0 - http://code.google.com/p/protobuf)
263 | (New BSD license) Protocol Buffer Java API (org.spark-project.protobuf:protobuf-java:2.4.1-shaded - http://code.google.com/p/protobuf)
264 | (The BSD License) Fortran to Java ARPACK (net.sourceforge.f2j:arpack_combined_all:0.1 - http://f2j.sourceforge.net)
265 | (The BSD License) xmlenc Library (xmlenc:xmlenc:0.52 - http://xmlenc.sourceforge.net)
266 | (The New BSD License) Py4J (net.sf.py4j:py4j:0.10.4 - http://py4j.sourceforge.net/)
267 | (Two-clause BSD-style license) JUnit-Interface (com.novocode:junit-interface:0.10 - http://github.com/szeiger/junit-interface/)
268 | (BSD licence) sbt and sbt-launch-lib.bash
269 | (BSD 3 Clause) d3.min.js (https://github.com/mbostock/d3/blob/master/LICENSE)
270 | (BSD 3 Clause) DPark (https://github.com/douban/dpark/blob/master/LICENSE)
271 | (BSD 3 Clause) CloudPickle (https://github.com/cloudpipe/cloudpickle/blob/master/LICENSE)
272 |
273 | ========================================================================
274 | MIT licenses
275 | ========================================================================
276 |
277 | The following components are provided under the MIT License. See project link for details.
278 | The text of each license is also included at licenses/LICENSE-[project].txt.
279 |
280 | (MIT License) JCL 1.1.1 implemented over SLF4J (org.slf4j:jcl-over-slf4j:1.7.5 - http://www.slf4j.org)
281 | (MIT License) JUL to SLF4J bridge (org.slf4j:jul-to-slf4j:1.7.5 - http://www.slf4j.org)
282 | (MIT License) SLF4J API Module (org.slf4j:slf4j-api:1.7.5 - http://www.slf4j.org)
283 | (MIT License) SLF4J LOG4J-12 Binding (org.slf4j:slf4j-log4j12:1.7.5 - http://www.slf4j.org)
284 | (MIT License) pyrolite (org.spark-project:pyrolite:2.0.1 - http://pythonhosted.org/Pyro4/)
285 | (MIT License) scopt (com.github.scopt:scopt_2.11:3.2.0 - https://github.com/scopt/scopt)
286 | (The MIT License) Mockito (org.mockito:mockito-core:1.9.5 - http://www.mockito.org)
287 | (MIT License) jquery (https://jquery.org/license/)
288 | (MIT License) AnchorJS (https://github.com/bryanbraun/anchorjs)
289 | (MIT License) graphlib-dot (https://github.com/cpettitt/graphlib-dot)
290 | (MIT License) dagre-d3 (https://github.com/cpettitt/dagre-d3)
291 | (MIT License) sorttable (https://github.com/stuartlangridge/sorttable)
292 | (MIT License) boto (https://github.com/boto/boto/blob/develop/LICENSE)
293 | (MIT License) datatables (http://datatables.net/license)
294 | (MIT License) mustache (https://github.com/mustache/mustache/blob/master/LICENSE)
295 | (MIT License) cookies (http://code.google.com/p/cookies/wiki/License)
296 | (MIT License) blockUI (http://jquery.malsup.com/block/)
297 | (MIT License) RowsGroup (http://datatables.net/license/mit)
298 | (MIT License) jsonFormatter (http://www.jqueryscript.net/other/jQuery-Plugin-For-Pretty-JSON-Formatting-jsonFormatter.html)
299 | (MIT License) modernizr (https://github.com/Modernizr/Modernizr/blob/master/LICENSE)
300 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Pixel2Mesh
2 | This repository contains the TensorFlow implementation for the following paper
3 |
4 | [Pixel2Mesh: Generating 3D Mesh Models from Single RGB Images (ECCV2018)](http://openaccess.thecvf.com/content_ECCV_2018/papers/Nanyang_Wang_Pixel2Mesh_Generating_3D_ECCV_2018_paper.pdf)
5 |
6 | Nanyang Wang, [Yinda Zhang](http://robots.princeton.edu/people/yindaz/), [Zhuwen Li](http://www.lizhuwen.com/), [Yanwei Fu](http://yanweifu.github.io/), [Wei Liu](http://www.ee.columbia.edu/~wliu/), [Yu-Gang Jiang](http://www.yugangjiang.info/).
7 |
8 | #### Citation
9 | If you use this code for your research, please consider citing:
10 |
11 | @inProceedings{wang2018pixel2mesh,
12 | title={Pixel2Mesh: Generating 3D Mesh Models from Single RGB Images},
13 | author={Nanyang Wang and Yinda Zhang and Zhuwen Li and Yanwei Fu and Wei Liu and Yu-Gang Jiang},
14 | booktitle={ECCV},
15 | year={2018}
16 | }
17 |
18 | # Try it on Colab
19 |
20 | Installing all the dependencies can be tricky, and you need a machine with a CUDA-enabled GPU. To get started quickly, you can try [this](https://colab.research.google.com/drive/13xkSkvPaF5GU6Wpf35nVHUdP77oBVHlT#scrollTo=xXxbMrF4fdZs) demo developed by [Mathias Gatti](https://github.com/mathigatti) on Google Colab.
21 |
22 | [](https://colab.research.google.com/drive/13xkSkvPaF5GU6Wpf35nVHUdP77oBVHlT#scrollTo=xXxbMrF4fdZs)
23 |
24 |
25 | # Project Page
26 | The project page is available at https://nywang16.github.io/p2m/index.html
27 |
28 | # Dependencies
29 | Requirements:
30 | * Python 2.7+ with NumPy and scikit-image
31 | * [Tensorflow (version 1.0+)](https://www.tensorflow.org/install/)
32 | * [TFLearn](http://tflearn.org/installation/)
33 |
34 | Our code has been tested with Python 2.7, **TensorFlow 1.3.0**, TFLearn 0.3.2, CUDA 8.0 on Ubuntu 14.04.
35 |
36 | # News
37 | - Nov. 8: we updated the script for generating the auxiliary data.
38 |
39 | # Running the demo
40 | ```
41 | git clone https://github.com/nywang16/Pixel2Mesh.git
42 | cd Pixel2Mesh/Data/
43 | ```
44 | Download the pre-trained model and unzip it into the `Data/` folder.
45 | * https://drive.google.com/file/d/1gD-dk-XrAa5mfrgdZSunjaS6pUUWsZgU/view?usp=sharing
46 | ```
47 | unzip checkpoint.zip
48 | ```
49 |
50 | #### Reconstructing shapes
51 | python demo.py --image Data/examples/plane.png
52 | Run the demo; the output mesh is saved to `Data/examples/plane.obj`.
53 |
54 | #### Input image, output mesh
55 | 
56 |
57 | # Installation
58 |
59 | If you use CD (Chamfer distance) or EMD (earth mover's distance) for training or evaluation, the CUDA implementations from [Fan et al.](https://github.com/fanhqme/PointSetGeneration) are included in `external/`.
60 |
61 | cd Pixel2Mesh/external/
62 |
63 | Modify the first three lines of the makefile to point to your `nvcc`, CUDA library, and TensorFlow include directories (a snippet for locating the TensorFlow paths follows below).
64 |
65 | make
66 |
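A quick way to locate the TensorFlow include directory for the `tensorflow` variable (a sketch; on TF >= 1.4, `tf.sysconfig.get_lib()` similarly locates the library directory):

```
import tensorflow as tf

# Prints a candidate value for the `tensorflow` variable in the makefile.
print(tf.sysconfig.get_include())
```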
67 |
68 | # Dataset
69 |
70 | We used the [ShapeNet](https://www.shapenet.org) dataset for 3D models, and rendered views from [3D-R2N2](https://github.com/chrischoy/3D-R2N2).
71 | When using the provided data, make sure to respect the ShapeNet [license](https://shapenet.org/terms).
72 |
73 | Below is the complete set of training data. Download it into the `Data/` folder.
74 |
75 | https://drive.google.com/open?id=131dH36qXCabym1JjSmEpSQZg4dmZVQid
76 |
77 |
78 | The training/testing split can be found in `Data/train_list.txt` and `Data/test_list.txt`.
79 |
80 | Each .dat file in the provided data contains:
81 | * The point cloud (with vertex normals) sampled from ShapeNet, transformed into camera coordinates using the camera parameters from the rendering dataset (see the loading sketch below).
82 |
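A minimal sketch for inspecting one training sample, assuming the same cPickle serialization used for `info_ellipsoid.dat` elsewhere in this repo (the path below is hypothetical; the exact record layout is defined by `p2m/fetcher.py`):

```
import cPickle as pickle

# Ground-truth point cloud with per-point normals, already in camera coordinates.
label = pickle.load(open('Data/some_model.dat', 'rb'))  # hypothetical path
print(label.shape)  # expected (N, 6): x, y, z, nx, ny, nz
```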
83 | **Input image, ground truth point cloud.**
84 |
85 | 
86 |
87 | # Training
88 | python train.py
89 | You can change the training data, learning rate, and other parameters by editing `train.py`.
90 |
91 | The total number of training epochs is 30; the learning rate is initialized to 3e-5 and drops to 1e-5 after 25 epochs.
92 |
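The schedule is a plain step decay. A minimal sketch (the actual flag handling lives in `train.py` and may differ):

```
def learning_rate(epoch):
    # 3e-5 for the first 25 epochs, then 1e-5 for the remaining 5
    return 3e-5 if epoch < 25 else 1e-5
```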
93 | # Evaluation
94 | The evaluation code has been released; please refer to `eval_testset.py` for details.
95 |
96 | Note that the 3D shapes were downscaled by a factor of 0.57 to generate the renderings. As a result, all numbers reported in the experiments use 0.57 × the raw shape for evaluation. This scale is probably related to the rendering process; we used the rendering data from the 3D-R2N2 paper, and the scale has been there ever since, for reasons unknown to us.
97 |
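Concretely, if you evaluate against raw ShapeNet shapes rather than the provided .dat files, apply the same factor first. A minimal sketch (the file path is hypothetical):

```
import numpy as np

raw = np.loadtxt('model.xyz')[:, :3]  # hypothetical raw sample: x, y, z (+ normals)
gt = 0.57 * raw                       # match the scale used to generate the renderings
```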
98 | # Statement
99 | This software is for research purposes only.
100 | Please contact us for commercial licensing. All rights are reserved.
101 |
102 | # Contact
103 | Nanyang Wang (nywang16 AT fudan.edu.cn)
104 |
105 | Yinda Zhang (yindaz AT cs.princeton.edu)
106 |
107 | Zhuwen Li (lzhuwen AT gmail.com)
108 |
109 | Yanwei Fu (yanweifu AT fudan.edu.cn)
110 |
111 | Yu-Gang Jiang (ygj AT fudan.edu.cn)
112 |
113 | # License
114 | Apache License version 2.0
115 |
--------------------------------------------------------------------------------
/demo.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import tensorflow as tf
18 | import cPickle as pickle
19 | from skimage import io,transform
20 | from p2m.api import GCN
21 | from p2m.utils import *
22 |
23 | # Set random seed
24 | seed = 1024
25 | np.random.seed(seed)
26 | tf.set_random_seed(seed)
27 |
28 | # Settings
29 | flags = tf.app.flags
30 | FLAGS = flags.FLAGS
31 | flags.DEFINE_string('image', 'Data/examples/plane.png', 'Testing image.')
32 | flags.DEFINE_float('learning_rate', 0., 'Initial learning rate.')
33 | flags.DEFINE_integer('hidden', 256, 'Number of units in hidden layer.')
34 | flags.DEFINE_integer('feat_dim', 963, 'Number of units in perceptual feature layer.')
35 | flags.DEFINE_integer('coord_dim', 3, 'Number of units in output layer.')
36 | flags.DEFINE_float('weight_decay', 5e-6, 'Weight decay for L2 loss.')
37 |
38 | # Define placeholders(dict) and model
39 | num_blocks = 3
40 | num_supports = 2
41 | placeholders = {
42 | 'features': tf.placeholder(tf.float32, shape=(None, 3)), # initial 3D coordinates
43 | 'img_inp': tf.placeholder(tf.float32, shape=(224, 224, 3)), # input image to network
44 | 'labels': tf.placeholder(tf.float32, shape=(None, 6)), # ground truth (point cloud with vertex normal)
45 | 'support1': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the first block
46 | 'support2': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the second block
47 | 'support3': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the third block
48 | 'faces': [tf.placeholder(tf.int32, shape=(None, 4)) for _ in range(num_blocks)], # helper for face loss (not used)
49 | 'edges': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks)], # helper for normal loss
50 | 'lape_idx': [tf.placeholder(tf.int32, shape=(None, 10)) for _ in range(num_blocks)], # helper for laplacian regularization
51 | 'pool_idx': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks-1)] # helper for graph unpooling
52 | }
53 | model = GCN(placeholders, logging=True)
54 |
55 | def load_image(img_path):
56 | img = io.imread(img_path)
57 | if img.shape[2] == 4:
58 | img[np.where(img[:,:,3]==0)] = 255
59 | img = transform.resize(img, (224,224))
60 | img = img[:,:,:3].astype('float32')
61 |
62 | return img
63 |
64 | # Load data, initialize session
65 | config=tf.ConfigProto()
66 | config.gpu_options.allow_growth=True
67 | config.allow_soft_placement=True
68 | sess = tf.Session(config=config)
69 | sess.run(tf.global_variables_initializer())
70 | model.load(sess)
71 |
72 | # Running the demo
73 | pkl = pickle.load(open('Data/ellipsoid/info_ellipsoid.dat', 'rb'))
74 | feed_dict = construct_feed_dict(pkl, placeholders)
75 |
76 | img_inp = load_image(FLAGS.image)
77 | feed_dict.update({placeholders['img_inp']: img_inp})
78 | feed_dict.update({placeholders['labels']: np.zeros([10,6])})
79 |
80 | vert = sess.run(model.output3, feed_dict=feed_dict)
81 | vert = np.hstack((np.full([vert.shape[0],1], 'v'), vert))  # prefix each row with 'v' (OBJ vertex lines)
82 | face = np.loadtxt('Data/ellipsoid/face3.obj', dtype='|S32')  # face list of the finest ellipsoid mesh
83 | mesh = np.vstack((vert, face))  # vertices first, then faces: a plain OBJ file
84 | pred_path = FLAGS.image.replace('.png', '.obj')
85 | np.savetxt(pred_path, mesh, fmt='%s', delimiter=' ')
86 |
87 | print 'Saved to', pred_path
88 |
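The file written above is a plain Wavefront OBJ: `v x y z` rows for the predicted vertices followed by the `f i j k` rows copied from `face3.obj`. A minimal sketch for sanity-checking the output (any mesh viewer, e.g. MeshLab, works as well):

```
# Count vertices and faces in the mesh written by demo.py.
verts = faces = 0
with open('Data/examples/plane.obj') as f:
    for line in f:
        if line.startswith('v '):    # vertex line: three float coordinates
            verts += 1
        elif line.startswith('f '):  # face line: three 1-based vertex indices
            faces += 1
print('%d vertices, %d faces' % (verts, faces))
```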
--------------------------------------------------------------------------------
/eval_testset.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import os, sys, numpy as np, cPickle as pickle
18 | import tensorflow as tf
19 | from p2m.models import GCN  # this repo's package is p2m (not pixel2mesh)
20 | from p2m.fetcher import *
21 | from p2m.chamfer import nn_distance  # CD op built from external/tf_nndistance
22 | sys.path.append('external')
23 | from tf_approxmatch import approx_match, match_cost
24 |
25 | # Settings
26 | flags = tf.app.flags
27 | FLAGS = flags.FLAGS
28 | flags.DEFINE_string('data_list', 'Data/test_list.txt', 'Data list path.')
29 | flags.DEFINE_float('learning_rate', 3e-5, 'Initial learning rate.')
30 | flags.DEFINE_integer('hidden', 192, 'Number of units in hidden layer.')
31 | flags.DEFINE_integer('feat_dim', 963, 'Number of units in perceptual feature layer.')
32 | flags.DEFINE_integer('coord_dim', 3, 'Number of units in output layer.')
33 | flags.DEFINE_float('weight_decay', 5e-6, 'Weight decay for L2 loss.')
34 |
35 | # Define placeholders(dict) and model
36 | num_blocks = 3
37 | num_supports = 2
38 | placeholders = {
39 | 'features': tf.placeholder(tf.float32, shape=(None, 3)), # initial 3D coordinates
40 | 'img_inp': tf.placeholder(tf.float32, shape=(224, 224, 3)), # input image to network
41 | 'labels': tf.placeholder(tf.float32, shape=(None, 6)), # ground truth (point cloud with vertex normal)
42 | 'support1': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the first block
43 | 'support2': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the second block
44 | 'support3': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)], # graph structure in the third block
45 | 'faces': [tf.placeholder(tf.int32, shape=(None, 4)) for _ in range(num_blocks)], # helper for face loss (not used)
46 | 'edges': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks)], # helper for normal loss
47 | 'lape_idx': [tf.placeholder(tf.int32, shape=(None, 10)) for _ in range(num_blocks)], # helper for laplacian regularization
48 | 'pool_idx': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks-1)] # helper for graph unpooling
49 | }
50 | model = GCN(placeholders, logging=True)
51 |
52 | # Construct feed dictionary
53 | def construct_feed_dict(pkl, placeholders):
54 | coord = pkl[0]
55 | pool_idx = pkl[4]
56 | faces = pkl[5]
57 | lape_idx = pkl[7]
58 | edges = []
59 | for i in range(1,4):
60 | adj = pkl[i][1]
61 | edges.append(adj[0])
62 |
63 | feed_dict = dict()
64 | feed_dict.update({placeholders['features']: coord})
65 | feed_dict.update({placeholders['edges'][i]: edges[i] for i in range(len(edges))})
66 | feed_dict.update({placeholders['faces'][i]: faces[i] for i in range(len(faces))})
67 | feed_dict.update({placeholders['pool_idx'][i]: pool_idx[i] for i in range(len(pool_idx))})
68 | feed_dict.update({placeholders['lape_idx'][i]: lape_idx[i] for i in range(len(lape_idx))})
69 | feed_dict.update({placeholders['support1'][i]: pkl[1][i] for i in range(len(pkl[1]))})
70 | feed_dict.update({placeholders['support2'][i]: pkl[2][i] for i in range(len(pkl[2]))})
71 | feed_dict.update({placeholders['support3'][i]: pkl[3][i] for i in range(len(pkl[3]))})
72 | return feed_dict
73 |
74 | def f_score(label, predict, dist_label, dist_pred, threshold):
75 | num_label = label.shape[0]
76 | num_predict = predict.shape[0]
77 |
78 | f_scores = []
79 | for i in range(len(threshold)):
80 | num = len(np.where(dist_label <= threshold[i])[0])
81 | recall = 100.0 * num / num_label
82 | num = len(np.where(dist_pred <= threshold[i])[0])
83 | precision = 100.0 * num / num_predict
84 |
85 | f_scores.append((2*precision*recall)/(precision+recall+1e-8))
86 | return np.array(f_scores)
87 |
88 | # Load data
89 | data = DataFetcher(FLAGS.data_list)
90 | data.setDaemon(True)  # daemon thread: the fetcher exits with the main process
91 | data.start()
92 | train_number = data.number
93 |
94 | # Initialize session
95 | # xyz1:dataset_points * 3, xyz2:query_points * 3
96 | xyz1=tf.placeholder(tf.float32,shape=(None, 3))
97 | xyz2=tf.placeholder(tf.float32,shape=(None, 3))
98 | # chamfer distance
99 | dist1,idx1,dist2,idx2 = nn_distance(xyz1, xyz2)
100 | # earth mover distance, notice that emd_dist return the sum of all distance
101 | match = approx_match(xyz1, xyz2)
102 | emd_dist = match_cost(xyz1, xyz2, match)
103 |
104 | config=tf.ConfigProto()
105 | config.gpu_options.allow_growth=True
106 | config.allow_soft_placement=True
107 | sess = tf.Session(config=config)
108 | sess.run(tf.global_variables_initializer())
109 | model.load(sess)
110 |
111 | # Construct feed dictionary
112 | pkl = pickle.load(open('Data/ellipsoid/info_ellipsoid.dat', 'rb'))
113 | feed_dict = construct_feed_dict(pkl, placeholders)
114 |
115 | ###
116 | class_name = {'02828884':'bench','03001627':'chair','03636649':'lamp','03691459':'speaker','04090263':'firearm','04379243':'table','04530566':'watercraft','02691156':'plane','02933112':'cabinet','02958343':'car','03211117':'monitor','04256520':'couch','04401088':'cellphone'}
117 | model_number = {i:0 for i in class_name}
118 | sum_f = {i:0 for i in class_name}
119 | sum_cd = {i:0 for i in class_name}
120 | sum_emd = {i:0 for i in class_name}
121 |
122 | for iters in range(train_number):
123 | # Fetch training data
124 | img_inp, label, model_id = data.fetch()
125 | feed_dict.update({placeholders['img_inp']: img_inp})
126 | feed_dict.update({placeholders['labels']: label})
127 | # Training step
128 | predict = sess.run(model.output3, feed_dict=feed_dict)
129 |
130 | label = label[:, :3]
131 | d1,i1,d2,i2,emd = sess.run([dist1,idx1,dist2,idx2, emd_dist], feed_dict={xyz1:label,xyz2:predict})
132 | cd = np.mean(d1) + np.mean(d2)
133 |
134 | class_id = model_id.split('_')[0]
135 | model_number[class_id] += 1.0
136 |
137 | sum_f[class_id] += f_score(label,predict,d1,d2,[0.0001, 0.0002])
138 | sum_cd[class_id] += cd # cd is the mean of all distance
139 | sum_emd[class_id] += emd[0] # emd is the sum of all distance
140 | print 'processed number', iters
141 |
142 | log = open('record_evaluation.txt', 'a')
143 | for item in model_number:
144 | number = model_number[item] + 1e-8
145 | f = sum_f[item] / number
146 | cd = (sum_cd[item] / number) * 1000 #cd is the mean of all distance, cd is L2
147 | emd = (sum_emd[item] / number) * 0.01 #emd is the sum of all distance, emd is L1
148 | print class_name[item], int(number), f, cd, emd
149 | print >> log, class_name[item], int(number), f, cd, emd
150 | log.close()
151 | sess.close()
152 | data.shutdown()
153 | print 'Testing Finished!'
154 |
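For reference, a tiny check of the `f_score` computation above with toy numbers (hypothetical distances, threshold 1e-4): recall counts ground-truth points within the threshold of the prediction, precision the converse, and F = 2PR/(P+R).

```
import numpy as np

dist_label = np.array([5e-5, 2e-4])  # GT -> prediction distances; recall = 50%
dist_pred = np.array([5e-5, 8e-5])   # prediction -> GT distances; precision = 100%
recall = 100.0 * np.sum(dist_label <= 1e-4) / dist_label.size
precision = 100.0 * np.sum(dist_pred <= 1e-4) / dist_pred.size
print(2 * precision * recall / (precision + recall + 1e-8))  # ~66.67
```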
--------------------------------------------------------------------------------
/external/approxmatch.cpp:
--------------------------------------------------------------------------------
1 | #include <cstdio>
2 | #include <ctime>
3 | #include <cstring>
4 | #include <cmath>
5 | #include <cstdlib>
6 | #include <vector>
7 | #include <algorithm>
8 | using namespace std;
9 | float randomf(){
10 | return (rand()+0.5)/(RAND_MAX+1.0);
11 | }
12 | static double get_time(){
13 | timespec tp;
14 | clock_gettime(CLOCK_MONOTONIC,&tp);
15 | return tp.tv_sec+tp.tv_nsec*1e-9;
16 | }
17 | void approxmatch_cpu(int b,int n,int m,float * xyz1,float * xyz2,float * match){
18 | for (int i=0;i saturatedl(n,double(factorl)),saturatedr(m,double(factorr));
22 | vector weight(n*m);
23 | for (int j=0;j=-2;j--){
26 | //printf("i=%d j=%d\n",i,j);
27 | double level=-powf(4.0,j);
28 | if (j==-2)
29 | level=0;
30 | for (int k=0;k ss(m,1e-9);
42 | for (int k=0;k ss2(m,0);
59 | for (int k=0;k1){
154 | printf("bad i=%d j=%d k=%d u=%f\n",i,j,k,u);
155 | }
156 | s+=u;
157 | }
158 | if (s<0.999 || s>1.001){
159 | printf("bad i=%d j=%d s=%f\n",i,j,s);
160 | }
161 | }
162 | for (int j=0;j4.001){
168 | printf("bad i=%d j=%d s=%f\n",i,j,s);
169 | }
170 | }
171 | }*/
172 | /*for (int j=0;j1e-3)
222 | if (fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k]))>1e-2){
223 | printf("i %d j %d k %d m %f %f\n",i,j,k,match[i*n*m+k*n+j],match_cpu[i*n*m+j*m+k]);
224 | flag=false;
225 | break;
226 | }
227 | //emax=max(emax,fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k])));
228 | emax+=fabs(double(match[i*n*m+k*n+j]-match_cpu[i*n*m+j*m+k]));
229 | }
230 | }
231 | printf("emax_match=%f\n",emax/2/n/m);
232 | emax=0;
233 | for (int i=0;i<2;i++)
234 | emax+=fabs(double(cost[i]-cost_cpu[i]));
235 | printf("emax_cost=%f\n",emax/2);
236 | emax=0;
237 | for (int i=0;i<2*m*3;i++)
238 | emax+=fabs(double(grad[i]-grad_cpu[i]));
239 | //for (int i=0;i<3*m;i++){
240 | //if (grad[i]!=0)
241 | //printf("i %d %f %f\n",i,grad[i],grad_cpu[i]);
242 | //}
243 | printf("emax_grad=%f\n",emax/(2*m*3));
244 |
245 | cudaFree(xyz1_g);
246 | cudaFree(xyz2_g);
247 | cudaFree(match_g);
248 | cudaFree(cost_g);
249 | cudaFree(grad_g);
250 |
251 | return 0;
252 | }
253 |
254 |
--------------------------------------------------------------------------------
/external/approxmatch.cu:
--------------------------------------------------------------------------------
1 | //n<=4096, m<=1024
2 | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match){
3 | const int MaxN=4096,MaxM=1024;
4 | __shared__ float remainL[MaxN],remainR[MaxM],ratioR[MaxM],ratioL[MaxN];
5 | __shared__ int listR[MaxM],lc;
6 | float multiL,multiR;
7 | if (n>=m){
8 | multiL=1;
9 | multiR=n/m;
10 | }else{
11 | multiL=m/n;
12 | multiR=1;
13 | }
14 | for (int i=blockIdx.x;i=-2;j--){
23 | float level=-powf(4.0f,j);
24 | if (j==-2){
25 | level=0;
26 | }
27 | if (threadIdx.x==0){
28 | lc=0;
29 | for (int k=0;k0)
31 | listR[lc++]=k;
32 | }
33 | __syncthreads();
34 | int _lc=lc;
35 | for (int k=threadIdx.x;k>>(b,n,m,xyz1,xyz2,match);
94 | }
95 | __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
96 | __shared__ float allsum[512];
97 | const int Block=256;
98 | __shared__ float buf[Block*3];
99 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,out);
138 | }
139 | __global__ void matchcostgrad(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * grad2){
140 | __shared__ float sum_grad[256*3];
141 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,grad2);
182 | }
183 |
184 |
--------------------------------------------------------------------------------
/external/makefile:
--------------------------------------------------------------------------------
1 | nvcc = /usr/local/cuda-8.0/bin/nvcc
2 | cudalib = /usr/local/cuda-8.0/lib64
3 | tensorflow = /home/wnylol/tools/anaconda2/envs/tensorflow/lib/python2.7/site-packages/tensorflow/include
4 |
5 | all: tf_approxmatch_so.so tf_approxmatch_g.cu.o tf_nndistance_so.so tf_nndistance_g.cu.o
6 |
7 |
8 | tf_approxmatch_so.so: tf_approxmatch_g.cu.o tf_approxmatch.cpp
9 | g++ -std=c++11 tf_approxmatch.cpp tf_approxmatch_g.cu.o -o tf_approxmatch_so.so -shared -fPIC -I $(tensorflow) -lcudart -L $(cudalib) -O2 -D_GLIBCXX_USE_CXX11_ABI=0
10 |
11 |
12 | tf_approxmatch_g.cu.o: tf_approxmatch_g.cu
13 | $(nvcc) -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11 -c -o tf_approxmatch_g.cu.o tf_approxmatch_g.cu -I $(tensorflow) -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -O2
14 |
15 |
16 | tf_nndistance_so.so: tf_nndistance_g.cu.o tf_nndistance.cpp
17 | g++ -std=c++11 tf_nndistance.cpp tf_nndistance_g.cu.o -o tf_nndistance_so.so -shared -fPIC -I $(tensorflow) -lcudart -L $(cudalib) -O2 -D_GLIBCXX_USE_CXX11_ABI=0
18 |
19 |
20 | tf_nndistance_g.cu.o: tf_nndistance_g.cu
21 | $(nvcc) -D_GLIBCXX_USE_CXX11_ABI=0 -std=c++11 -c -o tf_nndistance_g.cu.o tf_nndistance_g.cu -I $(tensorflow) -DGOOGLE_CUDA=1 -x cu -Xcompiler -fPIC -O2
22 |
23 |
24 | clean:
25 | rm tf_approxmatch_so.so
26 | rm tf_nndistance_so.so
27 | rm *.cu.o
28 |
--------------------------------------------------------------------------------
/external/tf_approxmatch.cpp:
--------------------------------------------------------------------------------
1 | #include "tensorflow/core/framework/op.h"
2 | #include "tensorflow/core/framework/op_kernel.h"
3 | #include <cstring>
4 | #include <cmath>
5 | #include <vector>
6 | using namespace tensorflow;
7 | REGISTER_OP("ApproxMatch")
8 | .Input("xyz1: float32")
9 | .Input("xyz2: float32")
10 | .Output("match: float32");
11 | REGISTER_OP("MatchCost")
12 | .Input("xyz1: float32")
13 | .Input("xyz2: float32")
14 | .Input("match: float32")
15 | .Output("cost: float32");
16 | REGISTER_OP("MatchCostGrad")
17 | .Input("xyz1: float32")
18 | .Input("xyz2: float32")
19 | .Input("match: float32")
20 | .Output("grad1: float32")
21 | .Output("grad2: float32");
22 |
23 | void approxmatch_cpu(int b,int n,int m,const float * xyz1,const float * xyz2,float * match){
24 | for (int i=0;i saturatedl(n,double(factorl)),saturatedr(m,double(factorr));
28 | std::vector weight(n*m);
29 | for (int j=0;j=-2;j--){
32 | //printf("i=%d j=%d\n",i,j);
33 | double level=-powf(4.0,j);
34 | if (j==-2)
35 | level=0;
36 | for (int k=0;k ss(m,1e-9);
48 | for (int k=0;k ss2(m,0);
65 | for (int k=0;kinput(0);
150 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("ApproxMatch expects (batch_size,num_points,3) xyz1 shape"));
151 | auto xyz1_flat=xyz1_tensor.flat<float>();
152 | const float * xyz1=&(xyz1_flat(0));
153 | int b=xyz1_tensor.shape().dim_size(0);
154 | int n=xyz1_tensor.shape().dim_size(1);
155 | //OP_REQUIRES(context,n<=4096,errors::InvalidArgument("ApproxMatch handles at most 4096 dataset points"));
156 |
157 | const Tensor& xyz2_tensor=context->input(1);
158 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("ApproxMatch expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
159 | int m=xyz2_tensor.shape().dim_size(1);
160 | //OP_REQUIRES(context,m<=1024,errors::InvalidArgument("ApproxMatch handles at most 1024 query points"));
161 | auto xyz2_flat=xyz2_tensor.flat<float>();
162 | const float * xyz2=&(xyz2_flat(0));
163 | Tensor * match_tensor=NULL;
164 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m,n},&match_tensor));
165 | auto match_flat=match_tensor->flat<float>();
166 | float * match=&(match_flat(0));
167 | Tensor temp_tensor;
168 | OP_REQUIRES_OK(context,context->allocate_temp(DataTypeToEnum<float>::value,TensorShape{b,(n+m)*2},&temp_tensor));
169 | auto temp_flat=temp_tensor.flat<float>();
170 | float * temp=&(temp_flat(0));
171 | approxmatchLauncher(b,n,m,xyz1,xyz2,match,temp);
172 | }
173 | };
174 | REGISTER_KERNEL_BUILDER(Name("ApproxMatch").Device(DEVICE_GPU), ApproxMatchGpuOp);
175 | class ApproxMatchOp: public OpKernel{
176 | public:
177 | explicit ApproxMatchOp(OpKernelConstruction* context):OpKernel(context){}
178 | void Compute(OpKernelContext * context)override{
179 | const Tensor& xyz1_tensor=context->input(0);
180 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("ApproxMatch expects (batch_size,num_points,3) xyz1 shape"));
181 | auto xyz1_flat=xyz1_tensor.flat<float>();
182 | const float * xyz1=&(xyz1_flat(0));
183 | int b=xyz1_tensor.shape().dim_size(0);
184 | int n=xyz1_tensor.shape().dim_size(1);
185 | //OP_REQUIRES(context,n<=4096,errors::InvalidArgument("ApproxMatch handles at most 4096 dataset points"));
186 |
187 | const Tensor& xyz2_tensor=context->input(1);
188 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("ApproxMatch expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
189 | int m=xyz2_tensor.shape().dim_size(1);
190 | //OP_REQUIRES(context,m<=1024,errors::InvalidArgument("ApproxMatch handles at most 1024 query points"));
191 | auto xyz2_flat=xyz2_tensor.flat<float>();
192 | const float * xyz2=&(xyz2_flat(0));
193 | Tensor * match_tensor=NULL;
194 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,m,n},&match_tensor));
195 | auto match_flat=match_tensor->flat<float>();
196 | float * match=&(match_flat(0));
197 | approxmatch_cpu(b,n,m,xyz1,xyz2,match);
198 | }
199 | };
200 | REGISTER_KERNEL_BUILDER(Name("ApproxMatch").Device(DEVICE_CPU), ApproxMatchOp);
201 | class MatchCostGpuOp: public OpKernel{
202 | public:
203 | explicit MatchCostGpuOp(OpKernelConstruction* context):OpKernel(context){}
204 | void Compute(OpKernelContext * context)override{
205 | const Tensor& xyz1_tensor=context->input(0);
206 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("MatchCost expects (batch_size,num_points,3) xyz1 shape"));
207 | auto xyz1_flat=xyz1_tensor.flat<float>();
208 | const float * xyz1=&(xyz1_flat(0));
209 | int b=xyz1_tensor.shape().dim_size(0);
210 | int n=xyz1_tensor.shape().dim_size(1);
211 |
212 | const Tensor& xyz2_tensor=context->input(1);
213 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("MatchCost expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
214 | int m=xyz2_tensor.shape().dim_size(1);
215 | auto xyz2_flat=xyz2_tensor.flat<float>();
216 | const float * xyz2=&(xyz2_flat(0));
217 |
218 | const Tensor& match_tensor=context->input(2);
219 | OP_REQUIRES(context,match_tensor.dims()==3 && match_tensor.shape().dim_size(0)==b && match_tensor.shape().dim_size(1)==m && match_tensor.shape().dim_size(2)==n,errors::InvalidArgument("MatchCost expects (batch_size,#query,#dataset) match shape"));
220 | auto match_flat=match_tensor.flat<float>();
221 | const float * match=&(match_flat(0));
222 |
223 | Tensor * cost_tensor=NULL;
224 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b},&cost_tensor));
225 | auto cost_flat=cost_tensor->flat<float>();
226 | float * cost=&(cost_flat(0));
227 | matchcostLauncher(b,n,m,xyz1,xyz2,match,cost);
228 | }
229 | };
230 | REGISTER_KERNEL_BUILDER(Name("MatchCost").Device(DEVICE_GPU), MatchCostGpuOp);
231 | class MatchCostOp: public OpKernel{
232 | public:
233 | explicit MatchCostOp(OpKernelConstruction* context):OpKernel(context){}
234 | void Compute(OpKernelContext * context)override{
235 | const Tensor& xyz1_tensor=context->input(0);
236 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("MatchCost expects (batch_size,num_points,3) xyz1 shape"));
237 | auto xyz1_flat=xyz1_tensor.flat<float>();
238 | const float * xyz1=&(xyz1_flat(0));
239 | int b=xyz1_tensor.shape().dim_size(0);
240 | int n=xyz1_tensor.shape().dim_size(1);
241 |
242 | const Tensor& xyz2_tensor=context->input(1);
243 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("MatchCost expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
244 | int m=xyz2_tensor.shape().dim_size(1);
245 | auto xyz2_flat=xyz2_tensor.flat<float>();
246 | const float * xyz2=&(xyz2_flat(0));
247 |
248 | const Tensor& match_tensor=context->input(2);
249 | OP_REQUIRES(context,match_tensor.dims()==3 && match_tensor.shape().dim_size(0)==b && match_tensor.shape().dim_size(1)==m && match_tensor.shape().dim_size(2)==n,errors::InvalidArgument("MatchCost expects (batch_size,#query,#dataset) match shape"));
250 | auto match_flat=match_tensor.flat<float>();
251 | const float * match=&(match_flat(0));
252 |
253 | Tensor * cost_tensor=NULL;
254 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b},&cost_tensor));
255 | auto cost_flat=cost_tensor->flat<float>();
256 | float * cost=&(cost_flat(0));
257 | matchcost_cpu(b,n,m,xyz1,xyz2,match,cost);
258 | }
259 | };
260 | REGISTER_KERNEL_BUILDER(Name("MatchCost").Device(DEVICE_CPU), MatchCostOp);
261 |
262 | class MatchCostGradGpuOp: public OpKernel{
263 | public:
264 | explicit MatchCostGradGpuOp(OpKernelConstruction* context):OpKernel(context){}
265 | void Compute(OpKernelContext * context)override{
266 | const Tensor& xyz1_tensor=context->input(0);
267 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("MatchCostGrad expects (batch_size,num_points,3) xyz1 shape"));
268 | auto xyz1_flat=xyz1_tensor.flat<float>();
269 | const float * xyz1=&(xyz1_flat(0));
270 | int b=xyz1_tensor.shape().dim_size(0);
271 | int n=xyz1_tensor.shape().dim_size(1);
272 |
273 | const Tensor& xyz2_tensor=context->input(1);
274 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("MatchCostGrad expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
275 | int m=xyz2_tensor.shape().dim_size(1);
276 | auto xyz2_flat=xyz2_tensor.flat<float>();
277 | const float * xyz2=&(xyz2_flat(0));
278 |
279 | const Tensor& match_tensor=context->input(2);
280 | OP_REQUIRES(context,match_tensor.dims()==3 && match_tensor.shape().dim_size(0)==b && match_tensor.shape().dim_size(1)==m && match_tensor.shape().dim_size(2)==n,errors::InvalidArgument("MatchCost expects (batch_size,#query,#dataset) match shape"));
281 | auto match_flat=match_tensor.flat<float>();
282 | const float * match=&(match_flat(0));
283 |
284 | Tensor * grad1_tensor=NULL;
285 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&grad1_tensor));
286 | auto grad1_flat=grad1_tensor->flat<float>();
287 | float * grad1=&(grad1_flat(0));
288 | Tensor * grad2_tensor=NULL;
289 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,m,3},&grad2_tensor));
290 | auto grad2_flat=grad2_tensor->flat<float>();
291 | float * grad2=&(grad2_flat(0));
292 | matchcostgradLauncher(b,n,m,xyz1,xyz2,match,grad1,grad2);
293 | }
294 | };
295 | REGISTER_KERNEL_BUILDER(Name("MatchCostGrad").Device(DEVICE_GPU), MatchCostGradGpuOp);
296 | class MatchCostGradOp: public OpKernel{
297 | public:
298 | explicit MatchCostGradOp(OpKernelConstruction* context):OpKernel(context){}
299 | void Compute(OpKernelContext * context)override{
300 | const Tensor& xyz1_tensor=context->input(0);
301 | OP_REQUIRES(context,xyz1_tensor.dims()==3 && xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("MatchCostGrad expects (batch_size,num_points,3) xyz1 shape"));
302 | auto xyz1_flat=xyz1_tensor.flat<float>();
303 | const float * xyz1=&(xyz1_flat(0));
304 | int b=xyz1_tensor.shape().dim_size(0);
305 | int n=xyz1_tensor.shape().dim_size(1);
306 |
307 | const Tensor& xyz2_tensor=context->input(1);
308 | OP_REQUIRES(context,xyz2_tensor.dims()==3 && xyz2_tensor.shape().dim_size(2)==3 && xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("MatchCostGrad expects (batch_size,num_points,3) xyz2 shape, and batch_size must match"));
309 | int m=xyz2_tensor.shape().dim_size(1);
310 | auto xyz2_flat=xyz2_tensor.flat<float>();
311 | const float * xyz2=&(xyz2_flat(0));
312 |
313 | const Tensor& match_tensor=context->input(2);
314 | OP_REQUIRES(context,match_tensor.dims()==3 && match_tensor.shape().dim_size(0)==b && match_tensor.shape().dim_size(1)==m && match_tensor.shape().dim_size(2)==n,errors::InvalidArgument("MatchCostGrad expects (batch_size,#query,#dataset) match shape"));
315 | auto match_flat=match_tensor.flat<float>();
316 | const float * match=&(match_flat(0));
317 |
318 | Tensor * grad1_tensor=NULL;
319 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&grad1_tensor));
320 | auto grad1_flat=grad1_tensor->flat<float>();
321 | float * grad1=&(grad1_flat(0));
322 | Tensor * grad2_tensor=NULL;
323 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,m,3},&grad2_tensor));
324 | auto grad2_flat=grad2_tensor->flat<float>();
325 | float * grad2=&(grad2_flat(0));
326 | matchcostgrad_cpu(b,n,m,xyz1,xyz2,match,grad1,grad2);
327 | }
328 | };
329 | REGISTER_KERNEL_BUILDER(Name("MatchCostGrad").Device(DEVICE_CPU), MatchCostGradOp);
330 |
--------------------------------------------------------------------------------
/external/tf_approxmatch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.python.framework import ops
3 | import os.path as osp
4 |
5 | base_dir = osp.dirname(osp.abspath(__file__))
6 |
7 | approxmatch_module = tf.load_op_library(osp.join(base_dir, 'tf_approxmatch_so.so'))
8 |
9 |
10 | def approx_match(xyz1,xyz2):
11 | '''
12 | input (a batch dimension of size 1 is added to both inputs internally):
13 | xyz1 : #dataset_points * 3
14 | xyz2 : #query_points * 3
15 | returns:
16 | match : 1 * #query_points * #dataset_points
17 | '''
18 | xyz1 = tf.expand_dims(xyz1, 0)
19 | xyz2 = tf.expand_dims(xyz2, 0)
20 | return approxmatch_module.approx_match(xyz1,xyz2)
21 | ops.NoGradient('ApproxMatch')
22 | #@tf.RegisterShape('ApproxMatch')
23 | @ops.RegisterShape('ApproxMatch')
24 | def _approx_match_shape(op):
25 | shape1=op.inputs[0].get_shape().with_rank(3)
26 | shape2=op.inputs[1].get_shape().with_rank(3)
27 | return [tf.TensorShape([shape1.dims[0],shape2.dims[1],shape1.dims[1]])]
28 |
29 | def match_cost(xyz1,xyz2,match):
30 | '''
31 | input (a batch dimension of size 1 is added to xyz1 and xyz2 internally):
32 | xyz1 : #dataset_points * 3
33 | xyz2 : #query_points * 3
34 | match : 1 * #query_points * #dataset_points
35 | returns:
36 | cost : (1,) total cost of the matching
37 | '''
38 | xyz1 = tf.expand_dims(xyz1, 0)
39 | xyz2 = tf.expand_dims(xyz2, 0)
40 | return approxmatch_module.match_cost(xyz1,xyz2,match)
41 | #@tf.RegisterShape('MatchCost')
42 | @ops.RegisterShape('MatchCost')
43 | def _match_cost_shape(op):
44 | shape1=op.inputs[0].get_shape().with_rank(3)
45 | shape2=op.inputs[1].get_shape().with_rank(3)
46 | shape3=op.inputs[2].get_shape().with_rank(3)
47 | return [tf.TensorShape([shape1.dims[0]])]
48 | @tf.RegisterGradient('MatchCost')
49 | def _match_cost_grad(op,grad_cost):
50 | xyz1=op.inputs[0]
51 | xyz2=op.inputs[1]
52 | match=op.inputs[2]
53 | grad_1,grad_2=approxmatch_module.match_cost_grad(xyz1,xyz2,match)
54 | return [grad_1*tf.expand_dims(tf.expand_dims(grad_cost,1),2),grad_2*tf.expand_dims(tf.expand_dims(grad_cost,1),2),None]
55 |
56 | if __name__=='__main__':
57 | alpha=0.5
58 | beta=2.0
59 | import bestmatch
60 | import numpy as np
61 | import math
62 | import random
63 | import cv2
64 |
65 | import tf_nndistance
66 |
67 | npoint=100
68 |
69 | with tf.device('/gpu:2'):
70 | pt_in=tf.placeholder(tf.float32,shape=(1,npoint*4,3))
71 | mypoints=tf.Variable(np.random.randn(1,npoint,3).astype('float32'))
72 | match=approx_match(pt_in,mypoints)
73 | loss=tf.reduce_sum(match_cost(pt_in,mypoints,match))
74 | #match=approx_match(mypoints,pt_in)
75 | #loss=tf.reduce_sum(match_cost(mypoints,pt_in,match))
76 | #distf,_,distb,_=tf_nndistance.nn_distance(pt_in,mypoints)
77 | #loss=tf.reduce_sum((distf+1e-9)**0.5)*0.5+tf.reduce_sum((distb+1e-9)**0.5)*0.5
78 | #loss=tf.reduce_max((distf+1e-9)**0.5)*0.5*npoint+tf.reduce_max((distb+1e-9)**0.5)*0.5*npoint
79 |
80 | optimizer=tf.train.GradientDescentOptimizer(1e-4).minimize(loss)
81 | with tf.Session('') as sess:
82 | sess.run(tf.initialize_all_variables())
83 | while True:
84 | meanloss=0
85 | meantrueloss=0
86 | for i in xrange(1001):
87 | #phi=np.random.rand(4*npoint)*math.pi*2
88 | #tpoints=(np.hstack([np.cos(phi)[:,None],np.sin(phi)[:,None],(phi*0)[:,None]])*random.random())[None,:,:]
89 | #tpoints=((np.random.rand(400)-0.5)[:,None]*[0,2,0]+[(random.random()-0.5)*2,0,0]).astype('float32')[None,:,:]
90 | tpoints=np.hstack([np.linspace(-1,1,400)[:,None],(random.random()*2*np.linspace(1,0,400)**2)[:,None],np.zeros((400,1))])[None,:,:]
91 | trainloss,_=sess.run([loss,optimizer],feed_dict={pt_in:tpoints.astype('float32')})
92 | trainloss,trainmatch=sess.run([loss,match],feed_dict={pt_in:tpoints.astype('float32')})
93 | #trainmatch=trainmatch.transpose((0,2,1))
94 | show=np.zeros((400,400,3),dtype='uint8')^255
95 | trainmypoints=sess.run(mypoints)
96 | for i in xrange(len(tpoints[0])):
97 | u=np.random.choice(range(len(trainmypoints[0])),p=trainmatch[0].T[i])
98 | cv2.line(show,
99 | (int(tpoints[0][i,1]*100+200),int(tpoints[0][i,0]*100+200)),
100 | (int(trainmypoints[0][u,1]*100+200),int(trainmypoints[0][u,0]*100+200)),
101 | cv2.cv.CV_RGB(0,255,0))
102 | for x,y,z in tpoints[0]:
103 | cv2.circle(show,(int(y*100+200),int(x*100+200)),2,cv2.cv.CV_RGB(255,0,0))
104 | for x,y,z in trainmypoints[0]:
105 | cv2.circle(show,(int(y*100+200),int(x*100+200)),3,cv2.cv.CV_RGB(0,0,255))
106 | cost=((tpoints[0][:,None,:]-np.repeat(trainmypoints[0][None,:,:],4,axis=1))**2).sum(axis=2)**0.5
107 | #trueloss=bestmatch.bestmatch(cost)[0]
108 | print trainloss#,trueloss
109 | cv2.imshow('show',show)
110 | cmd=cv2.waitKey(10)%256
111 | if cmd==ord('q'):
112 | break
113 |
--------------------------------------------------------------------------------
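A minimal usage sketch for the wrappers above (not part of the repo), assuming the compiled tf_approxmatch_so.so is importable and the TF 1.x API used throughout this codebase. approx_match produces a soft assignment between two point clouds and match_cost reduces it to a differentiable matching cost; gradients flow only through MatchCost, since ApproxMatch is registered with NoGradient, exactly as in the __main__ self-test above.

    import numpy as np
    import tensorflow as tf
    import tf_approxmatch

    gt = tf.constant(np.random.randn(256, 3).astype('float32'))    # "dataset" cloud
    pred = tf.Variable(np.random.randn(128, 3).astype('float32'))  # "query" cloud
    match = tf_approxmatch.approx_match(gt, pred)                  # (1, 128, 256) soft assignment
    loss = tf.reduce_sum(tf_approxmatch.match_cost(gt, pred, match))
    train = tf.train.GradientDescentOptimizer(1e-4).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for _ in range(10):
            print(sess.run([loss, train])[0])  # matching cost should decrease
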
/external/tf_approxmatch_g.cu:
--------------------------------------------------------------------------------
1 | __global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){
2 | float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n;
3 | float multiL,multiR;
4 | if (n>=m){
5 | multiL=1;
6 | multiR=n/m;
7 | }else{
8 | multiL=m/n;
9 | multiR=1;
10 | }
11 | const int Block=1024;
12 | __shared__ float buf[Block*4];
13 | for (int i=blockIdx.x;i=-2;j--){
22 | float level=-powf(4.0f,j);
23 | if (j==-2){
24 | level=0;
25 | }
26 | for (int k0=0;k0>>(b,n,m,xyz1,xyz2,match,temp);
182 | }
183 | __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){
184 | __shared__ float allsum[512];
185 | const int Block=1024;
186 | __shared__ float buf[Block*3];
187 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,out);
228 | }
229 | __global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){
230 | __shared__ float sum_grad[256*3];
231 | for (int i=blockIdx.x;i>>(b,n,m,xyz1,xyz2,match,grad1);
294 | matchcostgrad2<<>>(b,n,m,xyz1,xyz2,match,grad2);
295 | }
296 |
297 |
--------------------------------------------------------------------------------
/external/tf_approxmatch_g.cu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/external/tf_approxmatch_g.cu.o
--------------------------------------------------------------------------------
/external/tf_approxmatch_so.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/external/tf_approxmatch_so.so
--------------------------------------------------------------------------------
/external/tf_nndistance.cpp:
--------------------------------------------------------------------------------
1 | #include "tensorflow/core/framework/op.h"
2 | #include "tensorflow/core/framework/op_kernel.h"
3 | REGISTER_OP("NnDistance")
4 | .Input("xyz1: float32")
5 | .Input("xyz2: float32")
6 | .Output("dist1: float32")
7 | .Output("idx1: int32")
8 | .Output("dist2: float32")
9 | .Output("idx2: int32");
10 | REGISTER_OP("NnDistanceGrad")
11 | .Input("xyz1: float32")
12 | .Input("xyz2: float32")
13 | .Input("grad_dist1: float32")
14 | .Input("idx1: int32")
15 | .Input("grad_dist2: float32")
16 | .Input("idx2: int32")
17 | .Output("grad_xyz1: float32")
18 | .Output("grad_xyz2: float32");
19 | using namespace tensorflow;
20 |
21 | static void nnsearch(int b,int n,int m,const float * xyz1,const float * xyz2,float * dist,int * idx){
22 | for (int i=0;i<b;i++){
23 | for (int j=0;j<n;j++){
24 | float x1=xyz1[(i*n+j)*3+0];
25 | float y1=xyz1[(i*n+j)*3+1];
26 | float z1=xyz1[(i*n+j)*3+2];
27 | double best=0;
28 | int besti=0;
29 | for (int k=0;k<m;k++){
30 | float x2=xyz2[(i*m+k)*3+0]-x1;
31 | float y2=xyz2[(i*m+k)*3+1]-y1;
32 | float z2=xyz2[(i*m+k)*3+2]-z1;
33 | double d=x2*x2+y2*y2+z2*z2;
34 | if (k==0 || d<best){
35 | best=d;
36 | besti=k;
37 | }
38 | }
39 | dist[i*n+j]=best;
40 | idx[i*n+j]=besti;
41 | }
42 | }
43 | }
44 |
45 | class NnDistanceOp : public OpKernel{
46 | public:
47 | explicit NnDistanceOp(OpKernelConstruction* context):OpKernel(context){}
48 | void Compute(OpKernelContext * context)override{
49 | const Tensor& xyz1_tensor=context->input(0);
50 | const Tensor& xyz2_tensor=context->input(1);
51 | OP_REQUIRES(context,xyz1_tensor.dims()==3,errors::InvalidArgument("NnDistance requires xyz1 be of shape (batch,#points,3)"));
52 | OP_REQUIRES(context,xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistance only accepts 3d point set xyz1"));
53 | int b=xyz1_tensor.shape().dim_size(0);
54 | int n=xyz1_tensor.shape().dim_size(1);
55 | OP_REQUIRES(context,xyz2_tensor.dims()==3,errors::InvalidArgument("NnDistance requires xyz2 be of shape (batch,#points,3)"));
56 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistance only accepts 3d point set xyz2"));
57 | int m=xyz2_tensor.shape().dim_size(1);
58 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("NnDistance expects xyz1 and xyz2 have same batch size"));
59 | auto xyz1_flat=xyz1_tensor.flat<float>();
60 | const float * xyz1=&xyz1_flat(0);
61 | auto xyz2_flat=xyz2_tensor.flat<float>();
62 | const float * xyz2=&xyz2_flat(0);
63 | Tensor * dist1_tensor=NULL;
64 | Tensor * idx1_tensor=NULL;
65 | Tensor * dist2_tensor=NULL;
66 | Tensor * idx2_tensor=NULL;
67 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n},&dist1_tensor));
68 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,n},&idx1_tensor));
69 | auto dist1_flat=dist1_tensor->flat<float>();
70 | auto idx1_flat=idx1_tensor->flat<int>();
71 | OP_REQUIRES_OK(context,context->allocate_output(2,TensorShape{b,m},&dist2_tensor));
72 | OP_REQUIRES_OK(context,context->allocate_output(3,TensorShape{b,m},&idx2_tensor));
73 | auto dist2_flat=dist2_tensor->flat<float>();
74 | auto idx2_flat=idx2_tensor->flat<int>();
75 | float * dist1=&(dist1_flat(0));
76 | int * idx1=&(idx1_flat(0));
77 | float * dist2=&(dist2_flat(0));
78 | int * idx2=&(idx2_flat(0));
79 | nnsearch(b,n,m,xyz1,xyz2,dist1,idx1);
80 | nnsearch(b,m,n,xyz2,xyz1,dist2,idx2);
81 | }
82 | };
83 | REGISTER_KERNEL_BUILDER(Name("NnDistance").Device(DEVICE_CPU), NnDistanceOp);
84 | class NnDistanceGradOp : public OpKernel{
85 | public:
86 | explicit NnDistanceGradOp(OpKernelConstruction* context):OpKernel(context){}
87 | void Compute(OpKernelContext * context)override{
88 | const Tensor& xyz1_tensor=context->input(0);
89 | const Tensor& xyz2_tensor=context->input(1);
90 | const Tensor& grad_dist1_tensor=context->input(2);
91 | const Tensor& idx1_tensor=context->input(3);
92 | const Tensor& grad_dist2_tensor=context->input(4);
93 | const Tensor& idx2_tensor=context->input(5);
94 | OP_REQUIRES(context,xyz1_tensor.dims()==3,errors::InvalidArgument("NnDistanceGrad requires xyz1 be of shape (batch,#points,3)"));
95 | OP_REQUIRES(context,xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistanceGrad only accepts 3d point set xyz1"));
96 | int b=xyz1_tensor.shape().dim_size(0);
97 | int n=xyz1_tensor.shape().dim_size(1);
98 | OP_REQUIRES(context,xyz2_tensor.dims()==3,errors::InvalidArgument("NnDistanceGrad requires xyz2 be of shape (batch,#points,3)"));
99 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistanceGrad only accepts 3d point set xyz2"));
100 | int m=xyz2_tensor.shape().dim_size(1);
101 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("NnDistanceGrad expects xyz1 and xyz2 have same batch size"));
102 | OP_REQUIRES(context,grad_dist1_tensor.shape()==(TensorShape{b,n}),errors::InvalidArgument("NnDistanceGrad requires grad_dist1 be of shape(batch,#points)"));
103 | OP_REQUIRES(context,idx1_tensor.shape()==(TensorShape{b,n}),errors::InvalidArgument("NnDistanceGrad requires idx1 be of shape(batch,#points)"));
104 | OP_REQUIRES(context,grad_dist2_tensor.shape()==(TensorShape{b,m}),errors::InvalidArgument("NnDistanceGrad requires grad_dist2 be of shape(batch,#points)"));
105 | OP_REQUIRES(context,idx2_tensor.shape()==(TensorShape{b,m}),errors::InvalidArgument("NnDistanceGrad requires idx2 be of shape(batch,#points)"));
106 | auto xyz1_flat=xyz1_tensor.flat<float>();
107 | const float * xyz1=&xyz1_flat(0);
108 | auto xyz2_flat=xyz2_tensor.flat<float>();
109 | const float * xyz2=&xyz2_flat(0);
110 | auto idx1_flat=idx1_tensor.flat<int>();
111 | const int * idx1=&idx1_flat(0);
112 | auto idx2_flat=idx2_tensor.flat<int>();
113 | const int * idx2=&idx2_flat(0);
114 | auto grad_dist1_flat=grad_dist1_tensor.flat<float>();
115 | const float * grad_dist1=&grad_dist1_flat(0);
116 | auto grad_dist2_flat=grad_dist2_tensor.flat<float>();
117 | const float * grad_dist2=&grad_dist2_flat(0);
118 | Tensor * grad_xyz1_tensor=NULL;
119 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&grad_xyz1_tensor));
120 | Tensor * grad_xyz2_tensor=NULL;
121 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,m,3},&grad_xyz2_tensor));
122 | auto grad_xyz1_flat=grad_xyz1_tensor->flat<float>();
123 | float * grad_xyz1=&grad_xyz1_flat(0);
124 | auto grad_xyz2_flat=grad_xyz2_tensor->flat<float>();
125 | float * grad_xyz2=&grad_xyz2_flat(0);
126 | for (int i=0;iinput(0);
174 | const Tensor& xyz2_tensor=context->input(1);
175 | OP_REQUIRES(context,xyz1_tensor.dims()==3,errors::InvalidArgument("NnDistance requires xyz1 be of shape (batch,#points,3)"));
176 | OP_REQUIRES(context,xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistance only accepts 3d point set xyz1"));
177 | int b=xyz1_tensor.shape().dim_size(0);
178 | int n=xyz1_tensor.shape().dim_size(1);
179 | OP_REQUIRES(context,xyz2_tensor.dims()==3,errors::InvalidArgument("NnDistance requires xyz2 be of shape (batch,#points,3)"));
180 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistance only accepts 3d point set xyz2"));
181 | int m=xyz2_tensor.shape().dim_size(1);
182 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("NnDistance expects xyz1 and xyz2 have same batch size"));
183 | auto xyz1_flat=xyz1_tensor.flat<float>();
184 | const float * xyz1=&xyz1_flat(0);
185 | auto xyz2_flat=xyz2_tensor.flat<float>();
186 | const float * xyz2=&xyz2_flat(0);
187 | Tensor * dist1_tensor=NULL;
188 | Tensor * idx1_tensor=NULL;
189 | Tensor * dist2_tensor=NULL;
190 | Tensor * idx2_tensor=NULL;
191 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n},&dist1_tensor));
192 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,n},&idx1_tensor));
193 | auto dist1_flat=dist1_tensor->flat<float>();
194 | auto idx1_flat=idx1_tensor->flat<int>();
195 | OP_REQUIRES_OK(context,context->allocate_output(2,TensorShape{b,m},&dist2_tensor));
196 | OP_REQUIRES_OK(context,context->allocate_output(3,TensorShape{b,m},&idx2_tensor));
197 | auto dist2_flat=dist2_tensor->flat<float>();
198 | auto idx2_flat=idx2_tensor->flat<int>();
199 | float * dist1=&(dist1_flat(0));
200 | int * idx1=&(idx1_flat(0));
201 | float * dist2=&(dist2_flat(0));
202 | int * idx2=&(idx2_flat(0));
203 | NmDistanceKernelLauncher(b,n,xyz1,m,xyz2,dist1,idx1,dist2,idx2);
204 | }
205 | };
206 | REGISTER_KERNEL_BUILDER(Name("NnDistance").Device(DEVICE_GPU), NnDistanceGpuOp);
207 |
208 | void NmDistanceGradKernelLauncher(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2);
209 | class NnDistanceGradGpuOp : public OpKernel{
210 | public:
211 | explicit NnDistanceGradGpuOp(OpKernelConstruction* context):OpKernel(context){}
212 | void Compute(OpKernelContext * context)override{
213 | const Tensor& xyz1_tensor=context->input(0);
214 | const Tensor& xyz2_tensor=context->input(1);
215 | const Tensor& grad_dist1_tensor=context->input(2);
216 | const Tensor& idx1_tensor=context->input(3);
217 | const Tensor& grad_dist2_tensor=context->input(4);
218 | const Tensor& idx2_tensor=context->input(5);
219 | OP_REQUIRES(context,xyz1_tensor.dims()==3,errors::InvalidArgument("NnDistanceGrad requires xyz1 be of shape (batch,#points,3)"));
220 | OP_REQUIRES(context,xyz1_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistanceGrad only accepts 3d point set xyz1"));
221 | int b=xyz1_tensor.shape().dim_size(0);
222 | int n=xyz1_tensor.shape().dim_size(1);
223 | OP_REQUIRES(context,xyz2_tensor.dims()==3,errors::InvalidArgument("NnDistanceGrad requires xyz2 be of shape (batch,#points,3)"));
224 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(2)==3,errors::InvalidArgument("NnDistanceGrad only accepts 3d point set xyz2"));
225 | int m=xyz2_tensor.shape().dim_size(1);
226 | OP_REQUIRES(context,xyz2_tensor.shape().dim_size(0)==b,errors::InvalidArgument("NnDistanceGrad expects xyz1 and xyz2 have same batch size"));
227 | OP_REQUIRES(context,grad_dist1_tensor.shape()==(TensorShape{b,n}),errors::InvalidArgument("NnDistanceGrad requires grad_dist1 be of shape(batch,#points)"));
228 | OP_REQUIRES(context,idx1_tensor.shape()==(TensorShape{b,n}),errors::InvalidArgument("NnDistanceGrad requires idx1 be of shape(batch,#points)"));
229 | OP_REQUIRES(context,grad_dist2_tensor.shape()==(TensorShape{b,m}),errors::InvalidArgument("NnDistanceGrad requires grad_dist2 be of shape(batch,#points)"));
230 | OP_REQUIRES(context,idx2_tensor.shape()==(TensorShape{b,m}),errors::InvalidArgument("NnDistanceGrad requires idx2 be of shape(batch,#points)"));
231 | auto xyz1_flat=xyz1_tensor.flat<float>();
232 | const float * xyz1=&xyz1_flat(0);
233 | auto xyz2_flat=xyz2_tensor.flat<float>();
234 | const float * xyz2=&xyz2_flat(0);
235 | auto idx1_flat=idx1_tensor.flat<int>();
236 | const int * idx1=&idx1_flat(0);
237 | auto idx2_flat=idx2_tensor.flat<int>();
238 | const int * idx2=&idx2_flat(0);
239 | auto grad_dist1_flat=grad_dist1_tensor.flat<float>();
240 | const float * grad_dist1=&grad_dist1_flat(0);
241 | auto grad_dist2_flat=grad_dist2_tensor.flat<float>();
242 | const float * grad_dist2=&grad_dist2_flat(0);
243 | Tensor * grad_xyz1_tensor=NULL;
244 | OP_REQUIRES_OK(context,context->allocate_output(0,TensorShape{b,n,3},&grad_xyz1_tensor));
245 | Tensor * grad_xyz2_tensor=NULL;
246 | OP_REQUIRES_OK(context,context->allocate_output(1,TensorShape{b,m,3},&grad_xyz2_tensor));
247 | auto grad_xyz1_flat=grad_xyz1_tensor->flat<float>();
248 | float * grad_xyz1=&grad_xyz1_flat(0);
249 | auto grad_xyz2_flat=grad_xyz2_tensor->flat<float>();
250 | float * grad_xyz2=&grad_xyz2_flat(0);
251 | NmDistanceGradKernelLauncher(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_dist2,idx2,grad_xyz1,grad_xyz2);
252 | }
253 | };
254 | REGISTER_KERNEL_BUILDER(Name("NnDistanceGrad").Device(DEVICE_GPU), NnDistanceGradGpuOp);
255 |
--------------------------------------------------------------------------------
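nnsearch above is the CPU reference for the op: for every point in one cloud it records the squared distance to, and the index of, its nearest neighbor in the other cloud (no square root is taken). The same semantics in NumPy, as a sketch with a hypothetical helper name; it matches the verification code commented out at the bottom of tf_nndistance.py below.

    import numpy as np

    def nn_distance_np(xyz1, xyz2):
        # xyz1: (b, n, 3), xyz2: (b, m, 3) -> pairwise squared distances (b, n, m)
        d = ((xyz1[:, :, None, :] - xyz2[:, None, :, :]) ** 2).sum(axis=-1)
        dist1, idx1 = d.min(axis=2), d.argmin(axis=2)  # nearest xyz2 point per xyz1 point
        dist2, idx2 = d.min(axis=1), d.argmin(axis=1)  # nearest xyz1 point per xyz2 point
        return dist1, idx1, dist2, idx2
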
/external/tf_nndistance.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorflow.python.framework import ops
3 | import os.path as osp
4 |
5 | base_dir = osp.dirname(osp.abspath(__file__))
6 |
7 | nn_distance_module = tf.load_op_library(osp.join(base_dir, 'tf_nndistance_so.so'))
8 |
9 |
10 | def nn_distance(xyz1, xyz2):
11 | '''
12 | Computes the squared distance to the nearest neighbor for a pair of point clouds
13 | input: xyz1: (#points_1,3) the first point cloud; a batch dimension of size 1 is added internally
14 | input: xyz2: (#points_2,3) the second point cloud
15 | output: dist1: (1,#points_1) squared distance from first to second
16 | output: idx1: (1,#points_1) nearest neighbor index from first to second
17 | output: dist2: (1,#points_2) squared distance from second to first
18 | output: idx2: (1,#points_2) nearest neighbor index from second to first
19 | '''
20 | xyz1 = tf.expand_dims(xyz1, 0)
21 | xyz2 = tf.expand_dims(xyz2, 0)
22 | return nn_distance_module.nn_distance(xyz1,xyz2)
23 |
24 | #@tf.RegisterShape('NnDistance')
25 | @ops.RegisterShape('NnDistance')
26 | def _nn_distance_shape(op):
27 | shape1=op.inputs[0].get_shape().with_rank(3)
28 | shape2=op.inputs[1].get_shape().with_rank(3)
29 | return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
30 | tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])]
31 | @ops.RegisterGradient('NnDistance')
32 | def _nn_distance_grad(op,grad_dist1,grad_idx1,grad_dist2,grad_idx2):
33 | xyz1=op.inputs[0]
34 | xyz2=op.inputs[1]
35 | idx1=op.outputs[1]
36 | idx2=op.outputs[3]
37 | return nn_distance_module.nn_distance_grad(xyz1,xyz2,grad_dist1,idx1,grad_dist2,idx2)
38 |
39 |
40 | if __name__=='__main__':
41 | import numpy as np
42 | import random
43 | import time
44 | #from tensorflow.python.kernel_tests.gradient_checker import compute_gradient
45 | from tensorflow.python.ops.gradient_checker import compute_gradient
46 | random.seed(100)
47 | np.random.seed(100)
48 | with tf.Session('') as sess:
49 | xyz1=np.random.randn(32,16384,3).astype('float32')
50 | xyz2=np.random.randn(32,1024,3).astype('float32')
51 | with tf.device('/gpu:0'):
52 | inp1=tf.Variable(xyz1)
53 | inp2=tf.constant(xyz2)
54 | reta,retb,retc,retd=nn_distance(inp1,inp2)
55 | loss=tf.reduce_sum(reta)+tf.reduce_sum(retc)
56 | train=tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)
57 | sess.run(tf.initialize_all_variables())
58 | t0=time.time()
59 | t1=t0
60 | best=1e100
61 | for i in xrange(100):
62 | trainloss,_=sess.run([loss,train])
63 | newt=time.time()
64 | best=min(best,newt-t1)
65 | print i,trainloss,(newt-t0)/(i+1),best
66 | t1=newt
67 | #print sess.run([inp1,retb,inp2,retd])
68 | #grads=compute_gradient([inp1,inp2],[(16,32,3),(16,32,3)],loss,(1,),[xyz1,xyz2])
69 | #for i,j in grads:
70 | #print i.shape,j.shape,np.mean(np.abs(i-j)),np.mean(np.abs(i)),np.mean(np.abs(j))
71 | #for i in xrange(10):
72 | #t0=time.time()
73 | #a,b,c,d=sess.run([reta,retb,retc,retd],feed_dict={inp1:xyz1,inp2:xyz2})
74 | #print 'time',time.time()-t0
75 | #print a.shape,b.shape,c.shape,d.shape
76 | #print a.dtype,b.dtype,c.dtype,d.dtype
77 | #samples=np.array(random.sample(range(xyz2.shape[1]),100),dtype='int32')
78 | #dist1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
79 | #idx1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
80 | #print np.abs(dist1-a[:,samples]).max()
81 | #print np.abs(idx1-b[:,samples]).max()
82 | #dist2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
83 | #idx2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
84 | #print np.abs(dist2-c[:,samples]).max()
85 | #print np.abs(idx2-d[:,samples]).max()
86 |
87 |
--------------------------------------------------------------------------------
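A short sketch (not part of the repo) of turning these four outputs into a symmetric Chamfer loss, assuming the compiled tf_nndistance_so.so is importable; the weighted variant actually used for training is mesh_loss in p2m/losses.py.

    import numpy as np
    import tensorflow as tf
    import tf_nndistance

    gt = tf.constant(np.random.randn(1024, 3).astype('float32'))
    pred = tf.constant(np.random.randn(256, 3).astype('float32'))

    dist1, idx1, dist2, idx2 = tf_nndistance.nn_distance(gt, pred)
    chamfer = tf.reduce_mean(dist1) + tf.reduce_mean(dist2)  # symmetric, squared distances

    with tf.Session() as sess:
        print(sess.run(chamfer))
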
/external/tf_nndistance_g.cu:
--------------------------------------------------------------------------------
1 | #if GOOGLE_CUDA
2 | #define EIGEN_USE_GPU
3 | #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
4 |
5 | __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
6 | const int batch=512;
7 | __shared__ float buf[batch*3];
8 | for (int i=blockIdx.x;ibest){
120 | result[(i*n+j)]=best;
121 | result_i[(i*n+j)]=best_i;
122 | }
123 | }
124 | __syncthreads();
125 | }
126 | }
127 | }
128 | void NmDistanceKernelLauncher(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i){
129 | NmDistanceKernel<<>>(b,n,xyz,m,xyz2,result,result_i);
130 | NmDistanceKernel<<>>(b,m,xyz2,n,xyz,result2,result2_i);
131 | }
132 | __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
133 | for (int i=blockIdx.x;i>>(b,n,xyz1,m,xyz2,grad_dist1,idx1,grad_xyz1,grad_xyz2);
156 | NmDistanceGradKernel<<>>(b,m,xyz2,n,xyz1,grad_dist2,idx2,grad_xyz2,grad_xyz1);
157 | }
158 |
159 | #endif
160 |
--------------------------------------------------------------------------------
/external/tf_nndistance_g.cu.o:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/external/tf_nndistance_g.cu.o
--------------------------------------------------------------------------------
/external/tf_nndistance_so.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/external/tf_nndistance_so.so
--------------------------------------------------------------------------------
/p2m/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/nywang16/Pixel2Mesh/930a016b3e80713bb61c3671bee3ecfe5bce6c49/p2m/__init__.py
--------------------------------------------------------------------------------
/p2m/api.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | from __future__ import division
18 | import tflearn
19 | from layers import *
20 |
21 | flags = tf.app.flags
22 | FLAGS = flags.FLAGS
23 |
24 | class Model(object):
25 | def __init__(self, **kwargs):
26 | allowed_kwargs = {'name', 'logging'}
27 | for kwarg in kwargs.keys():
28 | assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
29 | name = kwargs.get('name')
30 | if not name:
31 | name = self.__class__.__name__.lower()
32 | self.name = name
33 |
34 | logging = kwargs.get('logging', False)
35 | self.logging = logging
36 |
37 | self.vars = {}
38 | self.placeholders = {}
39 |
40 | self.layers = []
41 | self.activations = []
42 |
43 | self.inputs = None
44 | self.output1 = None
45 | self.output2 = None
46 | self.output3 = None
47 | self.output1_2 = None
48 | self.output2_2 = None
49 |
50 | self.loss = 0
51 | self.optimizer = None
52 | self.opt_op = None
53 |
54 | def _build(self):
55 | raise NotImplementedError
56 |
57 | def build(self):
58 | """ Wrapper for _build() """
59 | #with tf.device('/gpu:0'):
60 | with tf.variable_scope(self.name):
61 | self._build()
62 |
63 | # Build sequential resnet model
64 | eltwise = [3,5,7,9,11,13, 19,21,23,25,27,29, 35,37,39,41,43,45]
65 | concat = [15, 31]
66 | self.activations.append(self.inputs)
67 | for idx,layer in enumerate(self.layers):
68 | hidden = layer(self.activations[-1])
69 | if idx in eltwise:
70 | hidden = tf.add(hidden, self.activations[-2]) * 0.5
71 | if idx in concat:
72 | hidden = tf.concat([hidden, self.activations[-2]], 1)
73 | self.activations.append(hidden)
74 |
75 | self.output1 = self.activations[15]
76 | unpool_layer = GraphPooling(placeholders=self.placeholders, pool_id=1)
77 | self.output1_2 = unpool_layer(self.output1)
78 |
79 | self.output2 = self.activations[31]
80 | unpool_layer = GraphPooling(placeholders=self.placeholders, pool_id=2)
81 | self.output2_2 = unpool_layer(self.output2)
82 |
83 | self.output3 = self.activations[-1]
84 |
85 | # Store model variables for easy access
86 | variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
87 | self.vars = {var.name: var for var in variables}
88 |
89 | def predict(self):
90 | pass
91 |
92 | def _loss(self):
93 | raise NotImplementedError
94 |
95 | def save(self, sess=None):
96 | if not sess:
97 | raise AttributeError("TensorFlow session not provided.")
98 | saver = tf.train.Saver(self.vars)
99 | save_path = saver.save(sess, "Data/checkpoint/%s.ckpt" % self.name)
100 | print("Model saved in file: %s" % save_path)
101 |
102 | def load(self, sess=None):
103 | if not sess:
104 | raise AttributeError("TensorFlow session not provided.")
105 | saver = tf.train.Saver(self.vars)
106 | save_path = "Data/checkpoint/%s.ckpt" % self.name
107 | #save_path = "checks/tmp/%s.ckpt" % self.name
108 | saver.restore(sess, save_path)
109 | print("Model restored from file: %s" % save_path)
110 |
111 | class GCN(Model):
112 | def __init__(self, placeholders, **kwargs):
113 | super(GCN, self).__init__(**kwargs)
114 |
115 | self.inputs = placeholders['features']
116 | self.placeholders = placeholders
117 |
118 | self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
119 |
120 | self.build()
121 |
122 | def _build(self):
123 | self.build_cnn18() #update image feature
124 | # first project block
125 | self.layers.append(GraphProjection(placeholders=self.placeholders))
126 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim,
127 | output_dim=FLAGS.hidden,
128 | gcn_block_id=1,
129 | placeholders=self.placeholders, logging=self.logging))
130 | for _ in range(12):
131 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
132 | output_dim=FLAGS.hidden,
133 | gcn_block_id=1,
134 | placeholders=self.placeholders, logging=self.logging))
135 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
136 | output_dim=FLAGS.coord_dim,
137 | act=lambda x: x,
138 | gcn_block_id=1,
139 | placeholders=self.placeholders, logging=self.logging))
140 | # second project block
141 | self.layers.append(GraphProjection(placeholders=self.placeholders))
142 | self.layers.append(GraphPooling(placeholders=self.placeholders, pool_id=1)) # unpooling
143 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim+FLAGS.hidden,
144 | output_dim=FLAGS.hidden,
145 | gcn_block_id=2,
146 | placeholders=self.placeholders, logging=self.logging))
147 | for _ in range(12):
148 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
149 | output_dim=FLAGS.hidden,
150 | gcn_block_id=2,
151 | placeholders=self.placeholders, logging=self.logging))
152 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
153 | output_dim=FLAGS.coord_dim,
154 | act=lambda x: x,
155 | gcn_block_id=2,
156 | placeholders=self.placeholders, logging=self.logging))
157 | # third project block
158 | self.layers.append(GraphProjection(placeholders=self.placeholders))
159 | self.layers.append(GraphPooling(placeholders=self.placeholders, pool_id=2)) # unpooling
160 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim+FLAGS.hidden,
161 | output_dim=FLAGS.hidden,
162 | gcn_block_id=3,
163 | placeholders=self.placeholders, logging=self.logging))
164 | for _ in range(12):
165 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
166 | output_dim=FLAGS.hidden,
167 | gcn_block_id=3,
168 | placeholders=self.placeholders, logging=self.logging))
169 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
170 | output_dim=int(FLAGS.hidden/2),
171 | gcn_block_id=3,
172 | placeholders=self.placeholders, logging=self.logging))
173 | self.layers.append(GraphConvolution(input_dim=int(FLAGS.hidden/2),
174 | output_dim=FLAGS.coord_dim,
175 | act=lambda x: x,
176 | gcn_block_id=3,
177 | placeholders=self.placeholders, logging=self.logging))
178 |
179 | def build_cnn18(self):
180 | x=self.placeholders['img_inp']
181 | x=tf.expand_dims(x, 0)
182 | #224 224
183 | x=tflearn.layers.conv.conv_2d(x,16,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
184 | x=tflearn.layers.conv.conv_2d(x,16,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
185 | x0=x
186 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
187 | #112 112
188 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
189 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
190 | x1=x
191 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
192 | #56 56
193 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
194 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
195 | x2=x
196 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
197 | #28 28
198 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
199 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
200 | x3=x
201 | x=tflearn.layers.conv.conv_2d(x,256,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
202 | #14 14
203 | x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
204 | x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
205 | x4=x
206 | x=tflearn.layers.conv.conv_2d(x,512,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
207 | #7 7
208 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
209 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
210 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
211 | x5=x
212 | # update image feature
213 | self.placeholders.update({'img_feat': [tf.squeeze(x2), tf.squeeze(x3), tf.squeeze(x4), tf.squeeze(x5)]})
214 |
--------------------------------------------------------------------------------
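A note on the feature dimensions wired up in _build(): each GraphProjection (defined in p2m/layers.py below) concatenates the 3 vertex coordinates with features bilinearly pooled from the four conv stages stored by build_cnn18 (64, 128, 256 and 512 channels), so every projected vertex carries 3 + 64 + 128 + 256 + 512 = 963 features; FLAGS.feat_dim must therefore equal 963 for the first GraphConvolution of each block to match shapes. In blocks 2 and 3 the concat at layer indices 15 and 31 additionally carries over the FLAGS.hidden-dimensional activation from just before the previous coordinate regression, which is why those blocks start with input_dim=FLAGS.feat_dim+FLAGS.hidden.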
/p2m/chamfer.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import tensorflow as tf
18 | from tensorflow.python.framework import ops
19 | nn_distance_module=tf.load_op_library('./external/tf_nndistance_so.so')
20 |
21 | def nn_distance(xyz1,xyz2):
22 | '''
23 | Computes the squared distance to the nearest neighbor for a pair of point clouds
24 | input: xyz1: (#points_1,3) the first point cloud; a batch dimension of size 1 is added internally
25 | input: xyz2: (#points_2,3) the second point cloud
26 | output: dist1: (1,#points_1) squared distance from first to second
27 | output: idx1: (1,#points_1) nearest neighbor index from first to second
28 | output: dist2: (1,#points_2) squared distance from second to first
29 | output: idx2: (1,#points_2) nearest neighbor index from second to first
30 | '''
31 | xyz1 = tf.expand_dims(xyz1, 0)
32 | xyz2 = tf.expand_dims(xyz2, 0)
33 | return nn_distance_module.nn_distance(xyz1,xyz2)
34 | #@tf.RegisterShape('NnDistance')
35 | #def _nn_distance_shape(op):
36 | #shape1=op.inputs[0].get_shape().with_rank(3)
37 | #shape2=op.inputs[1].get_shape().with_rank(3)
38 | #return [tf.TensorShape([shape1.dims[0],shape1.dims[1]]),tf.TensorShape([shape1.dims[0],shape1.dims[1]]),
39 | #tf.TensorShape([shape2.dims[0],shape2.dims[1]]),tf.TensorShape([shape2.dims[0],shape2.dims[1]])]
40 | @ops.RegisterGradient('NnDistance')
41 | def _nn_distance_grad(op,grad_dist1,grad_idx1,grad_dist2,grad_idx2):
42 | xyz1=op.inputs[0]
43 | xyz2=op.inputs[1]
44 | idx1=op.outputs[1]
45 | idx2=op.outputs[3]
46 | return nn_distance_module.nn_distance_grad(xyz1,xyz2,grad_dist1,idx1,grad_dist2,idx2)
47 |
48 |
49 | if __name__=='__main__':
50 | import numpy as np
51 | import random
52 | import time
53 | from tensorflow.python.ops.gradient_checker import compute_gradient
54 | random.seed(100)
55 | np.random.seed(100)
56 | with tf.Session('') as sess:
57 | xyz1=np.random.randn(32,16384,3).astype('float32')
58 | xyz2=np.random.randn(32,1024,3).astype('float32')
59 | #with tf.device('/gpu:0'):
60 | if True:
61 | inp1=tf.Variable(xyz1)
62 | inp2=tf.constant(xyz2)
63 | reta,retb,retc,retd=nn_distance(inp1,inp2)
64 | loss=tf.reduce_sum(reta)+tf.reduce_sum(retc)
65 | train=tf.train.GradientDescentOptimizer(learning_rate=0.05).minimize(loss)
66 | sess.run(tf.initialize_all_variables())
67 | t0=time.time()
68 | t1=t0
69 | best=1e100
70 | for i in xrange(100):
71 | trainloss,_=sess.run([loss,train])
72 | newt=time.time()
73 | best=min(best,newt-t1)
74 | print i,trainloss,(newt-t0)/(i+1),best
75 | t1=newt
76 | #print sess.run([inp1,retb,inp2,retd])
77 | #grads=compute_gradient([inp1,inp2],[(16,32,3),(16,32,3)],loss,(1,),[xyz1,xyz2])
78 | #for i,j in grads:
79 | #print i.shape,j.shape,np.mean(np.abs(i-j)),np.mean(np.abs(i)),np.mean(np.abs(j))
80 | #for i in xrange(10):
81 | #t0=time.time()
82 | #a,b,c,d=sess.run([reta,retb,retc,retd],feed_dict={inp1:xyz1,inp2:xyz2})
83 | #print 'time',time.time()-t0
84 | #print a.shape,b.shape,c.shape,d.shape
85 | #print a.dtype,b.dtype,c.dtype,d.dtype
86 | #samples=np.array(random.sample(range(xyz2.shape[1]),100),dtype='int32')
87 | #dist1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
88 | #idx1=((xyz1[:,samples,None,:]-xyz2[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
89 | #print np.abs(dist1-a[:,samples]).max()
90 | #print np.abs(idx1-b[:,samples]).max()
91 | #dist2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).min(axis=-1)
92 | #idx2=((xyz2[:,samples,None,:]-xyz1[:,None,:,:])**2).sum(axis=-1).argmin(axis=-1)
93 | #print np.abs(dist2-c[:,samples]).max()
94 | #print np.abs(idx2-d[:,samples]).max()
95 |
96 |
--------------------------------------------------------------------------------
/p2m/fetcher.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import numpy as np
18 | import cPickle as pickle
19 | import threading
20 | import Queue
21 | import sys
22 | from skimage import io,transform
23 |
24 | class DataFetcher(threading.Thread):
25 | def __init__(self, file_list):
26 | super(DataFetcher, self).__init__()
27 | self.stopped = False
28 | self.queue = Queue.Queue(64)
29 |
30 | self.pkl_list = []
31 | with open(file_list, 'r') as f:
32 | while(True):
33 | line = f.readline().strip()
34 | if not line:
35 | break
36 | self.pkl_list.append(line)
37 | self.index = 0
38 | self.number = len(self.pkl_list)
39 | np.random.shuffle(self.pkl_list)
40 |
41 | def work(self, idx):
42 | pkl_path = self.pkl_list[idx]
43 | label = pickle.load(open(pkl_path, 'rb'))
44 |
45 | img_path = pkl_path.replace('.dat', '.png')
46 | '''
47 | img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
48 | img[np.where(img[:,:,3]==0)] = 255
49 | img = cv2.resize(img, (224,224))
50 | img = img[:,:,:3]/255.
51 | '''
52 | img = io.imread(img_path)
53 | img[np.where(img[:,:,3]==0)] = 255
54 | img = transform.resize(img, (224,224))
55 | img = img[:,:,:3].astype('float32')
56 |
57 | return img, label, pkl_path.split('/')[-1]
58 |
59 | def run(self):
60 | while self.index < 90000000 and not self.stopped:
61 | self.queue.put(self.work(self.index % self.number))
62 | self.index += 1
63 | if self.index % self.number == 0:
64 | np.random.shuffle(self.pkl_list)
65 |
66 | def fetch(self):
67 | if self.stopped:
68 | return None
69 | return self.queue.get()
70 |
71 | def shutdown(self):
72 | self.stopped = True
73 | while not self.queue.empty():
74 | self.queue.get()
75 |
76 | if __name__ == '__main__':
77 | file_list = sys.argv[1]
78 | data = DataFetcher(file_list)
79 | data.start()
80 |
81 | image, label, name = data.fetch()
82 | print image.shape
83 | print label.shape
84 | print name
85 | data.stopped = True
86 |
--------------------------------------------------------------------------------
/p2m/inits.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | from __future__ import division
18 | import tensorflow as tf
19 | import numpy as np
20 |
21 | def uniform(shape, scale=0.05, name=None):
22 | """Uniform init."""
23 | initial = tf.random_uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
24 | return tf.Variable(initial, name=name)
25 |
26 |
27 | def glorot(shape, name=None):
28 | """Glorot & Bengio (AISTATS 2010) init."""
29 | init_range = np.sqrt(6.0/(shape[0]+shape[1]))
30 | initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
31 | return tf.Variable(initial, name=name)
32 |
33 |
34 | def zeros(shape, name=None):
35 | """All zeros."""
36 | initial = tf.zeros(shape, dtype=tf.float32)
37 | return tf.Variable(initial, name=name)
38 |
39 |
40 | def ones(shape, name=None):
41 | """All ones."""
42 | initial = tf.ones(shape, dtype=tf.float32)
43 | return tf.Variable(initial, name=name)
44 |
--------------------------------------------------------------------------------
/p2m/layers.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | from __future__ import division
18 | from inits import *
19 | import tensorflow as tf
20 |
21 | flags = tf.app.flags
22 | FLAGS = flags.FLAGS
23 |
24 | # global unique layer ID dictionary for layer name assignment
25 | _LAYER_UIDS = {}
26 |
27 | def project(img_feat, x, y, dim):
28 | x1 = tf.floor(x)
29 | x2 = tf.minimum(tf.ceil(x), tf.cast(tf.shape(img_feat)[0], tf.float32) - 1)
30 | y1 = tf.floor(y)
31 | y2 = tf.minimum(tf.ceil(y), tf.cast(tf.shape(img_feat)[1], tf.float32) - 1)
32 | Q11 = tf.gather_nd(img_feat, tf.stack([tf.cast(x1,tf.int32), tf.cast(y1,tf.int32)],1))
33 | Q12 = tf.gather_nd(img_feat, tf.stack([tf.cast(x1,tf.int32), tf.cast(y2,tf.int32)],1))
34 | Q21 = tf.gather_nd(img_feat, tf.stack([tf.cast(x2,tf.int32), tf.cast(y1,tf.int32)],1))
35 | Q22 = tf.gather_nd(img_feat, tf.stack([tf.cast(x2,tf.int32), tf.cast(y2,tf.int32)],1))
36 |
37 | weights = tf.multiply(tf.subtract(x2,x), tf.subtract(y2,y))
38 | Q11 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q11)
39 |
40 | weights = tf.multiply(tf.subtract(x,x1), tf.subtract(y2,y))
41 | Q21 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q21)
42 |
43 | weights = tf.multiply(tf.subtract(x2,x), tf.subtract(y,y1))
44 | Q12 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q12)
45 |
46 | weights = tf.multiply(tf.subtract(x,x1), tf.subtract(y,y1))
47 | Q22 = tf.multiply(tf.tile(tf.reshape(weights,[-1,1]),[1,dim]), Q22)
48 |
49 | outputs = tf.add_n([Q11, Q21, Q12, Q22])
50 | return outputs
51 |
52 | def get_layer_uid(layer_name=''):
53 | """Helper function, assigns unique layer IDs."""
54 | if layer_name not in _LAYER_UIDS:
55 | _LAYER_UIDS[layer_name] = 1
56 | return 1
57 | else:
58 | _LAYER_UIDS[layer_name] += 1
59 | return _LAYER_UIDS[layer_name]
60 |
61 |
62 | def sparse_dropout(x, keep_prob, noise_shape):
63 | """Dropout for sparse tensors."""
64 | random_tensor = keep_prob
65 | random_tensor += tf.random_uniform(noise_shape)
66 | dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
67 | pre_out = tf.sparse_retain(x, dropout_mask)
68 | return pre_out * (1./keep_prob)
69 |
70 |
71 | def dot(x, y, sparse=False):
72 | """Wrapper for tf.matmul (sparse vs dense)."""
73 | if sparse:
74 | res = tf.sparse_tensor_dense_matmul(x, y)
75 | else:
76 | res = tf.matmul(x, y)
77 | return res
78 |
79 |
80 | class Layer(object):
81 | """Base layer class. Defines basic API for all layer objects.
82 | Implementation inspired by keras (http://keras.io).
83 |
84 | # Properties
85 | name: String, defines the variable scope of the layer.
86 | logging: Boolean, switches Tensorflow histogram logging on/off
87 |
88 | # Methods
89 | _call(inputs): Defines computation graph of layer
90 | (i.e. takes input, returns output)
91 | __call__(inputs): Wrapper for _call()
92 | _log_vars(): Log all variables
93 | """
94 |
95 | def __init__(self, **kwargs):
96 | allowed_kwargs = {'name', 'logging'}
97 | for kwarg in kwargs.keys():
98 | assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
99 | name = kwargs.get('name')
100 | if not name:
101 | layer = self.__class__.__name__.lower()
102 | name = layer + '_' + str(get_layer_uid(layer))
103 | self.name = name
104 | self.vars = {}
105 | logging = kwargs.get('logging', False)
106 | self.logging = logging
107 | self.sparse_inputs = False
108 |
109 | def _call(self, inputs):
110 | return inputs
111 |
112 | def __call__(self, inputs):
113 | with tf.name_scope(self.name):
114 | if self.logging and not self.sparse_inputs:
115 | tf.summary.histogram(self.name + '/inputs', inputs)
116 | outputs = self._call(inputs)
117 | if self.logging:
118 | tf.summary.histogram(self.name + '/outputs', outputs)
119 | return outputs
120 |
121 | def _log_vars(self):
122 | for var in self.vars:
123 | tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
124 |
125 | class GraphConvolution(Layer):
126 | """Graph convolution layer."""
127 | def __init__(self, input_dim, output_dim, placeholders, dropout=False,
128 | sparse_inputs=False, act=tf.nn.relu, bias=True, gcn_block_id=1,
129 | featureless=False, **kwargs):
130 | super(GraphConvolution, self).__init__(**kwargs)
131 |
132 | if dropout:
133 | self.dropout = placeholders['dropout']
134 | else:
135 | self.dropout = 0.
136 |
137 | self.act = act
138 | if gcn_block_id == 1:
139 | self.support = placeholders['support1']
140 | elif gcn_block_id == 2:
141 | self.support = placeholders['support2']
142 | elif gcn_block_id == 3:
143 | self.support = placeholders['support3']
144 |
145 | self.sparse_inputs = sparse_inputs
146 | self.featureless = featureless
147 | self.bias = bias
148 |
149 | # helper variable for sparse dropout
150 | self.num_features_nonzero = 3#placeholders['num_features_nonzero']
151 |
152 | with tf.variable_scope(self.name + '_vars'):
153 | for i in range(len(self.support)):
154 | self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
155 | name='weights_' + str(i))
156 | if self.bias:
157 | self.vars['bias'] = zeros([output_dim], name='bias')
158 |
159 | if self.logging:
160 | self._log_vars()
161 |
162 | def _call(self, inputs):
163 | x = inputs
164 |
165 | # dropout
166 | if self.sparse_inputs:
167 | x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
168 | else:
169 | x = tf.nn.dropout(x, 1-self.dropout)
170 |
171 | # convolve
172 | supports = list()
173 | for i in range(len(self.support)):
174 | if not self.featureless:
175 | pre_sup = dot(x, self.vars['weights_' + str(i)],
176 | sparse=self.sparse_inputs)
177 | else:
178 | pre_sup = self.vars['weights_' + str(i)]
179 | support = dot(self.support[i], pre_sup, sparse=True)
180 | supports.append(support)
181 | output = tf.add_n(supports)
182 |
183 | # bias
184 | if self.bias:
185 | output += self.vars['bias']
186 |
187 | return self.act(output)
188 |
189 | class GraphPooling(Layer):
190 | """Graph Pooling layer."""
191 | def __init__(self, placeholders, pool_id=1, **kwargs):
192 | super(GraphPooling, self).__init__(**kwargs)
193 |
194 | self.pool_idx = placeholders['pool_idx'][pool_id-1]
195 |
196 | def _call(self, inputs):
197 | X = inputs
198 |
199 | add_feat = (1/2.0) * tf.reduce_sum(tf.gather(X, self.pool_idx), 1)
200 | outputs = tf.concat([X, add_feat], 0)
201 |
202 | return outputs
203 |
204 | class GraphProjection(Layer):
205 | """Graph Pooling layer."""
206 | def __init__(self, placeholders, **kwargs):
207 | super(GraphProjection, self).__init__(**kwargs)
208 |
209 | self.img_feat = placeholders['img_feat']
210 |
211 | '''
212 | def _call(self, inputs):
213 | coord = inputs
214 | X = inputs[:, 0]
215 | Y = inputs[:, 1]
216 | Z = inputs[:, 2]
217 |
218 | #h = (-Y)/(-Z)*248 + 224/2.0 - 1
219 | #w = X/(-Z)*248 + 224/2.0 - 1 [28,14,7,4]
220 | h = 248.0 * tf.divide(-Y, -Z) + 112.0
221 | w = 248.0 * tf.divide(X, -Z) + 112.0
222 |
223 | h = tf.minimum(tf.maximum(h, 0), 223)
224 | w = tf.minimum(tf.maximum(w, 0), 223)
225 | indices = tf.stack([h,w], 1)
226 |
227 | idx = tf.cast(indices/(224.0/56.0), tf.int32)
228 | out1 = tf.gather_nd(self.img_feat[0], idx)
229 | idx = tf.cast(indices/(224.0/28.0), tf.int32)
230 | out2 = tf.gather_nd(self.img_feat[1], idx)
231 | idx = tf.cast(indices/(224.0/14.0), tf.int32)
232 | out3 = tf.gather_nd(self.img_feat[2], idx)
233 | idx = tf.cast(indices/(224.0/7.00), tf.int32)
234 | out4 = tf.gather_nd(self.img_feat[3], idx)
235 |
236 | outputs = tf.concat([coord,out1,out2,out3,out4], 1)
237 | return outputs
238 | '''
239 | def _call(self, inputs):
240 | coord = inputs
241 | X = inputs[:, 0]
242 | Y = inputs[:, 1]
243 | Z = inputs[:, 2]
244 |
245 | h = 250 * tf.divide(-Y, -Z) + 112
246 | w = 250 * tf.divide(X, -Z) + 112
247 |
248 | h = tf.minimum(tf.maximum(h, 0), 223)
249 | w = tf.minimum(tf.maximum(w, 0), 223)
250 |
251 | x = h/(224.0/56)
252 | y = w/(224.0/56)
253 | out1 = project(self.img_feat[0], x, y, 64)
254 |
255 | x = h/(224.0/28)
256 | y = w/(224.0/28)
257 | out2 = project(self.img_feat[1], x, y, 128)
258 |
259 | x = h/(224.0/14)
260 | y = w/(224.0/14)
261 | out3 = project(self.img_feat[2], x, y, 256)
262 |
263 | x = h/(224.0/7)
264 | y = w/(224.0/7)
265 | out4 = project(self.img_feat[3], x, y, 512)
266 | outputs = tf.concat([coord,out1,out2,out3,out4], 1)
267 | return outputs
268 |
--------------------------------------------------------------------------------
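The project() helper above bilinearly interpolates a feature map at continuous (x, y) locations. A NumPy sketch of the same computation (hypothetical helper name), mirroring the TF version's weights, including its quirk that all four weights vanish when a coordinate is exactly integral (since then x1 == x2):

    import numpy as np

    def project_np(img_feat, x, y):
        # img_feat: (H, W, C) feature map; x, y: (N,) continuous pixel coordinates
        x1, y1 = np.floor(x).astype(int), np.floor(y).astype(int)
        x2 = np.minimum(np.ceil(x), img_feat.shape[0] - 1).astype(int)
        y2 = np.minimum(np.ceil(y), img_feat.shape[1] - 1).astype(int)
        Q11, Q12 = img_feat[x1, y1], img_feat[x1, y2]
        Q21, Q22 = img_feat[x2, y1], img_feat[x2, y2]
        w11 = ((x2 - x) * (y2 - y))[:, None]  # weight for Q11, and so on below
        w21 = ((x - x1) * (y2 - y))[:, None]
        w12 = ((x2 - x) * (y - y1))[:, None]
        w22 = ((x - x1) * (y - y1))[:, None]
        return w11 * Q11 + w21 * Q21 + w12 * Q12 + w22 * Q22
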
/p2m/losses.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import tensorflow as tf
18 | from chamfer import *
19 |
20 | def laplace_coord(pred, placeholders, block_id):
21 | vertex = tf.concat([pred, tf.zeros([1,3])], 0)
22 | indices = placeholders['lape_idx'][block_id-1][:, :8]
23 | weights = tf.cast(placeholders['lape_idx'][block_id-1][:,-1], tf.float32)
24 |
25 | weights = tf.tile(tf.reshape(tf.reciprocal(weights), [-1,1]), [1,3])
26 | laplace = tf.reduce_sum(tf.gather(vertex, indices), 1)
27 | laplace = tf.subtract(pred, tf.multiply(laplace, weights))
28 | return laplace
29 |
30 | def laplace_loss(pred1, pred2, placeholders, block_id):
31 | # laplace term
32 | lap1 = laplace_coord(pred1, placeholders, block_id)
33 | lap2 = laplace_coord(pred2, placeholders, block_id)
34 | laplace_loss = tf.reduce_mean(tf.reduce_sum(tf.square(tf.subtract(lap1,lap2)), 1)) * 1500
35 |
36 | move_loss = tf.reduce_mean(tf.reduce_sum(tf.square(tf.subtract(pred1, pred2)), 1)) * 100
37 | move_loss = tf.cond(tf.equal(block_id,1), lambda:0., lambda:move_loss)
38 | return laplace_loss + move_loss
39 |
40 | def unit(tensor):
41 | return tf.nn.l2_normalize(tensor, dim=1)
42 |
43 | def mesh_loss(pred, placeholders, block_id):
44 | gt_pt = placeholders['labels'][:, :3] # gt points
45 | gt_nm = placeholders['labels'][:, 3:] # gt normals
46 |
47 | # edge in graph
48 | nod1 = tf.gather(pred, placeholders['edges'][block_id-1][:,0])
49 | nod2 = tf.gather(pred, placeholders['edges'][block_id-1][:,1])
50 | edge = tf.subtract(nod1, nod2)
51 |
52 | # edge length loss
53 | edge_length = tf.reduce_sum(tf.square(edge), 1)
54 | edge_loss = tf.reduce_mean(edge_length) * 300
55 |
56 |     # chamfer distance
57 | dist1,idx1,dist2,idx2 = nn_distance(gt_pt, pred)
58 | point_loss = (tf.reduce_mean(dist1) + 0.55*tf.reduce_mean(dist2)) * 3000
59 |
60 | # normal cosine loss
61 | normal = tf.gather(gt_nm, tf.squeeze(idx2, 0))
62 | normal = tf.gather(normal, placeholders['edges'][block_id-1][:,0])
63 | cosine = tf.abs(tf.reduce_sum(tf.multiply(unit(normal), unit(edge)), 1))
64 | # cosine = tf.where(tf.greater(cosine,0.866), tf.zeros_like(cosine), cosine) # truncated
65 | normal_loss = tf.reduce_mean(cosine) * 0.5
66 |
67 | total_loss = point_loss + edge_loss + normal_loss
68 | return total_loss
69 |
--------------------------------------------------------------------------------
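
Note on losses.py above: laplace_coord computes the discrete Laplacian of each vertex, v_i minus the mean of its neighbors, where lape_idx carries up to 8 neighbor indices per vertex (the -1 padding is evidently meant to pick up the zero row that laplace_coord appends) and the last column stores the vertex degree. mesh_loss then combines a weighted symmetric Chamfer term (weights 1 and 0.55, scaled by 3000), an edge-length regularizer (x300) and a normal-cosine term (x0.5); laplace_loss adds the Laplacian-difference term (x1500) and, from block 2 on, a move penalty (x100). A minimal NumPy sketch of the Laplacian coordinate on a toy mesh, with the lape_idx layout inferred from the slicing in laplace_coord (column 8 is not read by the loss):

    import numpy as np

    # Toy mesh: three vertices on a line; vertex 1 is adjacent to 0 and 2.
    verts = np.array([[0., 0., 0.],
                      [1., 0., 0.],
                      [2., 0., 0.]])

    # Columns 0-7: neighbor indices (P = padding), column 9: vertex degree.
    # NumPy's -1 indexing resolves the padding to the appended zero row,
    # mirroring the concat in laplace_coord.
    P = -1
    lape = np.array([[1, P, P, P, P, P, P, P, 0, 1],
                     [0, 2, P, P, P, P, P, P, 0, 2],
                     [1, P, P, P, P, P, P, P, 0, 1]])

    vert_ext = np.vstack([verts, np.zeros((1, 3))])  # zero row for padding
    nb_sum = vert_ext[lape[:, :8]].sum(axis=1)       # sum of neighbors
    degree = lape[:, -1:].astype(float)
    laplace = verts - nb_sum / degree                # v_i - mean(neighbors)
    print(laplace)  # vertex 1 -> [0,0,0]: it sits at its neighbors' centroid
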
/p2m/models.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | from __future__ import division
18 | import tflearn
19 | from layers import *
20 | from losses import *
21 |
22 | flags = tf.app.flags
23 | FLAGS = flags.FLAGS
24 |
25 | class Model(object):
26 | def __init__(self, **kwargs):
27 | allowed_kwargs = {'name', 'logging'}
28 | for kwarg in kwargs.keys():
29 | assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
30 | name = kwargs.get('name')
31 | if not name:
32 | name = self.__class__.__name__.lower()
33 | self.name = name
34 |
35 | logging = kwargs.get('logging', False)
36 | self.logging = logging
37 |
38 | self.vars = {}
39 | self.placeholders = {}
40 |
41 | self.layers = []
42 | self.activations = []
43 |
44 | self.inputs = None
45 | self.output1 = None
46 | self.output2 = None
47 | self.output3 = None
48 | self.output1_2 = None
49 | self.output2_2 = None
50 |
51 | self.loss = 0
52 | self.optimizer = None
53 | self.opt_op = None
54 |
55 | def _build(self):
56 | raise NotImplementedError
57 |
58 | def build(self):
59 | """ Wrapper for _build() """
60 | #with tf.device('/gpu:0'):
61 | with tf.variable_scope(self.name):
62 | self._build()
63 |
64 | # Build sequential resnet model
65 |         eltwise = [3,5,7,9,11,13, 19,21,23,25,27,29, 35,37,39,41,43,45] # layer ids whose output is averaged with the activation two steps back (residual)
66 |         concat = [15, 31] # projection layers whose output is concatenated with the preceding GCN features
67 | self.activations.append(self.inputs)
68 | for idx,layer in enumerate(self.layers):
69 | hidden = layer(self.activations[-1])
70 | if idx in eltwise:
71 | hidden = tf.add(hidden, self.activations[-2]) * 0.5
72 | if idx in concat:
73 | hidden = tf.concat([hidden, self.activations[-2]], 1)
74 | self.activations.append(hidden)
75 |
76 | self.output1 = self.activations[15]
77 | unpool_layer = GraphPooling(placeholders=self.placeholders, pool_id=1)
78 | self.output1_2 = unpool_layer(self.output1)
79 |
80 | self.output2 = self.activations[31]
81 | unpool_layer = GraphPooling(placeholders=self.placeholders, pool_id=2)
82 | self.output2_2 = unpool_layer(self.output2)
83 |
84 | self.output3 = self.activations[-1]
85 |
86 | # Store model variables for easy access
87 | variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
88 | self.vars = {var.name: var for var in variables}
89 |
90 | # Build metrics
91 | self._loss()
92 |
93 | self.opt_op = self.optimizer.minimize(self.loss)
94 |
95 | def predict(self):
96 | pass
97 |
98 | def _loss(self):
99 | raise NotImplementedError
100 |
101 | def save(self, sess=None):
102 | if not sess:
103 | raise AttributeError("TensorFlow session not provided.")
104 | saver = tf.train.Saver(self.vars)
105 | save_path = saver.save(sess, "Data/checkpoint/%s.ckpt" % self.name)
106 | print("Model saved in file: %s" % save_path)
107 |
108 | def load(self, sess=None):
109 | if not sess:
110 | raise AttributeError("TensorFlow session not provided.")
111 | saver = tf.train.Saver(self.vars)
112 | save_path = "Data/checkpoint/%s.ckpt" % self.name
113 | #save_path = "checks/tmp/%s.ckpt" % self.name
114 | saver.restore(sess, save_path)
115 | print("Model restored from file: %s" % save_path)
116 |
117 | class GCN(Model):
118 | def __init__(self, placeholders, **kwargs):
119 | super(GCN, self).__init__(**kwargs)
120 |
121 | self.inputs = placeholders['features']
122 | self.placeholders = placeholders
123 |
124 | self.optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
125 |
126 | self.build()
127 |
128 | def _loss(self):
129 | '''
130 | for var in self.layers[0].vars.values():
131 | self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
132 | '''
133 | self.loss += mesh_loss(self.output1, self.placeholders, 1)
134 | self.loss += mesh_loss(self.output2, self.placeholders, 2)
135 | self.loss += mesh_loss(self.output3, self.placeholders, 3)
136 | self.loss += .1*laplace_loss(self.inputs, self.output1, self.placeholders, 1)
137 | self.loss += laplace_loss(self.output1_2, self.output2, self.placeholders, 2)
138 | self.loss += laplace_loss(self.output2_2, self.output3, self.placeholders, 3)
139 |
140 | # Weight decay loss
141 |         conv_layers = range(1,15) + range(17,31) + range(33,48) # GraphConvolution ids only; projection/pooling layers (0, 15, 16, 31, 32) have no weights
142 | for layer_id in conv_layers:
143 | for var in self.layers[layer_id].vars.values():
144 | self.loss += FLAGS.weight_decay * tf.nn.l2_loss(var)
145 |
146 | def _build(self):
147 | self.build_cnn18() #update image feature
148 | # first project block
149 | self.layers.append(GraphProjection(placeholders=self.placeholders))
150 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim,
151 | output_dim=FLAGS.hidden,
152 | gcn_block_id=1,
153 | placeholders=self.placeholders, logging=self.logging))
154 | for _ in range(12):
155 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
156 | output_dim=FLAGS.hidden,
157 | gcn_block_id=1,
158 | placeholders=self.placeholders, logging=self.logging))
159 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
160 | output_dim=FLAGS.coord_dim,
161 | act=lambda x: x,
162 | gcn_block_id=1,
163 | placeholders=self.placeholders, logging=self.logging))
164 | # second project block
165 | self.layers.append(GraphProjection(placeholders=self.placeholders))
166 | self.layers.append(GraphPooling(placeholders=self.placeholders, pool_id=1)) # unpooling
167 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim+FLAGS.hidden,
168 | output_dim=FLAGS.hidden,
169 | gcn_block_id=2,
170 | placeholders=self.placeholders, logging=self.logging))
171 | for _ in range(12):
172 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
173 | output_dim=FLAGS.hidden,
174 | gcn_block_id=2,
175 | placeholders=self.placeholders, logging=self.logging))
176 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
177 | output_dim=FLAGS.coord_dim,
178 | act=lambda x: x,
179 | gcn_block_id=2,
180 | placeholders=self.placeholders, logging=self.logging))
181 | # third project block
182 | self.layers.append(GraphProjection(placeholders=self.placeholders))
183 | self.layers.append(GraphPooling(placeholders=self.placeholders, pool_id=2)) # unpooling
184 | self.layers.append(GraphConvolution(input_dim=FLAGS.feat_dim+FLAGS.hidden,
185 | output_dim=FLAGS.hidden,
186 | gcn_block_id=3,
187 | placeholders=self.placeholders, logging=self.logging))
188 | for _ in range(12):
189 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
190 | output_dim=FLAGS.hidden,
191 | gcn_block_id=3,
192 | placeholders=self.placeholders, logging=self.logging))
193 | self.layers.append(GraphConvolution(input_dim=FLAGS.hidden,
194 | output_dim=int(FLAGS.hidden/2),
195 | gcn_block_id=3,
196 | placeholders=self.placeholders, logging=self.logging))
197 | self.layers.append(GraphConvolution(input_dim=int(FLAGS.hidden/2),
198 | output_dim=FLAGS.coord_dim,
199 | act=lambda x: x,
200 | gcn_block_id=3,
201 | placeholders=self.placeholders, logging=self.logging))
202 |
203 | def build_cnn18(self):
204 | x=self.placeholders['img_inp']
205 | x=tf.expand_dims(x, 0)
206 | #224 224
207 | x=tflearn.layers.conv.conv_2d(x,16,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
208 | x=tflearn.layers.conv.conv_2d(x,16,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
209 | x0=x
210 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
211 | #112 112
212 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
213 | x=tflearn.layers.conv.conv_2d(x,32,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
214 | x1=x
215 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
216 | #56 56
217 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
218 | x=tflearn.layers.conv.conv_2d(x,64,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
219 | x2=x
220 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
221 | #28 28
222 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
223 | x=tflearn.layers.conv.conv_2d(x,128,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
224 | x3=x
225 | x=tflearn.layers.conv.conv_2d(x,256,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
226 | #14 14
227 | x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
228 | x=tflearn.layers.conv.conv_2d(x,256,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
229 | x4=x
230 | x=tflearn.layers.conv.conv_2d(x,512,(5,5),strides=2,activation='relu',weight_decay=1e-5,regularizer='L2')
231 | #7 7
232 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
233 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
234 | x=tflearn.layers.conv.conv_2d(x,512,(3,3),strides=1,activation='relu',weight_decay=1e-5,regularizer='L2')
235 | x5=x
236 |         # update image feature
237 | self.placeholders.update({'img_feat': [tf.squeeze(x2), tf.squeeze(x3), tf.squeeze(x4), tf.squeeze(x5)]})
238 | self.loss += tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)) * 0.3
239 |
--------------------------------------------------------------------------------
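
Note on models.py above: build_cnn18 is a VGG-style encoder that halves the resolution five times (224 -> 112 -> 56 -> 28 -> 14 -> 7) while widening the channels (16 -> 32 -> 64 -> 128 -> 256 -> 512). The four maps hooked into img_feat (x2, x3, x4, x5) carry 64, 128, 256 and 512 channels, so a projected vertex receives 960 perceptual features, which together with its 3 coordinates gives the 963-dim feat_dim declared in train.py. A quick arithmetic check:

    # Channels of the four feature maps exposed as img_feat (x2, x3, x4, x5),
    # at resolutions 56x56, 28x28, 14x14 and 7x7.
    channels = [64, 128, 256, 512]
    coord_dim = 3  # x, y, z of each mesh vertex
    print(sum(channels) + coord_dim)  # 963, matching FLAGS.feat_dim in train.py
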
/p2m/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import numpy as np
18 |
19 | def construct_feed_dict(pkl, placeholders):
20 | """Construct feed dictionary."""
21 | coord = pkl[0]
22 | pool_idx = pkl[4]
23 | faces = pkl[5]
24 | # laplace = pkl[6]
25 | lape_idx = pkl[7]
26 |
27 | edges = []
28 | for i in range(1,4):
29 | adj = pkl[i][1]
30 | edges.append(adj[0])
31 |
32 | feed_dict = dict()
33 | feed_dict.update({placeholders['features']: coord})
34 | feed_dict.update({placeholders['edges'][i]: edges[i] for i in range(len(edges))})
35 | feed_dict.update({placeholders['faces'][i]: faces[i] for i in range(len(faces))})
36 | feed_dict.update({placeholders['pool_idx'][i]: pool_idx[i] for i in range(len(pool_idx))})
37 | feed_dict.update({placeholders['lape_idx'][i]: lape_idx[i] for i in range(len(lape_idx))})
38 | feed_dict.update({placeholders['support1'][i]: pkl[1][i] for i in range(len(pkl[1]))})
39 | feed_dict.update({placeholders['support2'][i]: pkl[2][i] for i in range(len(pkl[2]))})
40 | feed_dict.update({placeholders['support3'][i]: pkl[3][i] for i in range(len(pkl[3]))})
41 | return feed_dict
42 |
--------------------------------------------------------------------------------
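
Note on construct_feed_dict above: it unpacks the pre-computed ellipsoid pickle, binding the initial coordinates (pkl[0]), the per-block GCN support matrices (pkl[1..3]), unpooling indices (pkl[4]), faces (pkl[5]) and Laplacian neighbor tables (pkl[7]) to their placeholders; the edge lists are read off the second support of each block, whose first element holds the index pairs of the sparse adjacency. A hypothetical sanity-check sketch (run under Python 2, like the rest of the repo); the index layout is inferred from the function, and the expected list lengths follow from the placeholder counts in train.py (3 blocks, 2 unpooling steps):

    import pickle

    # Hypothetical sanity check of the auxiliary ellipsoid pickle.
    pkl = pickle.load(open('Data/ellipsoid/info_ellipsoid.dat', 'rb'))

    assert pkl[0].shape[1] == 3   # initial vertex coordinates (x, y, z)
    assert len(pkl[4]) == 2       # pool_idx: one table per unpooling step
    assert len(pkl[5]) == 3       # faces: one array per block
    assert len(pkl[7]) == 3       # lape_idx: one table per block
    for i in range(1, 4):         # support matrices for blocks 1..3
        adj = pkl[i][1]           # sparse tuple: (indices, values, shape)
        print('block', i, 'edge pairs:', adj[0].shape)
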
/train.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, Yu-Gang Jiang, Fudan University
2 | #
3 | # Licensed to the Apache Software Foundation (ASF) under one or more
4 | # contributor license agreements.
5 | # The ASF licenses this file to You under the Apache License, Version 2.0
6 | # (the "License"); you may not use this file except in compliance with
7 | # the License. You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | #
17 | import tensorflow as tf
18 | from p2m.utils import *
19 | from p2m.models import GCN
20 | from p2m.fetcher import *
21 | import os
22 | os.environ['CUDA_VISIBLE_DEVICES'] = '1'
23 |
24 | # Set random seed
25 | seed = 1024
26 | np.random.seed(seed)
27 | tf.set_random_seed(seed)
28 |
29 | # Settings
30 | flags = tf.app.flags
31 | FLAGS = flags.FLAGS
32 | flags.DEFINE_string('data_list', 'Data/train_list.txt', 'Data list.') # training data list
33 | flags.DEFINE_float('learning_rate', 1e-5, 'Initial learning rate.')
34 | flags.DEFINE_integer('epochs', 5, 'Number of epochs to train.')
35 | flags.DEFINE_integer('hidden', 256, 'Number of units in hidden layer.') # gcn hidden layer channel
36 | flags.DEFINE_integer('feat_dim', 963, 'Number of units in feature layer.') # image feature dim
37 | flags.DEFINE_integer('coord_dim', 3, 'Number of units in output layer.')
38 | flags.DEFINE_float('weight_decay', 5e-6, 'Weight decay for L2 loss.')
39 |
40 | # Define placeholders (dict) and model
41 | num_blocks = 3
42 | num_supports = 2
43 | placeholders = {
44 | 'features': tf.placeholder(tf.float32, shape=(None, 3)),
45 | 'img_inp': tf.placeholder(tf.float32, shape=(224, 224, 3)),
46 | 'labels': tf.placeholder(tf.float32, shape=(None, 6)),
47 | 'support1': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
48 | 'support2': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
49 | 'support3': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
50 | 'faces': [tf.placeholder(tf.int32, shape=(None, 4)) for _ in range(num_blocks)], #for face loss, not used.
51 | 'edges': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks)],
52 | 'lape_idx': [tf.placeholder(tf.int32, shape=(None, 10)) for _ in range(num_blocks)], #for laplace term
53 | 'pool_idx': [tf.placeholder(tf.int32, shape=(None, 2)) for _ in range(num_blocks-1)] #for unpooling
54 | }
55 | model = GCN(placeholders, logging=True)
56 |
57 | # Load data, initialize session
58 | data = DataFetcher(FLAGS.data_list)
59 | data.setDaemon(True) # daemon thread: dies with the main process
60 | data.start()
61 | config=tf.ConfigProto()
62 | #config.gpu_options.allow_growth=True
63 | config.allow_soft_placement=True
64 | sess = tf.Session(config=config)
65 | sess.run(tf.global_variables_initializer())
66 | #model.load(sess)
67 |
68 | # Train graph model
69 | train_loss = open('record_train_loss.txt', 'a')
70 | train_loss.write('Start training, lr = %f\n'%(FLAGS.learning_rate))
71 | pkl = pickle.load(open('Data/ellipsoid/info_ellipsoid.dat', 'rb'))
72 | feed_dict = construct_feed_dict(pkl, placeholders)
73 |
74 | train_number = data.number
75 | for epoch in range(FLAGS.epochs):
76 | all_loss = np.zeros(train_number,dtype='float32')
77 | for iters in range(train_number):
78 | # Fetch training data
79 | img_inp, y_train, data_id = data.fetch()
80 | feed_dict.update({placeholders['img_inp']: img_inp})
81 | feed_dict.update({placeholders['labels']: y_train})
82 |
83 | # Training step
84 | _, dists,out1,out2,out3 = sess.run([model.opt_op,model.loss,model.output1,model.output2,model.output3], feed_dict=feed_dict)
85 | all_loss[iters] = dists
86 | mean_loss = np.mean(all_loss[np.where(all_loss)])
87 | if (iters+1) % 128 == 0:
88 | print 'Epoch %d, Iteration %d'%(epoch + 1,iters + 1)
89 | print 'Mean loss = %f, iter loss = %f, %d'%(mean_loss,dists,data.queue.qsize())
90 | # Save model
91 | model.save(sess)
92 | train_loss.write('Epoch %d, loss %f\n'%(epoch+1, mean_loss))
93 | train_loss.flush()
94 |
95 | data.shutdown()
96 | print 'Training Finished!'
97 |
--------------------------------------------------------------------------------
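
Note on the training loop above: all_loss starts as zeros and is filled one iteration at a time, so np.mean(all_loss[np.where(all_loss)]) averages only the entries written so far within the epoch (an iteration whose loss is exactly zero would also be excluded, which is harmless in practice). A tiny NumPy illustration:

    import numpy as np

    # Running mean over only the iterations seen so far, as in the loop above.
    all_loss = np.zeros(5, dtype='float32')
    for i, loss in enumerate([2.0, 4.0]):  # two iterations done out of five
        all_loss[i] = loss
    print(np.mean(all_loss[np.where(all_loss)]))  # 3.0, not (2 + 4) / 5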