├── .gitignore ├── README.md ├── matlab ├── ACAPOpt.mexw64 ├── AddPartFace2TotalFace.m ├── AddPriori.m ├── AdjustOutputEncode.m ├── CGAL-vc140-mt-4.11.dll ├── CalculateEdgeIndex.m ├── DATA │ └── Chair │ │ └── Chair │ │ └── d374912c3a9fca96c141a04b2a487fd9 │ │ └── models │ │ ├── back.obj │ │ ├── leg_ver_1.obj │ │ ├── leg_ver_2.obj │ │ ├── leg_ver_3.obj │ │ ├── leg_ver_4.obj │ │ ├── model_normalized.obj │ │ └── seat.obj ├── FeatureMap.m ├── GenerateData.m ├── GetBoundingBox4PointCloud.m ├── GetBoundingBox4PointCloudWithFaces.m ├── GetOptimizedObj.m ├── GetTransformedCube.m ├── MergeOBJWithTexture.m ├── Pipeline.m ├── PrepareForTraining.m ├── ReconstructFromCodeMixInteger.m ├── ReconstructFromCodeMixIntegerReadingObjinAdvance.m ├── SaveObj.m ├── SaveObjT.mexw64 ├── ShapeNet │ └── Chair │ │ └── d374912c3a9fca96c141a04b2a487fd9 │ │ ├── images │ │ ├── texture0.jpg │ │ └── texture1.jpg │ │ ├── leaf_part_ids.json │ │ ├── leaf_part_obj │ │ ├── 1.obj │ │ └── 2.obj │ │ ├── leaf_part_obj_normalized │ │ ├── 1.obj │ │ └── 2.obj │ │ ├── models │ │ ├── model_normalized.json │ │ ├── model_normalized.mtl │ │ ├── model_normalized.obj │ │ ├── model_normalized.solid.binvox │ │ └── model_normalized.surface.binvox │ │ └── normalization_params.txt ├── SupportAnalysis.m ├── SupportAnalysisScript.m ├── TransferColorPerPixelScript.m ├── ViewOBJandTexture.m ├── WriteMtl.m ├── WriteOBJwithMtl.m ├── axisangle2matrix.m ├── boundbox.m ├── cell2file.m ├── changebbxvert.m ├── cotlp.m ├── cotlpvf.m ├── create_regular_grid.m ├── cube.m ├── cube_car │ ├── car.obj │ ├── cube_std.obj │ └── cube_std_2d.obj ├── cube_chair │ ├── chair.obj │ ├── cube_std.obj │ └── cube_std_2d.obj ├── cube_plane │ ├── cube_std.obj │ ├── cube_std_2d.obj │ └── plane.obj ├── cube_table │ ├── cube_std.obj │ ├── cube_std_2d.obj │ └── table.obj ├── file2cellArray.m ├── getlabel.m ├── isout.m ├── libgmp-10.dll ├── libmpfr-4.dll ├── meshlp.mexw64 ├── meshlpvf.mexw64 ├── mpfr-vc80-mt.dll ├── nonregistration │ ├── NonRigidAlignment3Dnew.m │ ├── TimedProgressBar.m │ ├── mkdirOptional.m │ ├── nonrigidregis.m │ ├── register_script.m │ └── search_nn_bidirector.m ├── normalizerow.m ├── opencv_world420.dll ├── patchslim.m ├── point_mesh_squared_distance.mexw64 ├── ray_tracing.exe ├── ray_tracing_4car.exe ├── ray_tracing_4carbody.exe ├── ray_tracing_transparency.exe ├── readOBJ.m ├── readobjfromfile.mexw64 ├── regist.m ├── remove_duplicate_vertices.m ├── removeoutliner.m ├── sort_nat.m └── teaser.jpg ├── python ├── agent │ ├── __init__.py │ ├── agent_geovae.py │ ├── agent_pixelsnail.py │ ├── agent_pixelsnail_others.py │ ├── agent_spvae.py │ ├── agent_vqvae.py │ └── base.py ├── conditional_sample_2levels_central_part.py ├── conditional_sample_2levels_other_parts.py ├── config │ └── __init__.py ├── dataset │ ├── __init__.py │ ├── dataset.py │ ├── dataset_geoall.py │ ├── dataset_geovae.py │ ├── dataset_latent_geo.py │ ├── dataset_latent_geo_2levels.py │ ├── dataset_latent_geo_VGG_2levels.py │ ├── dataset_spvae.py │ └── dataset_vqvae.py ├── extract_latents_central_part.py ├── extract_latents_geo_only_all_parts.py ├── extract_latents_other_parts.py ├── networks │ ├── GraphConvyj.py │ ├── ModelParallel.py │ ├── __init__.py │ ├── networks_geovae.py │ ├── networks_pixelsnail.py │ ├── networks_spvae.py │ └── networks_vqvae.py ├── preprocess │ └── split_dataset.py ├── test.py ├── train.py ├── util │ ├── change_color.py │ ├── copy_dir.py │ ├── random_cmap.py │ ├── utils.py │ └── visualization.py └── yaml │ └── table │ ├── leg │ ├── geovae.yml │ ├── pixelsnail_bottom.yml │ └── 
pixelsnail_top.yml │ ├── spvae.yml │ ├── surface │ ├── geovae.yml │ ├── pixelsnail_bottom.yml │ └── pixelsnail_top.yml │ └── vqvae.yml └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | *.pyc 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TM-NET: Deep Generative Networks for Textured Meshes 2 | 3 | This is a Python 3 / PyTorch implementation of TM-NET. 4 | 5 | 6 | - [Setup](#setup) 7 | - [Prepare Data](#prepare-data) 8 | - [Training and Test](#training-and-test) 9 | 10 | # Setup 11 | 12 | To run this code you need the following: 13 | 14 | - A machine with multiple GPUs (memory >= 12GB) 15 | 16 | - The Python packages listed in requirements.txt 17 | ``` 18 | pip install -r requirements.txt 19 | ``` 20 | 21 | # Prepare Data 22 | 23 | [Data Link](https://mailsucaseducn-my.sharepoint.com/:f:/g/personal/yangjie161_mails_ucas_edu_cn/Eo-bBsAGSjZHpURudUZZvUkBzT1Kv_WxMu971z5Xb4tFgw?e=9ahATN) 24 | 25 | 1. Run ```GetTransformedCube.m``` to get a transformed mini bounding box for each part of a 3D model, which will be used as the source shape in non-rigid registration. 26 | 2. Run ```SupportAnalysis.m``` to extract structure information from the part ```obj``` files, producing a corresponding ```code.mat``` for each 3D model. 27 | 3. Run ```regist.m``` to perform non-rigid registration from the transformed mini bounding boxes to the original 3D model parts. 28 | 4. Run ```GenerateData.m``` to extract the deformation information between the source shapes and the registered shapes, which will be used as the input of ```TM-NET```. 29 | 5. Run ```TransferColorPerPixelScript.m``` to generate texture images for the registered parts. 30 | 6. Run ```PrepareForTraining.m``` to split the geometry, structure, and image data into training and test directories. It also divides each texture image into six patches. 31 | 32 | An example is shown in ```Pipeline.m``` and sketched at the end of this section. After you successfully run the code, the directory structure will look as follows: 33 | ```txt 34 | ├─box50 35 | │ ├─37b6df64a97a5c29369151623ac3890b 36 | │ └─d374912c3a9fca96c141a04b2a487fd9 37 | ├─Chair 38 | │ ├─37b6df64a97a5c29369151623ac3890b 39 | │ | └─models 40 | │ └─d374912c3a9fca96c141a04b2a487fd9 41 | │ └─models 42 | ├─final50 43 | │ ├─test 44 | │ │ └─37b6df64a97a5c29369151623ac3890b 45 | │ └─train 46 | │ └─d374912c3a9fca96c141a04b2a487fd9 47 | └─vaenew50 48 | ├─37b6df64a97a5c29369151623ac3890b 49 | │ ├─back 50 | │ ├─leg_ver_1 51 | │ ├─leg_ver_2 52 | │ ├─leg_ver_3 53 | │ ├─leg_ver_4 54 | │ └─seat 55 | └─d374912c3a9fca96c141a04b2a487fd9 56 | ├─back 57 | ├─leg_ver_1 58 | ├─leg_ver_2 59 | ├─leg_ver_3 60 | ├─leg_ver_4 61 | └─seat 62 | ``` 63 | The folder ```final50``` is all we need for training and testing.
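For reference, the full preparation sequence for the chair category looks like the minimal sketch below, mirroring the calls in ```Pipeline.m```; the data directories correspond to the example data shipped under ```matlab/DATA``` and ```matlab/ShapeNet```, so adjust them to wherever you unpacked the downloaded data.
```matlab
% Minimal sketch of the data preparation pipeline (chair category), following Pipeline.m
addpath('.\nonregistration')
cate = 'chair';
postfix = 50;                          % suffix of the output folders (box50, vaenew50, final50)
data_dir = '.\DATA\Chair\Chair';       % per-part obj files + model_normalized.obj
shapenet_root = '.\ShapeNet\Chair';    % original ShapeNet models with texture images
box_dir = fullfile(data_dir, '..', ['box', num2str(postfix)]);
vae_dir = fullfile(data_dir, '..', ['vaenew', num2str(postfix)]);
final_dir = fullfile(data_dir, '..', ['final', num2str(postfix)]);

GetTransformedCube(data_dir, postfix, cate);     % transformed mini bounding boxes per part
regist(box_dir, cate);                           % non-rigid registration of cubes to parts
SupportAnalysisScript(box_dir, cate);            % structure code.mat for each model
GenerateData(box_dir, vae_dir, cate);            % ACAP deformation features
TransferColorPerPixelScript(cate, box_dir, shapenet_root, vae_dir);  % texture images
PrepareForTraining(cate, vae_dir, final_dir, 0.75);                  % train/test split + 6 patches per texture
```
After training, ```ViewOBJandTexture.m``` generates a single textured part and ```MergeOBJWithTexture.m``` merges the parts into the whole textured model, as noted at the end of ```Pipeline.m```.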
64 | 65 | # Training and Test 66 | 67 | - Train PartVAE for each part 68 | ```shell 69 | python ./python/train.py --yaml ./python/yaml/table/surface/geovae.yml 70 | python ./python/train.py --yaml ./python/yaml/table/leg/geovae.yml 71 | ``` 72 | 73 | - Train VQVAE 74 | ```shell 75 | python ./python/train.py --yaml ./python/yaml/table/vqvae.yml 76 | ``` 77 | 78 | - Extract discrete code for the seed part 79 | ```shell 80 | python ./python/extract_latents_central_part.py \ 81 | --image_dir ../data/table/ \ 82 | --mat_dir ../data/table \ 83 | --vqvae_ckpt ./table_vqvae/latest.pth \ 84 | --vqvae_yaml ./python/yaml/table/vqvae.yml \ 85 | --geovae_ckpt ./table_geovae/surface/latest.pth \ 86 | --geovae_yaml ./python/yaml/table/surface/geovae.yml \ 87 | --category table \ 88 | --save_path ./table_latents \ 89 | --device 0 \ 90 | --mode 'train'  # one of 'train', 'test', 'val' 91 | ``` 92 | 93 | - Train conditional PixelSNAIL for the seed part 94 | ```shell 95 | python ./python/train.py --yaml ./python/yaml/table/surface/pixelsnail_top.yml 96 | python ./python/train.py --yaml ./python/yaml/table/surface/pixelsnail_bottom.yml 97 | ``` 98 | 99 | - Extract discrete code for other parts 100 | ```shell 101 | python ./python/extract_latents_other_parts.py \ 102 | --image_dir ../data/table/ \ 103 | --mat_dir ../data/table \ 104 | --vqvae_ckpt ./table_vqvae/latest.pth \ 105 | --vqvae_yaml ./python/yaml/table/vqvae.yml \ 106 | --geovae_ckpt_dir ./table_geovae \ 107 | --geovae_yaml ./python/yaml/table/geovae.yml \ 108 | --category table \ 109 | --save_path ./table_latents \ 110 | --device 0 \ 111 | --mode 'train'  # one of 'train', 'test', 'val' 112 | ``` 113 | 114 | - Train conditional PixelSNAIL for other parts 115 | ```shell 116 | python ./python/train.py --yaml ./python/yaml/table/leg/pixelsnail_top.yml 117 | python ./python/train.py --yaml ./python/yaml/table/leg/pixelsnail_bottom.yml 118 | ``` 119 | 120 | - Sample texture for the seed part 121 | ```shell 122 | python ./python/conditional_sample_2levels_central_part.py \ 123 | --path ./table_latents \ 124 | --part_name surface \ 125 | --vqvae ./table_vqvae/latest.pth \ 126 | --vqvae_yaml ./python/yaml/table/vqvae.yml \ 127 | --top ./table_pixelsnail/top_16/latest.pth \ 128 | --top_yaml ./python/yaml/table/pixelsnail_top_center_16.yml \ 129 | --bottom ./table_pixelsnail/bottom/latest.pth \ 130 | --bottom_yaml ./python/yaml/table/pixelsnail_bottom_center.yml \ 131 | --device 0 \ 132 | --batch 1 133 | ``` 134 | 135 | - Sample texture for other parts 136 | ```shell 137 | python ./python/conditional_sample_2levels_other_parts.py \ 138 | --path ./table_latents \ 139 | --central_part_name surface \ 140 | --part_name leg \ 141 | --vqvae ./table_vqvae/latest.pth \ 142 | --vqvae_yaml ./python/yaml/table/vqvae.yml \ 143 | --top ./table_pixelsnail/leg/top_16/latest.pth \ 144 | --top_yaml ./python/yaml/table/leg/pixelsnail_top_center_16.yml \ 145 | --bottom ./table_pixelsnail/leg/bottom/latest.pth \ 146 | --bottom_yaml ./python/yaml/table/leg/pixelsnail_bottom_center.yml \ 147 | --central_part_sample_dir ./table_pixelsnail/top_16/auto_texture \ 148 | --device 0 \ 149 | --batch 1 150 | ``` 151 | -------------------------------------------------------------------------------- /matlab/ACAPOpt.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ACAPOpt.mexw64 --------------------------------------------------------------------------------
/matlab/AddPartFace2TotalFace.m: -------------------------------------------------------------------------------- 1 | function new_total_f = AddPartFace2TotalFace(total_f, part_f) 2 | max_point = max(total_f(:)); 3 | if isempty(max_point) 4 | new_total_f = [total_f; part_f]; 5 | else 6 | part_f = part_f + max_point; 7 | new_total_f = [total_f; part_f]; 8 | end 9 | end -------------------------------------------------------------------------------- /matlab/AddPriori.m: -------------------------------------------------------------------------------- 1 | function [code] = AddPriori(code, type, part_names) 2 | part_num = size(part_names, 2); 3 | if strcmp(type, 'chair') == 1 4 | code(8, 1+3) = 1; 5 | code(3, 1+8+part_num) = 1; 6 | for i = 4:7 7 | code(i, 1+8) = 1; 8 | code(8, 1+i+part_num) = 1; 9 | end 10 | symmetry = [ 11 | 1, 1, 0, 0, 0; 12 | 1, 1, 0, 0, 0; 13 | 0, 0, 0, 0, 0; 14 | 1, 1, 0, 0, 0; 15 | 1, 1, 0, 0, 0; 16 | 1, 1, 0, 0, 0; 17 | 1, 1, 0, 0, 0; 18 | 0, 0, 0, 0, 0; 19 | ]; 20 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 21 | elseif strcmp(type, 'knife') == 1 22 | code(2, 1+1) = 1; 23 | code(1, 1+2+part_num) = 1; 24 | symmetry = [ 25 | 0, 0, 0, 0, 0; 26 | 0, 0, 0, 0, 0; 27 | ]; 28 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 29 | elseif strcmp(type, 'guitar') == 1 30 | supporting = [ 31 | 0, 0, 0; 32 | 1, 0, 0; 33 | 0, 1, 0 34 | ]; 35 | code(:, 2:1+part_num) = supporting; 36 | code(:, 2+part_num:1+2*part_num) = supporting'; 37 | symmetry = [ 38 | 0, 0, 0, 0, 0; 39 | 0, 0, 0, 0, 0; 40 | 0, 0, 0, 0, 0; 41 | ]; 42 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 43 | elseif strcmp(type, 'skateboard') == 1 44 | supporting = [ 45 | 0, 0, 0, 0, 0, 0, 0; 46 | 1, 0, 0, 1, 1, 1, 1; 47 | 1, 0, 0, 1, 1, 1, 1; 48 | 1, 0, 0, 0, 0, 0, 0; 49 | 1, 0, 0, 0, 0, 0, 0; 50 | 1, 0, 0, 0, 0, 0, 0; 51 | 1, 0, 0, 0, 0, 0, 0; 52 | ]; 53 | code(:, 2:1+part_num) = supporting; 54 | code(:, 2+part_num:1+2*part_num) = supporting'; 55 | symmetry = [ 56 | 0, 0, 0, 0, 0; 57 | 0, 0, 0, 0, 0; 58 | 0, 0, 0, 0, 0; 59 | 1, 1, 0, 0, 0; 60 | 1, 1, 0, 0, 0; 61 | 1, 1, 0, 0, 0; 62 | 1, 1, 0, 0, 0; 63 | ]; 64 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 65 | elseif strcmp(type, 'cup') == 1 66 | supporting = [ 67 | 0, 0; 68 | 1, 0; 69 | ]; 70 | code(:, 2:1+part_num) = supporting; 71 | code(:, 2+part_num:1+2*part_num) = supporting'; 72 | symmetry = [ 73 | 0, 0, 0, 0, 0; 74 | 0, 0, 0, 0, 0; 75 | ]; 76 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 77 | elseif strcmp(type, 'car') == 1 78 | supporting = [ 79 | 0, 0, 0, 0, 0, 1, 1; 80 | 1, 0, 0, 0, 0, 0, 0; 81 | 1, 0, 0, 0, 0, 0, 0; 82 | 1, 0, 0, 0, 0, 0, 0; 83 | 1, 0, 0, 0, 0, 0, 0; 84 | 0, 0, 0, 0, 0, 0, 0; 85 | 0, 0, 0, 0, 0, 0, 0; 86 | ]; 87 | code(:, 2:1+part_num) = supporting; 88 | code(:, 2+part_num:1+2*part_num) = supporting'; 89 | symmetry = [ 90 | 0, 0, 0, 0, 0; 91 | 1, 1, 0, 0, 0; 92 | 1, 1, 0, 0, 0; 93 | 1, 1, 0, 0, 0; 94 | 1, 1, 0, 0, 0; 95 | 1, 1, 0, 0, 0; 96 | 1, 1, 0, 0, 0; 97 | ]; 98 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 99 | elseif strcmp(type, 'plane') == 1 100 | supporting = [ 101 | 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0; 102 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0; 103 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1; 104 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 105 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 106 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 107 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 108 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 109 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 110 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0; 111 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 112 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 113 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 114 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0; 115 | ]; 116 | code(:, 2:1+part_num) = supporting; 117 | code(:, 2+part_num:1+2*part_num) = supporting'; 118 | symmetry = [ 119 | 0, 0, 0, 0, 0; 120 | 1, 1, 0, 0, 0; 121 | 1, 1, 0, 0, 0; 122 | 1, 1, 0, 0, 0; 123 | 1, 1, 0, 0, 0; 124 | 0, 0, 0, 0, 0; 125 | 0, 0, 0, 0, 0; 126 | 0, 0, 0, 0, 0; 127 | 1, 1, 0, 0, 0; 128 | 1, 1, 0, 0, 0; 129 | 1, 1, 0, 0, 0; 130 | 1, 1, 0, 0, 0; 131 | 1, 1, 0, 0, 0; 132 | 1, 1, 0, 0, 0; 133 | ]; 134 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 135 | elseif strcmp(type, 'table') == 1 136 | supporting = [ 137 | 0, 0, 0, 0, 0, 0, 0, 0; 138 | 1, 0, 0, 0, 0, 0, 0, 0; 139 | 1, 0, 0, 0, 0, 0, 0, 0; 140 | 1, 0, 0, 0, 0, 0, 0, 0; 141 | 1, 0, 0, 0, 0, 0, 0, 0; 142 | 1, 0, 0, 0, 0, 0, 0, 0; 143 | 1, 0, 0, 0, 0, 0, 0, 0; 144 | 1, 0, 0, 0, 0, 0, 0, 0; 145 | 1, 0, 0, 0, 0, 0, 0, 0; 146 | ]; 147 | code(:, 2:1+part_num) = supporting; 148 | code(:, 2+part_num:1+2*part_num) = supporting'; 149 | symmetry = [ 150 | 0, 0, 0, 0, 0; 151 | 1, 1, 0, 0, 0; 152 | 1, 1, 0, 0, 0; 153 | 1, 1, 0, 0, 0; 154 | 1, 1, 0, 0, 0; 155 | 1, 1, 0, 0, 0; 156 | 1, 1, 0, 0, 0; 157 | 1, 1, 0, 0, 0; 158 | 1, 1, 0, 0, 0; 159 | ]; 160 | code(:, 2*part_num+5:2*part_num+9) = symmetry; 161 | end 162 | end -------------------------------------------------------------------------------- /matlab/AdjustOutputEncode.m: -------------------------------------------------------------------------------- 1 | function [new_encode] = AdjustOutputEncode(encode) 2 | new_encode = zeros(size(encode)); 3 | n = size(encode, 1); 4 | for i = 1:n 5 | for j = 1:2*n+1 6 | if encode(i, j) < 0.5 7 | new_encode(i, j) = 0; 8 | else 9 | new_encode(i, j) = 1; 10 | end 11 | end 12 | for j = 2*n+2:2*n+4 13 | new_encode(i, j) = encode(i, j); 14 | end 15 | for j = 2*n+5:2*n+9 16 | if encode(i, j) < 0.5 17 | new_encode(i, j) = 0; 18 | else 19 | new_encode(i, j) = 1; 20 | end 21 | end 22 | end 23 | end -------------------------------------------------------------------------------- /matlab/CGAL-vc140-mt-4.11.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/CGAL-vc140-mt-4.11.dll -------------------------------------------------------------------------------- /matlab/CalculateEdgeIndex.m: -------------------------------------------------------------------------------- 1 | function CalculateEdgeIndex(obj_file) 2 | [V, F, ~, ~, ~, VVsimp, CotWeight,~,~,~,edge_index] = cotlp(obj_file); 3 | edge_index=[edge_index;edge_index(:,[2,1])]'; 4 | F = F'; 5 | edge_index = edge_index'; 6 | [filepath,name,ext] = fileparts(obj_file); 7 | save(fullfile(filepath, [name, '.mat']), 'V', 'F', 'edge_index'); 8 | end 9 | -------------------------------------------------------------------------------- /matlab/DATA/Chair/Chair/d374912c3a9fca96c141a04b2a487fd9/models/leg_ver_1.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object leg_ver_1.obj 7 | # 8 | # Vertices: 53 9 | # Faces: 202 10 | # 11 | #### 12 | v -0.143501 -0.144077 -0.236513 13 | v -0.143501 -0.158457 -0.200564 14 | v -0.107552 -0.158457 -0.200564 15 | v -0.143501 -0.158457 -0.236513 16 | v -0.107552 -0.158457 -0.236513 17 | v -0.141306 -0.188366 -0.234317 18 | v 
-0.109748 -0.188366 -0.234317 19 | v -0.141306 -0.188366 -0.202760 20 | v -0.109748 -0.188366 -0.202760 21 | v -0.139508 -0.218301 -0.232520 22 | v -0.111546 -0.218301 -0.232520 23 | v -0.139508 -0.218301 -0.204557 24 | v -0.111546 -0.218301 -0.204557 25 | v -0.138110 -0.248258 -0.231121 26 | v -0.112944 -0.248258 -0.231121 27 | v -0.138110 -0.248258 -0.205956 28 | v -0.112944 -0.248258 -0.205956 29 | v -0.137111 -0.278230 -0.230123 30 | v -0.113943 -0.278230 -0.230123 31 | v -0.137111 -0.278230 -0.206955 32 | v -0.113943 -0.278230 -0.206955 33 | v -0.136511 -0.308213 -0.229523 34 | v -0.114542 -0.308213 -0.229523 35 | v -0.136511 -0.308213 -0.207554 36 | v -0.114542 -0.308213 -0.207554 37 | v -0.136312 -0.338202 -0.229323 38 | v -0.114742 -0.338202 -0.229323 39 | v -0.136312 -0.338202 -0.207754 40 | v -0.114742 -0.338202 -0.207754 41 | v -0.136511 -0.368190 -0.207554 42 | v -0.114542 -0.368190 -0.207554 43 | v -0.136511 -0.368190 -0.229523 44 | v -0.114542 -0.368190 -0.229523 45 | v -0.137111 -0.398173 -0.206955 46 | v -0.113943 -0.398173 -0.206955 47 | v -0.137111 -0.398173 -0.230123 48 | v -0.113943 -0.398173 -0.230123 49 | v -0.138110 -0.428146 -0.205956 50 | v -0.112944 -0.428146 -0.205956 51 | v -0.138110 -0.428146 -0.231121 52 | v -0.112944 -0.428146 -0.231121 53 | v -0.139508 -0.458103 -0.204557 54 | v -0.111546 -0.458103 -0.204557 55 | v -0.139508 -0.458103 -0.232520 56 | v -0.111546 -0.458103 -0.232520 57 | v -0.141306 -0.488038 -0.202760 58 | v -0.109748 -0.488038 -0.202760 59 | v -0.141306 -0.488038 -0.234317 60 | v -0.109748 -0.488038 -0.234317 61 | v -0.143501 -0.517947 -0.200564 62 | v -0.107552 -0.517947 -0.200564 63 | v -0.143501 -0.517947 -0.236513 64 | v -0.107552 -0.517947 -0.236513 65 | # 53 vertices, 0 vertices normals 66 | 67 | f 2 1 4 68 | f 6 2 4 69 | f 3 7 5 70 | f 8 3 2 71 | f 2 6 8 72 | f 7 3 9 73 | f 3 8 9 74 | f 10 8 6 75 | f 9 11 7 76 | f 12 9 8 77 | f 8 10 12 78 | f 11 9 13 79 | f 9 12 13 80 | f 14 12 10 81 | f 13 15 11 82 | f 16 13 12 83 | f 12 14 16 84 | f 15 13 17 85 | f 13 16 17 86 | f 18 16 14 87 | f 17 19 15 88 | f 20 17 16 89 | f 16 18 20 90 | f 19 17 21 91 | f 17 20 21 92 | f 22 20 18 93 | f 21 23 19 94 | f 24 21 20 95 | f 20 22 24 96 | f 23 21 25 97 | f 21 24 25 98 | f 26 24 22 99 | f 25 27 23 100 | f 28 25 24 101 | f 24 26 28 102 | f 27 25 29 103 | f 25 28 29 104 | f 26 30 28 105 | f 31 27 29 106 | f 30 29 28 107 | f 30 26 32 108 | f 27 31 33 109 | f 29 30 31 110 | f 32 34 30 111 | f 35 33 31 112 | f 34 31 30 113 | f 34 32 36 114 | f 33 35 37 115 | f 31 34 35 116 | f 36 38 34 117 | f 39 37 35 118 | f 38 35 34 119 | f 38 36 40 120 | f 37 39 41 121 | f 35 38 39 122 | f 40 42 38 123 | f 43 41 39 124 | f 42 39 38 125 | f 42 40 44 126 | f 41 43 45 127 | f 39 42 43 128 | f 44 46 42 129 | f 47 45 43 130 | f 46 43 42 131 | f 46 44 48 132 | f 45 47 49 133 | f 43 46 47 134 | f 48 50 46 135 | f 51 49 47 136 | f 50 47 46 137 | f 50 48 52 138 | f 49 51 53 139 | f 47 50 51 140 | f 53 50 52 141 | f 50 53 51 142 | f 4 1 2 143 | f 5 2 4 144 | f 4 2 5 145 | f 4 2 6 146 | f 2 5 3 147 | f 3 5 2 148 | f 5 7 3 149 | f 2 3 8 150 | f 5 4 7 151 | f 8 6 2 152 | f 6 7 4 153 | f 9 3 7 154 | f 9 8 3 155 | f 6 8 10 156 | f 7 6 11 157 | f 7 11 9 158 | f 8 9 12 159 | f 12 10 8 160 | f 10 11 6 161 | f 13 9 11 162 | f 13 12 9 163 | f 10 12 14 164 | f 11 10 15 165 | f 11 15 13 166 | f 12 13 16 167 | f 16 14 12 168 | f 14 15 10 169 | f 17 13 15 170 | f 17 16 13 171 | f 14 16 18 172 | f 15 14 19 173 | f 15 19 17 174 | f 16 17 20 175 | f 20 18 16 176 | f 18 19 14 177 | f 21 17 19 178 | 
f 21 20 17 179 | f 18 20 22 180 | f 19 18 23 181 | f 19 23 21 182 | f 20 21 24 183 | f 24 22 20 184 | f 22 23 18 185 | f 25 21 23 186 | f 25 24 21 187 | f 22 24 26 188 | f 23 22 27 189 | f 23 27 25 190 | f 24 25 28 191 | f 28 26 24 192 | f 26 27 22 193 | f 29 25 27 194 | f 29 28 25 195 | f 28 30 26 196 | f 27 26 33 197 | f 29 27 31 198 | f 28 29 30 199 | f 32 26 30 200 | f 32 33 26 201 | f 33 31 27 202 | f 31 30 29 203 | f 30 34 32 204 | f 33 32 37 205 | f 31 33 35 206 | f 30 31 34 207 | f 36 32 34 208 | f 36 37 32 209 | f 37 35 33 210 | f 35 34 31 211 | f 34 38 36 212 | f 37 36 41 213 | f 35 37 39 214 | f 34 35 38 215 | f 40 36 38 216 | f 40 41 36 217 | f 41 39 37 218 | f 39 38 35 219 | f 38 42 40 220 | f 41 40 45 221 | f 39 41 43 222 | f 38 39 42 223 | f 44 40 42 224 | f 44 45 40 225 | f 45 43 41 226 | f 43 42 39 227 | f 42 46 44 228 | f 45 44 49 229 | f 43 45 47 230 | f 42 43 46 231 | f 48 44 46 232 | f 48 49 44 233 | f 49 47 45 234 | f 47 46 43 235 | f 46 50 48 236 | f 49 48 53 237 | f 47 49 51 238 | f 46 47 50 239 | f 52 48 50 240 | f 52 53 48 241 | f 53 51 49 242 | f 51 50 47 243 | f 52 50 53 244 | f 51 53 50 245 | f 7 4 5 246 | f 4 7 6 247 | f 11 6 7 248 | f 6 11 10 249 | f 15 10 11 250 | f 10 15 14 251 | f 19 14 15 252 | f 14 19 18 253 | f 23 18 19 254 | f 18 23 22 255 | f 27 22 23 256 | f 22 27 26 257 | f 33 26 27 258 | f 26 33 32 259 | f 37 32 33 260 | f 32 37 36 261 | f 41 36 37 262 | f 36 41 40 263 | f 45 40 41 264 | f 40 45 44 265 | f 49 44 45 266 | f 44 49 48 267 | f 53 48 49 268 | f 48 53 52 269 | # 202 faces, 0 coords texture 270 | 271 | # End of File 272 | -------------------------------------------------------------------------------- /matlab/DATA/Chair/Chair/d374912c3a9fca96c141a04b2a487fd9/models/leg_ver_2.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object leg_ver_2.obj 7 | # 8 | # Vertices: 53 9 | # Faces: 204 10 | # 11 | #### 12 | v 0.144090 -0.158903 -0.201427 13 | v 0.110337 -0.188812 -0.203623 14 | v 0.141894 -0.188812 -0.203623 15 | v 0.108141 -0.158903 -0.201427 16 | v 0.112135 -0.218747 -0.205420 17 | v 0.141894 -0.188812 -0.235180 18 | v 0.110337 -0.188812 -0.235180 19 | v 0.140097 -0.218747 -0.205420 20 | v 0.112135 -0.218747 -0.233382 21 | v 0.144090 -0.158903 -0.237376 22 | v 0.140097 -0.218747 -0.233382 23 | v 0.108141 -0.158903 -0.237268 24 | v 0.113533 -0.248703 -0.206819 25 | v 0.113533 -0.248703 -0.231984 26 | v 0.108141 -0.158903 -0.237376 27 | v 0.138699 -0.248703 -0.206819 28 | v 0.138699 -0.248703 -0.231984 29 | v 0.114532 -0.278676 -0.207817 30 | v 0.114532 -0.278676 -0.230985 31 | v 0.137700 -0.278676 -0.230985 32 | v 0.137700 -0.278676 -0.207817 33 | v 0.115131 -0.308659 -0.208417 34 | v 0.115131 -0.308659 -0.230386 35 | v 0.137100 -0.308659 -0.230386 36 | v 0.137100 -0.308659 -0.208417 37 | v 0.115331 -0.338648 -0.208617 38 | v 0.115331 -0.338648 -0.230186 39 | v 0.136901 -0.338648 -0.230186 40 | v 0.136901 -0.338648 -0.208617 41 | v 0.115131 -0.368636 -0.208417 42 | v 0.137100 -0.368636 -0.230386 43 | v 0.137100 -0.368636 -0.208417 44 | v 0.115131 -0.368636 -0.230386 45 | v 0.114532 -0.398619 -0.207817 46 | v 0.137700 -0.398619 -0.230985 47 | v 0.137700 -0.398619 -0.207817 48 | v 0.114532 -0.398619 -0.230985 49 | v 0.113533 -0.428592 -0.206819 50 | v 0.138699 -0.428592 -0.231984 51 | v 0.138699 -0.428592 -0.206819 52 | v 0.113533 -0.428592 -0.231984 53 | v 0.112135 -0.458548 -0.205420 54 | v 0.140097 -0.458548 -0.233382 55 | v 
0.140097 -0.458548 -0.205420 56 | v 0.112135 -0.458548 -0.233382 57 | v 0.110337 -0.488484 -0.203623 58 | v 0.141894 -0.488484 -0.235180 59 | v 0.141894 -0.488484 -0.203623 60 | v 0.110337 -0.488484 -0.235180 61 | v 0.108141 -0.518392 -0.201427 62 | v 0.144090 -0.518392 -0.237376 63 | v 0.144090 -0.518392 -0.201427 64 | v 0.108141 -0.518392 -0.237376 65 | # 53 vertices, 0 vertices normals 66 | 67 | f 1 2 3 68 | f 2 1 4 69 | f 5 3 2 70 | f 6 1 3 71 | f 4 7 2 72 | f 3 5 8 73 | f 2 9 5 74 | f 1 6 10 75 | f 3 11 6 76 | f 12 7 4 77 | f 9 2 7 78 | f 13 8 5 79 | f 11 3 8 80 | f 14 5 9 81 | f 6 15 10 82 | f 11 7 6 83 | f 7 12 15 84 | f 7 11 9 85 | f 8 13 16 86 | f 5 14 13 87 | f 8 17 11 88 | f 9 17 14 89 | f 15 6 7 90 | f 17 9 11 91 | f 18 16 13 92 | f 17 8 16 93 | f 19 13 14 94 | f 20 14 17 95 | f 16 18 21 96 | f 13 19 18 97 | f 16 20 17 98 | f 14 20 19 99 | f 22 21 18 100 | f 20 16 21 101 | f 23 18 19 102 | f 24 19 20 103 | f 21 22 25 104 | f 18 23 22 105 | f 21 24 20 106 | f 19 24 23 107 | f 26 25 22 108 | f 24 21 25 109 | f 27 22 23 110 | f 28 23 24 111 | f 25 26 29 112 | f 22 27 26 113 | f 25 28 24 114 | f 23 28 27 115 | f 30 29 26 116 | f 28 25 29 117 | f 27 30 26 118 | f 31 27 28 119 | f 29 30 32 120 | f 32 28 29 121 | f 30 27 33 122 | f 27 31 33 123 | f 28 32 31 124 | f 34 32 30 125 | f 33 34 30 126 | f 35 33 31 127 | f 36 31 32 128 | f 32 34 36 129 | f 34 33 37 130 | f 33 35 37 131 | f 31 36 35 132 | f 38 36 34 133 | f 37 38 34 134 | f 39 37 35 135 | f 40 35 36 136 | f 36 38 40 137 | f 38 37 41 138 | f 37 39 41 139 | f 35 40 39 140 | f 42 40 38 141 | f 41 42 38 142 | f 43 41 39 143 | f 44 39 40 144 | f 40 42 44 145 | f 42 41 45 146 | f 41 43 45 147 | f 39 44 43 148 | f 46 44 42 149 | f 45 46 42 150 | f 47 45 43 151 | f 48 43 44 152 | f 44 46 48 153 | f 46 45 49 154 | f 45 47 49 155 | f 43 48 47 156 | f 50 48 46 157 | f 49 50 46 158 | f 51 49 47 159 | f 52 47 48 160 | f 48 50 52 161 | f 50 49 53 162 | f 49 51 53 163 | f 47 52 51 164 | f 50 51 52 165 | f 51 50 53 166 | f 3 2 1 167 | f 4 1 2 168 | f 2 3 5 169 | f 3 1 6 170 | f 1 12 4 171 | f 4 12 1 172 | f 2 7 4 173 | f 8 5 3 174 | f 5 9 2 175 | f 10 6 1 176 | f 6 11 3 177 | f 12 1 10 178 | f 10 1 12 179 | f 4 7 12 180 | f 7 2 9 181 | f 5 8 13 182 | f 8 3 11 183 | f 9 5 14 184 | f 10 15 6 185 | f 6 7 11 186 | f 12 10 15 187 | f 15 10 12 188 | f 15 12 7 189 | f 9 11 7 190 | f 16 13 8 191 | f 13 14 5 192 | f 11 17 8 193 | f 14 17 9 194 | f 7 6 15 195 | f 11 9 17 196 | f 13 16 18 197 | f 16 8 17 198 | f 14 13 19 199 | f 17 14 20 200 | f 21 18 16 201 | f 18 19 13 202 | f 17 20 16 203 | f 19 20 14 204 | f 18 21 22 205 | f 21 16 20 206 | f 19 18 23 207 | f 20 19 24 208 | f 25 22 21 209 | f 22 23 18 210 | f 20 24 21 211 | f 23 24 19 212 | f 22 25 26 213 | f 25 21 24 214 | f 23 22 27 215 | f 24 23 28 216 | f 29 26 25 217 | f 26 27 22 218 | f 24 28 25 219 | f 27 28 23 220 | f 26 29 30 221 | f 29 25 28 222 | f 26 30 27 223 | f 28 27 31 224 | f 32 30 29 225 | f 29 28 32 226 | f 33 27 30 227 | f 33 31 27 228 | f 31 32 28 229 | f 30 32 34 230 | f 30 34 33 231 | f 31 33 35 232 | f 32 31 36 233 | f 36 34 32 234 | f 37 33 34 235 | f 37 35 33 236 | f 35 36 31 237 | f 34 36 38 238 | f 34 38 37 239 | f 35 37 39 240 | f 36 35 40 241 | f 40 38 36 242 | f 41 37 38 243 | f 41 39 37 244 | f 39 40 35 245 | f 38 40 42 246 | f 38 42 41 247 | f 39 41 43 248 | f 40 39 44 249 | f 44 42 40 250 | f 45 41 42 251 | f 45 43 41 252 | f 43 44 39 253 | f 42 44 46 254 | f 42 46 45 255 | f 43 45 47 256 | f 44 43 48 257 | f 48 46 44 258 | f 49 45 46 259 | f 49 47 45 260 | f 47 48 
43 261 | f 46 48 50 262 | f 46 50 49 263 | f 47 49 51 264 | f 48 47 52 265 | f 52 50 48 266 | f 53 49 50 267 | f 53 51 49 268 | f 51 52 47 269 | f 52 51 50 270 | f 53 50 51 271 | # 204 faces, 0 coords texture 272 | 273 | # End of File 274 | -------------------------------------------------------------------------------- /matlab/DATA/Chair/Chair/d374912c3a9fca96c141a04b2a487fd9/models/leg_ver_3.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object leg_ver_3.obj 7 | # 8 | # Vertices: 55 9 | # Faces: 208 10 | # 11 | #### 12 | v 0.108141 -0.144077 0.050216 13 | v 0.108141 -0.144077 0.014267 14 | v 0.108141 -0.158457 0.050216 15 | v 0.144090 -0.144077 0.014267 16 | v 0.144090 -0.158457 0.050216 17 | v 0.110337 -0.188366 0.048020 18 | v 0.144090 -0.158457 0.014267 19 | v 0.108141 -0.158457 0.014267 20 | v 0.110337 -0.188366 0.016463 21 | v 0.141894 -0.188366 0.016463 22 | v 0.141894 -0.188366 0.048020 23 | v 0.112135 -0.218301 0.046223 24 | v 0.112135 -0.218301 0.018260 25 | v 0.140097 -0.218301 0.018260 26 | v 0.140097 -0.218301 0.046223 27 | v 0.113533 -0.248258 0.044824 28 | v 0.113533 -0.248258 0.019658 29 | v 0.138699 -0.248258 0.019658 30 | v 0.138699 -0.248258 0.044824 31 | v 0.114532 -0.278230 0.043826 32 | v 0.114532 -0.278230 0.020657 33 | v 0.137700 -0.278230 0.020657 34 | v 0.137700 -0.278230 0.043826 35 | v 0.115131 -0.308213 0.043226 36 | v 0.115131 -0.308213 0.021257 37 | v 0.137100 -0.308213 0.021257 38 | v 0.137100 -0.308213 0.043226 39 | v 0.115331 -0.338202 0.043026 40 | v 0.115331 -0.338202 0.021457 41 | v 0.136901 -0.338202 0.021457 42 | v 0.136901 -0.338202 0.043026 43 | v 0.115131 -0.368190 0.043226 44 | v 0.137100 -0.368190 0.021257 45 | v 0.137100 -0.368190 0.043226 46 | v 0.115131 -0.368190 0.021257 47 | v 0.114532 -0.398173 0.043826 48 | v 0.137700 -0.398173 0.020657 49 | v 0.137700 -0.398173 0.043826 50 | v 0.114532 -0.398173 0.020657 51 | v 0.113533 -0.428146 0.044824 52 | v 0.138699 -0.428146 0.019658 53 | v 0.138699 -0.428146 0.044824 54 | v 0.113533 -0.428146 0.019658 55 | v 0.112135 -0.458103 0.046223 56 | v 0.140097 -0.458103 0.018260 57 | v 0.140097 -0.458103 0.046223 58 | v 0.112135 -0.458103 0.018260 59 | v 0.110337 -0.488038 0.048020 60 | v 0.141894 -0.488038 0.016463 61 | v 0.141894 -0.488038 0.048020 62 | v 0.110337 -0.488038 0.016463 63 | v 0.108141 -0.517947 0.050216 64 | v 0.144090 -0.517947 0.014267 65 | v 0.144090 -0.517947 0.050216 66 | v 0.108141 -0.517947 0.014267 67 | # 55 vertices, 0 vertices normals 68 | 69 | f 6 5 3 70 | f 9 3 8 71 | f 5 10 7 72 | f 5 6 11 73 | f 3 9 6 74 | f 8 10 9 75 | f 10 8 7 76 | f 10 5 11 77 | f 12 11 6 78 | f 13 6 9 79 | f 14 9 10 80 | f 11 14 10 81 | f 11 12 15 82 | f 6 13 12 83 | f 9 14 13 84 | f 14 11 15 85 | f 16 15 12 86 | f 17 12 13 87 | f 18 13 14 88 | f 15 18 14 89 | f 15 16 19 90 | f 12 17 16 91 | f 13 18 17 92 | f 18 15 19 93 | f 20 19 16 94 | f 21 16 17 95 | f 22 17 18 96 | f 19 22 18 97 | f 19 20 23 98 | f 16 21 20 99 | f 17 22 21 100 | f 22 19 23 101 | f 24 23 20 102 | f 25 20 21 103 | f 26 21 22 104 | f 23 26 22 105 | f 23 24 27 106 | f 20 25 24 107 | f 21 26 25 108 | f 26 23 27 109 | f 28 27 24 110 | f 29 24 25 111 | f 30 25 26 112 | f 27 30 26 113 | f 27 28 31 114 | f 24 29 28 115 | f 25 30 29 116 | f 30 27 31 117 | f 32 31 28 118 | f 29 32 28 119 | f 33 29 30 120 | f 34 30 31 121 | f 31 32 34 122 | f 32 29 35 123 | f 29 33 35 124 | f 30 34 33 125 | f 36 34 32 126 | f 35 36 32 127 | f 37 35 33 
128 | f 38 33 34 129 | f 34 36 38 130 | f 36 35 39 131 | f 35 37 39 132 | f 33 38 37 133 | f 40 38 36 134 | f 39 40 36 135 | f 41 39 37 136 | f 42 37 38 137 | f 38 40 42 138 | f 40 39 43 139 | f 39 41 43 140 | f 37 42 41 141 | f 44 42 40 142 | f 43 44 40 143 | f 45 43 41 144 | f 46 41 42 145 | f 42 44 46 146 | f 44 43 47 147 | f 43 45 47 148 | f 41 46 45 149 | f 48 46 44 150 | f 47 48 44 151 | f 49 47 45 152 | f 50 45 46 153 | f 46 48 50 154 | f 48 47 51 155 | f 47 49 51 156 | f 45 50 49 157 | f 52 50 48 158 | f 51 52 48 159 | f 53 51 49 160 | f 54 49 50 161 | f 50 52 54 162 | f 52 51 55 163 | f 51 53 55 164 | f 49 54 53 165 | f 52 53 54 166 | f 53 52 55 167 | f 8 1 3 168 | f 3 1 8 169 | f 1 8 2 170 | f 2 8 1 171 | f 8 4 2 172 | f 2 4 8 173 | f 3 7 5 174 | f 5 7 3 175 | f 3 5 6 176 | f 8 3 9 177 | f 7 3 8 178 | f 8 3 7 179 | f 4 8 7 180 | f 7 8 4 181 | f 7 10 5 182 | f 11 6 5 183 | f 6 9 3 184 | f 9 10 8 185 | f 7 8 10 186 | f 11 5 10 187 | f 6 11 12 188 | f 9 6 13 189 | f 10 9 14 190 | f 10 14 11 191 | f 15 12 11 192 | f 12 13 6 193 | f 13 14 9 194 | f 15 11 14 195 | f 12 15 16 196 | f 13 12 17 197 | f 14 13 18 198 | f 14 18 15 199 | f 19 16 15 200 | f 16 17 12 201 | f 17 18 13 202 | f 19 15 18 203 | f 16 19 20 204 | f 17 16 21 205 | f 18 17 22 206 | f 18 22 19 207 | f 23 20 19 208 | f 20 21 16 209 | f 21 22 17 210 | f 23 19 22 211 | f 20 23 24 212 | f 21 20 25 213 | f 22 21 26 214 | f 22 26 23 215 | f 27 24 23 216 | f 24 25 20 217 | f 25 26 21 218 | f 27 23 26 219 | f 24 27 28 220 | f 25 24 29 221 | f 26 25 30 222 | f 26 30 27 223 | f 31 28 27 224 | f 28 29 24 225 | f 29 30 25 226 | f 31 27 30 227 | f 28 31 32 228 | f 28 32 29 229 | f 30 29 33 230 | f 31 30 34 231 | f 34 32 31 232 | f 35 29 32 233 | f 35 33 29 234 | f 33 34 30 235 | f 32 34 36 236 | f 32 36 35 237 | f 33 35 37 238 | f 34 33 38 239 | f 38 36 34 240 | f 39 35 36 241 | f 39 37 35 242 | f 37 38 33 243 | f 36 38 40 244 | f 36 40 39 245 | f 37 39 41 246 | f 38 37 42 247 | f 42 40 38 248 | f 43 39 40 249 | f 43 41 39 250 | f 41 42 37 251 | f 40 42 44 252 | f 40 44 43 253 | f 41 43 45 254 | f 42 41 46 255 | f 46 44 42 256 | f 47 43 44 257 | f 47 45 43 258 | f 45 46 41 259 | f 44 46 48 260 | f 44 48 47 261 | f 45 47 49 262 | f 46 45 50 263 | f 50 48 46 264 | f 51 47 48 265 | f 51 49 47 266 | f 49 50 45 267 | f 48 50 52 268 | f 48 52 51 269 | f 49 51 53 270 | f 50 49 54 271 | f 54 52 50 272 | f 55 51 52 273 | f 55 53 51 274 | f 53 54 49 275 | f 54 53 52 276 | f 55 52 53 277 | # 208 faces, 0 coords texture 278 | 279 | # End of File 280 | -------------------------------------------------------------------------------- /matlab/DATA/Chair/Chair/d374912c3a9fca96c141a04b2a487fd9/models/leg_ver_4.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object leg_ver_4.obj 7 | # 8 | # Vertices: 55 9 | # Faces: 212 10 | # 11 | #### 12 | v -0.143518 -0.144077 0.014267 13 | v -0.107569 -0.144077 0.050216 14 | v -0.107569 -0.144077 0.014267 15 | v -0.143518 -0.158457 0.050216 16 | v -0.107569 -0.158457 0.050216 17 | v -0.143518 -0.158457 0.014267 18 | v -0.107569 -0.158457 0.014267 19 | v -0.109765 -0.188366 0.016463 20 | v -0.141322 -0.188366 0.016463 21 | v -0.111563 -0.218301 0.018260 22 | v -0.109765 -0.188366 0.048020 23 | v -0.139525 -0.218301 0.018260 24 | v -0.112961 -0.248258 0.019658 25 | v -0.111563 -0.218301 0.046223 26 | v -0.138126 -0.248258 0.019658 27 | v -0.113960 -0.278230 0.020657 28 | v -0.112961 -0.248258 0.044824 29 | v 
-0.137128 -0.278230 0.020657 30 | v -0.114559 -0.308213 0.021257 31 | v -0.113960 -0.278230 0.043826 32 | v -0.136528 -0.308213 0.021257 33 | v -0.114759 -0.338202 0.021457 34 | v -0.114559 -0.308213 0.043226 35 | v -0.136328 -0.338202 0.021457 36 | v -0.114559 -0.368190 0.021257 37 | v -0.114759 -0.338202 0.043026 38 | v -0.136528 -0.368190 0.021257 39 | v -0.114559 -0.368190 0.043226 40 | v -0.113960 -0.398173 0.020657 41 | v -0.113960 -0.398173 0.043826 42 | v -0.137128 -0.398173 0.020657 43 | v -0.112961 -0.428146 0.019658 44 | v -0.112961 -0.428146 0.044824 45 | v -0.138126 -0.428146 0.019658 46 | v -0.111563 -0.458103 0.018260 47 | v -0.111563 -0.458103 0.046223 48 | v -0.139525 -0.458103 0.018260 49 | v -0.109765 -0.488038 0.016463 50 | v -0.109765 -0.488038 0.048020 51 | v -0.141322 -0.488038 0.016463 52 | v -0.107569 -0.517947 0.014267 53 | v -0.107569 -0.517947 0.050216 54 | v -0.143518 -0.517947 0.014267 55 | v -0.143518 -0.517947 0.050216 56 | v -0.141322 -0.188366 0.048020 57 | v -0.139525 -0.218301 0.046223 58 | v -0.138126 -0.248258 0.044824 59 | v -0.137128 -0.278230 0.043826 60 | v -0.136528 -0.308213 0.043226 61 | v -0.136328 -0.338202 0.043026 62 | v -0.136528 -0.368190 0.043226 63 | v -0.137128 -0.398173 0.043826 64 | v -0.138126 -0.428146 0.044824 65 | v -0.139525 -0.458103 0.046223 66 | v -0.141322 -0.488038 0.048020 67 | # 55 vertices, 0 vertices normals 68 | 69 | f 2 4 5 70 | f 8 6 7 71 | f 6 8 9 72 | f 5 8 7 73 | f 10 9 8 74 | f 8 5 11 75 | f 9 10 12 76 | f 11 10 8 77 | f 13 12 10 78 | f 10 11 14 79 | f 12 13 15 80 | f 14 13 10 81 | f 16 15 13 82 | f 13 14 17 83 | f 15 16 18 84 | f 17 16 13 85 | f 19 18 16 86 | f 16 17 20 87 | f 18 19 21 88 | f 20 19 16 89 | f 22 21 19 90 | f 19 20 23 91 | f 21 22 24 92 | f 23 22 19 93 | f 25 24 22 94 | f 22 23 26 95 | f 24 25 27 96 | f 22 28 25 97 | f 28 22 26 98 | f 29 27 25 99 | f 30 25 28 100 | f 27 29 31 101 | f 25 30 29 102 | f 32 31 29 103 | f 33 29 30 104 | f 31 32 34 105 | f 29 33 32 106 | f 35 34 32 107 | f 36 32 33 108 | f 34 35 37 109 | f 32 36 35 110 | f 38 37 35 111 | f 39 35 36 112 | f 37 38 40 113 | f 35 39 38 114 | f 41 40 38 115 | f 42 38 39 116 | f 40 41 43 117 | f 38 42 41 118 | f 41 44 43 119 | f 44 41 42 120 | f 6 3 1 121 | f 1 3 6 122 | f 6 1 4 123 | f 5 4 2 124 | f 3 5 2 125 | f 2 5 3 126 | f 3 6 7 127 | f 7 6 3 128 | f 7 4 6 129 | f 6 4 7 130 | f 6 4 9 131 | f 4 7 5 132 | f 5 7 4 133 | f 4 5 45 134 | f 5 3 7 135 | f 7 3 5 136 | f 7 6 8 137 | f 45 9 4 138 | f 9 8 6 139 | f 7 8 5 140 | f 11 45 5 141 | f 9 45 12 142 | f 8 9 10 143 | f 11 5 8 144 | f 45 11 46 145 | f 46 12 45 146 | f 12 10 9 147 | f 8 10 11 148 | f 14 46 11 149 | f 12 46 15 150 | f 10 12 13 151 | f 14 11 10 152 | f 46 14 47 153 | f 47 15 46 154 | f 15 13 12 155 | f 10 13 14 156 | f 17 47 14 157 | f 15 47 18 158 | f 13 15 16 159 | f 17 14 13 160 | f 47 17 48 161 | f 48 18 47 162 | f 18 16 15 163 | f 13 16 17 164 | f 20 48 17 165 | f 18 48 21 166 | f 16 18 19 167 | f 20 17 16 168 | f 48 20 49 169 | f 49 21 48 170 | f 21 19 18 171 | f 16 19 20 172 | f 23 49 20 173 | f 21 49 24 174 | f 19 21 22 175 | f 23 20 19 176 | f 49 23 50 177 | f 50 24 49 178 | f 24 22 21 179 | f 19 22 23 180 | f 26 50 23 181 | f 50 51 24 182 | f 22 24 25 183 | f 26 23 22 184 | f 50 26 51 185 | f 27 24 51 186 | f 27 25 24 187 | f 25 28 22 188 | f 26 22 28 189 | f 28 51 26 190 | f 51 52 27 191 | f 25 27 29 192 | f 28 25 30 193 | f 51 28 52 194 | f 31 27 52 195 | f 31 29 27 196 | f 29 30 25 197 | f 30 52 28 198 | f 52 53 31 199 | f 29 31 32 200 | f 30 29 33 201 | f 52 30 53 202 
| f 34 31 53 203 | f 34 32 31 204 | f 32 33 29 205 | f 33 53 30 206 | f 53 54 34 207 | f 32 34 35 208 | f 33 32 36 209 | f 53 33 54 210 | f 37 34 54 211 | f 37 35 34 212 | f 35 36 32 213 | f 36 54 33 214 | f 54 55 37 215 | f 35 37 38 216 | f 36 35 39 217 | f 54 36 55 218 | f 40 37 55 219 | f 40 38 37 220 | f 38 39 35 221 | f 39 55 36 222 | f 55 44 40 223 | f 38 40 41 224 | f 39 38 42 225 | f 55 39 44 226 | f 43 40 44 227 | f 43 41 40 228 | f 41 42 38 229 | f 42 44 39 230 | f 43 44 41 231 | f 42 41 44 232 | f 4 1 6 233 | f 9 4 6 234 | f 45 5 4 235 | f 4 9 45 236 | f 5 45 11 237 | f 12 45 9 238 | f 46 11 45 239 | f 45 12 46 240 | f 11 46 14 241 | f 15 46 12 242 | f 47 14 46 243 | f 46 15 47 244 | f 14 47 17 245 | f 18 47 15 246 | f 48 17 47 247 | f 47 18 48 248 | f 17 48 20 249 | f 21 48 18 250 | f 49 20 48 251 | f 48 21 49 252 | f 20 49 23 253 | f 24 49 21 254 | f 50 23 49 255 | f 49 24 50 256 | f 23 50 26 257 | f 24 51 50 258 | f 51 26 50 259 | f 51 24 27 260 | f 26 51 28 261 | f 27 52 51 262 | f 52 28 51 263 | f 52 27 31 264 | f 28 52 30 265 | f 31 53 52 266 | f 53 30 52 267 | f 53 31 34 268 | f 30 53 33 269 | f 34 54 53 270 | f 54 33 53 271 | f 54 34 37 272 | f 33 54 36 273 | f 37 55 54 274 | f 55 36 54 275 | f 55 37 40 276 | f 36 55 39 277 | f 40 44 55 278 | f 44 39 55 279 | f 44 40 43 280 | f 39 44 42 281 | # 212 faces, 0 coords texture 282 | 283 | # End of File 284 | -------------------------------------------------------------------------------- /matlab/DATA/Chair/Chair/d374912c3a9fca96c141a04b2a487fd9/models/seat.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object seat.obj 7 | # 8 | # Vertices: 26 9 | # Faces: 86 10 | # 11 | #### 12 | v 0.144090 -0.158903 -0.201427 13 | v 0.108141 -0.158903 -0.201427 14 | v 0.144090 -0.158903 -0.237376 15 | v 0.108141 -0.158903 -0.237268 16 | v 0.108141 -0.158903 -0.237376 17 | v 0.108141 -0.144077 0.050216 18 | v 0.108141 -0.144077 0.014267 19 | v -0.143518 -0.144077 0.014267 20 | v -0.107569 -0.144077 0.050216 21 | v 0.108141 -0.158457 0.050216 22 | v 0.144090 -0.144077 0.014267 23 | v -0.107569 -0.144077 0.014267 24 | v -0.143518 -0.158457 0.050216 25 | v 0.144090 -0.158457 0.050216 26 | v -0.107569 -0.158457 0.050216 27 | v -0.143501 -0.144077 -0.236513 28 | v 0.144090 -0.158457 0.014267 29 | v 0.108141 -0.158457 0.014267 30 | v 0.144090 -0.144077 -0.237376 31 | v -0.143501 -0.158457 -0.200564 32 | v -0.143518 -0.158457 0.014267 33 | v -0.107552 -0.158457 -0.200564 34 | v 0.144090 -0.158457 -0.237376 35 | v -0.107569 -0.158457 0.014267 36 | v -0.143501 -0.158457 -0.236513 37 | v -0.107552 -0.158457 -0.236513 38 | # 26 vertices, 0 vertices normals 39 | 40 | f 1 4 2 41 | f 2 4 1 42 | f 4 1 3 43 | f 3 1 4 44 | f 4 3 5 45 | f 5 3 4 46 | f 6 15 10 47 | f 7 16 6 48 | f 11 16 7 49 | f 12 16 8 50 | f 9 13 15 51 | f 15 6 9 52 | f 15 18 10 53 | f 6 16 12 54 | f 16 11 19 55 | f 9 6 12 56 | f 8 20 21 57 | f 16 20 8 58 | f 18 15 22 59 | f 18 23 17 60 | f 23 11 17 61 | f 11 23 19 62 | f 23 16 19 63 | f 20 24 21 64 | f 20 16 25 65 | f 22 15 24 66 | f 26 18 22 67 | f 23 18 26 68 | f 16 23 25 69 | f 22 24 20 70 | f 23 26 25 71 | f 10 15 6 72 | f 18 6 10 73 | f 10 6 18 74 | f 6 16 7 75 | f 6 18 7 76 | f 7 18 6 77 | f 18 11 7 78 | f 7 11 18 79 | f 7 16 11 80 | f 21 12 8 81 | f 8 12 21 82 | f 8 16 12 83 | f 21 8 13 84 | f 15 13 9 85 | f 10 17 14 86 | f 14 17 10 87 | f 9 6 15 88 | f 10 18 15 89 | f 17 10 18 90 | f 18 10 17 91 | f 12 16 6 92 | f 11 18 17 
93 | f 17 18 11 94 | f 19 11 16 95 | f 12 6 9 96 | f 12 15 9 97 | f 9 15 12 98 | f 12 21 24 99 | f 24 21 12 100 | f 21 20 8 101 | f 8 20 16 102 | f 24 13 21 103 | f 21 13 24 104 | f 13 24 15 105 | f 15 24 13 106 | f 22 15 18 107 | f 17 23 18 108 | f 17 11 23 109 | f 19 23 11 110 | f 19 16 23 111 | f 15 12 24 112 | f 24 12 15 113 | f 21 24 20 114 | f 25 16 20 115 | f 24 15 22 116 | f 22 18 26 117 | f 26 18 23 118 | f 25 23 16 119 | f 20 24 22 120 | f 26 20 25 121 | f 25 20 26 122 | f 20 26 22 123 | f 22 26 20 124 | f 25 26 23 125 | f 13 8 21 126 | # 86 faces, 0 coords texture 127 | 128 | # End of File 129 | -------------------------------------------------------------------------------- /matlab/FeatureMap.m: -------------------------------------------------------------------------------- 1 | function [ fmlogdr, fms ] = FeatureMap( LOGDR, S ) 2 | 3 | %LOGDR = LOGDR'; 4 | [mlogdr, nlogdr] = size(LOGDR); 5 | [ms, ns] = size(S); 6 | edgenum = nlogdr/9; 7 | snum = ns/9; 8 | %create 9 | fmlogdr = zeros(mlogdr, edgenum*3); 10 | fms = zeros(ms, snum*6); 11 | 12 | for i = 1 : mlogdr 13 | for j = 0 : edgenum-1 14 | fmlogdr(i, j*3+1) = LOGDR(i, j*9+2); 15 | fmlogdr(i, j*3+2) = LOGDR(i, j*9+3); 16 | fmlogdr(i, j*3+3) = LOGDR(i, j*9+6); 17 | end 18 | end 19 | 20 | 21 | for i = 1 : ms 22 | for j = 0 : snum-1 23 | fms(i, j*6+1) = S(i, j*9+1); 24 | fms(i, j*6+2) = S(i, j*9+2); 25 | fms(i, j*6+3) = S(i, j*9+3); 26 | fms(i, j*6+4) = S(i, j*9+5); 27 | fms(i, j*6+5) = S(i, j*9+6); 28 | fms(i, j*6+6) = S(i, j*9+9); 29 | end 30 | end 31 | 32 | end 33 | 34 | -------------------------------------------------------------------------------- /matlab/GenerateData.m: -------------------------------------------------------------------------------- 1 | function GenerateData(box_dir, acap_output_dir, category) 2 | part_names = getlabel(category); 3 | d = dir(box_dir); 4 | isub = [d(:).isdir]; 5 | nameFolds = {d(isub).name}'; 6 | nameFolds(ismember(nameFolds,{'.','..'})) = []; 7 | 8 | ref_mesh = fullfile(['.\cube_',category], 'cube_std.obj'); 9 | if ~exist(ref_mesh,'file') 10 | error('No cube mesh!') 11 | end 12 | if ~exist(acap_output_dir, 'dir') 13 | mkdir(acap_output_dir); 14 | end 15 | 16 | for i = 1:size(nameFolds, 1) 17 | id = nameFolds{i}; 18 | output_dir = fullfile(acap_output_dir, id); 19 | if ~exist(output_dir, 'dir') 20 | mkdir(output_dir); 21 | end 22 | if exist(fullfile(box_dir,id,'code.bad'),'file') 23 | copyfile(fullfile(box_dir, id, 'code.bad'),fullfile(output_dir,'code.mat')) 24 | elseif exist(fullfile(box_dir,id,'code.mat'),'file') 25 | copyfile(fullfile(box_dir, id, 'code.mat'),fullfile(output_dir,'code.mat')) 26 | end 27 | 28 | for j = 1:size(part_names, 2) 29 | deformed_mesh = fullfile(box_dir, id, [part_names{j}, '_reg.obj']); 30 | if exist(deformed_mesh, 'file') 31 | part_output_dir = fullfile(output_dir, part_names{j}); 32 | if ~exist(part_output_dir, 'dir') 33 | mkdir(part_output_dir); 34 | end 35 | mat_file = fullfile(output_dir, [id, '_', part_names{j}, '.mat']); 36 | if ~exist(fullfile(part_output_dir, 'S.txt'), 'file') || ~exist(fullfile(part_output_dir, 'LOGRNEW.txt'), 'file') 37 | copyfile(ref_mesh, fullfile(part_output_dir, '0.obj')); 38 | copyfile(deformed_mesh, part_output_dir); 39 | try 40 | ACAPOpt(part_output_dir); 41 | catch 42 | fprint('error id %s, %s', id, part_names{j}); 43 | continue; 44 | end 45 | % ACAPOpt2Meshes(ref_mesh, deformed_mesh, part_output_dir); 46 | end 47 | if exist(fullfile(part_output_dir, 'LOGRNEW.txt'), 'file') && exist(fullfile(part_output_dir, 'S.txt'), 'file') 
&& ~exist(mat_file,'file') 48 | LOGRNEW = dlmread(fullfile(part_output_dir, 'LOGRNEW.txt')); 49 | S = dlmread(fullfile(part_output_dir, 'S.txt')); 50 | LOGRNEW = LOGRNEW(2, :); 51 | S = S(2, :); 52 | pointnum = size(S, 2)/9; 53 | 54 | [ fmlogdr, fms ] = FeatureMap( LOGRNEW, S ); 55 | fmlogdr = permute(reshape(fmlogdr,size(fmlogdr,1),3,pointnum),[1,3,2]); 56 | fms = permute(reshape(fms,size(fms,1),6,pointnum),[1,3,2]); 57 | % acap_feature = cat(3, fmlogdr, fms); 58 | save(mat_file, 'fmlogdr', 'fms'); 59 | end 60 | end 61 | end 62 | disp([id, ' ACAP finish!']); 63 | end 64 | 65 | end 66 | 67 | -------------------------------------------------------------------------------- /matlab/GetBoundingBox4PointCloud.m: -------------------------------------------------------------------------------- 1 | function [local_bbox,local_bbox_face] = GetBoundingBox4PointCloud(pc) 2 | if size(pc, 1) == 0 3 | local_bbox = []; 4 | return; 5 | end 6 | % get bounding box 7 | max_point = max(pc, [], 1); 8 | maxx = max_point(1)+0.001; 9 | maxy = max_point(2)+0.001; 10 | maxz = max_point(3)+0.001; 11 | 12 | min_point = min(pc, [], 1); 13 | minx = min_point(1)-0.001; 14 | miny = min_point(2)-0.001; 15 | minz = min_point(3)-0.001; 16 | 17 | x_diff = maxx - minx; 18 | y_diff = maxy - miny; 19 | z_diff = maxz - minz; 20 | local_bbox(1, :) = [minx, miny, minz]; 21 | local_bbox(2, :) = [minx+x_diff, miny, minz]; 22 | local_bbox(3, :) = [minx+x_diff, miny, minz+z_diff]; 23 | local_bbox(4, :) = [minx, miny, minz+z_diff]; 24 | local_bbox(5, :) = [minx, miny+y_diff, minz]; 25 | local_bbox(6, :) = [minx+x_diff, miny+y_diff, minz]; 26 | local_bbox(7, :) = [minx+x_diff, miny+y_diff, minz+z_diff]; 27 | local_bbox(8, :) = [minx, miny+y_diff, minz+z_diff]; 28 | local_bbox_face=[1 6 2;5 6 1;7 3 2 ;7 2 6 ;4 3 7; 8 4 7;1 4 8;1 8 5;5 7 6;5 8 7 ;1 3 4; 1 2 3]; 29 | end -------------------------------------------------------------------------------- /matlab/GetBoundingBox4PointCloudWithFaces.m: -------------------------------------------------------------------------------- 1 | function [v, f] = GetBoundingBox4PointCloudWithFaces(pc) 2 | if size(pc, 1) == 0 3 | v = []; 4 | f = []; 5 | return; 6 | end 7 | % get bounding box 8 | max_point = max(pc, [], 1); 9 | maxx = max_point(1); 10 | maxy = max_point(2); 11 | maxz = max_point(3); 12 | 13 | min_point = min(pc, [], 1); 14 | minx = min_point(1); 15 | miny = min_point(2); 16 | minz = min_point(3); 17 | 18 | x_diff = maxx - minx; 19 | y_diff = maxy - miny; 20 | z_diff = maxz - minz; 21 | v(1, :) = [minx, miny, minz]; 22 | v(2, :) = [minx+x_diff, miny, minz]; 23 | v(3, :) = [minx+x_diff, miny, minz+z_diff]; 24 | v(4, :) = [minx, miny, minz+z_diff]; 25 | v(5, :) = [minx, miny+y_diff, minz]; 26 | v(6, :) = [minx+x_diff, miny+y_diff, minz]; 27 | v(7, :) = [minx+x_diff, miny+y_diff, minz+z_diff]; 28 | v(8, :) = [minx, miny+y_diff, minz+z_diff]; 29 | 30 | f = [1,2,3,4;5,6,7,8;1,2,6,5;3,4,8,7;1,4,8,5;2,3,7,6]; 31 | end -------------------------------------------------------------------------------- /matlab/GetOptimizedObj.m: -------------------------------------------------------------------------------- 1 | % recon_inter_random_dir: contain all parts dir 2 | % type: plane, knife and so on 3 | % recon_inter_random: 1 represent recon, 2 represent inter, 3 represent random 4 | % use_struct: struc_part or part 5 | function GetOptimizedObj(recon_inter_random_dir, type, recon_inter_random, use_struct, use_origin) 6 | part_names = getlabel(type); 7 | 8 | mat_prefix = ''; 9 | if 1 == recon_inter_random 10 | 
mat_prefix = 'recover'; 11 | elseif 2 == recon_inter_random 12 | mat_prefix = 'inter'; 13 | elseif 3 == recon_inter_random 14 | mat_prefix = 'random'; 15 | else 16 | return; 17 | end 18 | 19 | n = size(part_names, 2); 20 | if 0 == use_origin && 0 == use_struct 21 | merge_dir = fullfile(recon_inter_random_dir, 'merge'); 22 | elseif 0 == use_origin && 1 == use_struct 23 | merge_dir = fullfile(recon_inter_random_dir, 'merge_struct'); 24 | elseif 1 == use_origin && 1 == use_struct 25 | merge_dir = fullfile(recon_inter_random_dir, 'merge_struct_origin'); 26 | elseif 1 == use_origin && 0 == use_struct 27 | merge_dir = fullfile(recon_inter_random_dir, 'merge_origin'); 28 | end 29 | 30 | if ~exist(merge_dir, 'dir') 31 | mkdir(merge_dir); 32 | end 33 | 34 | if 0 == use_struct 35 | part_dirs = fullfile(recon_inter_random_dir, part_names); 36 | else 37 | part_dirs = fullfile(recon_inter_random_dir, cellfun(@(x) ['struc_',x] ,part_names, 'UniformOutput',false)); 38 | end 39 | 40 | if 0 == use_origin 41 | mat = load(fullfile(recon_inter_random_dir, [mat_prefix, '_sym.mat'])); 42 | else 43 | % for debug 44 | mat = load(fullfile(recon_inter_random_dir, '..', '..', [type, '_vaefeature.mat'])); 45 | end 46 | 47 | max_num = 0; 48 | max_dir_files = {}; 49 | for i = 1:n 50 | sub_dir = fullfile(recon_inter_random_dir, part_names{i}); 51 | sub_dir_files = dir(fullfile(sub_dir, ['*_', part_names{i}, '.obj'])); 52 | sub_num = size(sub_dir_files, 1); 53 | if sub_num > max_num 54 | max_num = sub_num; 55 | max_dir_files = sub_dir_files; 56 | end 57 | end 58 | max_dir_names = {max_dir_files.name}; 59 | [max_dir_names, ~] = sort_nat(max_dir_names); 60 | 61 | if 1 == use_origin 62 | max_dir_names = mat.modelname; 63 | else 64 | 65 | end 66 | 67 | 68 | for i = 1:length(max_dir_names) 69 | if i <= 2 70 | continue; 71 | end 72 | id = max_dir_names{i}; 73 | splitparts = strsplit(id, '_'); 74 | id = splitparts{1}(1:end); 75 | disp(id); 76 | if 0 == use_origin 77 | code = reshape(mat.symmetry_feature(i, :), n, 2*n+9); 78 | else 79 | code = squeeze(mat.symmetryf(i, :, :)); 80 | end 81 | code = AdjustOutputEncode(code); 82 | 83 | part_pcs = cell(n, 1); 84 | part_faces = cell(n, 1); 85 | for j = 1:n 86 | obj_filename = fullfile(part_dirs{j}, [id, '_', part_names{j}, '.obj']); 87 | if exist(obj_filename, 'file') 88 | if ispc 89 | [part_pc, part_face] = cotlp(obj_filename); 90 | else 91 | [part_pc, part_face] = readObj(obj_filename); 92 | part_face = part_face'; 93 | end 94 | part_pcs{j} = part_pc; 95 | part_faces{j} = part_face'; 96 | end 97 | end 98 | tic; 99 | [total_pc, total_face, total_optimized_pc, total_optimized_face] = ReconstructFromCodeMixIntegerReadingObjinAdvance(code, part_pcs, part_faces, type); 100 | toc; 101 | if ispc 102 | SaveObjT(fullfile(merge_dir, ['optimized_', id, '.obj']), total_optimized_pc', total_optimized_face'); 103 | SaveObjT(fullfile(merge_dir, ['unoptimized_', id, '.obj']), total_pc', total_face'); 104 | else 105 | SaveObj(fullfile(merge_dir, ['optimized_', id, '.obj']), total_optimized_pc', total_optimized_face'); 106 | SaveObj(fullfile(merge_dir, ['unoptimized_', id, '.obj']), total_pc', total_face'); 107 | end 108 | end 109 | end -------------------------------------------------------------------------------- /matlab/GetTransformedCube.m: -------------------------------------------------------------------------------- 1 | function GetTransformedCube(pts_labels_dir, dir_postfix, type) 2 | % if use_postfix == 0 3 | % dir_postfix = 2000; 4 | % pts_dir = fullfile(pts_labels_dir, 'points'); 5 | % 
else 6 | % pts_dir = fullfile(pts_labels_dir, ['points', num2str(dir_postfix)]); 7 | % end 8 | pts_dir = pts_labels_dir; 9 | model_normalizedobj='model_normalized.obj'; 10 | pts_list = dir([pts_dir, '\*']); 11 | pts_list(1:2)=[]; 12 | 13 | divide_with_face_dir = fullfile(pts_labels_dir, ['../box', num2str(dir_postfix)]); 14 | if ~exist(divide_with_face_dir, 'dir') 15 | mkdir(divide_with_face_dir); 16 | end 17 | part_names = getlabel(type); 18 | 19 | origin_cube_dir = ['cube_', type]; 20 | cube_vs = {}; 21 | cube_fs = {}; 22 | for i = 1:size(part_names, 2) 23 | cubename=fullfile(origin_cube_dir, ['cube_', part_names{i}, '.obj']); 24 | if ~exist(cubename,'file') 25 | cubename=fullfile(origin_cube_dir, ['cube_std.obj']); 26 | end 27 | [v, f] = readobjfromfile(cubename); 28 | cube_vs = [cube_vs, v]; 29 | cube_fs = [cube_fs, f+1]; 30 | end 31 | 32 | for i=1:size(pts_list, 1) 33 | disp(i); 34 | splitparts = strsplit(pts_list(i).name, '.'); 35 | pts_numstr = splitparts{1}; 36 | disp(pts_numstr); 37 | mesh_filename = fullfile(pts_labels_dir, pts_numstr,'models',model_normalizedobj); 38 | 39 | [vertex, ~] = readobjfromfile(mesh_filename); 40 | if size(vertex,1)>400000 41 | continue 42 | end 43 | 44 | % used to store divide point cloud 45 | sub_divide_without_face_dir = fullfile(pts_labels_dir, pts_numstr,'models'); 46 | if ~exist(sub_divide_without_face_dir, 'dir') 47 | continue; 48 | end 49 | 50 | sub_divide_with_face_dir = fullfile(divide_with_face_dir, pts_numstr); 51 | if ~exist(sub_divide_with_face_dir, 'dir') 52 | mkdir(sub_divide_with_face_dir); 53 | end 54 | 55 | copyfile(mesh_filename, fullfile(sub_divide_with_face_dir, model_normalizedobj)) 56 | for j = 1:size(part_names, 2) 57 | if exist(fullfile(sub_divide_with_face_dir, ['transformed_cube_', part_names{j}, '.obj']), 'file') 58 | continue; 59 | end 60 | if ~exist(fullfile(sub_divide_without_face_dir, [part_names{j}, '.obj']), 'file') 61 | continue; 62 | end 63 | try 64 | [part_points,~] = readobjfromfile(fullfile(sub_divide_without_face_dir, [part_names{j}, '.obj'])); 65 | catch 66 | continue; 67 | end 68 | % part_points=removeoutliner(part_points); 69 | % if size(part_points, 1) == 0 70 | % continue; 71 | % end 72 | 73 | transformed_box = zeros(8, 3); 74 | try 75 | [~,cornerpoints,h,~,~] = boundbox(part_points(:,1),part_points(:,2),part_points(:,3),'v',1); 76 | transformed_box=changebbxvert(cornerpoints); 77 | catch 78 | max_point = max(part_points, [], 1); 79 | maxx = max_point(1); 80 | maxy = max_point(2); 81 | maxz = max_point(3); 82 | 83 | min_point = min(part_points, [], 1); 84 | minx = min_point(1); 85 | miny = min_point(2); 86 | minz = min_point(3); 87 | 88 | x_diff = maxx - minx; 89 | y_diff = maxy - miny; 90 | z_diff = maxz - minz; 91 | transformed_box(1, :) = [minx, miny, minz]; 92 | transformed_box(2, :) = [minx+x_diff, miny, minz]; 93 | transformed_box(3, :) = [minx+x_diff, miny, minz+z_diff]; 94 | transformed_box(4, :) = [minx, miny, minz+z_diff]; 95 | 96 | transformed_box(5, :) = [minx, miny+y_diff, minz]; 97 | transformed_box(6, :) = [minx+x_diff, miny+y_diff, minz]; 98 | transformed_box(7, :) = [minx+x_diff, miny+y_diff, minz+z_diff]; 99 | transformed_box(8, :) = [minx, miny+y_diff, minz+z_diff]; 100 | end 101 | 102 | transformed_box(:, 4) = 1; 103 | 104 | origin_box = zeros(8, 3); 105 | origin_box(1, :) = [-0.5, -0.5, -0.5]+[0.5, 0.5, 0.5]; 106 | origin_box(2, :) = [0.5, -0.5, -0.5]+[0.5, 0.5, 0.5]; 107 | origin_box(3, :) = [0.5, -0.5, 0.5]+[0.5, 0.5, 0.5]; 108 | origin_box(4, :) = [-0.5, -0.5, 0.5]+[0.5, 0.5, 0.5]; 
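% corners 5-8 below complete the top face of the canonical unit cube ([0,1]^3 after the +0.5 shift).
% Together with corners 1-4 they form origin_box; in homogeneous coordinates,
% transform_matrix = transformed_box/origin_box is the affine map that carries this
% unit cube onto the part's bounding box, and it is then applied to the vertices of
% the template cube mesh before saving the transformed cube.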
109 | 110 | origin_box(5, :) = [-0.5, 0.5, -0.5]+[0.5, 0.5, 0.5]; 111 | origin_box(6, :) = [0.5, 0.5, -0.5]+[0.5, 0.5, 0.5]; 112 | origin_box(7, :) = [0.5, 0.5, 0.5]+[0.5, 0.5, 0.5]; 113 | origin_box(8, :) = [-0.5, 0.5, 0.5]+[0.5, 0.5, 0.5]; 114 | origin_box(:, 4) = 1; 115 | 116 | % transpose to 4 * N 117 | transformed_box = transformed_box'; 118 | origin_box = origin_box'; 119 | transform_matrix = transformed_box/origin_box; 120 | disp(transform_matrix); 121 | 122 | % save objs 123 | local_cubeid_v = cube_vs{j}; 124 | local_cubeid_f = cube_fs{j}; 125 | tempV = local_cubeid_v; 126 | tempV(:, 4) = 1; 127 | transformed_v = transform_matrix*tempV'; 128 | 129 | SaveObjT(fullfile(sub_divide_with_face_dir, ['transformed_cube_', part_names{j}, '.obj']), transformed_v(1:3, :), local_cubeid_f'); 130 | copyfile(fullfile(sub_divide_without_face_dir, [part_names{j}, '.obj']), fullfile(sub_divide_with_face_dir, [part_names{j}, '.obj'])) 131 | end 132 | end 133 | end -------------------------------------------------------------------------------- /matlab/MergeOBJWithTexture.m: -------------------------------------------------------------------------------- 1 | function MergeOBJWithTexture(obj_dir, category, model_id) 2 | if nargin < 3 3 | model_id = ''; 4 | end 5 | labels = getlabel(category); 6 | if strcmp(category,'chair') || strcmp(category,'table') 7 | alpha_open = 1; 8 | else 9 | alpha_open = 0; 10 | end 11 | 12 | d = dir(obj_dir); 13 | isub = [d(:).isdir]; 14 | nameFolds = {d(isub).name}'; 15 | nameFolds(ismember(nameFolds,{'.','..'})) = []; 16 | 17 | parfor i = 1:size(nameFolds, 1) 18 | id = nameFolds{i}; 19 | if ~strcmp(model_id,'') && ~strcmp(id, model_id) 20 | continue; 21 | end 22 | disp(id); 23 | if exist(fullfile(obj_dir, id, 'merge.obj'), 'file') 24 | continue; 25 | end 26 | merge_V = []; 27 | merge_F = []; 28 | merge_UV = []; 29 | merge_TF = []; 30 | merge_texture = []; 31 | if alpha_open 32 | merge_alpha = []; 33 | end 34 | true_part_num = 0; 35 | for j = 1:size(labels, 2) 36 | part_name = fullfile(obj_dir, id, [labels{j}, '_reg.obj']); 37 | if exist(part_name, 'file') 38 | true_part_num = true_part_num + 1; 39 | end 40 | end 41 | count = 1; 42 | for j = 1:size(labels, 2) 43 | part_name = fullfile(obj_dir, id, [labels{j}, '_reg.obj']); 44 | png_name = fullfile(obj_dir, id, [labels{j}, '_reg.png']); 45 | if exist(part_name, 'file') && exist(png_name, 'file') 46 | [V, F, UV, TF] = readOBJ(part_name); 47 | [texture,~,alpha] = imread(png_name); 48 | else 49 | continue; 50 | end 51 | 52 | F = F + size(merge_V, 1); 53 | merge_F = [merge_F; F]; 54 | 55 | TF = TF + size(merge_UV, 1); 56 | merge_TF = [merge_TF; TF]; 57 | 58 | UV(:, 1) = 1.0*(count-1)/true_part_num + 1.0*UV(:, 1)/true_part_num; 59 | merge_UV = [merge_UV; UV]; 60 | 61 | merge_V = [merge_V; V]; 62 | 63 | if isempty(merge_texture) 64 | merge_texture = texture; 65 | else 66 | merge_texture = cat(2,merge_texture,texture); 67 | end 68 | if alpha_open 69 | if isempty(merge_alpha) 70 | merge_alpha = alpha; 71 | else 72 | merge_alpha = cat(2,merge_alpha,alpha); 73 | end 74 | end 75 | count = count + 1; 76 | end 77 | merge_name = fullfile(obj_dir, id, 'merge.obj'); 78 | mtl_name = fullfile(obj_dir, id, 'merge.mtl'); 79 | png_name = fullfile(obj_dir, id, 'merge.png'); 80 | if ~isempty(merge_V) 81 | WriteOBJwithMtl(merge_name, merge_V, merge_F, merge_UV, merge_TF); 82 | WriteMtl(mtl_name); 83 | if alpha_open 84 | imwrite(merge_texture, png_name, 'Alpha', merge_alpha); 85 | else 86 | imwrite(merge_texture, png_name); 87 | end 88 | end 89 | end 
90 | end -------------------------------------------------------------------------------- /matlab/Pipeline.m: -------------------------------------------------------------------------------- 1 | % Prepare data: 2 | % 1. divide model to multiple parts 3 | % 2. get transformed cube 4 | % 3. regist cube to part 5 | % 4. get structure code 6 | % 5. generate deformation feature 7 | % 6. generate texture image 8 | 9 | % Chair 10 | addpath('.\nonregistration') 11 | cate = 'chair'; 12 | postfix = 50; 13 | data_dir = '.\DATA\Chair\Chair'; 14 | shapenet_root = '.\ShapeNet\Chair'; 15 | box_dir = fullfile(data_dir,'..',['box',num2str(postfix)]); 16 | vae_dir = fullfile(data_dir,'..',['vaenew',num2str(postfix)]); 17 | final_dir = fullfile(data_dir,'..',['final',num2str(postfix)]); 18 | 19 | GetTransformedCube(data_dir, postfix, cate); 20 | regist(box_dir, cate) 21 | SupportAnalysisScript(box_dir, cate); 22 | GenerateData(box_dir, vae_dir, cate); 23 | TransferColorPerPixelScript(cate, box_dir, shapenet_root, vae_dir) 24 | PrepareForTraining(cate, vae_dir, final_dir, 0.75); 25 | 26 | % next steps: 27 | % 1. train TM-NET 28 | % 2. generate single textured part by 'ViewOBJandTexture.m' 29 | % 3. merge parts to the whole textured model by 'MergeOBJWithTexture.m' 30 | -------------------------------------------------------------------------------- /matlab/PrepareForTraining.m: -------------------------------------------------------------------------------- 1 | function PrepareForTraining(category, vae_dir, final_dir, train_percent) 2 | % generate and move data for training and test 3 | 4 | if nargin < 4 5 | train_percent = 0.75; 6 | end 7 | 8 | part_names = getlabel(category); 9 | id_list = dir(vae_dir); 10 | dir_flag = [id_list.isdir]; 11 | id_list = {id_list(dir_flag).name}; 12 | id_list(ismember(id_list,{'.','..'})) = []; 13 | data_num = length(id_list); 14 | train_num = floor(data_num * train_percent); 15 | rand_nums = randperm(data_num); 16 | H_begin = [257, 1, 257, 513, 769, 257]; 17 | W_begin = [1, 257, 257, 257, 257, 513]; 18 | for i = 1:data_num 19 | data_id = rand_nums(i); 20 | model_id = id_list{data_id}; 21 | if i <= train_num 22 | model_output = fullfile(final_dir, 'train', model_id); 23 | else 24 | model_output = fullfile(final_dir, 'test', model_id); 25 | end 26 | if ~exist(model_output,'dir') 27 | mkdir(model_output); 28 | end 29 | copyfile(fullfile(vae_dir, model_id, 'code.mat'),fullfile(model_output,'code.mat')) 30 | for j = 1:length(part_names) 31 | mat_file = fullfile(vae_dir, model_id, [model_id,'_',part_names{j},'.mat']); 32 | png_file = fullfile(vae_dir, model_id, [model_id,'_',part_names{j},'.png']); 33 | if exist(mat_file,'file') 34 | copyfile(mat_file,fullfile(model_output,[model_id,'_',part_names{j},'.mat'])) 35 | if exist(png_file,'file') 36 | [img,~,alpha] = imread(png_file); 37 | img = imresize(img,[768,1024]); 38 | if isempty(alpha) 39 | imwrite(img,fullfile(model_output,[model_id,'_',part_names{j},'.png'])) 40 | else 41 | alpha = imresize(alpha,[768,1024]); 42 | imwrite(img,fullfile(model_output,[model_id,'_',part_names{j},'.png']), 'Alpha', alpha) 43 | end 44 | for m = 1:6 45 | patch_file = fullfile(model_output,[model_id,'_',part_names{j},'_patch',num2str(m),'.png']); 46 | patch_img = img(W_begin(m):W_begin(m)+255, H_begin(m):H_begin(m)+255, :); 47 | if isempty(alpha) 48 | imwrite(patch_img, patch_file); 49 | else 50 | patch_alpha = alpha(W_begin(m):W_begin(m)+255, H_begin(m):H_begin(m)+255); 51 | imwrite(patch_img, patch_file, 'Alpha', patch_alpha); 52 | end 53 | end 54 | 
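% Editorial note (a hedged reading of the indexing above): the resized
% 768x1024 texture is treated as a 3x4 grid of 256x256 tiles holding the
% six unwrapped cube faces in a cross layout. H_begin/W_begin store the
% column/row of each face's top-left corner, so patch m is
%   img(W_begin(m):W_begin(m)+255, H_begin(m):H_begin(m)+255, :)
% e.g. m = 1 is the tile at rows 1:256, columns 257:512.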
else 55 | warning([model_id,'_',part_names{j},'.png loss!']) 56 | end 57 | end 58 | end 59 | end 60 | 61 | end -------------------------------------------------------------------------------- /matlab/SaveObj.m: -------------------------------------------------------------------------------- 1 | function [ ] = SaveObj( inputname, ver, face) 2 | %UNTITLED3 Summary of this function goes here 3 | % Detailed explanation goes here 4 | [m,n]=size(ver); 5 | if n==1 6 | newver=reshape(ver, 3,m/3); 7 | else 8 | newver=ver; 9 | end 10 | [m,n]=size(newver); 11 | fid=fopen(inputname,'w'); 12 | for i = 1 : n 13 | verline=['v ',num2str(newver(1,i)),' ',num2str(newver(2,i)),' ',num2str(newver(3,i))]; 14 | fprintf(fid,'%s\n',verline); 15 | end 16 | [m,n]=size(face); 17 | for i = 1:n 18 | faceline=['f ',num2str(face(1,i)),' ',num2str(face(2,i)),' ',num2str(face(3,i))]; 19 | fprintf(fid,'%s\n',faceline); 20 | end 21 | fclose(fid); 22 | end 23 | 24 | -------------------------------------------------------------------------------- /matlab/SaveObjT.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/SaveObjT.mexw64 -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/images/texture0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/images/texture0.jpg -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/images/texture1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/images/texture1.jpg -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/leaf_part_ids.json: -------------------------------------------------------------------------------- 1 | [1, 2] -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/leaf_part_obj/1.obj: -------------------------------------------------------------------------------- 1 | # Generated with PyMesh 2 | v 0.14409 -0.158903 -0.201427 3 | v 0.110337 -0.188812 -0.203623 4 | v 0.141894 -0.188812 -0.203623 5 | v 0.108141 -0.158903 -0.201427 6 | v 0.112135 -0.218747 -0.20542 7 | v 0.141894 -0.188812 -0.23518 8 | v 0.110337 -0.188812 -0.23518 9 | v 0.140097 -0.218747 -0.20542 10 | v 0.112135 -0.218747 -0.233382 11 | v 0.14409 -0.158903 -0.237376 12 | v 0.140097 -0.218747 -0.233382 13 | v 0.108141 -0.158903 -0.237268 14 | v 0.113533 -0.248703 -0.206819 15 | v 0.113533 -0.248703 -0.231984 16 | v 0.108141 -0.158903 -0.237376 17 | v 0.138699 -0.248703 -0.206819 18 | v 0.138699 -0.248703 -0.231984 19 | v 0.114532 -0.278676 -0.207817 20 | v 0.114532 -0.278676 -0.230985 21 | v 0.1377 -0.278676 -0.230985 22 | v 0.1377 -0.278676 -0.207817 23 | v 0.115131 -0.308659 -0.208417 24 | v 0.115131 -0.308659 -0.230386 25 | v 0.1371 -0.308659 -0.230386 26 | v 0.1371 -0.308659 -0.208417 27 | v 0.115331 -0.338648 -0.208617 28 | v 0.115331 -0.338648 -0.230186 29 | v 0.136901 -0.338648 -0.230186 30 
| v 0.136901 -0.338648 -0.208617 31 | v 0.115131 -0.368636 -0.208417 32 | v 0.1371 -0.368636 -0.230386 33 | v 0.1371 -0.368636 -0.208417 34 | v 0.115131 -0.368636 -0.230386 35 | v 0.114532 -0.398619 -0.207817 36 | v 0.1377 -0.398619 -0.230985 37 | v 0.1377 -0.398619 -0.207817 38 | v 0.114532 -0.398619 -0.230985 39 | v 0.113533 -0.428592 -0.206819 40 | v 0.138699 -0.428592 -0.231984 41 | v 0.138699 -0.428592 -0.206819 42 | v 0.113533 -0.428592 -0.231984 43 | v 0.112135 -0.458548 -0.20542 44 | v 0.140097 -0.458548 -0.233382 45 | v 0.140097 -0.458548 -0.20542 46 | v 0.112135 -0.458548 -0.233382 47 | v 0.110337 -0.488484 -0.203623 48 | v 0.141894 -0.488484 -0.23518 49 | v 0.141894 -0.488484 -0.203623 50 | v 0.110337 -0.488484 -0.23518 51 | v 0.108141 -0.518392 -0.201427 52 | v 0.14409 -0.518392 -0.237376 53 | v 0.14409 -0.518392 -0.201427 54 | v 0.108141 -0.518392 -0.237376 55 | f 1 2 3 56 | f 2 1 4 57 | f 3 2 1 58 | f 4 1 2 59 | f 5 3 2 60 | f 2 3 5 61 | f 6 1 3 62 | f 3 1 6 63 | f 1 12 4 64 | f 4 12 1 65 | f 4 7 2 66 | f 2 7 4 67 | f 3 5 8 68 | f 8 5 3 69 | f 2 9 5 70 | f 5 9 2 71 | f 1 6 10 72 | f 10 6 1 73 | f 3 11 6 74 | f 6 11 3 75 | f 12 1 10 76 | f 10 1 12 77 | f 12 7 4 78 | f 4 7 12 79 | f 9 2 7 80 | f 7 2 9 81 | f 13 8 5 82 | f 5 8 13 83 | f 11 3 8 84 | f 8 3 11 85 | f 14 5 9 86 | f 9 5 14 87 | f 6 15 10 88 | f 10 15 6 89 | f 11 7 6 90 | f 6 7 11 91 | f 12 10 15 92 | f 15 10 12 93 | f 7 12 15 94 | f 15 12 7 95 | f 7 11 9 96 | f 9 11 7 97 | f 8 13 16 98 | f 16 13 8 99 | f 5 14 13 100 | f 13 14 5 101 | f 8 17 11 102 | f 11 17 8 103 | f 9 17 14 104 | f 14 17 9 105 | f 15 6 7 106 | f 7 6 15 107 | f 17 9 11 108 | f 11 9 17 109 | f 18 16 13 110 | f 13 16 18 111 | f 17 8 16 112 | f 16 8 17 113 | f 19 13 14 114 | f 14 13 19 115 | f 20 14 17 116 | f 17 14 20 117 | f 16 18 21 118 | f 21 18 16 119 | f 13 19 18 120 | f 18 19 13 121 | f 16 20 17 122 | f 17 20 16 123 | f 14 20 19 124 | f 19 20 14 125 | f 22 21 18 126 | f 18 21 22 127 | f 20 16 21 128 | f 21 16 20 129 | f 23 18 19 130 | f 19 18 23 131 | f 24 19 20 132 | f 20 19 24 133 | f 21 22 25 134 | f 25 22 21 135 | f 18 23 22 136 | f 22 23 18 137 | f 21 24 20 138 | f 20 24 21 139 | f 19 24 23 140 | f 23 24 19 141 | f 26 25 22 142 | f 22 25 26 143 | f 24 21 25 144 | f 25 21 24 145 | f 27 22 23 146 | f 23 22 27 147 | f 28 23 24 148 | f 24 23 28 149 | f 25 26 29 150 | f 29 26 25 151 | f 22 27 26 152 | f 26 27 22 153 | f 25 28 24 154 | f 24 28 25 155 | f 23 28 27 156 | f 27 28 23 157 | f 30 29 26 158 | f 26 29 30 159 | f 28 25 29 160 | f 29 25 28 161 | f 27 30 26 162 | f 26 30 27 163 | f 31 27 28 164 | f 28 27 31 165 | f 29 30 32 166 | f 32 30 29 167 | f 32 28 29 168 | f 29 28 32 169 | f 30 27 33 170 | f 33 27 30 171 | f 27 31 33 172 | f 33 31 27 173 | f 28 32 31 174 | f 31 32 28 175 | f 34 32 30 176 | f 30 32 34 177 | f 33 34 30 178 | f 30 34 33 179 | f 35 33 31 180 | f 31 33 35 181 | f 36 31 32 182 | f 32 31 36 183 | f 32 34 36 184 | f 36 34 32 185 | f 34 33 37 186 | f 37 33 34 187 | f 33 35 37 188 | f 37 35 33 189 | f 31 36 35 190 | f 35 36 31 191 | f 38 36 34 192 | f 34 36 38 193 | f 37 38 34 194 | f 34 38 37 195 | f 39 37 35 196 | f 35 37 39 197 | f 40 35 36 198 | f 36 35 40 199 | f 36 38 40 200 | f 40 38 36 201 | f 38 37 41 202 | f 41 37 38 203 | f 37 39 41 204 | f 41 39 37 205 | f 35 40 39 206 | f 39 40 35 207 | f 42 40 38 208 | f 38 40 42 209 | f 41 42 38 210 | f 38 42 41 211 | f 43 41 39 212 | f 39 41 43 213 | f 44 39 40 214 | f 40 39 44 215 | f 40 42 44 216 | f 44 42 40 217 | f 42 41 45 218 | f 45 41 42 219 | f 41 43 45 220 | f 45 43 41 
221 | f 39 44 43 222 | f 43 44 39 223 | f 46 44 42 224 | f 42 44 46 225 | f 45 46 42 226 | f 42 46 45 227 | f 47 45 43 228 | f 43 45 47 229 | f 48 43 44 230 | f 44 43 48 231 | f 44 46 48 232 | f 48 46 44 233 | f 46 45 49 234 | f 49 45 46 235 | f 45 47 49 236 | f 49 47 45 237 | f 43 48 47 238 | f 47 48 43 239 | f 50 48 46 240 | f 46 48 50 241 | f 49 50 46 242 | f 46 50 49 243 | f 51 49 47 244 | f 47 49 51 245 | f 52 47 48 246 | f 48 47 52 247 | f 48 50 52 248 | f 52 50 48 249 | f 50 49 53 250 | f 53 49 50 251 | f 49 51 53 252 | f 53 51 49 253 | f 47 52 51 254 | f 51 52 47 255 | f 50 51 52 256 | f 52 51 50 257 | f 51 50 53 258 | f 53 50 51 259 | -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/leaf_part_obj_normalized/1.obj: -------------------------------------------------------------------------------- 1 | v 0.282811 -0.183791 -0.326839 2 | v 0.216839 -0.242250 -0.331131 3 | v 0.278519 -0.242250 -0.331131 4 | v 0.212547 -0.183791 -0.326839 5 | v 0.220353 -0.300760 -0.334643 6 | v 0.278519 -0.242250 -0.392811 7 | v 0.216839 -0.242250 -0.392811 8 | v 0.275007 -0.300760 -0.334643 9 | v 0.220353 -0.300760 -0.389297 10 | v 0.282811 -0.183791 -0.397103 11 | v 0.275007 -0.300760 -0.389297 12 | v 0.212547 -0.183791 -0.396892 13 | v 0.223086 -0.359311 -0.337378 14 | v 0.223086 -0.359311 -0.386564 15 | v 0.212547 -0.183791 -0.397103 16 | v 0.272274 -0.359311 -0.337378 17 | v 0.272274 -0.359311 -0.386564 18 | v 0.225038 -0.417895 -0.339328 19 | v 0.225038 -0.417895 -0.384612 20 | v 0.270322 -0.417895 -0.384612 21 | v 0.270322 -0.417895 -0.339328 22 | v 0.226209 -0.476498 -0.340501 23 | v 0.226209 -0.476498 -0.383441 24 | v 0.269149 -0.476498 -0.383441 25 | v 0.269149 -0.476498 -0.340501 26 | v 0.226600 -0.535114 -0.340892 27 | v 0.226600 -0.535114 -0.383050 28 | v 0.268760 -0.535114 -0.383050 29 | v 0.268760 -0.535114 -0.340892 30 | v 0.226209 -0.593727 -0.340501 31 | v 0.269149 -0.593727 -0.383441 32 | v 0.269149 -0.593727 -0.340501 33 | v 0.226209 -0.593727 -0.383441 34 | v 0.225038 -0.652331 -0.339328 35 | v 0.270322 -0.652331 -0.384612 36 | v 0.270322 -0.652331 -0.339328 37 | v 0.225038 -0.652331 -0.384612 38 | v 0.223086 -0.710915 -0.337378 39 | v 0.272274 -0.710915 -0.386564 40 | v 0.272274 -0.710915 -0.337378 41 | v 0.223086 -0.710915 -0.386564 42 | v 0.220353 -0.769466 -0.334643 43 | v 0.275007 -0.769466 -0.389297 44 | v 0.275007 -0.769466 -0.334643 45 | v 0.220353 -0.769466 -0.389297 46 | v 0.216839 -0.827978 -0.331131 47 | v 0.278519 -0.827978 -0.392811 48 | v 0.278519 -0.827978 -0.331131 49 | v 0.216839 -0.827978 -0.392811 50 | v 0.212547 -0.886435 -0.326839 51 | v 0.282811 -0.886435 -0.397103 52 | v 0.282811 -0.886435 -0.326839 53 | v 0.212547 -0.886435 -0.397103 54 | f 1 2 3 55 | f 2 1 4 56 | f 3 2 1 57 | f 4 1 2 58 | f 5 3 2 59 | f 2 3 5 60 | f 6 1 3 61 | f 3 1 6 62 | f 1 12 4 63 | f 4 12 1 64 | f 4 7 2 65 | f 2 7 4 66 | f 3 5 8 67 | f 8 5 3 68 | f 2 9 5 69 | f 5 9 2 70 | f 1 6 10 71 | f 10 6 1 72 | f 3 11 6 73 | f 6 11 3 74 | f 12 1 10 75 | f 10 1 12 76 | f 12 7 4 77 | f 4 7 12 78 | f 9 2 7 79 | f 7 2 9 80 | f 13 8 5 81 | f 5 8 13 82 | f 11 3 8 83 | f 8 3 11 84 | f 14 5 9 85 | f 9 5 14 86 | f 6 15 10 87 | f 10 15 6 88 | f 11 7 6 89 | f 6 7 11 90 | f 12 10 15 91 | f 15 10 12 92 | f 7 12 15 93 | f 15 12 7 94 | f 7 11 9 95 | f 9 11 7 96 | f 8 13 16 97 | f 16 13 8 98 | f 5 14 13 99 | f 13 14 5 100 | f 8 17 11 101 | f 11 17 8 102 | f 9 17 14 103 | f 14 17 9 104 | f 15 6 7 105 | f 7 6 15 106 | f 17 9 11 
107 | f 11 9 17 108 | f 18 16 13 109 | f 13 16 18 110 | f 17 8 16 111 | f 16 8 17 112 | f 19 13 14 113 | f 14 13 19 114 | f 20 14 17 115 | f 17 14 20 116 | f 16 18 21 117 | f 21 18 16 118 | f 13 19 18 119 | f 18 19 13 120 | f 16 20 17 121 | f 17 20 16 122 | f 14 20 19 123 | f 19 20 14 124 | f 22 21 18 125 | f 18 21 22 126 | f 20 16 21 127 | f 21 16 20 128 | f 23 18 19 129 | f 19 18 23 130 | f 24 19 20 131 | f 20 19 24 132 | f 21 22 25 133 | f 25 22 21 134 | f 18 23 22 135 | f 22 23 18 136 | f 21 24 20 137 | f 20 24 21 138 | f 19 24 23 139 | f 23 24 19 140 | f 26 25 22 141 | f 22 25 26 142 | f 24 21 25 143 | f 25 21 24 144 | f 27 22 23 145 | f 23 22 27 146 | f 28 23 24 147 | f 24 23 28 148 | f 25 26 29 149 | f 29 26 25 150 | f 22 27 26 151 | f 26 27 22 152 | f 25 28 24 153 | f 24 28 25 154 | f 23 28 27 155 | f 27 28 23 156 | f 30 29 26 157 | f 26 29 30 158 | f 28 25 29 159 | f 29 25 28 160 | f 27 30 26 161 | f 26 30 27 162 | f 31 27 28 163 | f 28 27 31 164 | f 29 30 32 165 | f 32 30 29 166 | f 32 28 29 167 | f 29 28 32 168 | f 30 27 33 169 | f 33 27 30 170 | f 27 31 33 171 | f 33 31 27 172 | f 28 32 31 173 | f 31 32 28 174 | f 34 32 30 175 | f 30 32 34 176 | f 33 34 30 177 | f 30 34 33 178 | f 35 33 31 179 | f 31 33 35 180 | f 36 31 32 181 | f 32 31 36 182 | f 32 34 36 183 | f 36 34 32 184 | f 34 33 37 185 | f 37 33 34 186 | f 33 35 37 187 | f 37 35 33 188 | f 31 36 35 189 | f 35 36 31 190 | f 38 36 34 191 | f 34 36 38 192 | f 37 38 34 193 | f 34 38 37 194 | f 39 37 35 195 | f 35 37 39 196 | f 40 35 36 197 | f 36 35 40 198 | f 36 38 40 199 | f 40 38 36 200 | f 38 37 41 201 | f 41 37 38 202 | f 37 39 41 203 | f 41 39 37 204 | f 35 40 39 205 | f 39 40 35 206 | f 42 40 38 207 | f 38 40 42 208 | f 41 42 38 209 | f 38 42 41 210 | f 43 41 39 211 | f 39 41 43 212 | f 44 39 40 213 | f 40 39 44 214 | f 40 42 44 215 | f 44 42 40 216 | f 42 41 45 217 | f 45 41 42 218 | f 41 43 45 219 | f 45 43 41 220 | f 39 44 43 221 | f 43 44 39 222 | f 46 44 42 223 | f 42 44 46 224 | f 45 46 42 225 | f 42 46 45 226 | f 47 45 43 227 | f 43 45 47 228 | f 48 43 44 229 | f 44 43 48 230 | f 44 46 48 231 | f 48 46 44 232 | f 46 45 49 233 | f 49 45 46 234 | f 45 47 49 235 | f 49 47 45 236 | f 43 48 47 237 | f 47 48 43 238 | f 50 48 46 239 | f 46 48 50 240 | f 49 50 46 241 | f 46 50 49 242 | f 51 49 47 243 | f 47 49 51 244 | f 52 47 48 245 | f 48 47 52 246 | f 48 50 52 247 | f 52 50 48 248 | f 50 49 53 249 | f 53 49 50 250 | f 49 51 53 251 | f 53 51 49 252 | f 47 52 51 253 | f 51 52 47 254 | f 50 51 52 255 | f 52 51 50 256 | f 51 50 53 257 | f 53 50 51 258 | -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.json: -------------------------------------------------------------------------------- 1 | {"max": [0.0, 1.27062, 0.4], "centroid": [-0.20040965936073096, 0.7210116505936071, 0.33015679251141544], "id": "d374912c3a9fca96c141a04b2a487fd9", "numVertices": 876, "min": [-0.400023, 0.0, 0.0]} 2 | -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.mtl: -------------------------------------------------------------------------------- 1 | # File produced by Open Asset Import Library (http://www.assimp.sf.net) 2 | # (assimp v3.2.202087883) 3 | 4 | newmtl material_0_1_8 5 | Kd 1 1 1 6 | Ka 0 0 0 7 | Ks 0.4 0.4 0.4 8 | Ke 0 0 0 9 | Ns 10 10 | illum 2 11 | map_Kd ../images/texture0.jpg 12 | 13 | newmtl material_1_0 
14 | Kd 0 0 0 15 | Ka 0 0 0 16 | Ks 0.4 0.4 0.4 17 | Ke 0 0 0 18 | Ns 10 19 | illum 2 20 | 21 | newmtl material_1_24 22 | Kd 1 1 1 23 | Ka 0 0 0 24 | Ks 0.4 0.4 0.4 25 | Ke 0 0 0 26 | Ns 10 27 | illum 2 28 | 29 | newmtl material_2_2_8 30 | Kd 1 1 1 31 | Ka 0 0 0 32 | Ks 0.4 0.4 0.4 33 | Ke 0 0 0 34 | Ns 10 35 | illum 2 36 | map_Kd ../images/texture1.jpg 37 | 38 | -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.solid.binvox: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.solid.binvox -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.surface.binvox: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/models/model_normalized.surface.binvox -------------------------------------------------------------------------------- /matlab/ShapeNet/Chair/d374912c3a9fca96c141a04b2a487fd9/normalization_params.txt: -------------------------------------------------------------------------------- 1 | -0.000603 -0.064872 -0.034209 2 | 0.511623 -------------------------------------------------------------------------------- /matlab/SupportAnalysis.m: -------------------------------------------------------------------------------- 1 | function SupportAnalysis(part_objs_dir, type) 2 | part_names = getlabel(type); 3 | 4 | global_symmetry_plane = [1, 0, 0, 0]; 5 | 6 | part_num = size(part_names, 2); 7 | code = zeros(part_num, part_num*2+9); 8 | code = AddPriori(code, type, part_names); 9 | 10 | part_pcs = cell(part_num, 1); 11 | part_bboxs = cell(part_num, 1); 12 | i = 1; 13 | while i <= part_num 14 | part_name = part_names{i}; 15 | part_obj = fullfile(part_objs_dir, [part_name, '.obj']); 16 | if exist(part_obj, 'file') 17 | code(i, 1) = 1; 18 | [part_pc,~] = readobjfromfile(part_obj); 19 | 20 | if i+1 <= part_num 21 | next_part_name = part_names{i+1}; 22 | % if symmetry 23 | part_name_split = strsplit(part_name, '_'); 24 | next_part_name_split = strsplit(next_part_name, '_'); 25 | if 1 == strcmp(part_name_split{1}, next_part_name_split{1}) 26 | code(i, 2*part_num+5) = 1; 27 | code(i, 2*part_num+6:2*part_num+9) = global_symmetry_plane; 28 | code(i+1, 2*part_num+5) = 1; 29 | code(i+1, 2*part_num+6:2*part_num+9) = global_symmetry_plane; 30 | end 31 | end 32 | 33 | % fill center part 34 | [part_pc_bbox, ~] = GetBoundingBox4PointCloud(part_pc); 35 | % code(i, 2*part_num+2:2*part_num+4) = (part_pc_bbox(7, :) + part_pc_bbox(1, :))/2; 36 | code(i, 2*part_num+2:2*part_num+4) = mean(part_pc); 37 | else 38 | part_pc = []; 39 | end 40 | 41 | part_pcs{i} = part_pc; 42 | part_bbox = GetBoundingBox4PointCloud(part_pc); 43 | part_bboxs{i} = part_bbox; 44 | i=i+1; 45 | end 46 | 47 | distance = zeros(part_num, part_num); 48 | for i = 1:part_num 49 | for j = 1:part_num 50 | if i ~= j 51 | if size(part_pcs{i}, 1) == 0 || size(part_pcs{j}, 1) == 0 52 | distance(i, j) = inf; 53 | else 54 | [IDX, D] = knnsearch(part_pcs{i}, part_pcs{j}); 55 | dist = min(D, [], 1); 56 | distance(i, j) = dist; 57 | end 58 | else 59 | distance(i, j) = Inf; 60 | end 61 | 
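% Editorial summary of the encoding assembled in this function (read from
% the assignments in this file, not from separate documentation):
%   code(i, 1)                             1 if part i exists
%   code(i, 1+j)                           1 if part i supports part j
%   code(i, part_num+1+j)                  1 if part i is supported by part j
%   code(i, 2*part_num+2 : 2*part_num+4)   centroid of part i
%   code(i, 2*part_num+5)                  symmetry flag
%   code(i, 2*part_num+6 : 2*part_num+9)   symmetry plane ([1 0 0 0])
% The loop below assigns support for each part that does not yet support
% anything: it takes the nearest part in 'distance' and compares mean
% y-coordinates, marking the lower part as supporting the higher one.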
end 62 | end 63 | for i = 1:part_num 64 | if size(part_pcs{i}, 1) == 0 || sum(code(i, 2:1+part_num)) ~= 0 65 | continue; 66 | end 67 | [~, j] = min(distance(i, :)); 68 | bboxa = part_bboxs{i}; 69 | bboxb = part_bboxs{j}; 70 | % if IsIntersected(bboxa, bboxb) 71 | i_mean = mean(part_pcs{i}, 1); 72 | j_mean = mean(part_pcs{j}, 1); 73 | % i support j 74 | if i_mean(2) < j_mean(2) 75 | code(i, 1+j) = 1; 76 | code(j, part_num+1+i) = 1; 77 | % j support i 78 | else 79 | code(j, i+1) = 1; 80 | code(i, part_num+1+j) = 1; 81 | end 82 | % end 83 | end 84 | save(fullfile(part_objs_dir, 'code.mat'), 'code'); 85 | end -------------------------------------------------------------------------------- /matlab/SupportAnalysisScript.m: -------------------------------------------------------------------------------- 1 | function SupportAnalysisScript(divide_dir, type) 2 | d = dir(divide_dir); 3 | isub = [d(:).isdir]; 4 | name_folds = {d(isub).name}'; 5 | name_folds(ismember(name_folds,{'.','..'})) = []; 6 | 7 | for i = 1:size(name_folds, 1) 8 | SupportAnalysis(fullfile(divide_dir, name_folds{i}), type); 9 | end 10 | end -------------------------------------------------------------------------------- /matlab/TransferColorPerPixelScript.m: -------------------------------------------------------------------------------- 1 | function TransferColorPerPixelScript(category,register_root,shapenet_root,save_dir) 2 | % This function is used to generate texture images for registered parts from 3 | % the origin model of ShapeNet. 4 | % Input: 5 | % register_root: path to registered objs 6 | % shapenet_root: path to ShapeNet models 7 | % registered_unwrapper_obj_path: path to unwrapper box obj 8 | % save_dir: path to save texture image 9 | 10 | part_names = getlabel(category); 11 | if strcmp(category,'chair') || strcmp(category,'table') 12 | % alpha will help chair and table get better visual performance 13 | alpha_open = 1; 14 | else 15 | alpha_open = 0; 16 | end 17 | registered_unwrapper_obj_path = fullfile(['.\cube_',category], 'cube_std_2d.obj'); 18 | if ~exist(registered_unwrapper_obj_path,'file') 19 | error('No unwrapper cube mesh cube_std_2d.obj!') 20 | end 21 | 22 | id_list = dir(register_root); 23 | id_list = {id_list(:).name}; 24 | id_list(ismember(id_list,{'.','..'})) = []; 25 | for i = 1:length(id_list) 26 | model_id = id_list{i}; 27 | shapenet_path = fullfile(shapenet_root, model_id, 'models', 'model_normalized.obj'); 28 | mtl_path = fullfile(shapenet_root, model_id, 'models', 'model_normalized.mtl'); 29 | if ~exist(mtl_path,'file') 30 | mtl_path = fullfile(shapenet_root, model_id, 'model.mtl'); 31 | end 32 | model_save_dir = fullfile(save_dir,model_id); 33 | if ~exist(model_save_dir,'dir') 34 | mkdir(model_save_dir) 35 | end 36 | 37 | for j = 1:size(part_names, 2) 38 | part_name = part_names{j}; 39 | registered_obj_path = fullfile(register_root, model_id, [part_name, '_reg.obj']); 40 | if ~exist(registered_obj_path, 'file') 41 | continue; 42 | end 43 | save_name = fullfile(model_save_dir, [model_id, '_', part_name, '.png']); 44 | 45 | if strcmp(category,'car') 46 | if strcmp(part_name,'body') 47 | cmd = ['ray_tracing_4carbody.exe ', shapenet_path, ' ', mtl_path, ' ', registered_obj_path, ' ', registered_unwrapper_obj_path, ' ', save_name]; 48 | else 49 | cmd = ['ray_tracing_4car.exe ', shapenet_path, ' ', mtl_path, ' ', registered_obj_path, ' ', registered_unwrapper_obj_path, ' ', save_name, ' 3']; 50 | end 51 | else 52 | cmd = ['ray_tracing.exe ', shapenet_path, ' ', mtl_path, ' ', registered_obj_path, ' ', 
registered_unwrapper_obj_path, ' ', save_name]; 53 | end 54 | if ~exist(save_name,'file') 55 | system(cmd); 56 | end 57 | 58 | if alpha_open 59 | alpha_save_name = fullfile(model_save_dir, [model_id, '_', part_name, '_alpha.png']); 60 | cmd = ['ray_tracing_transparency.exe ', shapenet_path, ' ', mtl_path, ' ', registered_obj_path, ' ', registered_unwrapper_obj_path, ' ', alpha_save_name]; 61 | system(cmd); 62 | origin_img = imread(save_name); 63 | alpha_img = imread(alpha_save_name); 64 | alpha = alpha_img(:,:,2); 65 | imwrite(origin_img, save_name, 'Alpha', alpha); 66 | delete(alpha_save_name); 67 | end 68 | end 69 | disp([model_id, ' texture finish!']); 70 | end -------------------------------------------------------------------------------- /matlab/ViewOBJandTexture.m: -------------------------------------------------------------------------------- 1 | function ViewOBJandTexture(obj_dir, texture_dir, cube_std_obj, model_id) 2 | [V1,F1,UV1,TF1,N1,NF1] = readOBJ(cube_std_obj); 3 | filenames = dir(fullfile(obj_dir,'*_reg.obj')); 4 | for k = 1:numel(filenames) 5 | filename = fullfile(filenames(k).folder, filenames(k).name); 6 | strparts = strsplit(filenames(k).name, '.'); 7 | name = strparts{1}; 8 | strparts = strsplit(name, '_'); 9 | part_name = strparts{1}; 10 | for j = 2:size(strparts, 2)-1 11 | part_name = [part_name, '_', strparts{j}]; 12 | end 13 | 14 | src_filename = fullfile(texture_dir, [model_id, '_', part_name, '.png']); 15 | tar_filename = fullfile(obj_dir, [part_name, '_reg.png']); 16 | if exist(src_filename, 'file') 17 | copyfile(src_filename, tar_filename); 18 | [V,F,UV,TF,N,NF] = readOBJ(filename); 19 | WriteOBJwithMtl(filename, V,F,UV1,TF1,N1,NF1); 20 | WriteMtl(fullfile(obj_dir, [part_name, '.mtl'])); 21 | end 22 | end 23 | end -------------------------------------------------------------------------------- /matlab/WriteMtl.m: -------------------------------------------------------------------------------- 1 | function WriteMtl(filename) 2 | [~, name, ~] = fileparts(filename); 3 | f = fopen(filename, 'w'); 4 | format = ['newmtl material', newline, 'Kd 1 1 1', newline, 'Ka 0 0 0', newline, 'Ks 0.4 0.4 0.4', newline, 'Ke 0 0 0', newline, 'Ns 10', newline, 'illum 2', newline, 'map_Kd ./%s.png', newline]; 5 | fprintf(f, format, name); 6 | fclose(f); 7 | end 8 | -------------------------------------------------------------------------------- /matlab/WriteOBJwithMtl.m: -------------------------------------------------------------------------------- 1 | function WriteOBJwithMtl(filename, V,F,UV,TF,N,NF) 2 | % WRITEOBJ writes an OBJ file with vertex/face information 3 | % 4 | % writeOBJ(filename,V,F,UV,N) 5 | % 6 | % Input: 7 | % filename path to .obj file 8 | % V #V by 3 list of vertices 9 | % F #F by 3 list of triangle indices 10 | % UV #UV by 2 list of texture coordinates 11 | % TF #TF by 3 list of corner texture indices into UV 12 | % N #N by 3 list of normals 13 | % NF #NF by 3 list of corner normal indices into N 14 | % 15 | 16 | %disp(['writing: ',filename]); 17 | f = fopen( filename, 'w' ); 18 | 19 | % add by wutong 20 | [path, name, ext] = fileparts(filename); 21 | fprintf(f, ['mtllib %s', newline], [name, '.mtl']); 22 | % add by wutong 23 | 24 | if size(V,2) == 2 25 | warning('Appending 0s as z-coordinate'); 26 | V(:,end+1:3) = 0; 27 | else 28 | assert(size(V,2) == 3); 29 | end 30 | fprintf( f, 'v %0.17g %0.17g %0.17g\n', V'); 31 | 32 | hasN = exist('N','var') && ~isempty(N); 33 | hasUV = exist('UV','var') && ~isempty(UV); 34 | 35 | if hasUV 36 | switch size(UV,2) 37 | 
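% Texture coordinates may be 2-D (u v) or 3-D (u v w); the 'vt' lines
% are written with the matching number of components below.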
case 2 38 | fprintf( f, 'vt %0.17g %0.17g\n', UV'); 39 | case 3 40 | fprintf( f, 'vt %0.17g %0.17g %0.17g\n', UV'); 41 | end 42 | end 43 | 44 | if hasN 45 | %for k=1:size(N,1) 46 | % fprintf( f, 'vn %f %f %f\n', N(k,1), N(k,2), N(k,3) ); 47 | %end 48 | fprintf( f, 'vn %0.17g %0.17g %0.17g\n', N'); 49 | end 50 | 51 | if hasUV && (~exist('TF','var') || isempty(TF)) 52 | TF = F; 53 | end 54 | if hasN && (~exist('NF','var') || isempty(NF)) 55 | NF = F; 56 | end 57 | 58 | % add by wutong 59 | fprintf(f, ['usemtl material', newline]); 60 | % add by wutong 61 | 62 | if ~hasN && ~hasUV 63 | % A lot faster if we just have faces and they're all triangles 64 | fmt = repmat(' %d',1,size(F,2)); 65 | fprintf( f,['f' fmt '\n'], F'); 66 | else 67 | for k=1:size(F,1) 68 | if ( (~hasN) && (~hasUV) ) || (any(TF(k,:)<=0,2) && any(NF(k,:)<=0,2)) 69 | fmt = repmat(' %d',1,size(F,2)); 70 | fprintf( f,['f' fmt '\n'], F(k,:)); 71 | elseif ( hasUV && (~hasN || any(NF(k,:)<=0,2))) 72 | fmt = repmat(' %d/%d',1,size(F,2)); 73 | fprintf( f, ['f' fmt '\n'], [F(k,:);TF(k,:)]); 74 | elseif ( (hasN) && (~hasUV || any(TF(k,:)<=0,2))) 75 | fmt = repmat(' %d//%d',1,size(F,2)); 76 | fprintf( f, ['f' fmt '\n'],[F(k,:);TF(k,:)]'); 77 | elseif ( (hasN) && (hasUV) ) 78 | assert(all(NF(k,:)>0)); 79 | assert(all(TF(k,:)>0)); 80 | fmt = repmat(' %d/%d/%d',1,size(F,2)); 81 | fprintf( f, ['f' fmt '\n'],[F(k,:);TF(k,:);NF(k,:)]); 82 | end 83 | end 84 | end 85 | 86 | 87 | fclose(f); 88 | -------------------------------------------------------------------------------- /matlab/axisangle2matrix.m: -------------------------------------------------------------------------------- 1 | function R = axisangle2matrix(w,a) 2 | % AXISANGLE2MATRIX Conver axis angle rotations into corresponding rotation 3 | % matrices 4 | % 5 | % R = axisangle2matrix(w,a) 6 | % 7 | % Inputs: 8 | % w n by 3 list of axis vectors 9 | % a n by 1 list of angles 10 | % Output: 11 | % R 3 by 3 by n array of rotation matrices 12 | % 13 | 14 | % For now NaNs are not allowed 15 | assert(~any(isnan(w(:)))); 16 | assert(size(w,1) == size(a,1)); 17 | n = size(w,1); 18 | assert(size(w,2) == 3); 19 | 20 | 21 | % build the rotation matrix 22 | s = sin(a); 23 | c = cos(a); 24 | t = 1 - c; 25 | 26 | w = normalizerow(w); 27 | 28 | x = w(:,1); 29 | y = w(:,2); 30 | z = w(:,3); 31 | R = zeros([3,3,n]); 32 | R(1,1,:) = t.*x.*x + c; 33 | R(2,1,:) = t.*y.*x + s.*z; 34 | R(3,1,:) = t.*z.*x - s.*y; 35 | 36 | R(1,2,:) = t.*x.*y - s.*z; 37 | R(2,2,:) = t.*y.*y + c; 38 | R(3,2,:) = t.*z.*y + s.*x; 39 | 40 | R(1,3,:) = t.*x.*z + s.*y; 41 | R(2,3,:) = t.*y.*z - s.*x; 42 | R(3,3,:) = t.*z.*z + c; 43 | 44 | end 45 | -------------------------------------------------------------------------------- /matlab/cell2file.m: -------------------------------------------------------------------------------- 1 | % cell2file writes a cell array of strings, or number into a file 2 | % ==This function has been obtained modifyng the file str2file.m I found in the FEX.== 3 | % Last Modified: 2008/01/09 (yyyy/mm/dd). 4 | % 5 | % Syntax: cell2file(fid,cell,....) where ... is PARAMETER, VALUE (in pair). 6 | % 7 | % Possible Parameter are: 'EndOfLine' and 'Delimiter' 8 | % 9 | % cell2str can write to a file both sting and numeric format. 10 | % 11 | % You just have to pass the handle to the file 12 | %(so that you can choice your opening attribute) and the cellarray. 
13 | % 14 | % Ex: 15 | % A={'prova',[1],'prova'; 16 | % [3],'txt',[4]}; 17 | % 18 | % fid=fopen('prova.log','a+'); 19 | % cell2file(fid,A,'EndOfLine','\r\n'); 20 | % fclose(fid); 21 | 22 | 23 | 24 | function cell2file(fid,str,varargin) 25 | 26 | id=find(strcmpi('Delimiter',varargin)==1); 27 | if isempty(id) 28 | delimiter=';'; 29 | else 30 | delimiter=varargin{id+1}; 31 | end 32 | 33 | id=find(strcmpi('EndOfLine',varargin)==1); 34 | if isempty(id) 35 | EndOfLine='\n'; 36 | else 37 | EndOfLine=varargin{id+1}; 38 | end 39 | 40 | 41 | for k=1:size(str,1) 42 | for j=1:size(str,2) 43 | app=[str{k,j}]; 44 | if isnumeric(app) 45 | app=num2str(app); 46 | end 47 | if not(j==size(str,2)) 48 | fprintf(fid,['%s' delimiter],app); 49 | else 50 | fprintf(fid,'%s',app); 51 | end 52 | end 53 | 54 | if not(k==size(str,1)) 55 | fprintf(fid,EndOfLine); 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /matlab/changebbxvert.m: -------------------------------------------------------------------------------- 1 | function bbxnew=changebbxvert(cornerpoints) 2 | 3 | 4 | n1=cornerpoints(2,:)-cornerpoints(1,:); 5 | n2=cornerpoints(4,:)-cornerpoints(1,:); 6 | n3=cornerpoints(5,:)-cornerpoints(1,:); 7 | n1=n1/norm(n1); 8 | n2=n2/norm(n2); 9 | n3=n3/norm(n3); 10 | 11 | dot1=n1-[1 1 1]; 12 | dot2=n2-[1 1 1]; 13 | dot3=n3-[1 1 1]; 14 | 15 | [~,id1]=min(abs(dot1)); 16 | [~,id2]=min(abs(dot2)); 17 | [~,id3]=min(abs(dot3)); 18 | 19 | assert(length(unique([id1,id2,id3]))==3) 20 | 21 | [n1,id11]=makedirect(n1,id1); 22 | [n2,id22]=makedirect(n2,id2); 23 | [n3,id33]=makedirect(n3,id3); 24 | 25 | h(id11,:)=n1; 26 | h(id22,:)=n2; 27 | h(id33,:)=n3; 28 | 29 | bbxc=cornerpoints-mean(cornerpoints); 30 | bbxsign=sign((h*bbxc')'); 31 | % bbxsign=sign(bbxc); 32 | for i=1:8 33 | 34 | if bbxsign(i,1)<0&&bbxsign(i,2)<0&&bbxsign(i,3)<0 35 | bbxnew(1,:)=cornerpoints(i,:); 36 | continue; 37 | end 38 | if bbxsign(i,1)>0&&bbxsign(i,2)<0&&bbxsign(i,3)<0 39 | bbxnew(2,:)=cornerpoints(i,:); 40 | continue; 41 | end 42 | if bbxsign(i,1)>0&&bbxsign(i,2)<0&&bbxsign(i,3)>0 43 | bbxnew(3,:)=cornerpoints(i,:); 44 | continue; 45 | end 46 | if bbxsign(i,1)<0&&bbxsign(i,2)<0&&bbxsign(i,3)>0 47 | bbxnew(4,:)=cornerpoints(i,:); 48 | continue; 49 | end 50 | if bbxsign(i,1)<0&&bbxsign(i,2)>0&&bbxsign(i,3)<0 51 | bbxnew(5,:)=cornerpoints(i,:); 52 | continue; 53 | end 54 | if bbxsign(i,1)>0&&bbxsign(i,2)>0&&bbxsign(i,3)<0 55 | bbxnew(6,:)=cornerpoints(i,:); 56 | continue; 57 | end 58 | if bbxsign(i,1)>0&&bbxsign(i,2)>0&&bbxsign(i,3)>0 59 | bbxnew(7,:)=cornerpoints(i,:); 60 | continue; 61 | end 62 | if bbxsign(i,1)<0&&bbxsign(i,2)>0&&bbxsign(i,3)>0 63 | bbxnew(8,:)=cornerpoints(i,:); 64 | continue; 65 | end 66 | end 67 | end 68 | 69 | 70 | function [n1,id]=makedirect(n1,id1) 71 | if (id1==1) 72 | if n1(1)<0 73 | n1=-n1; 74 | end 75 | id=1; 76 | elseif id1==2 77 | if n1(2)<0 78 | n1=-n1; 79 | end 80 | id=1; 81 | elseif id1==3 82 | if n1(3)<0 83 | n1=-n1; 84 | end 85 | id=1; 86 | else 87 | error('error') 88 | end 89 | end -------------------------------------------------------------------------------- /matlab/cotlp.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/cotlp.m -------------------------------------------------------------------------------- /matlab/cotlpvf.m: -------------------------------------------------------------------------------- 1 | function [ v, f, n, fn ] = 
cotlpvf( filename ) 2 | 3 | [v, f, n, fn] = meshlpvf(filename); 4 | v = v'; 5 | n = n'; 6 | fn = fn'; 7 | 8 | end 9 | 10 | -------------------------------------------------------------------------------- /matlab/create_regular_grid.m: -------------------------------------------------------------------------------- 1 | function [UV,F, res, edge_norms] = ... 2 | create_regular_grid(xRes, yRes, xWrap, yWrap, near, far) 3 | % Creates list of triangle vertex indices for a rectangular domain, 4 | % optionally wrapping around in X/Y direction. 5 | % 6 | % Usage: 7 | % [UV,F,res,edge_norms] = create_regular_grid(xRes, yRes, xWrap, yWrap) 8 | % 9 | % Input: 10 | % xRes, yRes: number of points in X/Y direction 11 | % wrapX, wrapY: wrap around in X/Y direction 12 | % near, far: near and far should be fractions of one which control the 13 | % pinching of the domain at the center and sides 14 | % 15 | % Output: 16 | % F : mesh connectivity (triangles) 17 | % UV: UV coordinates in interval [0,1]x[0,1] 18 | % res: mesh resolution 19 | % 20 | % Example: 21 | % % Create and m by n cylinder 22 | % m = 10; n = 20; 23 | % [V,F] = create_regular_grid(m,n,1,0); 24 | % V = [sin(2*pi*V(:,1)) cos(2*pi*V(:,1)) (n-1)*2*pi/(m-1)*V(:,2)]; 25 | % tsurf(F,V); axis equal; 26 | % 27 | 28 | if (nargin<2) yRes=xRes; end 29 | if (nargin<3) xWrap=0; end 30 | if (nargin<4) yWrap=0; end 31 | if (nargin<5) overlap=0; end 32 | 33 | %res = [yRes, xRes]; 34 | res_wrap = [yRes+yWrap, xRes+xWrap]; 35 | 36 | %xSpace = linspace(0,1,xRes+xWrap); if (xWrap) xSpace = xSpace(1:end-1); end 37 | %ySpace = linspace(0,1,yRes+yWrap); if (yWrap) ySpace = ySpace(1:end-1); end 38 | xSpace = linspace(0,1,xRes+xWrap); 39 | ySpace = linspace(0,1,yRes+yWrap); 40 | 41 | [X, Y] = meshgrid(xSpace, ySpace); 42 | UV_wrap = [X(:), Y(:)]; 43 | 44 | % Must perform pinch before edge_norms are taken 45 | if(exist('near') & exist('far')) 46 | if(near>0 & far>0) 47 | t = ( ... 48 | UV_wrap(:,1).*(UV_wrap(:,1)<0.5)+ ... 49 | (1-UV_wrap(:,1)).*(UV_wrap(:,1)>=0.5) ... 50 | )/0.5; 51 | t = 1-sin(t*pi/2+pi/2); 52 | UV_wrap(:,2) = ... 53 | far/2 + ... 54 | near*(UV_wrap(:,2)-0.5).*(1-t) + ... 55 | far*(UV_wrap(:,2)-0.5).*t; 56 | else 57 | %error('Pinch must be between 0 and 1'); 58 | end 59 | end 60 | 61 | 62 | idx_wrap = reshape(1:prod(res_wrap), res_wrap); 63 | 64 | v1_wrap = idx_wrap(1:end-1, 1:end-1); v1_wrap=v1_wrap(:)'; 65 | v2_wrap = idx_wrap(1:end-1, 2:end ); v2_wrap=v2_wrap(:)'; 66 | v3_wrap = idx_wrap(2:end , 1:end-1); v3_wrap=v3_wrap(:)'; 67 | v4_wrap = idx_wrap(2:end , 2:end ); v4_wrap=v4_wrap(:)'; 68 | 69 | F_wrap = [v1_wrap;v2_wrap;v3_wrap; v2_wrap;v4_wrap;v3_wrap]; 70 | F_wrap = reshape(F_wrap, [3, 2*length(v1_wrap)])'; 71 | 72 | % old way 73 | % edges = [F_wrap(:,1) F_wrap(:,2); F_wrap(:,2) F_wrap(:,3); F_wrap(:,3) F_wrap(:,1)]; 74 | % edge_norms = sqrt(sum((UV_wrap(edges(:,1),:)-UV_wrap(edges(:,2),:)).^2,2)); 75 | % edge_norms = reshape(edge_norms,size(F_wrap,1),3); 76 | 77 | % edges numbered same as opposite vertices 78 | edge_norms = [ ... 79 | sqrt(sum((UV_wrap(F_wrap(:,2),:)-UV_wrap(F_wrap(:,3),:)).^2,2)) ... 80 | sqrt(sum((UV_wrap(F_wrap(:,3),:)-UV_wrap(F_wrap(:,1),:)).^2,2)) ... 81 | sqrt(sum((UV_wrap(F_wrap(:,1),:)-UV_wrap(F_wrap(:,2),:)).^2,2)) ... 
82 | ]; 83 | 84 | % correct indices 85 | res = [yRes,xRes]; 86 | idx = reshape(1:prod(res),res); 87 | if (xWrap) idx = [idx, idx(:,1)]; end 88 | if (yWrap) idx = [idx; idx(1,:)]; end 89 | idx_flat = idx(:); 90 | 91 | % this might not be neccessary, could just rebuild UV like before 92 | UV = reshape(UV_wrap,[size(idx_wrap),2]); 93 | UV = UV(1:end-yWrap,1:end-xWrap,:); 94 | UV = reshape(UV,xRes*yRes,2); 95 | 96 | F = [idx_flat(F_wrap(:,1)),idx_flat(F_wrap(:,2)),idx_flat(F_wrap(:,3))]; 97 | -------------------------------------------------------------------------------- /matlab/cube.m: -------------------------------------------------------------------------------- 1 | function [V,F] = cube(x,y,z) 2 | % CUBE Construct a mesh of the unit cube. Sides are ordered like sides of a 3 | % die (one of many dice). 4 | % 5 | % [V,F] = cube(x,y,z) 6 | % 7 | % Inputs: 8 | % x number of vertices along x-axis 9 | % y number of vertices along y-ayis 10 | % z number of vertices along z-azis 11 | % Outputs: 12 | % V x*y*z by 3 list of vertex positions 13 | % F #F by 3 list of triangle indices 14 | % 15 | % 16 | if nargin<2 17 | y = x; 18 | end 19 | if nargin<3 20 | z = y; 21 | end 22 | 23 | sam = [x y;z y;x z;x z;z y;x y]; 24 | axes = [0 1 0;0 1 0;1 0 0;1 0 0;0 1 0;0 1 0]; 25 | angles = [0 pi/2 pi/2 -pi/2 -pi/2 pi]; 26 | V = []; 27 | F = []; 28 | for s = 1:6 29 | [CV,CF] = create_regular_grid(sam(s,1),sam(s,2),0,0); 30 | CV(:,3) = 0; 31 | R = round(axisangle2matrix(axes(s,:),angles(s))); 32 | F = [F;size(V,1)+CF]; 33 | V = [V;(CV-0.5)*R+0.5]; 34 | end 35 | 36 | % Should be able to do this procedurally 37 | [V,~,J] = remove_duplicate_vertices(V,1e-12); 38 | F = J(F); 39 | % oops inside out 40 | F = fliplr(F); 41 | 42 | end 43 | -------------------------------------------------------------------------------- /matlab/file2cellArray.m: -------------------------------------------------------------------------------- 1 | function CA = file2cellArray(fname) 2 | % fname is a string that names a .txt file in the current directory. 3 | % CA is a cell array with CA{k} being the k-th line in the file. 
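% Example (hypothetical file name, for illustration only):
%   CA = file2cellArray('id_list.txt');
%   disp(CA{1})   % first line of the file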
4 | 5 | fid= fopen(fname, 'r'); 6 | ik= 0; 7 | while ~feof(fid) 8 | ik= ik+1; 9 | CA{ik}= fgetl(fid); 10 | end 11 | fclose(fid); -------------------------------------------------------------------------------- /matlab/getlabel.m: -------------------------------------------------------------------------------- 1 | function part_names = getlabel(cate) 2 | type = cate; 3 | if strcmp(type, 'chair') == 1 4 | part_names = {'back', 'seat', 'hand_1', 'hand_2', 'leg_ver_1', 'leg_ver_2', 'leg_ver_3', 'leg_ver_4'}; 5 | elseif strcmp(type, 'knife') == 1 6 | part_names = {'part1', 'part2'}; 7 | elseif strcmp(type, 'guitar') == 1 8 | part_names = {'part1', 'part2', 'part3'}; 9 | elseif strcmp(type, 'monitor') == 1 10 | part_names = {'display', 'connector', 'base'}; 11 | elseif strcmp(type, 'skateboard') 12 | part_names = {'surface', 'bearing1', 'bearing2', 'wheel1_1', 'wheel1_2', 'wheel2_1', 'wheel2_2'}; 13 | elseif strcmp(type, 'cup') == 1 14 | part_names = {'part1', 'part2'}; 15 | elseif strcmp(type, 'car') == 1 16 | part_names = {'body', 'left_front_wheel', 'right_front_wheel', 'left_back_wheel', 'right_back_wheel', 'left_mirror', 'right_mirror'}; 17 | elseif strcmp(type, 'plane') == 1 18 | part_names = {'body', 'left_wing', 'right_wing', 'left_tail', 'right_tail', 'up_tail', 'down_tail', 'front_gear', 'left_gear', 'right_gear', 'left_engine1', 'right_engine1', 'left_engine2', 'right_engine2'}; 19 | elseif strcmp(type, 'table') == 1 20 | part_names = {'surface', 'left_leg1', 'left_leg2', 'left_leg3', 'left_leg4', 'right_leg1', 'right_leg2', 'right_leg3', 'right_leg4'}; 21 | end 22 | end -------------------------------------------------------------------------------- /matlab/isout.m: -------------------------------------------------------------------------------- 1 | function all=isout(data) 2 | % c=mean(data); 3 | % data=abs(data-c); 4 | all=zeros(length(data),1); 5 | [data_sort,id]=sort(data); 6 | grad=diff(data_sort); 7 | 8 | idx = find(grad > 0.02); 9 | if ~isempty(idx) 10 | if idx1 6 | numpart=1; 7 | else 8 | samplelist=dir([folder,'\transformed_cube_*.obj']); 9 | numpart=length(samplelist); 10 | end 11 | for i=1:numpart 12 | 13 | if nargin<1 14 | samplefile = samplelist(i).name; 15 | splitparts = strsplit(samplefile, '_'); 16 | [~,name]=fileparts(splitparts{3}); 17 | end 18 | 19 | srcfile = fullfile(folder, ['transformed_cube_',name, '.obj']); 20 | tarfile = fullfile(folder, [name,'.obj']); 21 | savefile = fullfile(folder, [name, '_reg.obj']); 22 | 23 | % srcfile=[folder,'\part',num2str(i),'.obj']; 24 | % tarfile=[folder,'\sample_part',num2str(i),'_pc.obj']; 25 | % savefile =[folder,'\part',num2str(i),'_reg.obj']; 26 | w1 = 7; 27 | w2 = 0.0; 28 | w3 = 0; 29 | w4 = 40; 30 | % w1 = 50.0; 31 | % w2 = 100; 32 | % w3 = 1500; 33 | % w4 = 90; 34 | iter = 40; 35 | % regis_union(srcfile, tarfile, savefile); 36 | if exist(srcfile,'file')&&exist(tarfile,'file') 37 | 38 | 39 | if ~exist(savefile,'file') 40 | try 41 | NonRigidAlignment3Dnew(srcfile,tarfile, iter,savefile,w1,w2,w3,w4); 42 | % denoise_mesh1(tmpfile, savefile) 43 | % end 44 | catch 45 | continue 46 | end 47 | end 48 | end 49 | 50 | end 51 | end -------------------------------------------------------------------------------- /matlab/nonregistration/register_script.m: -------------------------------------------------------------------------------- 1 | % register 2 | pathFolder = 'G:\wutong\sig2019\shapenetcore_segmentation\03624134_knif\divide_with_face30000' 3 | 4 | d = dir(pathFolder); 5 | isub = [d(:).isdir]; %# returns logical vector 6 | 
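% Editorial note: this standalone script registers the two knife parts
% ('part1', 'part2') under a hard-coded folder; the generalized entry
% point used by Pipeline.m is regist.m, which loops over getlabel(type)
% and calls nonrigidregis for every part name.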
nameFolds = {d(isub).name}'; 7 | nameFolds(ismember(nameFolds,{'.','..'})) = []; 8 | parfor i = 1:size(nameFolds, 1) 9 | nonrigidregis([pathFolder, '\', nameFolds{i}],'part1') 10 | end 11 | parfor i = 1:size(nameFolds, 1) 12 | nonrigidregis([pathFolder, '\', nameFolds{i}],'part2') 13 | end 14 | 15 | 16 | -------------------------------------------------------------------------------- /matlab/nonregistration/search_nn_bidirector.m: -------------------------------------------------------------------------------- 1 | function [sqrDz,P,NP]=search_nn_bidirector(Z,ZF,Y,YF,NFY) 2 | % find the nnpoint bi-directory 3 | [sqrDz,Iz,Cz] = point_mesh_squared_distance(Z,Y,YF); 4 | % idz=knnsearch(Y,Cz); 5 | 6 | [sqrDy,Iy,Cy] = point_mesh_squared_distance(Y,Z,ZF); 7 | id_out=find(sqrDy>0.0001); 8 | id_out=[]; 9 | if ~isempty(id_out) 10 | on_Source = Cy(id_out,:); 11 | idy=knnsearch(Z,on_Source); 12 | y_out=Y(id_out,:); 13 | Cz(idy,:)=y_out; 14 | sqrDz(idy,:)=sqrDy(id_out,:); 15 | 16 | P=Cz; 17 | NP=NFY(Iz,:); 18 | 19 | for i=1:length(id_out) 20 | id=find(sum(YF==id_out(i),2)==1); 21 | if isempty(id) 22 | continue; 23 | end 24 | NP(idy(i),:)=mean(NFY(id,:)); 25 | end 26 | else 27 | P = Cz; 28 | NP = NFY(Iz,:); 29 | end 30 | end -------------------------------------------------------------------------------- /matlab/normalizerow.m: -------------------------------------------------------------------------------- 1 | function [ A ] = normalizerow( A ) %#codegen 2 | % NORMALIZEROW Normalize each row so that each row's l2 norm as a vector is 1 3 | % 4 | % [ A ] = normalizerow( A ) 5 | % 6 | % Input: 7 | % A #A by D list of row vectors of dimension D 8 | % Output: 9 | % B #B by D list of normalized row vectors 10 | % 11 | % Copyright 2011, Alec Jacobson (jacobson@inf.ethz.ch), Daniele Panozzo 12 | % 13 | 14 | if issparse(A) 15 | % speed up (20x) for large sparse matrices 16 | A = bsxfun(@times,A,1./sqrt(sum(A.^2,2))); 17 | else 18 | A = A./repmat(sqrt(sum(A.^2,2)),1,size(A,2)); 19 | end 20 | end 21 | 22 | -------------------------------------------------------------------------------- /matlab/opencv_world420.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/opencv_world420.dll -------------------------------------------------------------------------------- /matlab/patchslim.m: -------------------------------------------------------------------------------- 1 | function [vnew, fnew]=patchslim(v, f) 2 | % PATCHSLIM removes duplicate vertices in surface meshes. 3 | % 4 | % This function finds and removes duplicate vertices. 5 | % 6 | % USAGE: [v, f]=patchslim(v, f) 7 | % 8 | % Where v is the vertex list and f is the face list specifying vertex 9 | % connectivity. 10 | % 11 | % v contains the vertices for all triangles [3*n x 3]. 12 | % f contains the vertex lists defining each triangle face [n x 3]. 13 | % 14 | % This will reduce the size of typical v matrix by about a factor of 6. 
15 | % 16 | % For more information see: 17 | % http://www.esmonde-white.com/home/diversions/matlab-program-for-loading-stl-files 18 | % 19 | % Francis Esmonde-White, May 2010 20 | if ~exist('v','var') 21 | error('The vertex list (v) must be specified.'); 22 | end 23 | if ~exist('f','var') 24 | error('The vertex connectivity of the triangle faces (f) must be specified.'); 25 | end 26 | [vnew, indexm, indexn] = unique(v, 'rows'); 27 | fnew = indexn(f); -------------------------------------------------------------------------------- /matlab/point_mesh_squared_distance.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/point_mesh_squared_distance.mexw64 -------------------------------------------------------------------------------- /matlab/ray_tracing.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ray_tracing.exe -------------------------------------------------------------------------------- /matlab/ray_tracing_4car.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ray_tracing_4car.exe -------------------------------------------------------------------------------- /matlab/ray_tracing_4carbody.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ray_tracing_4carbody.exe -------------------------------------------------------------------------------- /matlab/ray_tracing_transparency.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/ray_tracing_transparency.exe -------------------------------------------------------------------------------- /matlab/readobjfromfile.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/readobjfromfile.mexw64 -------------------------------------------------------------------------------- /matlab/regist.m: -------------------------------------------------------------------------------- 1 | function regist(Dirpath, type) 2 | %% This process takes a long time 3 | d = dir(Dirpath); 4 | isub = [d(:).isdir]; %# returns logical vector 5 | nameFolds = {d(isub).name}'; 6 | nameFolds(ismember(nameFolds,{'.','..'})) = []; 7 | 8 | part_names=getlabel(type); 9 | 10 | 11 | for p=1:length(part_names) 12 | parfor i = 1:size(nameFolds, 1) 13 | nonrigidregis([Dirpath, '\', nameFolds{i}],part_names{p}) 14 | end 15 | end 16 | 17 | 18 | end -------------------------------------------------------------------------------- /matlab/remove_duplicate_vertices.m: -------------------------------------------------------------------------------- 1 | function [SV,SVI,SVJ] = remove_duplicate_vertices(V,epsilon,varargin) 2 | % REMOVE_DUPLICATE_VERTICES Remove duplicate vertices upto a uniqueness 3 | % tolerance (epsilon) 4 | % 5 | % SV = remove_duplicate_vertices(V,epsilon) 6 | % [SV,SVI,SVJ] = ... 7 | % remove_duplicate_vertices(V,epsilon,'ParameterName',ParameterValue,...) 
8 | % 9 | % Inputs: 10 | % V #V by dim list of vertex positions 11 | % epsilon uniqueness tolerance (significant digit) 12 | % Optional: 13 | % 'WhiteList' Only merge vertices from the following selection (not 14 | % working correctly, yet) 15 | % Outputs: 16 | % SV #SV by dim new list of vertex positions 17 | % SVI #SV by 1 list of indices so SV = V(SVI,:) 18 | % SVJ #V by 1 list of indices so V = SV(SVJ,:) 19 | % 20 | % Example: 21 | % % Mesh in (V,F) 22 | % [SV,SVI,SVJ] = remove_duplicate_vertices(V,1e-7); 23 | % % remap faces 24 | % SF = SVJ(F); 25 | % 26 | 27 | % default values 28 | whitelist = []; 29 | % Map of parameter names to variable names 30 | params_to_variables = containers.Map( ... 31 | {'WhiteList'}, ... 32 | {'whitelist'}); 33 | v = 1; 34 | while v <= numel(varargin) 35 | param_name = varargin{v}; 36 | if isKey(params_to_variables,param_name) 37 | assert(v+1<=numel(varargin)); 38 | v = v+1; 39 | % Trick: use feval on anonymous function to use assignin to this workspace 40 | feval(@()assignin('caller',params_to_variables(param_name),varargin{v})); 41 | else 42 | error('Unsupported parameter: %s',varargin{v}); 43 | end 44 | v=v+1; 45 | end 46 | 47 | if isempty(whitelist) 48 | assert(nargin==1 || epsilon >= 0); 49 | if nargin==1 || epsilon == 0 50 | [SV,SVI,SVJ] = unique(V,'rows','stable'); 51 | else 52 | [~,SVI,SVJ] = unique(round(V/(10*epsilon)),'rows','stable'); 53 | SV = V(SVI,:); 54 | end 55 | else 56 | error('not implemented correctly') 57 | VW = V(whitelist,:); 58 | J = 1:size(V,1); 59 | JW = J(whitelist); 60 | [SVW,SVIW,SVJW] = remove_duplicate_vertices(VW,epsilon); 61 | SJ = 1:size(V,1); 62 | SJ(whitelist) = JW(SVJ); 63 | end 64 | end 65 | -------------------------------------------------------------------------------- /matlab/removeoutliner.m: -------------------------------------------------------------------------------- 1 | function pcout=removeoutliner(pc) 2 | x=pc(:,1); 3 | y=pc(:,2); 4 | z=pc(:,3); 5 | tfx=isout(x); 6 | tfy=isout(y); 7 | tfz=isout(z); 8 | isouta=any([tfx,tfy,tfz],2); 9 | pcout=pc(~isouta,:); 10 | end -------------------------------------------------------------------------------- /matlab/sort_nat.m: -------------------------------------------------------------------------------- 1 | function [cs,index] = sort_nat(c,mode) 2 | %sort_nat: Natural order sort of cell array of strings. 3 | % usage: [S,INDEX] = sort_nat(C) 4 | % 5 | % where, 6 | % C is a cell array (vector) of strings to be sorted. 7 | % S is C, sorted in natural order. 8 | % INDEX is the sort order such that S = C(INDEX); 9 | % 10 | % Natural order sorting sorts strings containing digits in a way such that 11 | % the numerical value of the digits is taken into account. It is 12 | % especially useful for sorting file names containing index numbers with 13 | % different numbers of digits. Often, people will use leading zeros to get 14 | % the right sort order, but with this function you don't have to do that. 15 | % For example, if C = {'file1.txt','file2.txt','file10.txt'}, a normal sort 16 | % will give you 17 | % 18 | % {'file1.txt' 'file10.txt' 'file2.txt'} 19 | % 20 | % whereas, sort_nat will give you 21 | % 22 | % {'file1.txt' 'file2.txt' 'file10.txt'} 23 | % 24 | % See also: sort 25 | 26 | % Version: 1.4, 22 January 2011 27 | % Author: Douglas M. Schwarz 28 | % Email: dmschwarz=ieee*org, dmschwarz=urgrad*rochester*edu 29 | % Real_email = regexprep(Email,{'=','*'},{'@','.'}) 30 | 31 | 32 | % Set default value for mode if necessary. 
33 | if nargin < 2 34 | mode = 'ascend'; 35 | end 36 | 37 | % Make sure mode is either 'ascend' or 'descend'. 38 | modes = strcmpi(mode,{'ascend','descend'}); 39 | is_descend = modes(2); 40 | if ~any(modes) 41 | error('sort_nat:sortDirection',... 42 | 'sorting direction must be ''ascend'' or ''descend''.') 43 | end 44 | 45 | % Replace runs of digits with '0'. 46 | c2 = regexprep(c,'\d+','0'); 47 | 48 | % Compute char version of c2 and locations of zeros. 49 | s1 = char(c2); 50 | z = s1 == '0'; 51 | 52 | % Extract the runs of digits and their start and end indices. 53 | [digruns,first,last] = regexp(c,'\d+','match','start','end'); 54 | 55 | % Create matrix of numerical values of runs of digits and a matrix of the 56 | % number of digits in each run. 57 | num_str = length(c); 58 | max_len = size(s1,2); 59 | num_val = NaN(num_str,max_len); 60 | num_dig = NaN(num_str,max_len); 61 | for i = 1:num_str 62 | num_val(i,z(i,:)) = sscanf(sprintf('%s ',digruns{i}{:}),'%f'); 63 | num_dig(i,z(i,:)) = last{i} - first{i} + 1; 64 | end 65 | 66 | % Find columns that have at least one non-NaN. Make sure activecols is a 67 | % 1-by-n vector even if n = 0. 68 | activecols = reshape(find(~all(isnan(num_val))),1,[]); 69 | n = length(activecols); 70 | 71 | % Compute which columns in the composite matrix get the numbers. 72 | numcols = activecols + (1:2:2*n); 73 | 74 | % Compute which columns in the composite matrix get the number of digits. 75 | ndigcols = numcols + 1; 76 | 77 | % Compute which columns in the composite matrix get chars. 78 | charcols = true(1,max_len + 2*n); 79 | charcols(numcols) = false; 80 | charcols(ndigcols) = false; 81 | 82 | % Create and fill composite matrix, comp. 83 | comp = zeros(num_str,max_len + 2*n); 84 | comp(:,charcols) = double(s1); 85 | comp(:,numcols) = num_val(:,activecols); 86 | comp(:,ndigcols) = num_dig(:,activecols); 87 | 88 | % Sort rows of composite matrix and use index to sort c in ascending or 89 | % descending order, depending on mode. 
90 | [unused,index] = sortrows(comp); 91 | if is_descend 92 | index = index(end:-1:1); 93 | end 94 | index = reshape(index,size(c)); 95 | cs = c(index); 96 | -------------------------------------------------------------------------------- /matlab/teaser.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/TM-NET/5cb318cac05bf2ca639abf53c91ae99744559e32/matlab/teaser.jpg -------------------------------------------------------------------------------- /python/agent/__init__.py: -------------------------------------------------------------------------------- 1 | from agent.agent_vqvae import VQVAEAgent 2 | from agent.agent_geovae import GeoVAEAgent 3 | from agent.agent_spvae import SPVAEAgent 4 | from agent.agent_pixelsnail import PixelSNAILAgent 5 | from agent.agent_pixelsnail_others import PixelSNAILOthersAgent 6 | 7 | def get_agent(config): 8 | model_name = config['model']['name'] 9 | if model_name == 'vqvae': 10 | return VQVAEAgent(config) 11 | elif model_name == 'geovae': 12 | return GeoVAEAgent(config) 13 | elif model_name == 'spvae': 14 | return SPVAEAgent(config) 15 | elif model_name == 'pixelsnail_top_center' or \ 16 | model_name == 'pixelsnail_bottom_center': 17 | return PixelSNAILAgent(config) 18 | elif model_name == 'pixelsnail_top_others' or \ 19 | model_name == 'pixelsnail_bottom_others': 20 | return PixelSNAILOthersAgent(config) 21 | else: 22 | raise ValueError -------------------------------------------------------------------------------- /python/agent/agent_geovae.py: -------------------------------------------------------------------------------- 1 | import os 2 | import scipy.io as sio 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from einops import rearrange, reduce, repeat 8 | from torch.nn.modules import loss 9 | from networks import get_network 10 | from torchvision import utils 11 | 12 | from agent.base import BaseAgent 13 | from util.visualization import merge_patches 14 | 15 | class GeoVAEAgent(BaseAgent): 16 | def __init__(self, config): 17 | super(GeoVAEAgent, self).__init__(config) 18 | self.device = config[config['mode']]['device'] 19 | print(config['train']['lr']) 20 | 21 | def build_net(self, config): 22 | net = get_network(config) 23 | print('-----Part Sequence architecture-----') 24 | print(net) 25 | if config['data']['parallel']: 26 | net = nn.DataParallel(net) 27 | net = net.to(self.device) 28 | return net 29 | 30 | def set_loss_function(self): 31 | self.criterion = nn.MSELoss(reduction='mean') 32 | 33 | def forward(self, data): 34 | outputs = {} 35 | losses = {} 36 | kl_loss_weight = 0.001 37 | geo_input, origin_geo_input, logrmax, logrmin, smax, smin, fullname = data 38 | geo_input = geo_input.to(self.device).float().contiguous() 39 | geo_z, geo_output, mu, logvar = self.net(geo_input) 40 | 41 | geo_recon_loss = self.criterion(geo_input, geo_output)*self.net.num_point*9 42 | kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())/geo_output.shape[0] 43 | 44 | outputs['z'] = geo_z 45 | outputs['dec'] = geo_output 46 | losses['recon'] = geo_recon_loss 47 | losses['kl'] = kl_loss * kl_loss_weight 48 | return outputs, losses 49 | 50 | def train_func(self, data): 51 | """one step of training""" 52 | self.net.train() 53 | 54 | outputs, losses = self.forward(data) 55 | 56 | self.update_network(losses) 57 | self.record_losses(losses, 'train') 58 | 59 | return outputs, losses 60 | 61 | def normalize_back(self, logrmax, 
logrmin, smax, smin, geo_output): 62 | logr = geo_output[:, :, :3] 63 | s = geo_output[:, :, 3:9] 64 | resultmax = 0.95 65 | resultmin = -0.95 66 | 67 | s = (smax - smin) * (s - resultmin) / (resultmax - resultmin) + smin 68 | # logr = np.expand_dims(logrmax - logrmin, axis=(1, 2)) * (logr - resultmin) / (resultmax - resultmin) + np.expand_dims(logrmin, axis=(1, 2)) 69 | logr = (logrmax - logrmin) * (logr - resultmin) / (resultmax - resultmin) + logrmin 70 | geo_output = np.concatenate((logr, s), axis = 2) 71 | return geo_output 72 | 73 | def visualize_batch(self, data, mode, outputs=None): 74 | if mode == 'autoencode': 75 | geo_input, origin_geo_input, logrmax, logrmin, smax, smin, filename = data 76 | filename = filename[0] 77 | filename = os.path.basename(filename) 78 | filename = filename.split('.')[0] 79 | 80 | geo_input = geo_input.to(self.device).float().contiguous() 81 | geo_z, geo_output, mu, logvar = self.net(geo_input) 82 | 83 | origin_geo_input = torch.Tensor.cpu(origin_geo_input).detach().numpy() 84 | geo_input = torch.Tensor.cpu(geo_input).detach().numpy() 85 | geo_output = torch.Tensor.cpu(geo_output).detach().numpy() 86 | logrmax = torch.Tensor.cpu(logrmax).detach().numpy() 87 | logrmin = torch.Tensor.cpu(logrmin).detach().numpy() 88 | origin_geo_output = self.normalize_back(logrmax, logrmin, smax, smin, geo_output) 89 | print('{} {} {}'.format(filename, np.linalg.norm(origin_geo_input-origin_geo_output), np.linalg.norm(geo_input-geo_output))) 90 | 91 | autoencode_dir = os.path.join(self.model_dir, 'autoencode'.format(mode)) 92 | if not os.path.exists(autoencode_dir): 93 | os.mkdir(autoencode_dir) 94 | 95 | sio.savemat(os.path.join(autoencode_dir, filename+'.mat'), {'geo_output': origin_geo_output}, do_compression=False) 96 | elif mode == 'interpolate': 97 | # removed 98 | pass 99 | elif mode == 'generate': 100 | geo_inputs, origin_geo_inputs, logrmaxs, logrmins, smaxs, smins, filenames = data 101 | 102 | N = len(filenames) 103 | mean = outputs['z'].mean() 104 | std = outputs['z'].std() 105 | print('{} {}'.format(mean, std)) 106 | random_z = torch.normal(mean, std, size=(N, self.net.geo_hidden_dim)).to(self.device) 107 | random_outputs = self.net.geo_decoder(random_z) 108 | 109 | origin_geo_inputs = torch.Tensor.cpu(origin_geo_inputs).detach().numpy() 110 | geo_inputs = torch.Tensor.cpu(geo_inputs).detach().numpy() 111 | geo_outputs = torch.Tensor.cpu(random_outputs).detach().numpy() 112 | logrmaxs = torch.Tensor.cpu(logrmaxs).detach().numpy() 113 | logrmins = torch.Tensor.cpu(logrmins).detach().numpy() 114 | for i in range(N): 115 | origin_random_output = self.normalize_back(logrmaxs[i], logrmins[i], smaxs[i], smins[i], geo_outputs[i:i+1, :, :]) 116 | 117 | generate_dir = os.path.join(self.model_dir, 'generate'.format(mode)) 118 | if not os.path.exists(generate_dir): 119 | os.mkdir(generate_dir) 120 | 121 | sio.savemat(os.path.join(generate_dir, '{}.mat'.format(i)), {'geo_output': origin_random_output}, do_compression=False) 122 | else: 123 | pass -------------------------------------------------------------------------------- /python/agent/agent_pixelsnail.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from einops import rearrange, reduce, repeat 8 | from networks import get_network 9 | from torchvision import utils 10 | 11 | from agent.base import BaseAgent 12 | from util.visualization import merge_patches 13 | 
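# Sketch of the data flow handled by this agent, inferred from forward() below and from
# dataset_latent_geo_2levels / networks/__init__ (shapes are assumptions, not guarantees):
#   - each sample carries the part's GeoVAE latent `geo_zs` plus the VQ-VAE codebook index
#     maps of its texture, `top` and `bottom`, one map per cube patch; the 6 patch maps are
#     stacked vertically into a (6*H, W) grid before being fed to PixelSNAIL;
#   - the 'top' hierarchy trains a conditional prior over the top indices given geo_zs,
#     while the 'bottom' hierarchy models the bottom indices conditioned on the top ones.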
14 | class PixelSNAILAgent(BaseAgent): 15 | def __init__(self, config): 16 | super(PixelSNAILAgent, self).__init__(config) 17 | self.device = config[config['mode']]['device'] 18 | self.hier = config['model']['name'].split('_')[1] 19 | print(config['train']['lr']) 20 | 21 | def build_net(self, config): 22 | net = get_network(config) 23 | print('-----Part Sequence architecture-----') 24 | print(net) 25 | if config['data']['parallel']: 26 | net = nn.DataParallel(net, device_ids=config['data']['parallel_devices']) 27 | # net = net.to(self.device) 28 | return net 29 | 30 | def set_loss_function(self): 31 | self.criterion = nn.CrossEntropyLoss() 32 | 33 | def forward(self, data): 34 | outputs = {} 35 | losses = {} 36 | geo_zs, top, bottom, filenames = data 37 | geo_zs = geo_zs.unsqueeze(1) 38 | # top = top.reshape(top.shape[0], top.shape[2], top.shape[1]*top.shape[3]) 39 | # bottom = bottom.reshape(bottom.shape[0], bottom.shape[2], bottom.shape[1]*bottom.shape[3]) 40 | top = top.reshape(top.shape[0], top.shape[1]*top.shape[2], top.shape[3]) 41 | bottom = bottom.reshape(bottom.shape[0], bottom.shape[1]*bottom.shape[2], bottom.shape[3]) 42 | # print(top.shape) 43 | # print(bottom.shape) 44 | if self.hier == 'top': 45 | top = top.to(self.device).contiguous() 46 | geo_zs = geo_zs.to(self.device).contiguous() 47 | target = top 48 | out, _, latent_diff = self.net(top, condition=geo_zs) 49 | elif self.hier == 'bottom': 50 | top = top.to(self.device).contiguous() 51 | bottom = bottom.to(self.device).contiguous() 52 | target = bottom 53 | out, _, latent_diff = self.net(bottom, condition=top) 54 | # cross_entropy_loss 55 | CE_loss = self.criterion(out, target) 56 | _, pred = out.max(1) 57 | correct = (pred == target).float() 58 | accuracy = correct.sum() / target.numel() 59 | print(accuracy) 60 | 61 | outputs['out'] = out 62 | losses['CE'] = CE_loss 63 | if latent_diff is not None: 64 | losses['latent'] = latent_diff 65 | return outputs, losses 66 | 67 | def train_func(self, data): 68 | """one step of training""" 69 | self.net.train() 70 | 71 | outputs, losses = self.forward(data) 72 | 73 | self.update_network(losses) 74 | self.record_losses(losses, 'train') 75 | 76 | return outputs, losses 77 | 78 | def visualize_batch(self, data, mode, outputs=None): 79 | if mode == 'train': 80 | return 81 | imgs = data[0] 82 | filenames = data[1] 83 | recon_dir = os.path.join(self.model_dir, '{}_recon'.format(mode)) 84 | if not os.path.exists(recon_dir): 85 | os.mkdir(recon_dir) 86 | 87 | pass 88 | -------------------------------------------------------------------------------- /python/agent/agent_pixelsnail_others.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from einops import rearrange, reduce, repeat 8 | from networks import get_network 9 | from torchvision import utils 10 | 11 | from agent.base import BaseAgent 12 | from util.visualization import merge_patches 13 | 14 | class PixelSNAILOthersAgent(BaseAgent): 15 | def __init__(self, config): 16 | super(PixelSNAILOthersAgent, self).__init__(config) 17 | self.device = config[config['mode']]['device'] 18 | self.hier = config['model']['name'].split('_')[1] 19 | print(config['train']['lr']) 20 | 21 | def build_net(self, config): 22 | net = get_network(config) 23 | print('-----Part Sequence architecture-----') 24 | print(net) 25 | if config['data']['parallel']: 26 | net = nn.DataParallel(net, 
device_ids=config['data']['parallel_devices']) 27 | # net = net.to(self.device) 28 | return net 29 | 30 | def set_loss_function(self): 31 | self.criterion = nn.CrossEntropyLoss() 32 | 33 | def forward(self, data): 34 | outputs = {} 35 | losses = {} 36 | geo_zs, top, bottom, central_vggs, filenames = data 37 | geo_zs = geo_zs.unsqueeze(1) 38 | # top = top.reshape(top.shape[0], top.shape[2], top.shape[1]*top.shape[3]) 39 | # bottom = bottom.reshape(bottom.shape[0], bottom.shape[2], bottom.shape[1]*bottom.shape[3]) 40 | top = top.reshape(top.shape[0], top.shape[1]*top.shape[2], top.shape[3]) 41 | bottom = bottom.reshape(bottom.shape[0], bottom.shape[1]*bottom.shape[2], bottom.shape[3]) 42 | central_vggs = central_vggs.reshape(central_vggs.shape[0], 1, 1, 6000) 43 | # print(top.shape) 44 | # print(bottom.shape) 45 | if self.hier == 'top': 46 | top = top.to(self.device).contiguous() 47 | geo_zs = geo_zs.to(self.device).contiguous() 48 | central_vggs = central_vggs.to(self.device).contiguous() 49 | # print(geo_zs.shape) 50 | # print(central_vggs.shape) 51 | concated_condition = torch.cat([central_vggs, geo_zs], -1) 52 | # print(concated_condition.shape) 53 | target = top 54 | out, _, latent_diff = self.net(top, condition=concated_condition) 55 | elif self.hier == 'bottom': 56 | top = top.to(self.device).contiguous() 57 | bottom = bottom.to(self.device).contiguous() 58 | target = bottom 59 | out, _, latent_diff = self.net(bottom, condition=top) 60 | # cross_entropy_loss 61 | CE_loss = self.criterion(out, target) 62 | _, pred = out.max(1) 63 | correct = (pred == target).float() 64 | accuracy = correct.sum() / target.numel() 65 | print(accuracy) 66 | 67 | outputs['out'] = out 68 | losses['CE'] = CE_loss 69 | if latent_diff is not None: 70 | losses['latent'] = latent_diff 71 | return outputs, losses 72 | 73 | def train_func(self, data): 74 | """one step of training""" 75 | self.net.train() 76 | 77 | outputs, losses = self.forward(data) 78 | 79 | self.update_network(losses) 80 | self.record_losses(losses, 'train') 81 | 82 | return outputs, losses 83 | 84 | def visualize_batch(self, data, mode, outputs=None): 85 | if mode == 'train': 86 | return 87 | imgs = data[0] 88 | filenames = data[1] 89 | recon_dir = os.path.join(self.model_dir, '{}_recon'.format(mode)) 90 | if not os.path.exists(recon_dir): 91 | os.mkdir(recon_dir) 92 | pass 93 | -------------------------------------------------------------------------------- /python/agent/agent_spvae.py: -------------------------------------------------------------------------------- 1 | import os 2 | import einops 3 | import scipy.io as sio 4 | import numpy as np 5 | import torch 6 | import torch.nn as nn 7 | import torch.nn.functional as F 8 | from einops import rearrange, reduce, repeat 9 | from torch.nn.modules import loss 10 | from networks import get_network 11 | from torchvision import utils 12 | 13 | from agent.base import BaseAgent 14 | from util.visualization import merge_patches 15 | 16 | class SPVAEAgent(BaseAgent): 17 | def __init__(self, config): 18 | super(SPVAEAgent, self).__init__(config) 19 | self.device = config[config['mode']]['device'] 20 | print(config['train']['lr']) 21 | 22 | def build_net(self, config): 23 | net = get_network(config) 24 | print('-----Part Sequence architecture-----') 25 | print(net) 26 | if config['data']['parallel']: 27 | net = nn.DataParallel(net) 28 | net = net.to(self.device) 29 | return net 30 | 31 | def set_loss_function(self): 32 | self.criterion = nn.MSELoss(reduction='sum') 33 | 34 | def forward(self, data): 
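        # Rough walk-through (inferred from dataset_spvae and networks_spvae, so treat the
        # details as assumptions): `geo_input` holds, per shape, the concatenated per-part
        # GeoVAE latents and structure code loaded from geo_zs.mat; the einops rearrange
        # below flattens them into one vector per shape so the MLP-based SPVAE can encode
        # the whole part arrangement at once.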
35 | outputs = {} 36 | losses = {} 37 | kl_loss_weight = 0.001 38 | geo_input, fullname = data 39 | geo_input = einops.rearrange(geo_input, 'b n p l -> b (n p l)') 40 | geo_input = geo_input.to(self.device).float().contiguous() 41 | geo_z, geo_output, mu, logvar = self.net(geo_input) 42 | 43 | geo_recon_loss = self.criterion(geo_input, geo_output)/geo_output.shape[0] 44 | kl_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())/geo_output.shape[0] 45 | 46 | outputs['z'] = geo_z 47 | outputs['dec'] = geo_output 48 | losses['recon'] = geo_recon_loss 49 | losses['kl'] = kl_loss * kl_loss_weight 50 | return outputs, losses 51 | 52 | def train_func(self, data): 53 | """one step of training""" 54 | self.net.train() 55 | 56 | outputs, losses = self.forward(data) 57 | 58 | self.update_network(losses) 59 | self.record_losses(losses, 'train') 60 | 61 | return outputs, losses 62 | 63 | def visualize_batch(self, data, mode, outputs=None): 64 | pass -------------------------------------------------------------------------------- /python/agent/agent_vqvae.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from einops import rearrange, reduce, repeat 8 | from networks import get_network 9 | from torchvision import utils 10 | 11 | from agent.base import BaseAgent 12 | from util.visualization import merge_patches 13 | 14 | class VQVAEAgent(BaseAgent): 15 | def __init__(self, config): 16 | super(VQVAEAgent, self).__init__(config) 17 | self.device = config[config['mode']]['device'] 18 | # print(config['train']['lr']) 19 | self.in_channel = config['model']['in_channel'] 20 | self.latent_loss_weight = config['model']['alpha'] 21 | 22 | def build_net(self, config): 23 | net = get_network(config) 24 | print('-----Part Sequence architecture-----') 25 | print(net) 26 | if config['data']['parallel']: 27 | net = nn.DataParallel(net) 28 | print(self.device) 29 | net = net.to(self.device) 30 | return net 31 | 32 | def set_loss_function(self): 33 | self.criterion = nn.L1Loss(reduction='sum') 34 | 35 | def get_seam_loss(self, recon_batches): 36 | if recon_batches.shape[0] % 6 != 0: 37 | print('batch size shoule be set as a multiply of 6.') 38 | return 0 39 | model_num = recon_batches.shape[0]/6 40 | loss = 0 41 | 42 | for i in range(int(model_num)): 43 | patch0 = recon_batches[6*i, :, :, :] 44 | patch1 = recon_batches[6*i+1, :, :, :] 45 | patch2 = recon_batches[6*i+2, :, :, :] 46 | patch3 = recon_batches[6*i+3, :, :, :] 47 | patch4 = recon_batches[6*i+4, :, :, :] 48 | patch5 = recon_batches[6*i+5, :, :, :] 49 | 50 | loss += ( 51 | self.criterion(patch0[:, :, 0], patch1[:, 0, :]) + \ 52 | self.criterion(patch0[:, 255, :], patch2[:, 0, :]) + \ 53 | self.criterion(patch0[:, :, 255], torch.flip(patch3[:, 0, :], [1])) + \ 54 | self.criterion(patch0[:, 0, :], torch.flip(patch4[:, 0, :], [1])) + \ 55 | self.criterion(patch1[:, :, 255], patch2[:, :, 0]) + \ 56 | self.criterion(patch1[:, :, 0], patch4[:, :, 255]) + \ 57 | self.criterion(patch1[:, 255, :], torch.flip(patch5[:, :, 0], [1])) + \ 58 | self.criterion(patch2[:, :, 255], patch3[:, :, 0]) + \ 59 | self.criterion(patch2[:, 255, :], patch5[:, 0, :]) + \ 60 | self.criterion(patch3[:, :, 255], patch4[:, :, 0]) + \ 61 | self.criterion(patch3[:, 255, :], patch5[:, :, 255]) + \ 62 | self.criterion(patch4[:, 255, :], torch.flip(patch5[:, 255, :], [1])) \ 63 | )/model_num 64 | return loss 65 | 66 | def 
forward(self, data): 67 | outputs = {} 68 | losses = {} 69 | latent_loss_weight = self.latent_loss_weight 70 | 71 | img = rearrange(data[0], 'B P C H W -> (B P) C H W') 72 | filenames = data[1] 73 | 74 | img = img.to(self.device).contiguous() 75 | dec, latent_loss, quant_t, quant_b = self.net(img) 76 | 77 | # print('{} {} '.format(quant_t.shape, quant_b.shape)) 78 | # texture 79 | recon_loss = self.criterion(dec, img)/img.shape[0] 80 | latent_loss = latent_loss.mean() * 64 * 16 * 16 81 | # seam_loss 82 | seam_loss = self.get_seam_loss(dec) 83 | 84 | outputs['dec'] = dec 85 | losses['recon'] = recon_loss 86 | losses['latent'] = latent_loss * latent_loss_weight 87 | return outputs, losses 88 | 89 | def train_func(self, data): 90 | """one step of training""" 91 | self.net.train() 92 | 93 | outputs, losses = self.forward(data) 94 | 95 | self.update_network(losses) 96 | self.record_losses(losses, 'train') 97 | 98 | return outputs, losses 99 | 100 | def visualize_batch(self, data, mode, outputs=None): 101 | if mode == 'train': 102 | return 103 | imgs = data[0] 104 | filenames = data[1] 105 | # flat 106 | flat_filenames = [] 107 | for i in range(len(filenames[0])): 108 | for j in range(len(filenames)): 109 | flat_filenames.append(filenames[j][i]) 110 | filenames = flat_filenames 111 | 112 | recon_dir = os.path.join(self.model_dir, '{}_recon'.format(mode)) 113 | if not os.path.exists(recon_dir): 114 | os.mkdir(recon_dir) 115 | 116 | dec = outputs['dec'] 117 | # if dec.shape[1] == 4: 118 | # dec[:, 3, :, :] = ((dec[:, 3, :, :] > 0).float()-0.5)*2 119 | dec = dec.clamp(-1, 1) 120 | for i in range(dec.shape[0]): 121 | filename = filenames[i] 122 | utils.save_image( 123 | dec[i, :, :, :], 124 | # imgs[i, :, :, :], 125 | os.path.join(recon_dir, filename+'.png'), 126 | nrow=1, 127 | normalize=True, 128 | range=(-1, 1), 129 | ) 130 | merge_patches(recon_dir, channel=self.in_channel) -------------------------------------------------------------------------------- /python/agent/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.optim as optim 4 | import torch.nn as nn 5 | from abc import abstractmethod 6 | from tensorboardX import SummaryWriter 7 | from util.utils import TrainClock 8 | 9 | 10 | class BaseAgent(object): 11 | """Base trainer that provides commom training behavior. 12 | All trainer should be subclass of this class. 
13 | """ 14 | def __init__(self, config): 15 | self.log_dir = config['train']['log_dir'] 16 | self.model_dir = config['train']['model_dir'] 17 | self.clock = TrainClock() 18 | self.batch_size = config['train']['batch_size'] 19 | self.device = config[config['mode']]['device'] 20 | # build network 21 | self.net = self.build_net(config) 22 | self.net = self.net.to(self.device) 23 | # set loss function 24 | self.set_loss_function() 25 | 26 | # set optimizer 27 | self.set_optimizer(config) 28 | 29 | # set tensorboard writer 30 | self.train_tb = SummaryWriter(os.path.join(self.log_dir, 'train.events')) 31 | self.val_tb = SummaryWriter(os.path.join(self.log_dir, 'val.events')) 32 | 33 | @abstractmethod 34 | def build_net(self, config): 35 | raise NotImplementedError 36 | 37 | def set_loss_function(self): 38 | """set loss function used in training""" 39 | # self.criterion = nn.MSELoss().to(self.device) 40 | raise NotImplementedError 41 | 42 | def set_optimizer(self, config): 43 | """set optimizer and lr scheduler used in training""" 44 | 45 | self.optimizer = optim.Adam(self.net.parameters(), config['train']['lr']) 46 | self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, config['train']['lr_step_size']) 47 | 48 | def save_ckpt(self, name=None): 49 | """save checkpoint during training for future restore""" 50 | if name is None: 51 | save_path = os.path.join(self.model_dir, "ckpt_epoch{}.pth".format(self.clock.epoch)) 52 | print("Checkpoint saved at {}".format(save_path)) 53 | else: 54 | save_path = os.path.join(self.model_dir, "{}.pth".format(name)) 55 | if isinstance(self.net, nn.DataParallel): 56 | torch.save({ 57 | 'clock': self.clock.make_checkpoint(), 58 | 'model_state_dict': self.net.module.cpu().state_dict(), 59 | 'optimizer_state_dict': self.optimizer.state_dict(), 60 | 'scheduler_state_dict': self.scheduler.state_dict(), 61 | }, save_path) 62 | else: 63 | torch.save({ 64 | 'clock': self.clock.make_checkpoint(), 65 | 'model_state_dict': self.net.cpu().state_dict(), 66 | 'optimizer_state_dict': self.optimizer.state_dict(), 67 | 'scheduler_state_dict': self.scheduler.state_dict(), 68 | }, save_path) 69 | self.net = self.net.to(self.device) 70 | 71 | def load_ckpt(self, name=None): 72 | """load checkpoint from saved checkpoint""" 73 | name = name if name == 'latest' else "ckpt_epoch{}".format(name) 74 | load_path = os.path.join(self.model_dir, "{}.pth".format(name)) 75 | if not os.path.exists(load_path): 76 | raise ValueError("Checkpoint {} not exists.".format(load_path)) 77 | 78 | checkpoint = torch.load(load_path, map_location=torch.device(self.device)) 79 | print("Checkpoint loaded from {}".format(load_path)) 80 | if isinstance(self.net, nn.DataParallel): 81 | self.net.module.load_state_dict(checkpoint['model_state_dict']) 82 | else: 83 | self.net.load_state_dict(checkpoint['model_state_dict']) 84 | self.optimizer.load_state_dict(checkpoint['optimizer_state_dict']) 85 | self.scheduler.load_state_dict(checkpoint['scheduler_state_dict']) 86 | self.clock.restore_checkpoint(checkpoint['clock']) 87 | 88 | @abstractmethod 89 | def forward(self, data): 90 | pass 91 | 92 | def update_network(self, loss_dict): 93 | """update network by back propagation""" 94 | loss = sum(loss_dict.values()) 95 | self.optimizer.zero_grad() 96 | loss.backward() 97 | self.optimizer.step() 98 | 99 | def update_learning_rate(self): 100 | """record and update learning rate""" 101 | self.train_tb.add_scalar('learning_rate', self.optimizer.param_groups[-1]['lr'], self.clock.epoch) 102 | 
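        # Passing the current epoch presumably keeps StepLR aligned with the restored
        # training clock after load_ckpt; newer PyTorch releases deprecate the explicit
        # epoch argument in favour of a bare scheduler.step().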
self.scheduler.step(self.clock.epoch) 103 | 104 | def record_losses(self, loss_dict, mode='train'): 105 | losses_values = {k: v.item() for k, v in loss_dict.items()} 106 | 107 | # record loss to tensorboard 108 | tb = self.train_tb if mode == 'train' else self.val_tb 109 | for k, v in losses_values.items(): 110 | tb.add_scalar(k, v, self.clock.step) 111 | 112 | def train_func(self, data): 113 | """one step of training""" 114 | self.net.train() 115 | 116 | outputs, losses = self.forward(data) 117 | 118 | self.update_network(losses) 119 | self.record_losses(losses, 'train') 120 | 121 | return outputs, losses 122 | 123 | def val_func(self, data): 124 | """one step of validation""" 125 | self.net.eval() 126 | 127 | with torch.no_grad(): 128 | outputs, losses = self.forward(data) 129 | 130 | self.record_losses(losses, 'validation') 131 | 132 | return outputs, losses 133 | 134 | def visualize_batch(self, data, mode, **kwargs): 135 | """write visualization results to tensorboard writer""" 136 | raise NotImplementedError -------------------------------------------------------------------------------- /python/config/__init__.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | cate_2_part_num_dict = {'Laptop': 8, 'Dishwasher': 11, 'Bowl': 4, 'Bed': 24, 'Clock': 20, 'Bottle': 10, 'Mug': 4, 'Refrigerator': 13, 'Door': 8, 'Vase': 11, 'Display': 8, 'Lamp': 43, 'TrashCan': 16, 'Keyboard': 3, 'Scissors': 5, 'Hat': 8, 'Knife': 10, 'Faucet': 11, 'Table': 53, 'Earphone': 15, 'StorageFurniture': 36, 'Chair': 58, 'Bag': 5, 'Microwave': 12} 4 | 5 | # General config 6 | def load_config(path, default_path=None): 7 | ''' Loads config file. 8 | Args: 9 | path (str): path to config file 10 | default_path (str): optional path to a default config file 11 | ''' 12 | # Load configuration from file itself 13 | with open(path, 'r') as f: 14 | cfg_special = yaml.load(f, Loader=yaml.FullLoader) 15 | 16 | # Check if we should inherit from a config 17 | inherit_from = cfg_special.get('inherit_from') 18 | 19 | # If yes, load this config first as default 20 | # If no, use the default_path 21 | if inherit_from is not None: 22 | cfg = load_config(inherit_from, default_path) 23 | elif default_path is not None: 24 | with open(default_path, 'r') as f: 25 | cfg = yaml.load(f, Loader=yaml.FullLoader) 26 | else: 27 | cfg = dict() 28 | 29 | # Include main configuration 30 | update_recursive(cfg, cfg_special) 31 | 32 | return cfg 33 | 34 | def update_recursive(dict1, dict2): 35 | ''' Update two config dictionaries recursively.
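    Values from dict2 take precedence; nested dictionaries are merged key by key
    rather than replaced wholesale.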
36 | Args: 37 | dict1 (dict): first dictionary to be updated 38 | dict2 (dict): second dictionary which entries should be used 39 | ''' 40 | for k, v in dict2.items(): 41 | if k not in dict1: 42 | dict1[k] = dict() 43 | if isinstance(v, dict): 44 | update_recursive(dict1[k], v) 45 | else: 46 | dict1[k] = v -------------------------------------------------------------------------------- /python/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data import DataLoader 2 | from dataset.dataset_vqvae import VQVAEDataset 3 | from dataset.dataset_spvae import SPVAEDataset 4 | from dataset.dataset_geovae import GeoVAEDataset 5 | from dataset.dataset_latent_geo_2levels import LatentGeo2LevelsDataset 6 | from dataset.dataset_latent_geo_VGG_2levels import LatentGeoVGG2LevelsDataset 7 | 8 | def get_dataloader(config, mode): 9 | if not (mode == 'train' or mode == 'test' or mode == 'val'): 10 | print('mode should be train test or val, but got {} instead'.format(mode)) 11 | raise NotImplementedError 12 | 13 | module = config['model']['name'] 14 | data_root = config['data']['data_root'] 15 | batch_size = config[mode]['batch_size'] 16 | is_shuffle = config[mode]['is_shuffle'] 17 | num_workers = config[mode]['num_workers'] 18 | 19 | if module == 'vqvae': 20 | category = config['data']['category'] 21 | part_name = config['data']['part_name'] 22 | height = config['data']['height'] 23 | width = config['data']['width'] 24 | dataset = VQVAEDataset( 25 | data_root, 26 | mode, 27 | category=category, 28 | part_name=part_name, 29 | height=height, 30 | width=width) 31 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle, num_workers=num_workers, drop_last=True) 32 | elif module == 'spvae': 33 | dataset = SPVAEDataset(data_root, 34 | mode, ) 35 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle, num_workers=num_workers) 36 | elif module == 'geovae': 37 | part_name = config['data']['part_name'] 38 | dataset = GeoVAEDataset(data_root, 39 | mode, 40 | part_name=part_name) 41 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle, num_workers=num_workers) 42 | elif module == 'pixelsnail_top_center' or module == 'pixelsnail_bottom_center': 43 | part_name = config['data']['part_name'] 44 | dataset = LatentGeo2LevelsDataset(data_root, 45 | mode, 46 | part_name) 47 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle, num_workers=num_workers) 48 | elif module == 'pixelsnail_top_others' or module == 'pixelsnail_bottom_others': 49 | part_name = config['data']['part_name'] 50 | dataset = LatentGeoVGG2LevelsDataset(data_root, 51 | mode, 52 | part_name) 53 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=is_shuffle, num_workers=num_workers) 54 | else: 55 | raise NotImplementedError 56 | return dataloader 57 | 58 | def get_part_names(category): 59 | if category == 'chair': 60 | part_names = ['back', 'seat', 'leg_ver_1', 'leg_ver_2', 'leg_ver_3', 'leg_ver_4', 'hand_1', 'hand_2'] 61 | elif category == 'knife': 62 | part_names = ['part1', 'part2'] 63 | elif category == 'guitar': 64 | part_names = ['part1', 'part2', 'part3'] 65 | elif category == 'cup': 66 | part_names = ['part1', 'part2'] 67 | elif category == 'car': 68 | part_names = ['body', 'left_front_wheel', 'right_front_wheel', 'left_back_wheel', 'right_back_wheel','left_mirror','right_mirror'] 69 | elif category == 'table': 70 | part_names = ['surface', 'left_leg1', 'left_leg2', 'left_leg3', 
'left_leg4', 'right_leg1', 'right_leg2', 'right_leg3', 'right_leg4'] 71 | elif category == 'plane': 72 | part_names = ['body', 'left_wing', 'right_wing', 'left_tail', 'right_tail', 'up_tail', 'down_tail', 'front_gear', 'left_gear', 'right_gear', 'left_engine1', 'right_engine1', 'left_engine2', 'right_engine2'] 73 | else: 74 | raise Exception("Error") 75 | return part_names 76 | 77 | def get_central_part_name(category): 78 | if category == 'chair': 79 | central_part_name = 'back' 80 | elif category == 'knife': 81 | central_part_name = 'part2' 82 | elif category == 'guitar': 83 | central_part_name = 'part3' 84 | elif category == 'cup': 85 | central_part_name = 'part1' 86 | elif category == 'car': 87 | central_part_name = 'body' 88 | elif category == 'table': 89 | central_part_name = 'surface' 90 | elif category == 'plane': 91 | central_part_name = 'body' 92 | else: 93 | raise Exception("Error") 94 | return central_part_name -------------------------------------------------------------------------------- /python/dataset/dataset_geoall.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision import datasets 14 | 15 | category_val_dict = { 16 | 'car': [3.14917, (- 3.1562), 21.0342, (- 11.9353)], 17 | 'car_new_reg': [3.14917, (- 3.1562), 21.0342, (- 11.9353)], 18 | 'chair': [3.14224, (- 3.14214), 14.3377, (- 6.70715)], 19 | 'plane': [3.14295, (- 3.1416), 15.4599, (- 5.63864)], 20 | 'table': [3.14515, (- 3.14813), 11.043, (- 4.80372)] 21 | } 22 | 23 | class GeometryAllPartsDataset(Dataset): 24 | """docstring for GeometryAllPartsDataset""" 25 | def __init__(self, mat_dir, part_names, vertex_num, mode): 26 | super(GeometryAllPartsDataset, self).__init__() 27 | self.mat_dir = mat_dir 28 | self.mode = mode 29 | self.vertex_num = vertex_num 30 | self.part_names = part_names 31 | self.category = os.path.basename(self.mat_dir) 32 | self.LOGR_S_MAX_MIN = category_val_dict[self.category] 33 | self.folders = np.loadtxt(os.path.join(self.mat_dir, self.mode+'.lst'), dtype=str) 34 | 35 | self.folders = sorted(self.folders) 36 | 37 | def __len__(self): 38 | return len(self.folders) 39 | 40 | def normalize(self, logr_part, s_part): 41 | (logrmax, logrmin, smax, smin) = self.LOGR_S_MAX_MIN 42 | resultmin = -0.95 43 | resultmax = 0.95 44 | logrmin = logrmin - 1e-6 45 | logrmax = logrmax + 1e-6 46 | smin = smin - 1e-6 47 | smax = smax + 1e-6 48 | 49 | rnew = (resultmax - resultmin) * (logr_part - logrmin) / (logrmax - logrmin) + resultmin 50 | snew = (resultmax - resultmin) * (s_part - smin) / (smax - smin) + resultmin 51 | return rnew, snew, logrmax, logrmin, smax, smin 52 | 53 | def __getitem__(self, idx): 54 | if torch.is_tensor(idx): 55 | idx = idx.tolist() 56 | 57 | cur_id = os.path.basename(self.folders[idx]) 58 | cur_dir = os.path.dirname(self.folders[idx]) 59 | 60 | origin_geo_inputs = [] 61 | geo_inputs = [] 62 | logrmaxs = [] 63 | logrmins = [] 64 | smaxs = [] 65 | smins = [] 66 | for part_name in self.part_names: 67 | fullname = os.path.join(cur_dir, cur_id, cur_id+'_'+part_name+'.mat') 68 | if not os.path.exists(fullname): 69 | LOGR = np.zeros((self.vertex_num, 3)) 70 | S = np.zeros((self.vertex_num, 6)) 71 | else: 72 | geo_data = sio.loadmat(fullname, verify_compressed_data_integrity=False) 73 | 
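                # Each part .mat is expected to carry the per-vertex deformation features:
                # 'fmlogdr' (3 log-rotation values) and 'fms' (6 entries of the symmetric
                # scaling/shear matrix), i.e. 9 features per vertex, presumably the ACAP
                # representation produced by the MATLAB pipeline.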
LOGR = geo_data['fmlogdr'] 74 | S = geo_data['fms'] 75 | 76 | if LOGR.shape[0] == 1: 77 | LOGR = np.squeeze(LOGR, axis=0) 78 | if S.shape[0] == 1: 79 | S = np.squeeze(S, axis=0) 80 | origin_geo_input = np.concatenate((LOGR, S), axis = 1) 81 | 82 | LOGR, S, logrmax, logrmin, smax, smin = self.normalize(LOGR, S) 83 | geo_input = np.concatenate((LOGR, S), axis = 1) 84 | 85 | geo_inputs.append(geo_input) 86 | origin_geo_inputs.append(origin_geo_input) 87 | logrmaxs.append(logrmax) 88 | logrmins.append(logrmin) 89 | smaxs.append(smax) 90 | smins.append(smin) 91 | geo_inputs = np.array(geo_inputs) 92 | origin_geo_inputs = np.array(origin_geo_inputs) 93 | logrmaxs = np.array(logrmaxs) 94 | logrmins = np.array(logrmins) 95 | smaxs = np.array(smaxs) 96 | smins = np.array(smins) 97 | return geo_inputs, origin_geo_inputs, logrmaxs, logrmins, smaxs, smins, cur_id 98 | 99 | if __name__ == '__main__': 100 | part_names = ['surface', 'left_leg1', 'left_leg2', 'left_leg3', 'left_leg4', 'right_leg1', 'right_leg2', 'right_leg3', 'right_leg4'] 101 | dataset = GeometryAllPartsDataset(mat_dir='/mnt/f/wutong/data/table', part_names=part_names, vertex_num=2168, mode='train') 102 | from torch.utils.data import DataLoader 103 | dataloader = DataLoader(dataset, batch_size=10, shuffle=False, num_workers=0, drop_last=True) 104 | for b, data in enumerate(dataloader): 105 | print(b) 106 | pass 107 | -------------------------------------------------------------------------------- /python/dataset/dataset_geovae.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision import datasets 14 | 15 | category_val_dict = { 16 | 'car': [3.14917, -3.1562, 21.0342, -11.9353], 17 | 'car_new_reg': [3.14917, -3.1562, 21.0342, -11.9353], 18 | 'chair': [3.14224, -3.14214, 14.3377, -6.70715], 19 | 'plane': [3.14295, -3.1416, 15.4599, -5.63864], 20 | 'table': [3.14515, -3.14813, 11.043, -4.80372] 21 | } 22 | 23 | class GeoVAEDataset(Dataset): 24 | """docstring for GeoVAEDataset""" 25 | def __init__(self, mat_dir, mode, part_name=None): 26 | super(GeoVAEDataset, self).__init__() 27 | 28 | self.mat_dir = mat_dir 29 | self.mode = mode 30 | self.folders = np.loadtxt(os.path.join(self.mat_dir, self.mode+'.lst'), dtype=str) 31 | self.category = os.path.basename(self.mat_dir) 32 | self.part_name = part_name 33 | self.LOGR_S_MAX_MIN = category_val_dict[self.category] 34 | 35 | if self.part_name is None: 36 | self.part_name = '' 37 | 38 | self.files = [] 39 | for folder in self.folders: 40 | self.files = self.files + glob.glob(os.path.join(self.mat_dir, folder, '*'+self.part_name+'*.mat')) 41 | self.files = [file for file in self.files if 'acap' not in file] 42 | self.files = sorted(self.files) 43 | # print(self.files) 44 | print(len(self.files)) 45 | 46 | def __len__(self): 47 | return len(self.files) 48 | 49 | def normalize(self, logr_part, s_part): 50 | logrmax, logrmin, smax, smin = self.LOGR_S_MAX_MIN 51 | resultmin = -0.95 52 | resultmax = 0.95 53 | 54 | logrmin = logrmin - 1e-6 55 | logrmax = logrmax + 1e-6 56 | smin = smin - 1e-6 57 | smax = smax + 1e-6 58 | 59 | rnew = (resultmax - resultmin) * (logr_part - logrmin) / (logrmax - logrmin) + resultmin 60 | snew = (resultmax - resultmin) * (s_part - smin) / (smax - smin) + resultmin 61 | 
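        # Both feature blocks are squashed into [-0.95, 0.95] using the fixed per-category
        # extrema from category_val_dict, so every part of a category shares one affine
        # normalization that GeoVAEAgent.normalize_back can later invert.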
return rnew, snew, logrmax, logrmin, smax, smin 62 | 63 | def __getitem__(self, idx): 64 | if torch.is_tensor(idx): 65 | idx = idx.tolist() 66 | 67 | fullname = self.files[idx] 68 | # print(fullname) 69 | geo_data = sio.loadmat(fullname, verify_compressed_data_integrity=False) 70 | try: 71 | LOGR = geo_data['fmlogdr'] 72 | S = geo_data['fms'] 73 | except: 74 | print(fullname) 75 | return 76 | 77 | if LOGR.shape[0] == 1: 78 | LOGR = np.squeeze(LOGR, axis=0) 79 | if S.shape[0] == 1: 80 | S = np.squeeze(S, axis=0) 81 | origin_geo_input = np.concatenate((LOGR, S), axis = 1) 82 | 83 | LOGR, S, logrmax, logrmin, smax, smin = self.normalize(LOGR, S) 84 | geo_input = np.concatenate((LOGR, S), axis = 1) 85 | 86 | return geo_input, origin_geo_input, logrmax, logrmin, smax, smin, fullname 87 | 88 | if __name__ == '__main__': 89 | dataset = GeoVAEDataset(mat_dir='/mnt/f/wutong/data/table', mode='train', part_name='left_leg1') 90 | from torch.utils.data import DataLoader 91 | dataloader = DataLoader(dataset, batch_size=10, shuffle=False, num_workers=0, drop_last=True) 92 | for b, data in enumerate(dataloader): 93 | print(b) 94 | pass -------------------------------------------------------------------------------- /python/dataset/dataset_latent_geo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | from collections import namedtuple 4 | 5 | import torch 6 | from torch.utils.data import Dataset 7 | from torchvision import datasets 8 | import lmdb 9 | 10 | import scipy.io as sio 11 | import numpy as np 12 | from PIL import Image 13 | import re 14 | import glob 15 | CodeRow = namedtuple('CodeRow', ['ID', 'geo_zs']) 16 | 17 | class LatentsDatasetGeoOnly(Dataset): 18 | def __init__(self, path): 19 | self.env = lmdb.open( 20 | path, 21 | max_readers=32, 22 | readonly=True, 23 | lock=False, 24 | readahead=False, 25 | meminit=False, 26 | ) 27 | 28 | if not self.env: 29 | raise IOError('Cannot open lmdb dataset', path) 30 | 31 | with self.env.begin(write=False) as txn: 32 | self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8')) 33 | 34 | def __len__(self): 35 | return self.length 36 | 37 | def __getitem__(self, index): 38 | with self.env.begin(write=False) as txn: 39 | key = str(index).encode('utf-8') 40 | 41 | row = pickle.loads(txn.get(key)) 42 | 43 | return row.ID, torch.from_numpy(row.geo_zs) 44 | -------------------------------------------------------------------------------- /python/dataset/dataset_latent_geo_2levels.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision import datasets 14 | 15 | class LatentGeo2LevelsDataset(Dataset): 16 | """docstring for LatentGeo2LevelsDataset""" 17 | def __init__(self, mat_dir, mode, part_name=None): 18 | super(LatentGeo2LevelsDataset, self).__init__() 19 | 20 | self.mat_dir = mat_dir 21 | self.mode = mode 22 | self.folders = np.loadtxt(os.path.join(self.mat_dir, self.mode+'.lst'), dtype=str) 23 | self.part_name = part_name 24 | if self.part_name is None: 25 | self.part_name = '' 26 | self.files = [] 27 | for folder in self.folders: 28 | self.files = self.files + glob.glob(os.path.join(self.mat_dir, folder, '*'+self.part_name+'.mat')) 29 | self.files = [file for file in self.files 
if 'acap' not in file] 30 | self.files = sorted(self.files) 31 | print(len(self.files)) 32 | 33 | def __len__(self): 34 | return len(self.files) 35 | 36 | def __getitem__(self, idx): 37 | if torch.is_tensor(idx): 38 | idx = idx.tolist() 39 | 40 | fullname = self.files[idx] 41 | # print(fullname) 42 | data_dict = sio.loadmat(fullname, verify_compressed_data_integrity=False) 43 | geo_z = data_dict['geo_z'] 44 | id_ts = data_dict['id_ts'] 45 | id_bs = data_dict['id_bs'] 46 | 47 | return geo_z, id_ts, id_bs, fullname 48 | 49 | if __name__ == '__main__': 50 | from torch.utils.data import DataLoader 51 | data_root = './car_latents' 52 | category = 'car' 53 | part_name = 'body' 54 | height = 256 55 | width = 256 56 | parallel = 'False' 57 | mode = 'train' 58 | batch_size = 6 59 | dataset = LatentGeo2LevelsDataset( 60 | data_root, 61 | mode, 62 | part_name=part_name) 63 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=True) 64 | for i, (geo_z, id_ts, id_bs, fullname) in enumerate(dataloader): 65 | if torch.isnan(geo_z).any(): 66 | print(fullname) -------------------------------------------------------------------------------- /python/dataset/dataset_latent_geo_VGG_2levels.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision import datasets 14 | 15 | class LatentGeoVGG2LevelsDataset(Dataset): 16 | """docstring for LatentGeo2LevelsDataset""" 17 | def __init__(self, mat_dir, mode, part_name=None): 18 | super(LatentGeoVGG2LevelsDataset, self).__init__() 19 | 20 | self.mat_dir = mat_dir 21 | self.mode = mode 22 | self.folders = np.loadtxt(os.path.join(self.mat_dir, self.mode+'.lst'), dtype=str) 23 | self.part_name = part_name 24 | if self.part_name is None: 25 | self.part_name = '' 26 | self.files = [] 27 | for folder in self.folders: 28 | self.files = self.files + glob.glob(os.path.join(self.mat_dir, folder, '*'+self.part_name+'*.mat')) 29 | self.files = [file for file in self.files if 'acap' not in file] 30 | self.files = sorted(self.files) 31 | print(len(self.files)) 32 | 33 | def __len__(self): 34 | return len(self.files) 35 | 36 | def __getitem__(self, idx): 37 | if torch.is_tensor(idx): 38 | idx = idx.tolist() 39 | 40 | fullname = self.files[idx] 41 | # print(fullname) 42 | data_dict = sio.loadmat(fullname, verify_compressed_data_integrity=False) 43 | geo_z = data_dict['geo_z'] 44 | id_ts = data_dict['id_ts'] 45 | id_bs = data_dict['id_bs'] 46 | central_vggs = data_dict['central_vggs'] 47 | 48 | return geo_z, id_ts, id_bs, central_vggs, fullname 49 | -------------------------------------------------------------------------------- /python/dataset/dataset_spvae.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision import datasets 14 | 15 | class SPVAEDataset(Dataset): 16 | """docstring for SPVAEDataset""" 17 | def __init__(self, mat_dir, mode): 18 | super(SPVAEDataset, self).__init__() 19 | 20 | self.mat_dir = mat_dir 21 | self.mode = mode 22 | 
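        # <mode>.lst (train/val/test) lists the model folders of this split; each folder is
        # expected to contain a geo_zs.mat written by extract_latents_geo_only_all_parts.py,
        # which this dataset loads as-is.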
self.folders = np.loadtxt(os.path.join(self.mat_dir, self.mode+'.lst'), dtype=str) 23 | self.files = [] 24 | for folder in self.folders: 25 | self.files = self.files + glob.glob(os.path.join(self.mat_dir, folder, 'geo_zs.mat')) 26 | self.files = [file for file in self.files if 'acap' not in file] 27 | self.files = sorted(self.files) 28 | # print(self.files) 29 | print(len(self.files)) 30 | 31 | def __len__(self): 32 | return len(self.files) 33 | 34 | def normalize(self, logr_part, s_part): 35 | resultmin = -0.95 36 | resultmax = 0.95 37 | 38 | logrmin = logr_part.min() 39 | logrmin = logrmin - 1e-6 40 | logrmax = logr_part.max() 41 | logrmax = logrmax + 1e-6 42 | 43 | smin = s_part.min() 44 | smin = smin - 1e-6 45 | smax = s_part.max() 46 | smax = smax + 1e-6 47 | 48 | rnew = (resultmax - resultmin) * (logr_part - logrmin) / (logrmax - logrmin) + resultmin 49 | snew = (resultmax - resultmin) * (s_part - smin) / (smax - smin) + resultmin 50 | return rnew, snew, logrmax, logrmin, smax, smin 51 | 52 | def __getitem__(self, idx): 53 | if torch.is_tensor(idx): 54 | idx = idx.tolist() 55 | 56 | fullname = self.files[idx] 57 | # print(fullname) 58 | geo_data = sio.loadmat(fullname, verify_compressed_data_integrity=False) 59 | geo_zs = geo_data['geo_zs'] 60 | 61 | return geo_zs, fullname 62 | -------------------------------------------------------------------------------- /python/dataset/dataset_vqvae.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision.utils import save_image, make_grid 14 | from torchvision import datasets, transforms, utils 15 | from Augmentor.Operations import Distort 16 | 17 | class VQVAEDataset(Dataset): 18 | """docstring for ImageGeometryDataset""" 19 | def __init__(self, image_dir, mode, category=None, part_name=None, height=256, width=256): 20 | super(VQVAEDataset, self).__init__() 21 | 22 | self.image_dir = image_dir 23 | self.mode = mode 24 | self.category = category 25 | self.part_name = part_name 26 | self.height = int(height) 27 | self.width = int(width) 28 | if self.part_name is None: 29 | self.part_name = '' 30 | if self.mode == 'train' or self.mode == 'val' or self.mode == 'test': 31 | self.distort_aug = Distort(probability=1, grid_height=3, grid_width=4, magnitude=0) 32 | 33 | self.transform = self.get_transform(self.mode, self.height*3, self.width*4, self.category) 34 | self.folders = np.loadtxt(os.path.join(self.image_dir, self.mode+'.lst'), dtype=str) 35 | 36 | self.files = [] 37 | for folder in self.folders: 38 | no_patch_files = list( 39 | set(glob.glob(os.path.join(self.image_dir, folder, '*.png'))) - set(glob.glob(os.path.join(self.image_dir, folder, '*patch*.png'))) 40 | ) 41 | no_patch_files = [filename for filename in no_patch_files if self.part_name in filename] 42 | self.files = self.files + no_patch_files 43 | # if self.part_name is not None: 44 | # self.files = [filename for filename in self.files if self.part_name in filename] 45 | self.files = sorted(self.files) 46 | print('model num: {}'.format(len(self.files))) 47 | 48 | self.H_begin = [0, 256, 256, 256, 256, 512] 49 | self.W_begin = [256, 0, 256, 512, 768, 256] 50 | 51 | 52 | def __len__(self): 53 | return len(self.files) 54 | 55 | def get_transform(self, mode, height, width, category): 
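        # Car textures are plain RGB, so only three mean/std values are needed; the other
        # categories keep an alpha mask as a fourth channel (see the channel slicing in
        # __getitem__ below), hence the 4-value normalization.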
56 | if category == 'car': 57 | mean = [0.5, 0.5, 0.5] 58 | std = [0.5, 0.5, 0.5] 59 | else: 60 | mean = [0.5, 0.5, 0.5, 0.5] 61 | std = [0.5, 0.5, 0.5, 0.5] 62 | 63 | if mode == 'train' or 'val': 64 | transform = transforms.Compose( 65 | [ 66 | transforms.Resize((height, width)), 67 | transforms.CenterCrop((height, width)), 68 | # data augmentation 69 | # transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1), 70 | transforms.ToTensor(), 71 | transforms.Normalize(mean, std), 72 | ] 73 | ) 74 | elif mode == 'test': 75 | transform = transforms.Compose( 76 | [ 77 | transforms.Resize((height, width)), 78 | transforms.CenterCrop((height, width)), 79 | # data augmentation 80 | # transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3, hue=0.3), 81 | transforms.ToTensor(), 82 | transforms.Normalize(mean, std), 83 | ] 84 | ) 85 | return transform 86 | 87 | def __getitem__(self, idx): 88 | if torch.is_tensor(idx): 89 | idx = idx.tolist() 90 | 91 | filename = self.files[idx] 92 | basename = os.path.basename(filename) 93 | 94 | image = Image.open(filename) 95 | 96 | if self.mode == 'train' or self.mode == 'val': 97 | np_image = np.array(image) 98 | distorted_image = np.zeros((image.size[1], image.size[0], np_image.shape[2])) 99 | for i in range(6): 100 | patch = Image.fromarray(np.uint8(np_image[self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256, :])) 101 | distorted_patch = self.distort_aug.perform_operation([patch]) 102 | # distorted_image[0].save(os.path.join('.', basename)) 103 | distorted_image[self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256, :] = np.array(distorted_patch[0]) 104 | image = Image.fromarray(np.uint8(distorted_image)) 105 | 106 | np_image = np.array(image) 107 | np_image.setflags(write=1) 108 | 109 | if self.category == 'car': 110 | image = Image.fromarray(np.uint8(np_image[:, :, 0:3])) 111 | else: 112 | # np_image[:, :, 3] = np_image[:, :, 3]/255 113 | # np_image[:, :, 0] = np.multiply(np_image[:, :, 0], np_image[:, :, 3]) 114 | # np_image[:, :, 1] = np.multiply(np_image[:, :, 1], np_image[:, :, 3]) 115 | # np_image[:, :, 2] = np.multiply(np_image[:, :, 2], np_image[:, :, 3]) 116 | # np_image[:, :, 3] = np_image[:, :, 3]*255 117 | image = Image.fromarray(np.uint8(np_image[:, :, 0:4])) 118 | 119 | if self.transform is not None: 120 | image = self.transform(image) 121 | # save_image(image, os.path.join('.', basename), normalize=True, range=(-1, 1)) 122 | if self.category == 'car': 123 | patches = torch.zeros(6, 3, self.height, self.width) 124 | else: 125 | patches = torch.zeros(6, 4, self.height, self.width) 126 | basenames = [] 127 | for i in range(6): 128 | patches[i, :, :, :] = image[:, self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256] 129 | basenames.append('{}_patch{}.png'.format(basename.split('.')[0], str(i))) 130 | # save_image(make_grid(patches), os.path.join('.', basename)) 131 | 132 | return patches, basenames 133 | 134 | if __name__ == '__main__': 135 | pass -------------------------------------------------------------------------------- /python/extract_latents_geo_only_all_parts.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import pickle 4 | from collections import namedtuple 5 | 6 | import numpy as np 7 | import scipy.io as sio 8 | import torch 9 | import torchvision.transforms.functional as F 10 | from PIL import Image 11 | from torch.utils.data import DataLoader 12 | from torchvision 
import transforms 13 | from tqdm import tqdm 14 | 15 | from dataset import get_part_names 16 | from dataset.dataset_geoall import GeometryAllPartsDataset 17 | from config import load_config 18 | from networks import get_network 19 | 20 | 21 | def extract_latents_geo(loader, geo_models, args): 22 | index = 0 23 | pbar = tqdm(loader) 24 | 25 | for i, (geo_inputs, origin_geo_inputs, logrmaxs, logrmins, smaxs, smins, filenames) in enumerate(pbar): 26 | geo_inputs = geo_inputs.to(device).float().contiguous() 27 | for j in range(geo_inputs.shape[0]): 28 | filename = filenames[j] 29 | 30 | 31 | code_mat_dir = os.path.join(args.mat_dir, filename, 'code.mat') 32 | code_mat = sio.loadmat(code_mat_dir, verify_compressed_data_integrity=False) 33 | code_mat = code_mat['code'] 34 | 35 | geo_zs_all_parts = [] 36 | for k in range(len(args.part_names)): 37 | geo_zs, geo_outputs, _, _ = geo_models[k](geo_inputs[j:j+1, k, :, :]) 38 | geo_zs = geo_zs.detach().cpu().numpy() 39 | geo_zs = np.concatenate([geo_zs, np.expand_dims(code_mat[k, :].flatten(), axis=0)], axis=1) 40 | geo_zs_all_parts.append(geo_zs) 41 | filename = filenames[j] 42 | geo_zs_all_parts = np.array(geo_zs_all_parts) 43 | geo_zs_all_parts = geo_zs_all_parts.transpose(1, 0, 2) 44 | 45 | 46 | data_dict = { 47 | 'geo_zs': geo_zs_all_parts[j:j+1, :] 48 | } 49 | sub_dir = os.path.join(args.save_path, '{}'.format(filename)) 50 | if not os.path.exists(sub_dir): 51 | os.makedirs(sub_dir) 52 | sio.savemat(os.path.join(sub_dir, 'geo_zs.mat'), data_dict) 53 | index += 1 54 | pbar.set_description('inserted {}: {} {}'.format(index, filename, geo_zs_all_parts[j:j+1, :].shape)) 55 | 56 | if __name__ == '__main__': 57 | parser = argparse.ArgumentParser() 58 | parser.add_argument('--mat_dir', type=str, required=True) 59 | parser.add_argument('--category', type=str, required=True) 60 | parser.add_argument('--vertex_num', type=int, required=True) 61 | parser.add_argument('--mode', type=str, required=True) 62 | 63 | parser.add_argument('--geovae_yaml', type=str, required=True) 64 | parser.add_argument('--geovae_ckpt_dir', type=str, required=True) 65 | 66 | parser.add_argument('--device', type=int, default=-1) 67 | parser.add_argument('--save_path', type=str, required=True) 68 | args = parser.parse_args() 69 | 70 | part_names = get_part_names(args.category) 71 | args.part_names = part_names 72 | num2device_dict = {-1: 'cpu', 0: 'cuda:0', 1: 'cuda:1', 2: 'cuda:2', 3: 'cuda:3'} 73 | device = num2device_dict[args.device] 74 | args.device = device 75 | part_names = get_part_names(args.category) 76 | 77 | geovae_config = load_config(args.geovae_yaml) 78 | geovae_config['train']['device'] = args.device 79 | 80 | # geometry dataset 81 | geo_dataset = GeometryAllPartsDataset(args.mat_dir, 82 | part_names=part_names, 83 | vertex_num=args.vertex_num, 84 | mode=args.mode 85 | ) 86 | geo_loader = DataLoader(geo_dataset, batch_size=1, shuffle=False, num_workers=1) 87 | 88 | geo_models = [] 89 | for i, part_name in enumerate(part_names): 90 | # geo 91 | geo_model = get_network(geovae_config).to(args.device) 92 | geo_model = geo_model.float() 93 | 94 | if i > 0: 95 | part_name = 'leg' 96 | 97 | # load ckpt 98 | geo_ckpt = os.path.join(args.geovae_ckpt_dir, part_name, 'latest1.pth') 99 | print('loading {}'.format(geo_ckpt)) 100 | ckpt_dict = torch.load(geo_ckpt, map_location=torch.device(device)) 101 | geo_model.load_state_dict(ckpt_dict['model_state_dict']) 102 | geo_model.eval() 103 | geo_models.append(geo_model) 104 | 105 | if not os.path.exists(args.save_path): 106 | 
os.mkdir(args.save_path) 107 | 108 | args.device = device 109 | extract_latents_geo(geo_loader, geo_models, args) 110 | -------------------------------------------------------------------------------- /python/networks/ModelParallel.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import threading 4 | 5 | 6 | def distribute_module(module, device): 7 | return module.cuda(device) 8 | 9 | 10 | def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): 11 | assert len(modules) == len(inputs) 12 | if kwargs_tup is not None: 13 | assert len(kwargs_tup) == len(modules) 14 | else: 15 | kwargs_tup = ({},) * len(modules) 16 | if devices is not None: 17 | assert len(modules) == len(devices) 18 | else: 19 | raise VauleError('devices is None') 20 | 21 | lock = threading.Lock() 22 | results = {} 23 | #grad_enabled = torch.is_grad_enabled() 24 | 25 | def _worker(i, module, input, kwargs, device=None): 26 | # torch.set_grad_enabled(grad_enabled) 27 | try: 28 | with torch.cuda.device(device): 29 | output = module(input) 30 | with lock: 31 | results[i] = output 32 | except Exception as e: 33 | with lock: 34 | results[i] = e 35 | 36 | if len(modules) > 1: 37 | threads = [threading.Thread(target=_worker, 38 | args=(i, module, input, kwargs, device)) 39 | for i, (module, input, kwargs, device) in 40 | enumerate(zip(modules, inputs, kwargs_tup, devices))] 41 | for thread in threads: 42 | thread.start() 43 | for thread in threads: 44 | thread.join() 45 | else: 46 | _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0]) 47 | 48 | outputs = [] 49 | for i in range(len(inputs)): 50 | output = results[i] 51 | if isinstance(output, Exception): 52 | raise output 53 | outputs.append(output) 54 | return outputs 55 | 56 | 57 | class ModelParallel(nn.Module): 58 | 59 | def __init__(self, model, device_ids=None, output_device=None): 60 | super(ModelParallel, self).__init__() 61 | 62 | if not torch.cuda.is_available(): 63 | self.module = model 64 | self.device_ids = [] 65 | return 66 | 67 | if device_ids is None: 68 | device_ids = list(range(torch.cuda.device_count())) 69 | if not hasattr(model, 'module'): 70 | raise ValueError("model does not has module attribute") 71 | if len(device_ids) < len(model.module): 72 | print('warning: number of devices is not enough for module parallel') 73 | else: 74 | device_ids = device_ids[:len(model.module)] 75 | 76 | if output_device is None: 77 | output_device = device_ids[0] 78 | self.output_device = output_device 79 | self.device_ids = device_ids 80 | self.module = model.module # module is a list 81 | self.distribute(self.module, device_ids) 82 | 83 | def forward(self, *inputs, **kwargs): 84 | if not self.device_ids: 85 | return self.module(*inputs, **kwargs) 86 | inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) 87 | 88 | if len(self.device_ids) == 1: 89 | return self.module(*inputs[0], **kwargs[0]) 90 | 91 | outputs = self.parallel_apply(self.module, inputs, kwargs) 92 | return self.gather(outputs, self.output_device) 93 | 94 | def distribute(self, module, device_ids): 95 | return [distribute_module(m, id) for m, id in zip(module, device_ids)] 96 | 97 | def scatter(self, inputs, kwargs, device_ids): 98 | if len(inputs) == 1: 99 | inputs = [inputs[0].cuda(id) for id in device_ids] 100 | else: 101 | inputs = [input.cuda(id) for input, id in zip(inputs, device_ids)] 102 | kwargs = None 103 | inputs = tuple(inputs) 104 | return inputs, kwargs 105 | 106 | def parallel_apply(self, 
replicas, inputs, kwargs): 107 | return parallel_apply(replicas, inputs, kwargs, self.device_ids) 108 | 109 | def gather(self, outputs, output_device): 110 | outputs = [output.cuda(output_device) for output in outputs] 111 | return outputs 112 | -------------------------------------------------------------------------------- /python/networks/__init__.py: -------------------------------------------------------------------------------- 1 | from networks.networks_vqvae import VQVAE 2 | from networks.networks_geovae import GeoVAE 3 | from networks.networks_spvae import SPVAE 4 | from networks.networks_pixelsnail import PixelSNAILTop, PixelSNAIL 5 | from dataset import get_part_names 6 | 7 | def get_network(config): 8 | model_name = config['model']['name'] 9 | part_num = len(get_part_names(config['data']['category'])) 10 | if model_name == 'vqvae': 11 | net = VQVAE( 12 | config['model']['in_channel'], 13 | config['model']['channel'], 14 | config['model']['n_res_block'], 15 | config['model']['n_res_channel'], 16 | config['model']['embed_dim'], 17 | config['model']['n_embed'], 18 | config['model']['decay'], 19 | config['model']['eps'], 20 | config['model']['beta'], 21 | config['model']['stride'], 22 | ) 23 | elif model_name == 'geovae': 24 | net = GeoVAE( 25 | config['model']['geo_hidden_dim'], 26 | config['model']['ref_mesh_mat'], 27 | config['train']['device'] 28 | ) 29 | elif model_name == 'spvae': 30 | net = SPVAE( 31 | config['model']['geo_hidden_dim'], 32 | part_num=part_num 33 | ) 34 | elif model_name == 'pixelsnail_top_center' or \ 35 | model_name == 'pixelsnail_top_others': 36 | net = PixelSNAILTop(shape=[config['model']['shape']*6, config['model']['shape']], 37 | n_class=config['model']['n_class'], 38 | channel=config['model']['channel'], 39 | kernel_size=config['model']['kernel_size'], 40 | n_block=config['model']['n_block'], 41 | n_res_block=config['model']['n_res_block'], 42 | res_channel=config['model']['res_channel'], 43 | attention=config['model']['attention'], 44 | dropout=config['model']['dropout'], 45 | n_cond_res_block=config['model']['n_cond_res_block'], 46 | cond_res_channel=config['model']['cond_res_channel'], 47 | cond_res_kernel=config['model']['cond_res_kernel'], 48 | n_out_res_block=config['model']['n_out_res_block'], 49 | n_condition_dim=config['model']['n_condition_dim'], 50 | n_condition_class=config['model']['n_condition_class'], 51 | n_condition_sub_dim=config['model']['n_condition_sub_dim'] 52 | ) 53 | elif model_name == 'pixelsnail_bottom_center' or \ 54 | model_name == 'pixelsnail_bottom_others': 55 | net = PixelSNAIL( 56 | shape=[config['model']['shape']*6, config['model']['shape']], 57 | n_class=config['model']['n_class'], 58 | channel=config['model']['channel'], 59 | kernel_size=config['model']['kernel_size'], 60 | n_block=config['model']['n_block'], 61 | n_res_block=config['model']['n_res_block'], 62 | res_channel=config['model']['res_channel'], 63 | attention=config['model']['attention'], 64 | dropout=config['model']['dropout'], 65 | n_cond_res_block=config['model']['n_cond_res_block'], 66 | cond_res_channel=config['model']['cond_res_channel'], 67 | cond_res_kernel=config['model']['cond_res_kernel'], 68 | n_out_res_block=config['model']['n_out_res_block'], 69 | n_condition_class=config['model']['n_condition_class'] 70 | ) 71 | else: 72 | raise ValueError 73 | return net 74 | 75 | def set_requires_grad(nets, requires_grad=False): 76 | """Set requies_grad=Fasle for all the networks to avoid unnecessary computations 77 | Parameters: 78 | nets (network list) -- 
a list of networks 79 | requires_grad (bool) -- whether the networks require gradients or not 80 | """ 81 | if not isinstance(nets, list): 82 | nets = [nets] 83 | for net in nets: 84 | if net is not None: 85 | for param in net.parameters(): 86 | param.requires_grad = requires_grad 87 | -------------------------------------------------------------------------------- /python/networks/networks_spvae.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | from torch.nn import functional as F 4 | import scipy.io as sio 5 | import numpy as np 6 | import math 7 | from typing import List, Callable, Union, Any, TypeVar, Tuple 8 | # from torch import tensor as Tensor 9 | Tensor = TypeVar('torch.tensor') 10 | 11 | class SPVAEEncoder(nn.Module): 12 | """docstring for SPVAEEncoder""" 13 | def __init__(self, feat_len): 14 | super(SPVAEEncoder, self).__init__() 15 | self.feat_len = feat_len 16 | 17 | self.mlp1 = torch.nn.Linear(feat_len, 1024) 18 | self.bn1 = nn.BatchNorm1d(num_features=1024) 19 | self.leakyrelu1 = torch.nn.LeakyReLU() 20 | 21 | self.mlp2 = torch.nn.Linear(1024, 512) 22 | self.bn2 = nn.BatchNorm1d(num_features=512) 23 | self.leakyrelu2 = torch.nn.LeakyReLU() 24 | 25 | self.mlp3 = torch.nn.Linear(512, 256) 26 | self.bn3 = nn.BatchNorm1d(num_features=256) 27 | self.leakyrelu3 = torch.nn.LeakyReLU() 28 | 29 | self.mlp_mu = torch.nn.Linear(256, 128) 30 | self.sigmoid_mu = torch.nn.Sigmoid() 31 | 32 | self.mlp_logvar = torch.nn.Linear(256, 128) 33 | 34 | def forward(self, featurein): 35 | featureout = self.leakyrelu1(self.bn1(self.mlp1(featurein))) 36 | featureout = self.leakyrelu2(self.bn2(self.mlp2(featureout))) 37 | featureout = self.mlp3(featureout) 38 | mu = self.sigmoid_mu(self.mlp_mu(featureout)) 39 | logvar = self.mlp_logvar(featureout) 40 | return mu, logvar 41 | 42 | class SPVAEDecoder(nn.Module): 43 | """docstring for SPVAEDecoder""" 44 | def __init__(self, feat_len): 45 | super(SPVAEDecoder, self).__init__() 46 | self.feat_len = feat_len 47 | 48 | self.mlp1 = torch.nn.Linear(128, 256) 49 | self.bn1 = nn.BatchNorm1d(num_features=256) 50 | self.leakyrelu1 = torch.nn.LeakyReLU() 51 | 52 | self.mlp2 = torch.nn.Linear(256, 512) 53 | self.bn2 = nn.BatchNorm1d(num_features=512) 54 | self.leakyrelu2 = torch.nn.LeakyReLU() 55 | 56 | self.mlp3 = torch.nn.Linear(512, 1024) 57 | self.bn3 = nn.BatchNorm1d(num_features=1024) 58 | self.leakyrelu3 = torch.nn.LeakyReLU() 59 | 60 | self.mlp4 = torch.nn.Linear(1024, self.feat_len) 61 | self.tanh = torch.nn.Tanh() 62 | 63 | def forward(self, featurein): 64 | featureout = self.leakyrelu1(self.bn1(self.mlp1(featurein))) 65 | featureout = self.leakyrelu2(self.bn2(self.mlp2(featureout))) 66 | featureout = self.leakyrelu3(self.bn3(self.mlp3(featureout))) 67 | featureout = self.tanh(self.mlp4(featureout)) 68 | return featureout 69 | 70 | 71 | class SPVAE(nn.Module): 72 | """docstring for SPVAE""" 73 | def __init__(self, 74 | geo_hidden_dim=64, 75 | part_num=7, 76 | device='cpu'): 77 | super(SPVAE, self).__init__() 78 | self.geo_hidden_dim = geo_hidden_dim 79 | self.part_num = part_num 80 | self.feat_len = self.part_num*(self.part_num*2+9+self.geo_hidden_dim) 81 | self.encoder = SPVAEEncoder(feat_len=self.feat_len) 82 | self.decoder = SPVAEDecoder(feat_len=self.feat_len) 83 | 84 | def reparameterize(self, mu: Tensor, logvar: Tensor) -> Tensor: 85 | """ 86 | Reparameterization trick to sample from N(mu, var) from 87 | N(0,1). 
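Computes z = mu + eps * std, where std = exp(0.5 * logvar) and eps is sampled from N(0, 1).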
88 | :param mu: (Tensor) Mean of the latent Gaussian [B x D] 89 | :param logvar: (Tensor) Log-variance of the latent Gaussian [B x D] 90 | :return: (Tensor) [B x D] 91 | """ 92 | std = torch.exp(0.5 * logvar) 93 | eps = torch.randn_like(std) 94 | return eps * std + mu 95 | 96 | def forward(self, input: Tensor, **kwargs) -> List[Tensor]: 97 | mu, log_var = self.encoder(input) 98 | z = self.reparameterize(mu, log_var) 99 | return z, self.decoder(z), mu, log_var 100 | 101 | def loss_function(self, 102 | *args, 103 | kld_weight=0.001) -> dict: 104 | """ 105 | Computes the VAE loss function. 106 | KL(N(\mu, \sigma), N(0, 1)) = \log \frac{1}{\sigma} + \frac{\sigma^2 + \mu^2}{2} - \frac{1}{2} 107 | :param args: 108 | :param kwargs: 109 | :return: 110 | """ 111 | recons = args[0] 112 | input = args[1] 113 | mu = args[2] 114 | log_var = args[3] 115 | 116 | # kld_weight = kwargs['M_N'] # Account for the minibatch samples from the dataset 117 | recons_loss = F.mse_loss(recons, input) 118 | 119 | 120 | kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mu ** 2 - log_var.exp(), dim = 1), dim = 0) 121 | 122 | loss = recons_loss + kld_weight * kld_loss 123 | return {'loss': loss, 'Reconstruction_Loss':recons_loss, 'KLD':kld_loss} -------------------------------------------------------------------------------- /python/preprocess/split_dataset.py: -------------------------------------------------------------------------------- 1 | # python code/preprocess/split_dataset.py ./data/1_Quantized/Table/ --r_val 0.05 --r_test 0.05 2 | import argparse 3 | import os 4 | import random 5 | import glob 6 | 7 | parser = argparse.ArgumentParser( 8 | description='Split data into train, test and validation sets.') 9 | parser.add_argument('in_folder', type=str, 10 | help='Input folder where data is stored.') 11 | 12 | parser_nval = parser.add_mutually_exclusive_group(required=True) 13 | parser_nval.add_argument('--n_val', type=int, 14 | help='Size of validation set.') 15 | parser_nval.add_argument('--r_val', type=float, 16 | help='Relative size of validation set.') 17 | 18 | parser_ntest = parser.add_mutually_exclusive_group(required=True) 19 | parser_ntest.add_argument('--n_test', type=int, 20 | help='Size of test set.') 21 | parser_ntest.add_argument('--r_test', type=float, 22 | help='Relative size of test set.') 23 | 24 | parser.add_argument('--shuffle', action='store_true') 25 | parser.add_argument('--seed', type=int, default=4) 26 | 27 | args = parser.parse_args() 28 | 29 | if args.seed is not None: 30 | random.seed(args.seed) 31 | 32 | all_samples = [name for name in os.listdir(args.in_folder) 33 | if os.path.isdir(os.path.join(args.in_folder, name))] 34 | # all_samples = [os.path.basename(name) for name in glob.glob(os.path.join(args.in_folder, '*npy')) 35 | # if not os.path.isdir(name)] 36 | 37 | if args.shuffle: 38 | random.shuffle(all_samples) 39 | 40 | # Number of examples 41 | n_total = len(all_samples) 42 | 43 | if args.n_val is not None: 44 | n_val = args.n_val 45 | else: 46 | n_val = int(args.r_val * n_total) 47 | 48 | if args.n_test is not None: 49 | n_test = args.n_test 50 | else: 51 | n_test = int(args.r_test * n_total) 52 | 53 | if n_total < n_val + n_test: 54 | print('Error: too few training samples.') 55 | exit() 56 | 57 | n_train = n_total - n_val - n_test 58 | 59 | assert(n_train >= 0) 60 | 61 | # Select elements 62 | train_set = all_samples[:n_train] 63 | val_set = all_samples[n_train:n_train+n_val] 64 | test_set = all_samples[n_train+n_val:] 65 | 66 | # Write to file 67 | with
open(os.path.join(args.in_folder, 'train.lst'), 'w') as f: 68 | f.write('\n'.join(train_set)) 69 | 70 | with open(os.path.join(args.in_folder, 'val.lst'), 'w') as f: 71 | f.write('\n'.join(val_set)) 72 | 73 | with open(os.path.join(args.in_folder, 'test.lst'), 'w') as f: 74 | f.write('\n'.join(test_set)) -------------------------------------------------------------------------------- /python/test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from collections import OrderedDict 4 | 5 | from tqdm import tqdm 6 | 7 | from agent import get_agent 8 | from config import load_config 9 | from dataset import get_dataloader 10 | from util.utils import cycle 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('--yaml', type=str, default='./code/yaml/chair/train_vqvae.yml', help='yaml config file') 16 | args = parser.parse_args() 17 | config = load_config(args.yaml) 18 | num2device_dict = {-1: 'cpu', 0: 'cuda:0', 1: 'cuda:1', 2: 'cuda:2', 3: 'cuda:3'} 19 | config[config['mode']]['device'] = num2device_dict[config[config['mode']]['device']] 20 | 21 | # create network and training agent 22 | tr_agent = get_agent(config) 23 | 24 | # load from checkpoint if provided 25 | # if config['train']['load_ckpt']: 26 | tr_agent.load_ckpt('latest') 27 | tr_agent.net.eval() 28 | 29 | # data 30 | test_loader = get_dataloader(config, mode='train') 31 | pbar = tqdm(test_loader) 32 | # tr_agent.save_ckpt('latest') 33 | for b, data in enumerate(pbar): 34 | imgs = data[0] 35 | # print('{} {}'.format(imgs[0, :, 3, :, :].min(), imgs[0, :, 3, :, :].max())) 36 | filenames = data[-1] 37 | # flat 38 | flat_filenames = [] 39 | 40 | for j in range(len(filenames)): 41 | flat_filenames.append(filenames[j]) 42 | filenames = flat_filenames 43 | 44 | flag = 1 45 | # flag = 0 46 | # selected_ids = ['4442b044230ac5c043dbb6421d614c0d', '46c3080551df8a62e8258fa1af480210'] 47 | # for i in range(imgs.shape[0]): 48 | # filename = filenames[i] 49 | # model_id = os.path.basename(filename).split('.')[0].split('_')[0] 50 | # # print(model_id) 51 | # if model_id in selected_ids: 52 | # flag = 1 53 | 54 | if flag == 1: 55 | print(1) 56 | # validation sss 57 | outputs, losses = tr_agent.val_func(data) 58 | tr_agent.visualize_batch(data, 'test', outputs=outputs) 59 | # tr_agent.test_func(data, 'generate', outputs=outputs) 60 | 61 | 62 | if __name__ == '__main__': 63 | main() 64 | -------------------------------------------------------------------------------- /python/train.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from collections import OrderedDict 4 | 5 | from tqdm import tqdm 6 | 7 | from agent import get_agent 8 | from config import load_config 9 | from dataset import get_dataloader 10 | from util.utils import cycle 11 | 12 | 13 | def main(): 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument('--yaml', type=str, default='./code/yaml/table/spvae.yml', help='yaml config file') 16 | args = parser.parse_args() 17 | config = load_config(args.yaml) 18 | num2device_dict = {-1: 'cpu', 0: 'cuda:0', 1: 'cuda:1', 2: 'cuda:2', 3: 'cuda:3'} 19 | config[config['mode']]['device'] = num2device_dict[config[config['mode']]['device']] 20 | 21 | # create network and training agent 22 | tr_agent = get_agent(config) 23 | 24 | # load from checkpoint if provided 25 | if config['train']['load_ckpt']: 26 | tr_agent.load_ckpt('latest') 27 | 28 | # data 29 | 
train_loader = get_dataloader(config, mode='train') 30 | val_loader = get_dataloader(config, mode='val') 31 | val_loader = cycle(val_loader) 32 | # start training 33 | clock = tr_agent.clock 34 | 35 | for e in range(clock.epoch, config['train']['epoch']): 36 | # begin iteration 37 | pbar = tqdm(train_loader) 38 | for b, data in enumerate(pbar): 39 | # train step 40 | outputs, losses = tr_agent.train_func(data) 41 | 42 | # visualize 43 | if config['train']['vis'] and clock.step % config['train']['vis_frequency'] == 0: 44 | tr_agent.visualize_batch(data, 'train', outputs=outputs) 45 | 46 | pbar.set_description("EPOCH[{}][{}]".format(e, b)) 47 | pbar.set_postfix(OrderedDict({k: v.item() for k, v in losses.items()})) 48 | 49 | # validation step 50 | if clock.step % config['val']['val_frequency'] == 0: 51 | data = next(val_loader) 52 | 53 | outputs, losses = tr_agent.val_func(data) 54 | 55 | if config['train']['vis'] and clock.step % config['train']['vis_frequency'] == 0: 56 | tr_agent.visualize_batch(data, 'val', outputs=outputs) 57 | clock.tick() 58 | 59 | # update lr by scheduler 60 | # tr_agent.update_learning_rate() 61 | clock.tock() 62 | if clock.epoch % config['train']['save_frequency'] == 0: 63 | tr_agent.save_ckpt() 64 | 65 | if clock.epoch % 10 == 0: 66 | tr_agent.save_ckpt('latest') 67 | 68 | 69 | 70 | if __name__ == '__main__': 71 | main() 72 | -------------------------------------------------------------------------------- /python/util/change_color.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pickle 4 | import re 5 | from collections import namedtuple 6 | 7 | import lmdb 8 | import numpy as np 9 | import scipy.io as sio 10 | import torch 11 | from PIL import Image 12 | from torch.utils.data import Dataset 13 | from torchvision.utils import save_image, make_grid 14 | from torchvision import datasets, transforms, utils 15 | from Augmentor.Operations import Distort 16 | 17 | 18 | class ChangeColorDataset(Dataset): 19 | """docstring for ChangeColorDataset""" 20 | def __init__(self, image_dir, mode, category=None, part_name=None, height=256, width=256): 21 | super(ChangeColorDataset, self).__init__() 22 | 23 | self.image_dir = image_dir 24 | self.mode = mode 25 | self.category = category 26 | self.part_name = part_name 27 | self.height = int(height) 28 | self.width = int(width) 29 | if self.part_name is None: 30 | self.part_name = '' 31 | if self.mode == 'train' or self.mode == 'val' or self.mode == 'test': 32 | self.distort_aug = Distort(probability=1, grid_height=3, grid_width=4, magnitude=0) 33 | 34 | self.transform = self.get_transform(self.mode, self.height*3, self.width*4, self.category) 35 | 36 | self.files = [] 37 | no_patch_files = list( 38 | set(glob.glob(os.path.join(self.image_dir, '*.png'))) - set(glob.glob(os.path.join(self.image_dir, '*patch*.png'))) 39 | ) 40 | no_patch_files = [filename for filename in no_patch_files if self.part_name in filename] 41 | self.files = no_patch_files 42 | self.files = sorted(self.files) 43 | print('model num: {}'.format(len(self.files))) 44 | 45 | self.H_begin = [0, 256, 256, 256, 256, 512] 46 | self.W_begin = [256, 0, 256, 512, 768, 256] 47 | 48 | 49 | def __len__(self): 50 | return len(self.files) 51 | 52 | def get_transform(self, mode, height, width, category): 53 | if category == 'car': 54 | mean = [0.5, 0.5, 0.5] 55 | std = [0.5, 0.5, 0.5] 56 | else: 57 | mean = [0.5, 0.5, 0.5, 0.5] 58 | std = [0.5, 0.5, 0.5, 0.5] 59 | 60 | if mode == 'train' or mode == 'val':
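# train/val: resize to the full 3x4 patch atlas, apply light ColorJitter augmentation, then normalize; the test branch below uses the same pipeline without the jitter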
61 | transform = transforms.Compose( 62 | [ 63 | transforms.Resize((height, width)), 64 | transforms.CenterCrop((height, width)), 65 | # data augmentation 66 | transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1), 67 | transforms.ToTensor(), 68 | transforms.Normalize(mean, std), 69 | ] 70 | ) 71 | elif mode == 'test': 72 | transform = transforms.Compose( 73 | [ 74 | transforms.Resize((height, width)), 75 | transforms.CenterCrop((height, width)), 76 | # data augmentation 77 | # transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.3, hue=0.3), 78 | transforms.ToTensor(), 79 | transforms.Normalize(mean, std), 80 | ] 81 | ) 82 | return transform 83 | 84 | def __getitem__(self, idx): 85 | if torch.is_tensor(idx): 86 | idx = idx.tolist() 87 | 88 | filename = self.files[idx] 89 | basename = os.path.basename(filename) 90 | 91 | image = Image.open(filename) 92 | 93 | if self.mode == 'train' or self.mode == 'val': 94 | np_image = np.array(image) 95 | distorted_image = np.zeros((image.size[1], image.size[0], np_image.shape[2])) 96 | for i in range(6): 97 | patch = Image.fromarray(np.uint8(np_image[self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256, :])) 98 | distorted_patch = self.distort_aug.perform_operation([patch]) 99 | # distorted_image[0].save(os.path.join('.', basename)) 100 | distorted_image[self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256, :] = np.array(distorted_patch[0]) 101 | image = Image.fromarray(np.uint8(distorted_image)) 102 | 103 | np_image = np.array(image) 104 | np_image.setflags(write=1) 105 | 106 | if self.category == 'car': 107 | image = Image.fromarray(np.uint8(np_image[:, :, 0:3])) 108 | else: 109 | np_image[:, :, 3] = np_image[:, :, 3]/255 110 | np_image[:, :, 0] = np.multiply(np_image[:, :, 0], np_image[:, :, 3]) 111 | np_image[:, :, 1] = np.multiply(np_image[:, :, 1], np_image[:, :, 3]) 112 | np_image[:, :, 2] = np.multiply(np_image[:, :, 2], np_image[:, :, 3]) 113 | np_image[:, :, 3] = np_image[:, :, 3]*255 114 | image = Image.fromarray(np.uint8(np_image[:, :, 0:4])) 115 | image.save(os.path.join('./temp', basename)) 116 | 117 | if self.transform is not None: 118 | image = self.transform(image) 119 | # save_image(image, os.path.join('.', basename)) 120 | if self.category == 'car': 121 | patches = torch.zeros(6, 3, self.height, self.width) 122 | else: 123 | patches = torch.zeros(6, 4, self.height, self.width) 124 | basenames = [] 125 | for i in range(6): 126 | patches[i, :, :, :] = image[:, self.H_begin[i]:self.H_begin[i]+256, self.W_begin[i]:self.W_begin[i]+256] 127 | basenames.append('{}_patch{}.png'.format(basename.split('.')[0], str(i))) 128 | # save_image(make_grid(patches), os.path.join('.', basename)) 129 | 130 | return patches, basenames 131 | 132 | if __name__ == '__main__': 133 | from torch.utils.data import DataLoader 134 | data_root = './20210425_plane_pixelsnail/body/top_16/auto_texture' 135 | category = 'car' 136 | part_name = 'body' 137 | height = 256 138 | width = 256 139 | parallel = 'False' 140 | mode = 'val' 141 | batch_size = 6 142 | 143 | dataset = ChangeColorDataset( 144 | data_root, 145 | mode, 146 | category=category, 147 | part_name=part_name, 148 | height=height, 149 | width=width) 150 | dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, drop_last=True) 151 | for i, (img, filename) in enumerate(dataloader): 152 | print(img.shape) 153 | from einops import rearrange, reduce, repeat 154 | img = rearrange(img, 'B P C H W -> (B P) C H W') 155 | 
print(img.shape) -------------------------------------------------------------------------------- /python/util/copy_dir.py: -------------------------------------------------------------------------------- 1 | # python copy_dir.py \ 2 | # --json_dir /mnt/b/wutong/222/PapersAndCode/Siggraph2020/TEXTURE/Example_SIG21_revision/2_Random/selected/car/F13_3_done \ 3 | # --shapenet_dir /mnt/sdg/wutong/65e/lmx/siga20/data/models/car/transferred_dir 4 | 5 | # python copy_dir.py \ 6 | # --json_dir /mnt/b/wutong/222/PapersAndCode/Siggraph2020/TEXTURE/Example_SIG21_revision/2_Random/selected/chair/F13_3_done \ 7 | # --shapenet_dir /mnt/sdg/wutong/65e/wutong/ShapeNetCore.v2/03001627 8 | 9 | # python copy_dir.py \ 10 | # --json_dir /mnt/b/wutong/222/PapersAndCode/Siggraph2020/TEXTURE/Example_SIG21_revision/2_Random/selected/plane/F13_3_done/f16381a160f20bc4a3b534252984039 \ 11 | # --shapenet_dir /mnt/sdg/wutong/65e/lmx/siga20/data/models/plane/transferred_dir 12 | 13 | # python copy_dir.py \ 14 | # --json_dir /mnt/b/wutong/222/PapersAndCode/Siggraph2020/TEXTURE/Example_SIG21_revision/2_Random/selected/table/F13_3_done \ 15 | # --shapenet_dir /mnt/sdg/wutong/65e/wutong/ShapeNetCore.v2/04379243 16 | 17 | import json 18 | import os 19 | import glob 20 | import shutil 21 | import argparse 22 | 23 | if __name__ == '__main__': 24 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 25 | parser.add_argument('--json_dir', type=str, required=True, default='') 26 | parser.add_argument('--shapenet_dir', type=str, required=True, default='') 27 | opt = parser.parse_args() 28 | 29 | json_dir = opt.json_dir 30 | json_files = glob.glob(os.path.join(json_dir, '*multiview.json')) 31 | 32 | for json_file in json_files: 33 | with open(json_file) as f: 34 | dist_dict = json.load(f) 35 | model_id = os.path.basename(json_file).replace('.json', '') 36 | sub_dir = os.path.join(json_dir, model_id) 37 | if not os.path.exists(sub_dir): 38 | os.makedirs(sub_dir) 39 | 40 | for i, (k, v) in enumerate(dist_dict.items()): 41 | if i <= 10: 42 | # cur_id = os.path.basename(k).split('_')[0] 43 | cur_id = os.path.basename(os.path.dirname(os.path.dirname(k))) 44 | # cur_id = os.path.basename(os.path.dirname(k)) 45 | cmd = 'cp -r {} {}'.format(os.path.join(opt.shapenet_dir, cur_id), os.path.join(sub_dir, '{}_{}').format(i, cur_id)) 46 | os.system(cmd) 47 | -------------------------------------------------------------------------------- /python/util/random_cmap.py: -------------------------------------------------------------------------------- 1 | """ 2 | Borrowed from https://github.com/delestro/rand_cmap. 3 | """ 4 | 5 | # Generate random colormap 6 | def rand_cmap(nlabels, type='bright', first_color_black=True, last_color_black=False, verbose=True): 7 | """ 8 | Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks 9 | :param nlabels: Number of labels (size of colormap) 10 | :param type: 'bright' for strong colors, 'soft' for pastel colors 11 | :param first_color_black: Option to use first color as black, True or False 12 | :param last_color_black: Option to use last color as black, True or False 13 | :param verbose: Prints the number of labels and shows the colormap. 
True or False 14 | :return: colormap for matplotlib 15 | """ 16 | from matplotlib.colors import LinearSegmentedColormap 17 | import colorsys 18 | import numpy as np 19 | 20 | if type not in ('bright', 'soft'): 21 | print ('Please choose "bright" or "soft" for type') 22 | return 23 | 24 | if verbose: 25 | print('Number of labels: ' + str(nlabels)) 26 | 27 | # Generate color map for bright colors, based on hsv 28 | if type == 'bright': 29 | randHSVcolors = [(np.random.uniform(low=0.0, high=1), 30 | np.random.uniform(low=0.2, high=1), 31 | np.random.uniform(low=0.9, high=1)) for i in range(nlabels)] 32 | 33 | # Convert HSV list to RGB 34 | randRGBcolors = [] 35 | for HSVcolor in randHSVcolors: 36 | randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2])) 37 | 38 | if first_color_black: 39 | randRGBcolors[0] = [0, 0, 0] 40 | 41 | if last_color_black: 42 | randRGBcolors[-1] = [0, 0, 0] 43 | 44 | random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) 45 | 46 | # Generate soft pastel colors, by limiting the RGB spectrum 47 | if type == 'soft': 48 | low = 0.6 49 | high = 0.95 50 | randRGBcolors = [(np.random.uniform(low=low, high=high), 51 | np.random.uniform(low=low, high=high), 52 | np.random.uniform(low=low, high=high)) for i in range(nlabels)] 53 | 54 | if first_color_black: 55 | randRGBcolors[0] = [0, 0, 0] 56 | 57 | if last_color_black: 58 | randRGBcolors[-1] = [0, 0, 0] 59 | random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) 60 | 61 | # Display colorbar 62 | if verbose: 63 | from matplotlib import colors, colorbar 64 | from matplotlib import pyplot as plt 65 | fig, ax = plt.subplots(1, 1, figsize=(15, 0.5)) 66 | 67 | bounds = np.linspace(0, nlabels, nlabels + 1) 68 | norm = colors.BoundaryNorm(bounds, nlabels) 69 | 70 | cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None, 71 | boundaries=bounds, format='%1i', orientation=u'horizontal') 72 | 73 | return random_colormap 74 | 75 | if __name__ == '__main__': 76 | cmap = rand_cmap(100) 77 | cmap._segmentdata['blue'] 78 | 79 | -------------------------------------------------------------------------------- /python/util/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import shutil 4 | 5 | 6 | class TrainClock(object): 7 | def __init__(self): 8 | self.epoch = 1 9 | self.minibatch = 0 10 | self.step = 0 11 | 12 | def tick(self): 13 | self.minibatch += 1 14 | self.step += 1 15 | 16 | def tock(self): 17 | self.epoch += 1 18 | self.minibatch = 0 19 | 20 | def make_checkpoint(self): 21 | return { 22 | 'epoch': self.epoch, 23 | 'minibatch': self.minibatch, 24 | 'step': self.step 25 | } 26 | 27 | def restore_checkpoint(self, clock_dict): 28 | self.epoch = clock_dict['epoch'] 29 | self.minibatch = clock_dict['minibatch'] 30 | self.step = clock_dict['step'] 31 | 32 | 33 | class AverageMeter(object): 34 | """Computes and stores the average and current value""" 35 | 36 | def __init__(self, name): 37 | self.name = name 38 | self.reset() 39 | 40 | def reset(self): 41 | self.val = 0 42 | self.avg = 0 43 | self.sum = 0 44 | self.count = 0 45 | 46 | def update(self, val, n=1): 47 | self.val = val 48 | self.sum += val * n 49 | self.count += n 50 | self.avg = self.sum / self.count 51 | 52 | 53 | def save_args(args, save_dir): 54 | param_path = os.path.join(save_dir, 'params.json') 55 | 56 | with open(param_path, 'w') as fp: 57 | 
json.dump(args.__dict__, fp, indent=4, sort_keys=True) 58 | 59 | 60 | def ensure_dir(path): 61 | """ 62 | create path by first checking its existence, 63 | :param paths: path 64 | :return: 65 | """ 66 | if not os.path.exists(path): 67 | os.makedirs(path) 68 | 69 | 70 | def ensure_dirs(paths): 71 | """ 72 | create paths by first checking their existence 73 | :param paths: list of path 74 | :return: 75 | """ 76 | if isinstance(paths, list) and not isinstance(paths, str): 77 | for path in paths: 78 | ensure_dir(path) 79 | else: 80 | ensure_dir(paths) 81 | 82 | 83 | def remkdir(path): 84 | """ 85 | if dir exists, remove it and create a new one 86 | :param path: 87 | :return: 88 | """ 89 | if os.path.exists(path): 90 | shutil.rmtree(path) 91 | os.makedirs(path) 92 | 93 | 94 | def cycle(iterable): 95 | while True: 96 | for x in iterable: 97 | yield x 98 | 99 | 100 | def test(): 101 | pass 102 | 103 | 104 | if __name__ == '__main__': 105 | test() 106 | -------------------------------------------------------------------------------- /python/util/visualization.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import numpy as np 4 | import cv2 5 | 6 | def merge_patches(patch_image_dir, channel): 7 | patch0_files = glob.glob(os.path.join(patch_image_dir, '*patch0.png*')) 8 | merged_dir = os.path.join(patch_image_dir, 'merged') 9 | if not os.path.exists(merged_dir): 10 | os.mkdir(merged_dir) 11 | 12 | for patch0_file in patch0_files: 13 | head_tail = os.path.split(patch0_file) 14 | head = head_tail[0] 15 | tail = head_tail[1] 16 | 17 | # if already exists, just jump over it. 18 | out_name = os.path.join(merged_dir, tail.replace('patch0', '')) 19 | if os.path.exists(out_name): 20 | continue 21 | 22 | patch0 = cv2.imread(patch0_file, cv2.IMREAD_UNCHANGED) 23 | # get filename 24 | patch1_file = os.path.join(head, tail.replace('patch0', 'patch1')) 25 | patch2_file = os.path.join(head, tail.replace('patch0', 'patch2')) 26 | patch3_file = os.path.join(head, tail.replace('patch0', 'patch3')) 27 | patch4_file = os.path.join(head, tail.replace('patch0', 'patch4')) 28 | patch5_file = os.path.join(head, tail.replace('patch0', 'patch5')) 29 | # read 30 | patch1 = cv2.imread(patch1_file, cv2.IMREAD_UNCHANGED) 31 | patch2 = cv2.imread(patch2_file, cv2.IMREAD_UNCHANGED) 32 | patch3 = cv2.imread(patch3_file, cv2.IMREAD_UNCHANGED) 33 | patch4 = cv2.imread(patch4_file, cv2.IMREAD_UNCHANGED) 34 | patch5 = cv2.imread(patch5_file, cv2.IMREAD_UNCHANGED) 35 | # indices 36 | H_begin = [256, 0, 256, 512, 768, 256] 37 | W_begin = [0, 256, 256, 256, 256, 512] 38 | patches = [] 39 | patches.append(patch0) 40 | patches.append(patch1) 41 | patches.append(patch2) 42 | patches.append(patch3) 43 | patches.append(patch4) 44 | patches.append(patch5) 45 | # merge 46 | merged_image = np.zeros((768, 1024, channel), np.uint8) 47 | for i in range(6): 48 | try: 49 | merged_image[W_begin[i]:W_begin[i]+256, H_begin[i]:H_begin[i]+256, :] = patches[i] 50 | except Exception: 51 | continue 52 | # save 53 | cv2.imwrite(out_name, merged_image) 54 | 55 | # rm 56 | # cmd = 'rm {}/*png'.format(patch_image_dir) 57 | # os.system(cmd) 58 | 59 | 60 | -------------------------------------------------------------------------------- /python/yaml/table/leg/geovae.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: geovae 4 | geo_hidden_dim: 64 5 | ref_mesh_mat: ../data/table/table_std.mat 6 | data: 7 | dataset: '' 8 | 
data_root: ../data/table 9 | category: table 10 | part_name: leg 11 | parallel: False 12 | train: 13 | device: 0 14 | model_dir: ./table_geovae/leg 15 | log_dir: ./table_geovae/leg/log 16 | load_ckpt: False 17 | epoch: 100000 18 | lr: 0.0001 19 | save_frequency: 2000 20 | lr_decay: 1 21 | lr_step_size: 1000 22 | vis: True 23 | vis_frequency: 100 24 | batch_size: 4 25 | is_shuffle: True 26 | num_workers: 4 27 | val: 28 | batch_size: 1 29 | is_shuffle: False 30 | num_workers: 1 31 | val_frequency: 100 32 | test: 33 | batch_size: 1 34 | is_shuffle: False 35 | num_workers: 1 36 | -------------------------------------------------------------------------------- /python/yaml/table/leg/pixelsnail_bottom.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: pixelsnail_bottom_others 4 | shape: 32 5 | n_class: 256 6 | channel: 256 7 | kernel_size: 5 8 | n_block: 4 9 | n_res_block: 4 10 | res_channel: 256 11 | attention: False 12 | dropout: 0 13 | n_cond_res_block: 3 14 | cond_res_channel: 256 15 | cond_res_kernel: 3 16 | n_out_res_block: 0 17 | n_condition_dim: 64 18 | n_condition_class: 256 19 | data: 20 | dataset: '' 21 | data_root: ./table_latents 22 | category: table 23 | part_name: null 24 | parallel: False 25 | train: 26 | device: 0 27 | model_dir: ./table_pixelsnail/leg/bottom 28 | log_dir: ./table_pixelsnail/leg/bottom/log 29 | load_ckpt: False 30 | epoch: 100000 31 | lr: 0.0003 32 | save_frequency: 100 33 | lr_decay: 0.999 34 | lr_step_size: 100 35 | vis: True 36 | vis_frequency: 100 37 | batch_size: 8 38 | is_shuffle: True 39 | num_workers: 1 40 | val: 41 | batch_size: 1 42 | is_shuffle: False 43 | num_workers: 1 44 | val_frequency: 100 45 | test: 46 | batch_size: 1 47 | is_shuffle: False 48 | num_workers: 1 49 | -------------------------------------------------------------------------------- /python/yaml/table/leg/pixelsnail_top.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: pixelsnail_top_others 4 | shape: 16 5 | n_class: 256 6 | channel: 256 7 | kernel_size: 5 8 | n_block: 4 9 | n_res_block: 4 10 | res_channel: 128 11 | attention: True 12 | dropout: 0.1 13 | n_cond_res_block: 3 14 | cond_res_channel: 256 15 | cond_res_kernel: 3 16 | n_out_res_block: 0 17 | n_condition_dim: 6064 18 | n_condition_class: 256 19 | n_condition_sub_dim: 16 20 | data: 21 | dataset: '' 22 | data_root: ./table_latents 23 | category: table 24 | part_name: leg 25 | parallel: False 26 | train: 27 | device: 0 28 | model_dir: ./table_pixelsnail/leg/top_16 29 | log_dir: ./table_pixelsnail/leg/top_16/log 30 | load_ckpt: False 31 | epoch: 100000 32 | lr: 0.0003 33 | save_frequency: 100 34 | lr_decay: 0.999 35 | lr_step_size: 100 36 | vis: True 37 | vis_frequency: 100 38 | batch_size: 4 39 | is_shuffle: False 40 | num_workers: 4 41 | val: 42 | batch_size: 1 43 | is_shuffle: False 44 | num_workers: 1 45 | val_frequency: 100 46 | test: 47 | batch_size: 1 48 | is_shuffle: False 49 | num_workers: 1 50 | -------------------------------------------------------------------------------- /python/yaml/table/spvae.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: spvae 4 | geo_hidden_dim: 128 5 | ref_mesh_mat: ../data/table/table_std.mat 6 | data: 7 | dataset: '' 8 | data_root: ../latent_data/table_geovae_latents 9 | category: table 10 | parallel: False 11 | train: 12 | device: -1 13 | model_dir: ./table_spvae 14 
| log_dir: ./table_spvae/log 15 | load_ckpt: False 16 | epoch: 100000 17 | lr: 0.0001 18 | save_frequency: 2000 19 | lr_decay: 1 20 | lr_step_size: 1000 21 | vis: True 22 | vis_frequency: 100 23 | batch_size: 10 24 | is_shuffle: True 25 | num_workers: 4 26 | val: 27 | batch_size: 2 28 | is_shuffle: False 29 | num_workers: 1 30 | val_frequency: 100 31 | test: 32 | batch_size: 2 33 | is_shuffle: False 34 | num_workers: 1 35 | -------------------------------------------------------------------------------- /python/yaml/table/surface/geovae.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: geovae 4 | geo_hidden_dim: 64 5 | ref_mesh_mat: ../data/table/table_std.mat 6 | data: 7 | dataset: '' 8 | data_root: ../data/table 9 | category: table 10 | part_name: surface 11 | parallel: False 12 | train: 13 | device: 1 14 | model_dir: ./table_geovae/surface 15 | log_dir: ./table_geovae/surface/log 16 | load_ckpt: False 17 | epoch: 100000 18 | lr: 0.0001 19 | save_frequency: 2000 20 | lr_decay: 1 21 | lr_step_size: 1000 22 | vis: True 23 | vis_frequency: 100 24 | batch_size: 4 25 | is_shuffle: True 26 | num_workers: 4 27 | val: 28 | batch_size: 1 29 | is_shuffle: False 30 | num_workers: 1 31 | val_frequency: 100 32 | test: 33 | batch_size: 1 34 | is_shuffle: False 35 | num_workers: 1 36 | -------------------------------------------------------------------------------- /python/yaml/table/surface/pixelsnail_bottom.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: pixelsnail_bottom_center 4 | shape: 32 5 | n_class: 256 6 | channel: 256 7 | kernel_size: 5 8 | n_block: 4 9 | n_res_block: 4 10 | res_channel: 256 11 | attention: False 12 | dropout: 0 13 | n_cond_res_block: 3 14 | cond_res_channel: 256 15 | cond_res_kernel: 3 16 | n_out_res_block: 0 17 | n_condition_dim: 64 18 | n_condition_class: 256 19 | data: 20 | dataset: '' 21 | data_root: ./table_latents 22 | category: table 23 | part_name: null 24 | parallel: False 25 | train: 26 | device: 0 27 | model_dir: ./table_pixelsnail/surface/bottom 28 | log_dir: ./table_pixelsnail/surface/bottom/log 29 | load_ckpt: False 30 | epoch: 100000 31 | lr: 0.0003 32 | save_frequency: 100 33 | lr_decay: 0.999 34 | lr_step_size: 100 35 | vis: True 36 | vis_frequency: 100 37 | batch_size: 8 38 | is_shuffle: True 39 | num_workers: 1 40 | val: 41 | batch_size: 1 42 | is_shuffle: False 43 | num_workers: 1 44 | val_frequency: 100 45 | test: 46 | batch_size: 1 47 | is_shuffle: False 48 | num_workers: 1 49 | -------------------------------------------------------------------------------- /python/yaml/table/surface/pixelsnail_top.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: pixelsnail_top_center 4 | shape: 16 5 | n_class: 256 6 | channel: 256 7 | kernel_size: 5 8 | n_block: 4 9 | n_res_block: 4 10 | res_channel: 128 11 | attention: True 12 | dropout: 0.1 13 | n_cond_res_block: 3 14 | cond_res_channel: 256 15 | cond_res_kernel: 3 16 | n_out_res_block: 0 17 | n_condition_dim: 64 18 | n_condition_class: 256 19 | n_condition_sub_dim: 16 20 | data: 21 | dataset: '' 22 | data_root: ./table_latents 23 | category: table 24 | part_name: surface 25 | parallel: False 26 | train: 27 | device: 0 28 | model_dir: ./table_pixelsnail/surface/top_16 29 | log_dir: ./table_pixelsnail/surface/top_16/log 30 | load_ckpt: True 31 | epoch: 100000 32 | lr: 0.0003 33 | 
save_frequency: 100 34 | lr_decay: 0.999 35 | lr_step_size: 100 36 | vis: True 37 | vis_frequency: 100 38 | batch_size: 6 39 | is_shuffle: False 40 | num_workers: 4 41 | val: 42 | batch_size: 1 43 | is_shuffle: False 44 | num_workers: 1 45 | val_frequency: 100 46 | test: 47 | batch_size: 1 48 | is_shuffle: False 49 | num_workers: 1 50 | -------------------------------------------------------------------------------- /python/yaml/table/vqvae.yml: -------------------------------------------------------------------------------- 1 | mode: 'train' 2 | model: 3 | name: 'vqvae' 4 | in_channel: 4 5 | channel: 128 6 | n_res_block: 2 7 | n_res_channel: 32 8 | embed_dim: 64 9 | n_embed: 256 10 | decay: 0.99 11 | eps: 0.00001 12 | beta: 0.25 13 | stride: 8 14 | alpha: 2 15 | data: 16 | dataset: '' 17 | data_root: ../data/table 18 | category: table 19 | part_name: null 20 | height: 256 21 | width: 256 22 | parallel: False 23 | train: 24 | device: 0 25 | model_dir: ./table_vqvae 26 | log_dir: ./table_vqvae/log 27 | load_ckpt: False 28 | epoch: 100000 29 | lr: 0.0003 30 | save_frequency: 10 31 | lr_decay: 0.999 32 | lr_step_size: 100 33 | vis: True 34 | vis_frequency: 100 35 | batch_size: 25 36 | is_shuffle: False 37 | num_workers: 4 38 | val: 39 | batch_size: 6 40 | is_shuffle: False 41 | num_workers: 1 42 | val_frequency: 100 43 | test: 44 | batch_size: 6 45 | is_shuffle: False 46 | num_workers: 1 47 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cached-property==1.5.2 2 | certifi==2020.6.20 3 | chardet==3.0.4 4 | cycler==0.10.0 5 | decorator==4.4.2 6 | googledrivedownloader==0.4 7 | h5py==3.1.0 8 | idna==2.10 9 | imageio==2.9.0 10 | isodate==0.6.0 11 | joblib==0.17.0 12 | kiwisolver==1.3.1 13 | llvmlite==0.34.0 14 | lmdb==1.0.0 15 | matplotlib==3.3.2 16 | networkx==2.5 17 | numba==0.51.2 18 | numpy==1.19.4 19 | opencv-python==4.4.0.46 20 | pandas==1.1.4 21 | Pillow==8.0.1 22 | pkg-resources==0.0.0 23 | plyfile==0.7.2 24 | pyparsing==2.4.7 25 | python-dateutil==2.8.1 26 | pytz==2020.4 27 | PyWavelets==1.1.1 28 | rdflib==5.0.0 29 | requests==2.24.0 30 | scikit-image==0.17.2 31 | scikit-learn==0.23.2 32 | scipy==1.5.4 33 | six==1.15.0 34 | threadpoolctl==2.1.0 35 | tifffile==2020.9.3 36 | torch==1.4.0 37 | torch-geometric==1.4.3 38 | torch-scatter==2.0.4 39 | torch-sparse==0.6.1 40 | torchvision==0.5.0 41 | tqdm==4.51.0 42 | urllib3==1.25.11 43 | --------------------------------------------------------------------------------
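A minimal usage sketch of how the YAML configs above are consumed (not a file from this repository; it assumes the snippet is run from the python/ directory and that config.load_config simply parses a YAML path into the nested dict used by train.py):

# hypothetical driver snippet; load_config and get_network are the repo's own helpers,
# the chosen config path and the printout are illustrative only
from config import load_config
from networks import get_network

config = load_config('yaml/table/spvae.yml')   # mode: 'train', model.name: spvae, geo_hidden_dim: 128
net = get_network(config)                      # dispatches on model.name inside networks/__init__.py
print('{}: {} parameters'.format(config['model']['name'], sum(p.numel() for p in net.parameters())))

The same pattern holds for the other configs: model.name selects the branch in get_network, while the train/val/test blocks are read by the agent and dataloader code.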