├── LICENSE ├── Matlab ├── ACAP.mexw64 ├── ERMS.m ├── FeatureMap.m ├── GenFeature.exe ├── GetFeatureR.m ├── InverseMap.m ├── MeshSimp.mexw64 ├── SaveObjT.mexw64 ├── batch_rename.m ├── cotlp.m ├── get_feature17_pooling_new.m ├── meshlp.mexw64 ├── preprocess.m ├── recon_from_convvertex2.m ├── recon_from_interpolation.m ├── recon_from_random.m ├── recon_script.m ├── show_error.m ├── sort_nat.m └── tree2mapping.m ├── README.md ├── img └── network.jpg └── python ├── main.py ├── meshVAE_graph2.py └── utils.py /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 IGLICT 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Matlab/ACAP.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/Matlab/ACAP.mexw64 -------------------------------------------------------------------------------- /Matlab/ERMS.m: -------------------------------------------------------------------------------- 1 | function Erms=ERMS(groundtruthfolder,testfolder) 2 | 3 | gtlist = dir([groundtruthfolder,'\*.obj']); 4 | [~, i] = sort_nat({gtlist.name}); 5 | gtlist = gtlist(i); 6 | 7 | testlist = dir([testfolder,'\*.obj']); 8 | [~, i] = sort_nat({testlist.name}); 9 | testlist = testlist(i); 10 | 11 | assert(size(gtlist,1)==size(testlist,1)); 12 | 13 | difference = zeros(size(testlist,1),1); 14 | for i =1:size(gtlist,1) 15 | [rv]=cotlp([groundtruthfolder,'\',gtlist(i).name]); 16 | [ov]=cotlp([testfolder,'\',testlist(i).name]); 17 | difference(i) = sum(sum((rv-ov).^2)); 18 | end 19 | 20 | Erms = 1000*sqrt(sum(difference))/sqrt(3*size(gtlist,1)*size(rv,1)); 21 | end -------------------------------------------------------------------------------- /Matlab/FeatureMap.m: -------------------------------------------------------------------------------- 1 | function [ fmlogdr, fms ] = FeatureMap( LOGDR, S ) 2 | %LOGDR = LOGDR'; 3 | [mlogdr, nlogdr] = size(LOGDR); 4 | [ms, ns] = size(S); 5 | edgenum = nlogdr/9; 6 | snum = ns/9; 7 | %create 8 | fmlogdr = zeros(mlogdr, edgenum*3); 9 | fms = zeros(ms, snum*6); 10 | 11 | for i = 1 : mlogdr 12 | for j = 0 : edgenum-1 13 | fmlogdr(i, j*3+1) = LOGDR(i, j*9+2); 14 | fmlogdr(i, j*3+2) = LOGDR(i, j*9+3); 15 | fmlogdr(i, j*3+3) = LOGDR(i, j*9+6); 16 | end 17 | end 18 | 19 | 20 | 
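% Descriptive note: each 3x3 block of LOGDR is antisymmetric, so only its 3
% independent off-diagonal entries are kept above; each 3x3 block of S is
% symmetric, so only its 6 unique entries (lower triangle) are kept below.
% InverseMap.m reverses this packing.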
for i = 1 : ms 21 | for j = 0 : snum-1 22 | fms(i, j*6+1) = S(i, j*9+1); 23 | fms(i, j*6+2) = S(i, j*9+2); 24 | fms(i, j*6+3) = S(i, j*9+3); 25 | fms(i, j*6+4) = S(i, j*9+5); 26 | fms(i, j*6+5) = S(i, j*9+6); 27 | fms(i, j*6+6) = S(i, j*9+9); 28 | end 29 | end 30 | 31 | end 32 | 33 | -------------------------------------------------------------------------------- /Matlab/GenFeature.exe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/Matlab/GenFeature.exe -------------------------------------------------------------------------------- /Matlab/GetFeatureR.m: -------------------------------------------------------------------------------- 1 | function [ ] = GetFeatureR( srcfolder, number ) 2 | cmdline = ['.\GenFeature.exe 16 ',srcfolder,' ', num2str(number)]; 3 | dos(cmdline); 4 | tarfvt = [srcfolder,'\fv_r.mat']; 5 | movefile('E:\SIGA2014\workspace\fv.mat',tarfvt); 6 | %movefile('F:\SIGA2014\workspace\fv.mat',tarfvt); 7 | end 8 | -------------------------------------------------------------------------------- /Matlab/InverseMap.m: -------------------------------------------------------------------------------- 1 | function [ NLOGDR, NS ] = InverseMap( fmlogdr, fms ) 2 | [mfmlogdr,nfmlogdr] = size(fmlogdr); 3 | [mfms, nfms] = size(fms); 4 | edgenum = nfmlogdr/3; 5 | snum = nfms/6; 6 | NLOGDR = zeros(mfmlogdr, edgenum*9); 7 | NS = zeros(mfms, snum*9); 8 | 9 | for i = 1 : mfmlogdr 10 | for j = 0 : edgenum-1 11 | NLOGDR(i,j*9+1) = 0; 12 | NLOGDR(i,j*9+2) = fmlogdr(i,j*3+1); 13 | NLOGDR(i,j*9+3) = fmlogdr(i,j*3+2); 14 | NLOGDR(i,j*9+4) = -fmlogdr(i,j*3+1); 15 | NLOGDR(i,j*9+5) = 0; 16 | NLOGDR(i,j*9+6) = fmlogdr(i,j*3+3); 17 | NLOGDR(i,j*9+7) = -fmlogdr(i,j*3+2); 18 | NLOGDR(i,j*9+8) = -fmlogdr(i,j*3+3); 19 | NLOGDR(i,j*9+9) = 0; 20 | end 21 | end 22 | 23 | for i = 1 : mfms 24 | for j = 0 : snum-1 25 | NS(i,j*9+1) = fms(i,j*6+1); 26 | NS(i,j*9+2) = fms(i,j*6+2); 27 | NS(i,j*9+3) = fms(i,j*6+3); 28 | NS(i,j*9+4) = fms(i,j*6+2); 29 | NS(i,j*9+5) = fms(i,j*6+4); 30 | NS(i,j*9+6) = fms(i,j*6+5); 31 | NS(i,j*9+7) = fms(i,j*6+3); 32 | NS(i,j*9+8) = fms(i,j*6+5); 33 | NS(i,j*9+9) = fms(i,j*6+6); 34 | end 35 | end 36 | 37 | end 38 | 39 | -------------------------------------------------------------------------------- /Matlab/MeshSimp.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/Matlab/MeshSimp.mexw64 -------------------------------------------------------------------------------- /Matlab/SaveObjT.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/Matlab/SaveObjT.mexw64 -------------------------------------------------------------------------------- /Matlab/batch_rename.m: -------------------------------------------------------------------------------- 1 | function batch_rename(sourcefolder, targetfolder, suffix, del) 2 | % rename file name to natural number 3 | 4 | if nargin == 1 5 | targetfolder = [sourcefolder,'\rename']; 6 | end 7 | if nargin < 3 8 | suffix = 'obj'; 9 | end 10 | if nargin < 4 11 | del = 0; 12 | end 13 | if ~exist(targetfolder, 'dir') 14 | mkdir(targetfolder); 15 | end 16 | filelist = dir([sourcefolder, '\*.', suffix]); 17 | [~, id] = sort_nat({filelist.name}); 18 | filelist = filelist(id); 19 | 
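% Copy each file, in natural sort order, to its 1-based index (1.obj, 2.obj, ...);
% source files are deleted afterwards only when del is set.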
len = length(filelist); 20 | for i = 1:len 21 | oldname = filelist(i).name; 22 | newname = [num2str(i),'.',suffix]; 23 | status = copyfile([sourcefolder,'\',oldname],[targetfolder,'\',newname]); 24 | if status == 1 25 | disp([oldname, ' is renamed as ', newname]) 26 | else 27 | disp([oldname, ' failed!']) 28 | end 29 | if del 30 | delete([sourcefolder,'\',oldname]); 31 | end 32 | end -------------------------------------------------------------------------------- /Matlab/cotlp.m: -------------------------------------------------------------------------------- 1 | function [v, f, n, L, M, VV, CotWeight, Laplace_Matrix, L_unweight] = cotlp(filename, K) 2 | % read obj file and output some information about mesh 3 | % v: vertices 4 | % f: faces 5 | % n: vertices normals 6 | % L: laplace matrix 7 | % M: no use 8 | % VV: adjacency list 9 | % CotWeight: cotweight matrix 10 | % Laplace_Matrix: transfer vertices to edges with cotweight 11 | % L_unweight: transfer vertices to edges without cotweight 12 | 13 | if nargin == 1 14 | K = 3; 15 | end 16 | [v, f, n, II, JJ, SS, AA, vv, cotweight, laplace_matrix, a, a, L_unweight] = meshlp(filename, K); 17 | v = v'; 18 | n = n'; 19 | W=sparse(II, JJ, SS); 20 | L=W; 21 | A=AA; 22 | Atmp = sparse(1:length(A),1:length(A),1./A); 23 | M=sparse(1:length(A),1:length(A),A); 24 | %L = sparse(diag(1./ A)) * W; 25 | % L = Atmp * W; 26 | VV=vv; 27 | CotWeight=cotweight'; 28 | Laplace_Matrix=laplace_matrix'; 29 | L_unweight=L_unweight'; 30 | end 31 | 32 | -------------------------------------------------------------------------------- /Matlab/get_feature17_pooling_new.m: -------------------------------------------------------------------------------- 1 | function get_feature17_pooling_new(densefolder,simmesh,tree_vector,matpath) 2 | % get ACAP feature with pooling and the convolution 3 | % FLOGRNEW,FS,neighbour1,,neighbour2,mapping,demapping 4 | if nargin < 4 5 | matpath = densefolder; 6 | end 7 | if ~exist(matpath, 'dir') 8 | mkdir(matpath) 9 | end 10 | 11 | vdensemeshlist = dir([densefolder,'\*.obj']); 12 | [~,i] = sort_nat({vdensemeshlist.name}); 13 | vdensemeshlist = vdensemeshlist(i); 14 | if ~exist([densefolder,'\fv_r.mat'],'file') 15 | % more mesh models, more memory needed! 
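% GetFeatureR.m shells out to GenFeature.exe and caches the ACAP features as
% fv_r.mat; note that it moves fv.mat from a hard-coded workspace path, which
% may need adjusting for your machine.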
16 | GetFeatureR(densefolder,length(vdensemeshlist)) 17 | end 18 | fv = load([densefolder,'\fv_r.mat']); 19 | 20 | [vdensemesh,~,~,~,~,VVdense,cotweight_dense]=cotlp([densefolder,'\',vdensemeshlist(1).name]); 21 | if nargin < 2 22 | simmesh = [densefolder,'\',vdensemeshlist(1).name]; 23 | end 24 | [vsimpmesh,~,~,~,~,VVsimp,cotweight_simp]=cotlp(simmesh); 25 | 26 | W1 = full(cotweight_dense); 27 | W2 = full(cotweight_simp); 28 | for i = 1:size(W1,1) 29 | for j = 1:size(W1,2) 30 | if W1(i,j) ~= 0 31 | W1(i,j)=1; 32 | end 33 | end 34 | end 35 | for i = 1:size(W2,1) 36 | for j = 1:size(W2,2) 37 | if W2(i,j) ~= 0 38 | W2(i,j)=1; 39 | end 40 | end 41 | end 42 | 43 | neighbour1=zeros(size(vdensemesh,1),100); 44 | neighbour2=zeros(size(vsimpmesh,1),100); 45 | maxnum=0; 46 | for i=1:size(VVdense,1) 47 | neighbour1(i,1:size(VVdense{i,:},2))=VVdense{i,:}; 48 | if size(VVdense{i,:},2)>maxnum 49 | maxnum=size(VVdense{i,:},2); 50 | end 51 | end 52 | neighbour1(:,maxnum+1:end)=[]; 53 | maxnum=0; 54 | for i=1:size(VVsimp,1) 55 | neighbour2(i,1:size(VVsimp{i,:},2))=VVsimp{i,:}; 56 | if size(VVsimp{i,:},2)>maxnum 57 | maxnum=size(VVsimp{i,:},2); 58 | end 59 | end 60 | neighbour2(:,maxnum+1:end)=[]; 61 | 62 | vdensenum = size(vdensemesh, 1); 63 | mapping = tree2mapping(tree_vector, vdensenum); 64 | 65 | 66 | [fmlogdr, fms] = FeatureMap(fv.LOGRNEW, fv.S); 67 | feature = cat(2, fms, fmlogdr); 68 | fmlogdr=permute(reshape(fmlogdr,size(fmlogdr,1),3,size(vdensemesh,1)),[1,3,2]); 69 | fms=permute(reshape(fms,size(fms,1),6,size(vdensemesh,1)),[1,3,2]); 70 | cotweight1=zeros(size(neighbour1)); 71 | cotweight2=zeros(size(neighbour2)); 72 | for i=1:size(neighbour1,1) 73 | for j=1:size(neighbour1,2) 74 | if neighbour1(i,j)>0 75 | % cotweight1(i,j)=cotweight_dense(i,neighbour1(i,j)); 76 | cotweight1(i,j)=1/length(nonzeros(neighbour1(i,:))); 77 | end 78 | end 79 | end 80 | for i=1:size(neighbour2,1) 81 | for j=1:size(neighbour2,2) 82 | if neighbour2(i,j)>0 83 | % cotweight2(i,j)=cotweight_simp(i,neighbour2(i,j)); 84 | cotweight2(i,j)=1/length(nonzeros(neighbour2(i,:))); 85 | end 86 | end 87 | end 88 | iii=1:size(fmlogdr,1); 89 | FLOGRNEW=fmlogdr(iii,:,:); 90 | FS=fms(iii,:,:); 91 | neighbour=neighbour1; 92 | neighbour1=neighbour1; 93 | neighbour2=neighbour2; 94 | mapping=mapping; 95 | feature=feature(iii,:,:); 96 | cotweight1=cotweight1; 97 | cotweight2=cotweight2; 98 | 99 | save([matpath,'\vertFeaturepoolingc.mat'],'FLOGRNEW','FS','neighbour',... 
100 | 'neighbour1','neighbour2','mapping','feature','cotweight1','cotweight2','W1','W2','-v7.3') 101 | 102 | end 103 | -------------------------------------------------------------------------------- /Matlab/meshlp.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/Matlab/meshlp.mexw64 -------------------------------------------------------------------------------- /Matlab/preprocess.m: -------------------------------------------------------------------------------- 1 | % demo preprocessing script for SCAPE 2 | data_folder = '.\dense_scape'; 3 | 4 | % rename obj 5 | dense_folder = fullfile(data_folder,'rename'); 6 | batch_rename(data_folder, dense_folder) 7 | 8 | % edge contraction 9 | simfolder = [dense_folder,'\sim']; 10 | simratio = 0.5; 11 | simtype = 1; 12 | sim_gamma = 0.005; 13 | if ~exist(simfolder,'dir') 14 | mkdir(simfolder); 15 | end 16 | oriobj = fullfile(dense_folder, '1.obj'); 17 | simobj = fullfile(simfolder,['loss',num2str(simtype),'_',num2str(sim_gamma),'.obj']); 18 | [tree,v1,~] = MeshSimp(oriobj, simobj, simratio, simtype, sim_gamma); 19 | v2 = cotlp(simobj); 20 | assert(size(v1,2)==size(v2,1)); % check simplification succeed or not 21 | 22 | % ACAP feature preparation 23 | matpath = [dense_folder,'\feature']; 24 | get_feature17_pooling_new(dense_folder, simobj, tree, matpath); 25 | -------------------------------------------------------------------------------- /Matlab/recon_from_convvertex2.m: -------------------------------------------------------------------------------- 1 | function erms = recon_from_convvertex2(groundtruthfolder,folder,step) 2 | % recon models from h5 3 | % randomh5=[folder,'\randomtest',num2str(step),'.h5']; 4 | recon_h5=[folder,'\rebuild',num2str(step),'.h5']; 5 | % [~,random_name]=fileparts(randomh5); 6 | [~,recon_name]=fileparts(recon_h5); 7 | % random_folder=[folder,'\',random_name]; 8 | recon_folder=[folder,'\',recon_name]; 9 | 10 | % if ~exist(random_folder,'dir') 11 | % mkdir(random_folder) 12 | % end 13 | if ~exist(recon_folder,'dir') 14 | mkdir(recon_folder) 15 | end 16 | 17 | % copyfile(randomah5,[random_folder,'\randomtest',num2str(step),'.h5']) 18 | copyfile(recon_h5,[recon_folder,'\recon',num2str(step),'.h5']) 19 | recon_from_random([groundtruthfolder,'\1.obj'],recon_folder,recon_folder); 20 | 21 | valid_id = h5read(recon_h5,'/valid_id'); 22 | valid_id = valid_id + 1; 23 | validtruthfolder = [groundtruthfolder, '\valid']; 24 | validreconfolder = [recon_folder,'\valid']; 25 | if ~exist(validtruthfolder, 'dir') 26 | mkdir(validtruthfolder); 27 | end 28 | if ~exist(validreconfolder, 'dir') 29 | mkdir(validreconfolder); 30 | end 31 | batch_rename(recon_folder,[recon_folder,'\rename']); 32 | for i = 1:length(valid_id) 33 | copyfile([groundtruthfolder,'\',num2str(valid_id(i)),'.obj'],[validtruthfolder,'\',num2str(valid_id(i)),'.obj']); 34 | copyfile([recon_folder,'\rename\',num2str(valid_id(i)),'.obj'],[validreconfolder,'\',num2str(valid_id(i)),'.obj']); 35 | end 36 | erms = show_error(validtruthfolder,validreconfolder); 37 | delete([recon_folder,'\rename\*.obj']); 38 | rmdir([recon_folder,'\rename']); 39 | % delete([validtruthfolder,'\*.obj']); 40 | % rmdir(validtruthfolder); 41 | 42 | end -------------------------------------------------------------------------------- /Matlab/recon_from_interpolation.m: -------------------------------------------------------------------------------- 1 | function 
recon_from_interpolation(reconfolder,groundtruthfolder,targetfolder) 2 | % reconstruct interpolation obj 3 | if nargin < 3 4 | targetfolder = reconfolder; 5 | end 6 | if ~exist(targetfolder,'dir') 7 | mkdir(targetfolder) 8 | end 9 | matlist = dir([reconfolder,'\*.h5']); 10 | for i = 1:size(matlist,1) 11 | [~,recon_name,~] = fileparts(matlist(i).name); 12 | folder = [targetfolder,'\',recon_name]; 13 | if ~exist(folder, 'dir') 14 | mkdir(folder) 15 | end 16 | copyfile([reconfolder,'\',matlist(i).name],[folder,'\',matlist(i).name]); 17 | recon_from_random([groundtruthfolder,'\1.obj'],folder,folder); 18 | end 19 | 20 | end -------------------------------------------------------------------------------- /Matlab/recon_from_random.m: -------------------------------------------------------------------------------- 1 | function [latent_z]=recon_from_random(firstfile,matfolder,workpath,name) 2 | % point feature 3 | if nargin<3 4 | workpath=[matfolder,'\..\mesha']; 5 | name='test_mesh'; 6 | elseif nargin<4 7 | name='test_mesh'; 8 | end 9 | if ~exist(workpath,'file') 10 | mkdir(workpath); 11 | end 12 | originfile = firstfile; 13 | if ischar(matfolder) 14 | matlist=dir([matfolder,'\*.h5']); 15 | else 16 | matlist=1; 17 | end 18 | NLOGDR=[]; 19 | NS=[]; 20 | latent_z=[]; 21 | for i=1:size(matlist,1) 22 | NLOGDR=[]; 23 | NS=[]; 24 | if ischar(matfolder) 25 | m.test_mesh = h5read([matfolder,'\',matlist(i).name],['/',name]); 26 | gen=permute(m.test_mesh,[3,1,2]); 27 | FLOGDR=gen(:,1:3,:); 28 | if size(gen,2)>3 29 | FS=gen(:,4:9,:); 30 | else 31 | lowbarindex=strfind(matlist(i).name,'_'); 32 | if (ismember('A',matlist(i).name)||ismember('a',matlist(i).name)) 33 | [mat_namea,~]=searchmat(matfolder); 34 | end 35 | if (ismember('B',matlist(i).name)||ismember('b',matlist(i).name)) 36 | [~,mat_namea]=searchmat(matfolder); 37 | end 38 | mat=load(mat_namea); 39 | FS=permute(mat.FS,[1,3,2]); 40 | if size(FS,1)~=size(FLOGDR,1)||(ismember('a',matlist(i).name)&&ismember('b',matlist(i).name)) 41 | FS=zeros(size(FLOGDR).*[1,2,1]); 42 | FS(:,[1,4,6],:)=ones(size(FLOGDR)); 43 | end 44 | end 45 | 46 | FLOGDR=reshape(FLOGDR,size(FLOGDR,1),size(FLOGDR,2)*size(FLOGDR,3)); 47 | FS=reshape(FS,size(FS,1),size(FS,2)*size(FS,3)); 48 | else 49 | gen=matfolder; 50 | m.latent_z=[]; 51 | FLOGDR=gen(:,:,1:3); 52 | FS=gen(:,:,4:9); 53 | end 54 | 55 | [ NLOGDR1, NS1 ] = InverseMap(FLOGDR,FS); 56 | NLOGDR=[NLOGDR;NLOGDR1]; 57 | NS=[NS;NS1]; 58 | end 59 | 60 | for i=1:size(NS,1) 61 | ACAP(originfile,[workpath,'\',sprintf('%05d',i),name,'.obj'],NLOGDR(i,:), NS(i,:)); 62 | end 63 | 64 | end -------------------------------------------------------------------------------- /Matlab/recon_script.m: -------------------------------------------------------------------------------- 1 | % demo reconstruct script 2 | groundtruthfolder = '.\dense_scape\rename'; 3 | targetfolder = 'Q:\meshpooling\release\dense_scape_39.0_3.0_meanpooling_0.5_K=3'; 4 | file_name = [targetfolder, '\result.txt']; 5 | data = dlmread(file_name); 6 | m = size(data,1); 7 | for j = 1:m 8 | if data(j,1) == 1 9 | epoch_num = j * 100; 10 | rebuild_loss = data(j,2); 11 | valid_loss = data(j,4); 12 | end 13 | end 14 | erms = recon_from_convvertex2(groundtruthfolder,targetfolder,epoch_num); 15 | -------------------------------------------------------------------------------- /Matlab/show_error.m: -------------------------------------------------------------------------------- 1 | function Erms=show_error(groundtruthfolder,testfolder,align) 2 | % show the error with the ground truth 
(RMSE) 3 | if nargin < 3 4 | align = 1; 5 | end 6 | 7 | groundtruthlist=dir([groundtruthfolder,'\*.obj']); 8 | [~,i]=sort_nat({groundtruthlist.name}); 9 | groundtruthlist=groundtruthlist(i); 10 | 11 | testlist=dir([testfolder,'\*.obj']); 12 | [~,i]=sort_nat({testlist.name}); 13 | testlist=testlist(i); 14 | 15 | assert(length(groundtruthlist)==length(testlist)) 16 | 17 | modeli = zeros(length(groundtruthlist),1); 18 | for i = 1:length(testlist) 19 | vground = cotlp([groundtruthfolder,'\',groundtruthlist(i).name]); 20 | vtest = cotlp([testfolder,'\',testlist(i).name]); 21 | if align 22 | v1_align=vground; 23 | v2_align=vtest; 24 | v1_align_center = mean(v1_align,1); 25 | v2_align_center = mean(v2_align,1); 26 | H = v2_align'*v1_align-size(v2_align,1)*v2_align_center'*v1_align_center; 27 | [U,~,V] = svd(H); 28 | R = V*U'; 29 | if det(R)<0 30 | R(:,3)=-R(:,3); 31 | end 32 | T = v1_align_center'-R*v2_align_center'; 33 | vtest=R*vtest'+repmat(T,1,size(vtest,1)); 34 | vtest=vtest'; 35 | end 36 | v_error = vtest-vground; 37 | groundtruthlist(i).v = vground; 38 | testlist(i).v = vtest; 39 | dist = sum(v_error.*v_error,2); 40 | modeli(i)=mean(sqrt(dist)); 41 | end 42 | Erms = (mean(modeli)); 43 | end -------------------------------------------------------------------------------- /Matlab/sort_nat.m: -------------------------------------------------------------------------------- 1 | function [cs,index] = sort_nat(c,mode) 2 | %sort_nat: Natural order sort of cell array of strings. 3 | % usage: [S,INDEX] = sort_nat(C) 4 | % 5 | % where, 6 | % C is a cell array (vector) of strings to be sorted. 7 | % S is C, sorted in natural order. 8 | % INDEX is the sort order such that S = C(INDEX); 9 | % 10 | % Natural order sorting sorts strings containing digits in a way such that 11 | % the numerical value of the digits is taken into account. It is 12 | % especially useful for sorting file names containing index numbers with 13 | % different numbers of digits. Often, people will use leading zeros to get 14 | % the right sort order, but with this function you don't have to do that. 15 | % For example, if C = {'file1.txt','file2.txt','file10.txt'}, a normal sort 16 | % will give you 17 | % 18 | % {'file1.txt' 'file10.txt' 'file2.txt'} 19 | % 20 | % whereas, sort_nat will give you 21 | % 22 | % {'file1.txt' 'file2.txt' 'file10.txt'} 23 | % 24 | % See also: sort 25 | 26 | % Version: 1.4, 22 January 2011 27 | % Author: Douglas M. Schwarz 28 | % Email: dmschwarz=ieee*org, dmschwarz=urgrad*rochester*edu 29 | % Real_email = regexprep(Email,{'=','*'},{'@','.'}) 30 | 31 | 32 | % Set default value for mode if necessary. 33 | if nargin < 2 34 | mode = 'ascend'; 35 | end 36 | 37 | % Make sure mode is either 'ascend' or 'descend'. 38 | modes = strcmpi(mode,{'ascend','descend'}); 39 | is_descend = modes(2); 40 | if ~any(modes) 41 | error('sort_nat:sortDirection',... 42 | 'sorting direction must be ''ascend'' or ''descend''.') 43 | end 44 | 45 | % Replace runs of digits with '0'. 46 | c2 = regexprep(c,'\d+','0'); 47 | 48 | % Compute char version of c2 and locations of zeros. 49 | s1 = char(c2); 50 | z = s1 == '0'; 51 | 52 | % Extract the runs of digits and their start and end indices. 53 | [digruns,first,last] = regexp(c,'\d+','match','start','end'); 54 | 55 | % Create matrix of numerical values of runs of digits and a matrix of the 56 | % number of digits in each run. 
57 | num_str = length(c); 58 | max_len = size(s1,2); 59 | num_val = NaN(num_str,max_len); 60 | num_dig = NaN(num_str,max_len); 61 | for i = 1:num_str 62 | num_val(i,z(i,:)) = sscanf(sprintf('%s ',digruns{i}{:}),'%f'); 63 | num_dig(i,z(i,:)) = last{i} - first{i} + 1; 64 | end 65 | 66 | % Find columns that have at least one non-NaN. Make sure activecols is a 67 | % 1-by-n vector even if n = 0. 68 | activecols = reshape(find(~all(isnan(num_val))),1,[]); 69 | n = length(activecols); 70 | 71 | % Compute which columns in the composite matrix get the numbers. 72 | numcols = activecols + (1:2:2*n); 73 | 74 | % Compute which columns in the composite matrix get the number of digits. 75 | ndigcols = numcols + 1; 76 | 77 | % Compute which columns in the composite matrix get chars. 78 | charcols = true(1,max_len + 2*n); 79 | charcols(numcols) = false; 80 | charcols(ndigcols) = false; 81 | 82 | % Create and fill composite matrix, comp. 83 | comp = zeros(num_str,max_len + 2*n); 84 | comp(:,charcols) = double(s1); 85 | comp(:,numcols) = num_val(:,activecols); 86 | comp(:,ndigcols) = num_dig(:,activecols); 87 | 88 | % Sort rows of composite matrix and use index to sort c in ascending or 89 | % descending order, depending on mode. 90 | [unused,index] = sortrows(comp); 91 | if is_descend 92 | index = index(end:-1:1); 93 | end 94 | index = reshape(index,size(c)); 95 | cs = c(index); 96 | -------------------------------------------------------------------------------- /Matlab/tree2mapping.m: -------------------------------------------------------------------------------- 1 | function real_mapping = tree2mapping(tree_vector, densenum) 2 | % This function transform a tree to a mapping matrix 3 | vertex_num = size(tree_vector, 1); 4 | total_mapping = zeros(vertex_num, 100); 5 | valid = zeros(vertex_num, 1); 6 | pooling_vertex_num = zeros(vertex_num, 1); 7 | 8 | simpnum = 0; 9 | for i = 1:vertex_num 10 | if ~tree_vector(i,1) 11 | valid(i,1) = 1; 12 | simpnum = simpnum + 1; 13 | end 14 | end 15 | 16 | for i = 1:densenum 17 | j = i; 18 | while tree_vector(j,1)~=0 19 | col_num = pooling_vertex_num(j, 1) + 1; 20 | total_mapping(j,col_num) = i; 21 | pooling_vertex_num(j,1) = pooling_vertex_num(j,1) + 1; 22 | j = tree_vector(j); 23 | end 24 | col_num = pooling_vertex_num(j, 1) + 1; 25 | total_mapping(j,col_num) = i; 26 | pooling_vertex_num(j,1) = pooling_vertex_num(j,1) + 1; 27 | end 28 | 29 | row = 1; 30 | max_pooling_num = 0; 31 | real_mapping = zeros(simpnum, 100); 32 | for i = 1:size(valid,1) 33 | if valid(i,1) 34 | for j = 1:pooling_vertex_num(i,1) 35 | real_mapping(row,j) = total_mapping(i,j); 36 | end 37 | if max_pooling_num < pooling_vertex_num(i,1) 38 | max_pooling_num = pooling_vertex_num(i,1); 39 | end 40 | row = row + 1; 41 | end 42 | end 43 | 44 | real_mapping(:,max_pooling_num+1:end) = []; 45 | 46 | end -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Mesh Variational Autoencoders with Edge Contraction Pooling 2 | Code for '[Mesh Variational Autoencoders with Edge Contraction Pooling](http://www.geometrylearning.com/meshpooling_cvprw2020.pdf)'. 3 | 4 | ![Overview](./img/network.jpg) 5 | 6 | ## Abstract 7 | 3D shape analysis is an important research topic in computer vision and graphics. 
While existing methods have generalized image-based deep learning to meshes using graph-based convolutions, the lack of an effective pooling operation restricts the learning capability of their networks. In this paper, we propose a novel pooling operation for mesh datasets with the same connectivity but different geometry, by building a mesh hierarchy using mesh simplification. For this purpose, we develop a modified mesh simplification method to avoid generating highly irregularly sized triangles. Our pooling operation effectively encodes the correspondence between coarser and finer meshes in the hierarchy. We then present a variational auto-encoder (VAE) structure with edge contraction pooling and graph-based convolutions, to explore probabilistic latent spaces of 3D surfaces and perform 3D shape generation. Our network requires far fewer parameters than the original mesh VAE and thus can handle denser models thanks to our new pooling operation and convolutional kernels. Our evaluation also shows that our method has better generalization ability and is more reliable in various applications, including shape generation and shape interpolation. 8 | 9 | ## Requirements 10 | TensorFlow 1.15 11 | 12 | ## Usage 13 | 1. Calculate the ACAP features for the shapes in the dataset and perform edge contraction for the first shape (typically the neutral pose). In the "Matlab" folder, see and run `preprocess.m`. Note: we rename all models to natural numbers (1.obj, 2.obj, ...). 14 | 2. The computed mat file will be in `[path to your data]\rename\feature`, named `vertFeaturepoolingc.mat`. Copy it to the folder containing the network Python code and replace the character `c` in the name with the name of the shape dataset, e.g. `vertFeaturepoolingscape.mat`. 15 | 3. Change into the `python` directory and run 16 | ``` 17 | python main.py --model scape --gpu 0 18 | ``` 19 | 4. After training, use `recon_script.m` to reconstruct the shapes; it also reports the RMS reconstruction error.
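5. (Optional) For shape interpolation and random generation after training, `python/main.py` ends with commented-out calls to `convmesh.interpola` and `convmesh.random_generate` that you can enable. A minimal sketch, where the checkpoint epoch, shape ids, interval and sample count are example values only:
```
ckpt = logfolder + '/convMesh.model-' + str(FLAGS.epoch_num)
# interpolate between shapes 7 and 14 in 30 steps, written to logfolder/itlp
convmesh.interpola(ckpt, 7, 14, 30, logfolder + '/itlp')
# sample 100 shapes from the latent prior, written to logfolder/random
convmesh.random_generate(ckpt, 100, logfolder + '/random')
```
Note that `random_generate` in the provided code refers to `vae_ablity` without the `self.` prefix, so that line may need a one-line fix before use. The resulting `.h5` files can be turned back into meshes with `recon_from_random.m` or `recon_from_interpolation.m`.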
20 | 21 | ## Citation 22 | If you use the code in your research, please cite: 23 | ``` 24 | @inproceedings{yuan2020mesh, 25 | title={Mesh variational autoencoders with edge contraction pooling}, 26 | author={Yuan, Yu-Jie and Lai, Yu-Kun and Yang, Jie and Duan, Qi and Fu, Hongbo and Gao, Lin}, 27 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops}, 28 | pages={274--275}, 29 | year={2020} 30 | } 31 | ``` -------------------------------------------------------------------------------- /img/network.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/IGLICT/MeshPooling/9e59cdf906cc47e87aa7ab16363418d66cac465d/img/network.jpg -------------------------------------------------------------------------------- /python/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | import meshVAE_graph2 as model 4 | 5 | FLAGS = tf.flags.FLAGS 6 | tf.flags.DEFINE_string("gpu", '0', "gpu id") 7 | tf.flags.DEFINE_string("model", 'dense_scape', "model name") 8 | tf.flags.DEFINE_float("lambda_generation", 40, "lambda of generation error") 9 | tf.flags.DEFINE_float("lambda_latent", 3, "lambda of KL divergence") 10 | tf.flags.DEFINE_float("lambda_r2", 1, "lambda of l2 regularization") 11 | tf.flags.DEFINE_float("lr", 0.001, "learning rate") 12 | tf.flags.DEFINE_integer("epoch_num", 8000, "epoch number") 13 | tf.flags.DEFINE_integer("hidden_dim", 128, "latent dimension") 14 | tf.flags.DEFINE_boolean("use_pooling", True, "use pooling or not") 15 | tf.flags.DEFINE_boolean("max_pooling", False, "max pooling or mean pooling") 16 | tf.flags.DEFINE_float("vae_ablity", 0.5, "ratio of models used for validation") 17 | tf.flags.DEFINE_integer("K", 3, "graph convolution parameter") 18 | tf.flags.DEFINE_integer("batch_size", 50, "batch size") 19 | 20 | feature_file = 'vertFeaturepooling' + FLAGS.model + '.mat' 21 | 22 | if FLAGS.use_pooling and FLAGS.max_pooling: 23 | logfolder = './' + FLAGS.model + '_' + str(FLAGS.lambda_generation) + '_' + \ 24 | str(FLAGS.lambda_latent) + '_maxpooling_' + str(FLAGS.vae_ablity) + '_K=' + str(FLAGS.K) 25 | elif FLAGS.use_pooling and not FLAGS.max_pooling: 26 | logfolder = './' + FLAGS.model + '_' + str(FLAGS.lambda_generation) + '_' + \ 27 | str(FLAGS.lambda_latent) + '_meanpooling_' + str(FLAGS.vae_ablity) + '_K=' + str(FLAGS.K) 28 | else: 29 | logfolder = './' + FLAGS.model + '_' + str(FLAGS.lambda_generation) + '_' + \ 30 | str(FLAGS.lambda_latent) + '_nopooling_' + str(FLAGS.vae_ablity) + '_K=' + str(FLAGS.K) 31 | 32 | if not os.path.isdir(logfolder): 33 | os.mkdir(logfolder) 34 | 35 | os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu 36 | convmesh = model.convMesh(feature_file, FLAGS, logfolder) 37 | convmesh.train() 38 | 39 | # interpolation 40 | # inter_begin_id = 7 41 | # inter_end_id = 14 42 | # interval = 30 43 | # convmesh.interpola(logfolder + '/convMesh.model-' + str(FLAGS.epoch_num), inter_begin_id, inter_end_id, interval, logfolder + '/itlp') 44 | 45 | # random generation 46 | # gen_num = 100 47 | # convmesh.random_generate(logfolder + '/convMesh.model-' + str(FLAGS.epoch_num), gen_num, logfolder + '/random') -------------------------------------------------------------------------------- /python/meshVAE_graph2.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | import tensorflow as tf 3 | import os 4 | import numpy as np 5 | 
import scipy.io as sio 6 | from six.moves import xrange 7 | import scipy.interpolate as interpolate 8 | import h5py 9 | import time 10 | import random, pickle 11 | from utils import * 12 | 13 | 14 | class convMesh(): 15 | config = tf.ConfigProto() 16 | config.gpu_options.allow_growth = True 17 | 18 | def __init__(self, feature_file, FLAGS, logfolder): 19 | 20 | self.model = FLAGS.model 21 | self.lambda_generation = FLAGS.lambda_generation 22 | self.lambda_latent = FLAGS.lambda_latent 23 | self.lambda_r2 = FLAGS.lambda_r2 24 | self.sp_learning_rate = False 25 | self.lr = FLAGS.lr 26 | self.epoch_num = FLAGS.epoch_num 27 | self.layers = 3 28 | self.K = FLAGS.K 29 | self.hidden_dim = FLAGS.hidden_dim 30 | self.batch_size = FLAGS.batch_size 31 | self.use_pooling = FLAGS.use_pooling 32 | self.max_pooling = FLAGS.max_pooling 33 | self.useS = True 34 | self.result_min = -0.95 35 | self.result_max = 0.95 36 | self.vae_ablity = FLAGS.vae_ablity 37 | 38 | self.tb = False 39 | 40 | self.logfolder = logfolder 41 | 42 | self.feature, self.neighbour1, self.degree1, self.mapping1, self.neighbour2, self.degree2, \ 43 | self.maxdemapping, self.meanpooling_degree, self.meandepooling_mapping, self.meandepooling_degree, \ 44 | self.logrmin, self.logrmax, self.smin, self.smax, self.modelnum, self.pointnum1, self.pointnum2, \ 45 | self.maxdegree1, self.maxdegree2, self.mapping1_col, self.L1, self.L2, self.cotw1, self.cotw2 \ 46 | = load_data(feature_file, self.result_min, self.result_max, useS=self.useS, graphconv=True) 47 | 48 | if not self.useS: 49 | self.vertex_dim = 3 50 | self.finaldim = 3 51 | else: 52 | self.vertex_dim = 9 53 | self.finaldim = 9 54 | 55 | self.inputs = tf.placeholder(tf.float64, [None, self.pointnum1, self.vertex_dim], name='input_mesh') 56 | self.nb1 = tf.constant(self.neighbour1, dtype='int64', shape=[self.pointnum1, self.maxdegree1], 57 | name='nb_relation1') 58 | self.nb2 = tf.constant(self.neighbour2, dtype='int64', shape=[self.pointnum2, self.maxdegree2], 59 | name='nb_relation2') 60 | self.degrees1 = tf.constant(self.degree1, dtype='float64', shape=[self.pointnum1, 1], name='degrees1') 61 | self.degrees2 = tf.constant(self.degree2, dtype='float64', shape=[self.pointnum2, 1], name='degrees2') 62 | self.random = tf.placeholder(tf.float64, [None, self.hidden_dim], name='random_samples') 63 | self.cw1 = tf.constant(self.cotw1, dtype='float64', shape=[self.pointnum1, self.maxdegree1, 1], name='a/cw1') 64 | self.cw2 = tf.constant(self.cotw2, dtype='float64', shape=[self.pointnum2, self.maxdegree2, 1], name='a/cw2') 65 | self.Laplace1 = self.L1 66 | if self.use_pooling: 67 | self.Laplace2 = self.L2 68 | else: 69 | self.Laplace2 = self.L1 70 | 71 | if self.use_pooling and self.max_pooling: 72 | self.mappingpooling1 = tf.constant(self.mapping1, dtype='int64', shape=[self.pointnum2, self.mapping1_col], 73 | name='mapping') 74 | self.mappingdepooling1 = tf.constant(self.maxdemapping, dtype='int64', shape=[self.pointnum1, 1], 75 | name='demapping') 76 | elif self.use_pooling and not self.max_pooling: 77 | self.mappingpooling1 = tf.constant(self.mapping1, dtype='int64', shape=[self.pointnum2, self.mapping1_col], 78 | name='mapping') 79 | self.mappingdepooling1 = tf.constant(self.meandepooling_mapping, dtype='int64', shape=[self.pointnum1, 1], 80 | name='demapping') 81 | self.mean_pl_degree = tf.constant(self.meanpooling_degree, dtype='float64', shape=[self.pointnum2, 1], 82 | name='mapping_degree') 83 | self.mean_depl_degree = tf.constant(self.meandepooling_degree, dtype='float64', 
shape=[self.pointnum1, 1], 84 | name='demapping_degree') 85 | else: 86 | self.nb2, self.degrees2 = self.nb1, self.degrees1 87 | self.pointnum2, self.cw2 = self.pointnum1, self.cw1 88 | print('we don\'t use pooling!!') 89 | 90 | self.enc_w = [] 91 | self.dec_w = [] 92 | for i in range(self.layers): 93 | if i == self.layers - 1: 94 | enc_weight = tf.get_variable("encoder/conv_weight"+str(i+1), [self.vertex_dim * self.K, self.finaldim], tf.float64, 95 | tf.random_normal_initializer(stddev=0.02)) 96 | dec_weight = tf.get_variable("decoder/conv_weight"+str(i+1), [self.vertex_dim * self.K, self.finaldim], tf.float64, 97 | tf.random_normal_initializer(stddev=0.02)) 98 | else: 99 | enc_weight = tf.get_variable("encoder/conv_weight"+str(i+1), [self.vertex_dim * self.K, self.vertex_dim], tf.float64, 100 | tf.random_normal_initializer(stddev=0.02)) 101 | dec_weight = tf.get_variable("decoder/conv_weight"+str(i+1), [self.vertex_dim * self.K, self.vertex_dim], tf.float64, 102 | tf.random_normal_initializer(stddev=0.02)) 103 | self.enc_w.append(enc_weight) 104 | self.dec_w.append(dec_weight) 105 | 106 | 107 | self.meanpara = tf.get_variable("encoder/mean_weights", [self.pointnum2 * self.finaldim, self.hidden_dim], 108 | tf.float64, tf.random_normal_initializer(stddev=0.02)) 109 | self.stdpara = tf.get_variable("encoder/std_weights", [self.pointnum2 * self.finaldim, self.hidden_dim], 110 | tf.float64, tf.random_normal_initializer(stddev=0.02)) 111 | 112 | # train 113 | self.z_mean, self.z_stddev = self.encoder(self.inputs, train=True) 114 | self.guessed_z = self.z_mean + self.z_stddev * tf.random_normal(tf.shape(self.z_mean), 0, 1, dtype=tf.float64) 115 | self.generated_mesh_train = self.decoder(self.guessed_z, train=True) 116 | 117 | # test 118 | self.z_mean_test, self.z_stddev_test = self.encoder(self.inputs, train=False) 119 | self.guessed_z_rebuild = self.z_mean_test 120 | self.generated_mesh_rebuild = self.decoder(self.guessed_z_rebuild, train=False) 121 | 122 | # generation 123 | self.test_mesh = self.decoder(self.random, train=False) 124 | 125 | self.generation_loss = self.lambda_generation * tf.reduce_mean(tf.reduce_sum(tf.pow(self.inputs - self.generated_mesh_train, 2.0), [1, 2])) 126 | 127 | self.latent_loss = self.lambda_latent * tf.reduce_mean(0.5 * tf.reduce_sum( 128 | tf.square(self.z_mean) + tf.square(self.z_stddev) - tf.log(1e-8 + tf.square(self.z_stddev)) - 1, [1])) 129 | 130 | self.valid_loss = self.lambda_generation * tf.reduce_mean(tf.reduce_sum(tf.pow(self.inputs - self.generated_mesh_rebuild, 2.0), [1, 2])) 131 | 132 | reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope='encoder') + tf.get_collection( 133 | tf.GraphKeys.REGULARIZATION_LOSSES, scope='decoder') 134 | 135 | self.r2_loss = sum(reg_losses) + tf.nn.l2_loss(self.enc_w) + tf.nn.l2_loss(self.dec_w) + tf.nn.l2_loss(self.meanpara) + tf.nn.l2_loss(self.stdpara) 136 | self.r2_loss = self.r2_loss * self.lambda_r2 137 | 138 | self.loss = self.generation_loss + self.latent_loss + self.r2_loss 139 | 140 | self.global_step = tf.Variable(0, trainable=False) 141 | if self.sp_learning_rate: 142 | new_learning_rate = tf.train.exponential_decay(self.lr, self.global_step, 3000, 0.5, staircase=True) 143 | self.optimizer = tf.train.AdamOptimizer(new_learning_rate).minimize(self.loss) 144 | else: 145 | self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss) 146 | 147 | self.saver_best = tf.train.Saver(max_to_keep = None) 148 | self.saver_vae = tf.train.Saver(max_to_keep = 3) 149 | 
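        # saver_best keeps every "validbest" checkpoint (max_to_keep=None), while
        # saver_vae keeps only the 3 most recent periodic checkpoints. The scalar
        # summaries below are evaluated only when self.tb is enabled in train().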
tf.summary.scalar('loss_all', self.loss) 150 | tf.summary.scalar('loss_generation', self.generation_loss) 151 | tf.summary.scalar('loss_latent', self.latent_loss) 152 | tf.summary.scalar('loss_r2', self.r2_loss) 153 | self.merge_summary = tf.summary.merge_all() 154 | 155 | self.checkpoint_dir = self.logfolder 156 | if os.path.exists(self.logfolder + '/log.txt'): 157 | self.log_file = open(self.logfolder + '/log.txt', 'a') 158 | else: 159 | self.log_file = open(self.logfolder + '/log.txt', 'w') 160 | 161 | if os.path.exists(logfolder + '/result.txt'): 162 | self.sim_log_file = open(logfolder + '/result.txt', 'a') 163 | else: 164 | self.sim_log_file = open(logfolder + '/result.txt', 'w') 165 | 166 | # functions 167 | def get_conv_weights(self, input_dim, output_dim, name='convweight'): 168 | with tf.variable_scope(name) as scope: 169 | n = tf.get_variable("nb_weights", [input_dim, output_dim], tf.float64, 170 | tf.random_normal_initializer(stddev=0.02)) 171 | v = tf.get_variable("vertex_weights", [input_dim, output_dim], tf.float64, 172 | tf.random_normal_initializer(stddev=0.02)) 173 | 174 | return n, v 175 | 176 | def encoder(self, input_feature, train=True, reuse=False): 177 | with tf.variable_scope("encoder") as scope: 178 | scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0)) 179 | if not train or reuse: 180 | train = False 181 | reuse = True 182 | scope.reuse_variables() 183 | 184 | conv1 = graph_conv2(input_feature, self.Laplace1, self.vertex_dim, self.enc_w[0], self.K, name='conv1', training=train) 185 | conv1 = graph_conv2(conv1, self.Laplace1, self.vertex_dim, self.enc_w[1], self.K, name='conv2', training=train) 186 | 187 | if self.use_pooling and self.max_pooling: 188 | conv1 = mesh_max_pooling(conv1, self.mappingpooling1) 189 | elif self.use_pooling and not self.max_pooling: 190 | conv1 = mesh_mean_pooling(conv1, self.mappingpooling1, self.mean_pl_degree) 191 | else: 192 | conv1 = conv1 193 | 194 | conv1 = graph_conv2(conv1, self.Laplace2, self.finaldim, self.enc_w[2], self.K, name='conv3', training=train, 195 | bn=False) 196 | x0 = tf.reshape(conv1, [tf.shape(conv1)[0], self.pointnum2 * self.finaldim]) 197 | mean = linear1(x0, self.meanpara, self.hidden_dim, 'mean') 198 | stddev = linear1(x0, self.stdpara, self.hidden_dim, 'stddev') 199 | stddev = tf.sqrt(2 * tf.nn.sigmoid(stddev)) 200 | 201 | return mean, stddev 202 | 203 | def decoder(self, latent_tensor, train=True, reuse=False): 204 | with tf.variable_scope("decoder") as scope: 205 | scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0)) 206 | if not train or reuse: 207 | train = False 208 | reuse = True 209 | scope.reuse_variables() 210 | 211 | l1 = linear1(latent_tensor, tf.transpose(self.meanpara), self.pointnum2 * self.finaldim, 'mean') 212 | l2 = tf.reshape(l1, [tf.shape(l1)[0], self.pointnum2, self.finaldim]) 213 | conv1 = graph_conv2(l2, self.Laplace2, self.vertex_dim, self.dec_w[2], self.K, name='conv4', training=train) 214 | if self.use_pooling and self.max_pooling: 215 | conv1 = mesh_max_depooling(conv1, self.mappingdepooling1) 216 | elif self.use_pooling and not self.max_pooling: 217 | conv1 = mesh_mean_depooling(conv1, self.mappingdepooling1, self.mean_depl_degree) 218 | else: 219 | conv1 = conv1 220 | 221 | conv1 = graph_conv2(conv1, self.Laplace1, self.vertex_dim, self.dec_w[1], self.K, name='conv5', training=train) 222 | 223 | conv1 = graph_conv2(conv1, self.Laplace1, self.vertex_dim, self.dec_w[0], self.K, name='conv6', 224 | training=train, bn=False) 225 | 226 | return conv1 227 | 
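    # Note on weight sharing: the decoder's fully connected layer reuses the
    # transposed encoder mean weights (tf.transpose(self.meanpara)), and the
    # decoder applies its conv weights in reverse index order (conv4 uses
    # dec_w[2], conv6 uses dec_w[0]), mirroring the encoder's layer stack.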
228 | def train(self): 229 | with tf.Session(config=self.config) as self.sess: 230 | if not os.path.exists(self.checkpoint_dir): 231 | os.makedirs(self.checkpoint_dir) 232 | 233 | tf.global_variables_initializer().run() 234 | 235 | could_load_vae, checkpoint_counter_vae = self.load(self.checkpoint_dir) 236 | 237 | if (could_load_vae and checkpoint_counter_vae < self.epoch_num): 238 | self.start_step_vae = checkpoint_counter_vae 239 | else: 240 | self.start_step_vae = 0 241 | 242 | self.write = tf.summary.FileWriter(self.logfolder + '/logs/', self.sess.graph) 243 | 244 | rng = np.random.RandomState(23456) 245 | 246 | file_name = 'id'+str(self.vae_ablity)+'.dat' 247 | if os.path.isfile(file_name): 248 | id = pickle.load(open(file_name, 'rb')) 249 | id.show() 250 | Ia = id.Ia 251 | Ib = id.Ib 252 | else: 253 | Ia = np.arange(len(self.feature)) 254 | Ia = random.sample(list(Ia), int(len(self.feature) * (1 - self.vae_ablity))) 255 | Ib = Ia 256 | id = Id(Ia, Ib) 257 | id.show() 258 | f = open(file_name, 'wb') 259 | pickle.dump(id, f, pickle.HIGHEST_PROTOCOL) 260 | f.close() 261 | id = pickle.load(open(file_name, 'rb')) 262 | id.show() 263 | 264 | self.C_Ia = list(set(np.arange(len(self.feature))).difference(set(Ia))) 265 | valid_best = float('inf') 266 | 267 | batch_size = self.batch_size 268 | for epoch in xrange(self.start_step_vae, self.epoch_num): 269 | timecurrent = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) 270 | rng.shuffle(Ia) 271 | train_feature = self.feature[Ia] 272 | train_num = len(train_feature) 273 | 274 | for bidx in xrange(0, train_num//batch_size + 1): 275 | train_feature_batch = train_feature[bidx*batch_size:min(train_num, bidx*batch_size+batch_size)] 276 | if len(train_feature_batch) == 0: 277 | continue 278 | random_batch = np.random.normal(loc=0.0, scale=1.0, size=(len(train_feature_batch), self.hidden_dim)) 279 | 280 | _, cost_generation, cost_latent, cost_r2 = self.sess.run([self.optimizer, self.generation_loss, self.latent_loss, self.r2_loss], feed_dict={self.inputs: train_feature_batch, self.random: random_batch}) 281 | 282 | print("%s Epoch|Batch: [%4d|%4d]generation_loss: %.8f, latent_loss: %.8f r2_loss: %.8f" % ( 283 | timecurrent, epoch + 1, bidx+1, cost_generation, cost_latent, cost_r2)) 284 | self.log_file.write("%s Epoch|Batch: [%4d|%4d]generation_loss: %.8f, latent_loss: %.8f, r2_loss: %.8f\n" % ( 285 | timecurrent, epoch + 1, bidx+1, cost_generation, cost_latent, cost_r2)) 286 | 287 | test_num = len(self.C_Ia) 288 | valid_generation = 0 289 | for bidx in xrange(0, test_num//batch_size + 1): 290 | dxb = self.C_Ia[bidx*batch_size:min(test_num, bidx*batch_size+batch_size)] 291 | test_feature_batch = self.feature[dxb] 292 | valid_generation_batch = self.valid_loss.eval({self.inputs: test_feature_batch}) 293 | valid_generation += valid_generation_batch 294 | print("%s Epoch: [%4d]valid_loss: %.8f" % (timecurrent, epoch + 1, valid_generation)) 295 | 296 | if np.mod(epoch + 1, 100) == 0: 297 | 298 | if valid_generation < valid_best: 299 | valid_best = valid_generation 300 | print('Save best!') 301 | self.log_file.write("Save Best! 
Epoch: %4d\n" % (epoch + 1)) 302 | self.sim_log_file.write("%d %.8f %.8f %.8f %.8f\n" % (1, cost_generation, cost_latent, valid_generation, cost_r2)) 303 | self.saver_best.save(self.sess, self.logfolder + '/' + 'convMesh_validbest.model') 304 | else: 305 | self.sim_log_file.write("%d %.8f %.8f %.8f %.8f\n" % (0, cost_generation, cost_latent, valid_generation, cost_r2)) 306 | 307 | self.saver_vae.save(self.sess, self.logfolder + '/' + 'convMesh.model', global_step=epoch + 1) 308 | 309 | rebuild_mesh = [] 310 | for bidx in xrange(0, len(self.feature)//batch_size + 1): 311 | rebuild_batch = self.generated_mesh_rebuild.eval({self.inputs: self.feature[bidx*batch_size:min(len(self.feature), bidx*batch_size+batch_size)]}) 312 | rebuild_mesh_batch = recover_data(rebuild_batch, self.logrmin, self.logrmax, self.smin, self.smax, self.result_min, self.result_max, self.useS) 313 | if bidx == 0: 314 | rebuild_mesh = rebuild_mesh_batch 315 | else: 316 | rebuild_mesh = np.concatenate((rebuild_mesh, rebuild_mesh_batch), axis=0) 317 | 318 | savefile = h5py.File(self.logfolder + '/' + 'rebuild' + str(epoch + 1) + '.h5', 'w') 319 | savefile['test_mesh'] = rebuild_mesh 320 | savefile['valid_id'] = self.C_Ia 321 | savefile.close() 322 | 323 | if self.tb and (epoch + 1) % 20 == 0: 324 | s = self.sess.run(self.merge_summary, feed_dict={self.inputs: self.feature, self.random: random_batch}) 325 | self.write.add_summary(s, epoch) 326 | 327 | self.log_file.close() 328 | self.sim_log_file.close() 329 | return 330 | 331 | def interpola(self, restore, begin_id, end_id, interval, foldername): 332 | with tf.Session(config=self.config) as self.sess: 333 | self.saver_vae.restore(self.sess, restore) 334 | x = np.zeros([2, self.pointnum1, self.vertex_dim]) 335 | x[0, :, :] = self.feature[begin_id, :, :] 336 | x[1, :, :] = self.feature[end_id, :, :] 337 | 338 | random_np = self.guessed_z_rebuild.eval({self.inputs: x}) 339 | random2_intpl = interpolate.griddata( 340 | np.linspace(0, 1, len(random_np) * 1), random_np, 341 | np.linspace(0, 1, interval), method='linear') 342 | 343 | if not os.path.isdir(foldername): 344 | os.mkdir(foldername) 345 | 346 | test = self.sess.run([self.test_mesh], feed_dict={self.random: random2_intpl})[0] 347 | 348 | test = recover_data(test, self.logrmin, self.logrmax, self.smin, self.smax, self.result_min, self.result_max, self.useS) 349 | 350 | name = foldername + '/intlp_test' + str(begin_id) + '_' + str(end_id) + '.h5' 351 | print(name) 352 | f = h5py.File(name, 'w') 353 | f['test_mesh'] = test 354 | f['latent_z'] = random_np 355 | f.close() 356 | 357 | return 358 | 359 | def embedding(self, restore): 360 | with tf.Session(config=self.config) as self.sess: 361 | self.saver_vae.restore(self.sess, restore) 362 | 363 | meanemb, stddev = self.sess.run([self.z_mean_test, self.z_stddev_test], feed_dict={self.inputs: self.feature}) 364 | if not os.path.isdir(self.logfolder + '/embdata'): 365 | os.mkdir(self.logfolder + '/embdata') 366 | name = self.logfolder + '/embdata/embedding_' + model + '.h5' 367 | f = h5py.File(name, 'w') 368 | f['meanemb'] = meanemb 369 | f['stddev'] = stddev 370 | f.close() 371 | 372 | return 373 | 374 | def model_dir(self, model_name, dataset_name): 375 | return "{}_{}_{}".format(model_name, dataset_name, hidden_dim) 376 | 377 | def load(self, checkpoint_dir): 378 | import re 379 | print(" [*] Reading checkpoints...") 380 | # checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name) 381 | saver = self.saver_vae 382 | 383 | ckpt = 
tf.train.get_checkpoint_state(checkpoint_dir) 384 | if ckpt and ckpt.model_checkpoint_path: 385 | ckpt_name = os.path.basename(ckpt.model_checkpoint_path) 386 | 387 | saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) 388 | counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0)) 389 | print(" [*] Success to read {}".format(ckpt_name)) 390 | return True, counter 391 | else: 392 | print(" [*] Failed to find a checkpoint") 393 | return False, 0 # model = convMESH() 394 | 395 | def random_generate(self, restore, gennum, foldername): 396 | with tf.Session(config=self.config) as self.sess: 397 | self.saver_vae.restore(self.sess, restore) 398 | 399 | random_batch = np.random.normal(loc=0.0, scale=1.0, size=(gennum, self.hidden_dim)) 400 | if not os.path.isdir(foldername): 401 | os.mkdir(foldername) 402 | test = self.sess.run([self.test_mesh], feed_dict={self.random: random_batch})[0] 403 | test = recover_data(test, self.logrmin, self.logrmax, self.smin, self.smax, self.result_min, self.result_max, self.useS) 404 | 405 | file_name = 'id' + str(vae_ablity) + '.dat' 406 | id = pickle.load(open(file_name, 'rb')) 407 | Ia = id.Ia 408 | 409 | name = foldername + '/random_gen' + str(gennum) + '.h5' 410 | print(name) 411 | f = h5py.File(name, 'w') 412 | f['test_mesh'] = test 413 | f['train_id'] = Ia 414 | f['latent_z'] = random_batch 415 | f.close() 416 | 417 | return 418 | -------------------------------------------------------------------------------- /python/utils.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | 3 | from math import sin, cos, sqrt 4 | from six.moves import xrange 5 | 6 | import h5py 7 | import numpy as np 8 | import scipy.io as sio 9 | import tensorflow as tf 10 | import scipy.sparse 11 | import scipy.sparse.linalg 12 | import scipy.spatial.distance 13 | 14 | 15 | class Id: 16 | 17 | def __init__(self, Ia, Ib): 18 | self.Ia = Ia 19 | self.Ib = Ib 20 | 21 | def show(self): 22 | print('A: %s\nB: %s' % (self.Ia, self.Ib)) 23 | 24 | 25 | def mesh_max_pooling(input, mapping): 26 | # minnum = -float('inf') 27 | minnum = 0.0 28 | input_dim = 9 29 | padding_feature = minnum * tf.ones([tf.shape(input)[0], 1, tf.shape(input)[2]], tf.float64) 30 | 31 | padded_input = tf.concat([padding_feature, input], 1) 32 | 33 | def compute_nb_feature(input_f): 34 | return tf.gather(input_f, mapping) 35 | 36 | total_nb_feature = tf.map_fn(compute_nb_feature, padded_input) 37 | # max_nb_index = tf.argmax(tf.abs(total_nb_feature)-, 2) 38 | # max_nb_feature = total_nb_feature[:,:,max_nb_index,:] 39 | 40 | max_nb_feature_plus = tf.reduce_max(total_nb_feature, reduction_indices=[2]) 41 | 42 | max_nb_feature_minus = tf.reduce_max(-total_nb_feature, reduction_indices=[2]) 43 | max_nb_feature = tf.where(max_nb_feature_plus > max_nb_feature_minus, max_nb_feature_plus, -max_nb_feature_minus) 44 | 45 | return max_nb_feature 46 | 47 | 48 | def mesh_max_depooling(input, mapping): 49 | # minnum = -float('inf') 50 | input_dim = 9 51 | padding_feature = tf.zeros([tf.shape(input)[0], 1, tf.shape(input)[2]], tf.float64) 52 | 53 | padded_input = tf.concat([padding_feature, input], 1) 54 | 55 | def compute_nb_feature(input_f): 56 | return tf.gather(input_f, mapping) 57 | 58 | total_nb_feature = tf.map_fn(compute_nb_feature, padded_input) 59 | max_nb_feature = tf.reduce_max(total_nb_feature, axis=2) 60 | # max_nb_feature = tf.reshape(total_nb_feature, [tf.shape(input)[0], tf.shape(mapping)[0], tf.shape(input)[2]]) 61 | # max_nb_feature = 
tf.reduce_max(total_nb_feature, reduction_indices=[2]) 62 | 63 | return max_nb_feature 64 | 65 | 66 | def mesh_mean_pooling(input, mapping, degree): 67 | # minnum = -float('inf') 68 | 69 | input_dim = 9 70 | padding_feature = tf.zeros([tf.shape(input)[0], 1, tf.shape(input)[2]], tf.float64) 71 | 72 | padded_input = tf.concat([padding_feature, input], 1) 73 | 74 | def compute_nb_feature(input_f): 75 | return tf.gather(input_f, mapping) 76 | 77 | total_nb_feature = tf.map_fn(compute_nb_feature, padded_input) 78 | mean_nb_feature = tf.reduce_sum(total_nb_feature, axis=2) / degree 79 | 80 | return mean_nb_feature 81 | 82 | 83 | def mesh_mean_depooling(input, mapping, degree): 84 | input_dim = 9 85 | # padding_feature = tf.zeros([tf.shape(input)[0], 1, tf.shape(input)[2]], tf.float64) 86 | 87 | padded_input = input 88 | 89 | def compute_nb_feature(input_f): 90 | return tf.gather(input_f, mapping) 91 | 92 | total_nb_feature = tf.map_fn(compute_nb_feature, padded_input) 93 | mean_nb_feature = tf.reduce_sum(total_nb_feature, axis=2) / degree 94 | 95 | return mean_nb_feature 96 | 97 | 98 | def load_data(path, resultmin, resultmax, useS=True, graphconv=False): 99 | data = h5py.File(path) 100 | datalist = data.keys() 101 | logr = np.transpose(data['FLOGRNEW'], (2, 1, 0)) 102 | s = np.transpose(data['FS'], (2, 1, 0)) 103 | # neighbour = data['neighbour'] 104 | neighbour1 = np.transpose(data['neighbour1']) 105 | neighbour2 = np.transpose(data['neighbour2']) 106 | mapping = np.transpose(data['mapping']) 107 | cotweight1 = np.transpose(data['cotweight1']) 108 | cotweight2 = np.transpose(data['cotweight2']) 109 | 110 | pointnum1 = neighbour1.shape[0] 111 | pointnum2 = neighbour2.shape[0] 112 | maxdegree1 = neighbour1.shape[1] 113 | maxdegree2 = neighbour2.shape[1] 114 | modelnum = len(logr) 115 | 116 | logrmin = logr.min() 117 | logrmin = logrmin - 1e-6 118 | logrmax = logr.max() 119 | logrmax = logrmax + 1e-6 120 | smin = s.min() 121 | smin = smin - 1e-6 122 | smax = s.max() 123 | smax = smax + 1e-6 124 | 125 | rnew = (resultmax - resultmin) * (logr - logrmin) / (logrmax - logrmin) + resultmin 126 | snew = (resultmax - resultmin) * (s - smin) / (smax - smin) + resultmin 127 | if useS: 128 | feature = np.concatenate((rnew, snew), axis=2) 129 | else: 130 | feature = rnew 131 | 132 | f = np.zeros_like(feature).astype('float64') 133 | f = feature 134 | 135 | nb1 = np.zeros((pointnum1, maxdegree1)).astype('float64') 136 | nb1 = neighbour1 137 | 138 | nb2 = np.zeros((pointnum2, maxdegree2)).astype('float64') 139 | nb2 = neighbour2 140 | 141 | L1 = np.zeros((pointnum1, pointnum1)).astype('float64') 142 | L2 = np.zeros((pointnum2, pointnum2)).astype('float64') 143 | 144 | if graphconv and 'W1' in datalist: 145 | L = np.transpose(data['W1']) 146 | L1 = L 147 | L1 = rescale_L(Laplacian(scipy.sparse.csr_matrix(L1))) 148 | 149 | if graphconv and 'W2' in datalist: 150 | L = np.transpose(data['W2']) 151 | L2 = L 152 | L2 = rescale_L(Laplacian(scipy.sparse.csr_matrix(L2))) 153 | 154 | cotw1 = np.zeros((cotweight1.shape[0], cotweight1.shape[1], 1)).astype('float64') 155 | cotw2 = np.zeros((cotweight2.shape[0], cotweight2.shape[1], 1)).astype('float64') 156 | for i in range(1): 157 | cotw1[:, :, i] = cotweight1 158 | cotw2[:, :, i] = cotweight2 159 | 160 | degree1 = np.zeros((neighbour1.shape[0], 1)).astype('float64') 161 | for i in range(neighbour1.shape[0]): 162 | degree1[i] = np.count_nonzero(nb1[i]) 163 | 164 | degree2 = np.zeros((neighbour2.shape[0], 1)).astype('float64') 165 | for i in range(neighbour2.shape[0]): 
166 | degree2[i] = np.count_nonzero(nb2[i]) 167 | 168 | mapping1 = np.zeros((pointnum2, mapping.shape[1])).astype('float64') 169 | maxdemapping = np.zeros((pointnum1, 1)).astype('float64') 170 | 171 | mapping1_col = mapping.shape[1] 172 | 173 | mapping1 = mapping 174 | # mapping2 = demapping 175 | for i in range(pointnum1): 176 | # print i 177 | idx = np.where(mapping1 == i + 1) 178 | if idx[1][0] > 0: 179 | maxdemapping[i] = 1 180 | else: 181 | maxdemapping[i] = idx[0][0] + 1 182 | 183 | meanpooling_degree = np.zeros((mapping.shape[0], 1)).astype('float64') 184 | for i in range(mapping.shape[0]): 185 | meanpooling_degree[i] = np.count_nonzero(mapping1[i]) 186 | 187 | meandepooling_mapping = np.zeros((pointnum1, 1)).astype('float64') 188 | meandepooling_degree = np.zeros((pointnum1, 1)).astype('float64') 189 | 190 | for i in range(pointnum1): 191 | idx = np.where(mapping1 == i + 1)[0] 192 | meandepooling_mapping[i] = idx[0] 193 | meandepooling_degree[i] = meanpooling_degree[idx[0]] 194 | 195 | return f, nb1, degree1, mapping1, nb2, degree2, maxdemapping, meanpooling_degree, meandepooling_mapping, meandepooling_degree, \ 196 | logrmin, logrmax, smin, smax, modelnum, pointnum1, pointnum2, maxdegree1, maxdegree2, mapping1_col, L1, L2, cotw1, cotw2 197 | 198 | 199 | def load_data1(path, resultmin, resultmax, useS=True, graphconv=False): 200 | data = h5py.File(path) 201 | datalist = data.keys() 202 | logr = np.transpose(data['FLOGRNEW'], (2, 1, 0)) 203 | s = np.transpose(data['FS'], (2, 1, 0)) 204 | # neighbour = data['neighbour'] 205 | neighbour1 = np.transpose(data['neighbour1']) 206 | neighbour2 = np.transpose(data['neighbour2']) 207 | neighbour3 = np.transpose(data['neighbour3']) 208 | mapping1 = np.transpose(data['mapping1']) 209 | mapping2 = np.transpose(data['mapping2']) 210 | cotweight1 = np.transpose(data['cotweight1']) 211 | cotweight2 = np.transpose(data['cotweight2']) 212 | cotweight3 = np.transpose(data['cotweight3']) 213 | 214 | pointnum1 = neighbour1.shape[0] 215 | pointnum2 = neighbour2.shape[0] 216 | pointnum3 = neighbour3.shape[0] 217 | maxdegree1 = neighbour1.shape[1] 218 | maxdegree2 = neighbour2.shape[1] 219 | maxdegree3 = neighbour3.shape[1] 220 | modelnum = len(logr) 221 | 222 | logrmin = logr.min() 223 | logrmin = logrmin - 1e-6 224 | logrmax = logr.max() 225 | logrmax = logrmax + 1e-6 226 | smin = s.min() 227 | smin = smin - 1e-6 228 | smax = s.max() 229 | smax = smax + 1e-6 230 | 231 | rnew = (resultmax - resultmin) * (logr - logrmin) / (logrmax - logrmin) + resultmin 232 | snew = (resultmax - resultmin) * (s - smin) / (smax - smin) + resultmin 233 | if useS: 234 | feature = np.concatenate((rnew, snew), axis=2) 235 | else: 236 | feature = rnew 237 | 238 | f = np.zeros_like(feature).astype('float64') 239 | f = feature 240 | 241 | nb1 = np.zeros((pointnum1, maxdegree1)).astype('float64') 242 | nb1 = neighbour1 243 | 244 | nb2 = np.zeros((pointnum2, maxdegree2)).astype('float64') 245 | nb2 = neighbour2 246 | 247 | nb3 = np.zeros((pointnum3, maxdegree3)).astype('float64') 248 | nb3 = neighbour3 249 | 250 | L1 = np.zeros((pointnum1, pointnum1)).astype('float64') 251 | L2 = np.zeros((pointnum2, pointnum2)).astype('float64') 252 | L3 = np.zeros((pointnum3, pointnum3)).astype('float64') 253 | 254 | if graphconv and 'W1' in datalist: 255 | L = np.transpose(data['W1']) 256 | L1 = L 257 | L1 = rescale_L(Laplacian(scipy.sparse.csr_matrix(L1))) 258 | 259 | if graphconv and 'W2' in datalist: 260 | L = np.transpose(data['W2']) 261 | L2 = L 262 | L2 = 
def load_data1(path, resultmin, resultmax, useS=True, graphconv=False):
    # Three-level variant of load_data: two pooling steps, hence two mappings
    # and three neighbourhood/cotweight sets.
    data = h5py.File(path, 'r')
    datalist = data.keys()
    logr = np.transpose(data['FLOGRNEW'], (2, 1, 0))
    s = np.transpose(data['FS'], (2, 1, 0))
    neighbour1 = np.transpose(data['neighbour1'])
    neighbour2 = np.transpose(data['neighbour2'])
    neighbour3 = np.transpose(data['neighbour3'])
    mapping1 = np.transpose(data['mapping1'])
    mapping2 = np.transpose(data['mapping2'])
    cotweight1 = np.transpose(data['cotweight1'])
    cotweight2 = np.transpose(data['cotweight2'])
    cotweight3 = np.transpose(data['cotweight3'])

    pointnum1 = neighbour1.shape[0]
    pointnum2 = neighbour2.shape[0]
    pointnum3 = neighbour3.shape[0]
    maxdegree1 = neighbour1.shape[1]
    maxdegree2 = neighbour2.shape[1]
    maxdegree3 = neighbour3.shape[1]
    modelnum = len(logr)

    logrmin = logr.min() - 1e-6
    logrmax = logr.max() + 1e-6
    smin = s.min() - 1e-6
    smax = s.max() + 1e-6

    rnew = (resultmax - resultmin) * (logr - logrmin) / (logrmax - logrmin) + resultmin
    snew = (resultmax - resultmin) * (s - smin) / (smax - smin) + resultmin
    if useS:
        feature = np.concatenate((rnew, snew), axis=2)
    else:
        feature = rnew

    f = feature
    nb1 = neighbour1
    nb2 = neighbour2
    nb3 = neighbour3

    L1 = np.zeros((pointnum1, pointnum1)).astype('float64')
    L2 = np.zeros((pointnum2, pointnum2)).astype('float64')
    L3 = np.zeros((pointnum3, pointnum3)).astype('float64')

    if graphconv and 'W1' in datalist:
        L1 = rescale_L(Laplacian(scipy.sparse.csr_matrix(np.transpose(data['W1']))))

    if graphconv and 'W2' in datalist:
        L2 = rescale_L(Laplacian(scipy.sparse.csr_matrix(np.transpose(data['W2']))))

    if graphconv and 'W3' in datalist:
        L3 = rescale_L(Laplacian(scipy.sparse.csr_matrix(np.transpose(data['W3']))))

    cotw1 = np.zeros((cotweight1.shape[0], cotweight1.shape[1], 1)).astype('float64')
    cotw2 = np.zeros((cotweight2.shape[0], cotweight2.shape[1], 1)).astype('float64')
    cotw3 = np.zeros((cotweight3.shape[0], cotweight3.shape[1], 1)).astype('float64')
    cotw1[:, :, 0] = cotweight1
    cotw2[:, :, 0] = cotweight2
    cotw3[:, :, 0] = cotweight3

    degree1 = np.zeros((neighbour1.shape[0], 1)).astype('float64')
    for i in range(neighbour1.shape[0]):
        degree1[i] = np.count_nonzero(nb1[i])

    degree2 = np.zeros((neighbour2.shape[0], 1)).astype('float64')
    for i in range(neighbour2.shape[0]):
        degree2[i] = np.count_nonzero(nb2[i])

    degree3 = np.zeros((neighbour3.shape[0], 1)).astype('float64')
    for i in range(neighbour3.shape[0]):
        degree3[i] = np.count_nonzero(nb3[i])

    mapping11 = mapping1
    mapping12 = mapping2
    mapping11_col = mapping1.shape[1]
    mapping12_col = mapping2.shape[1]

    # Same max-depooling bookkeeping as in load_data, but with 0-based indices.
    maxdemapping1 = np.zeros((pointnum1, 1)).astype('float64')
    maxdemapping2 = np.zeros((pointnum2, 1)).astype('float64')
    for i in range(pointnum1):
        idx = np.where(mapping11 == i + 1)
        if idx[1][0] > 0:
            maxdemapping1[i] = 0
        else:
            maxdemapping1[i] = idx[0][0]
    for i in range(pointnum2):
        idx = np.where(mapping12 == i + 1)
        if idx[1][0] > 0:
            maxdemapping2[i] = 0
        else:
            maxdemapping2[i] = idx[0][0]

    meanpooling_degree1 = np.zeros((mapping1.shape[0], 1)).astype('float64')
    for i in range(mapping1.shape[0]):
        meanpooling_degree1[i] = np.count_nonzero(mapping11[i])

    meanpooling_degree2 = np.zeros((mapping2.shape[0], 1)).astype('float64')
    for i in range(mapping2.shape[0]):
        meanpooling_degree2[i] = np.count_nonzero(mapping12[i])

    meandepooling_mapping1 = np.zeros((pointnum1, 1)).astype('float64')
    meandepooling_degree1 = np.zeros((pointnum1, 1)).astype('float64')
    meandepooling_mapping2 = np.zeros((pointnum2, 1)).astype('float64')
    meandepooling_degree2 = np.zeros((pointnum2, 1)).astype('float64')

    for i in range(pointnum1):
        idx = np.where(mapping11 == i + 1)[0]
        meandepooling_mapping1[i] = idx[0]
        meandepooling_degree1[i] = meanpooling_degree1[idx[0]]

    for i in range(pointnum2):
        idx = np.where(mapping12 == i + 1)[0]
        meandepooling_mapping2[i] = idx[0]
        meandepooling_degree2[i] = meanpooling_degree2[idx[0]]

    return f, nb1, degree1, mapping11, nb2, degree2, mapping12, nb3, degree3, \
        maxdemapping1, meanpooling_degree1, meandepooling_mapping1, meandepooling_degree1, \
        maxdemapping2, meanpooling_degree2, meandepooling_mapping2, meandepooling_degree2, \
        logrmin, logrmax, smin, smax, modelnum, pointnum1, pointnum2, pointnum3, maxdegree1, maxdegree2, maxdegree3, \
        mapping11_col, mapping12_col, L1, L2, L3, cotw1, cotw2, cotw3

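# A sketch of the linear rescaling shared by both loaders and inverted by
# recover_data below: features are mapped into [resultmin, resultmax] and
# mapped back exactly. The sample values are illustrative assumptions.
import numpy as np

x = np.array([-0.3, 0.0, 0.7])
xmin, xmax = x.min() - 1e-6, x.max() + 1e-6
resultmin, resultmax = -0.95, 0.95

xnew = (resultmax - resultmin) * (x - xmin) / (xmax - xmin) + resultmin
xback = (xmax - xmin) * (xnew - resultmin) / (resultmax - resultmin) + xmin
assert np.allclose(x, xback)
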
def recover_data(recover_feature, logrmin, logrmax, smin, smax, resultmin, resultmax, useS=True):
    # Invert the linear rescaling applied in the loaders: map the first three
    # channels back to the log-rotation range and, if present, the remaining
    # six back to the scaling range.
    logr = recover_feature[:, :, 0:3]
    logr = (logrmax - logrmin) * (logr - resultmin) / (resultmax - resultmin) + logrmin
    if useS:
        s = recover_feature[:, :, 3:9]
        s = (smax - smin) * (s - resultmin) / (resultmax - resultmin) + smin
        logr = np.concatenate((logr, s), axis=2)

    return logr


def linear1(input_, matrix, output_size, name='Linear', stddev=0.02, bias_start=0.0):
    # Affine layer with an externally supplied weight matrix; only the bias is
    # created (and regularized) inside this scope.
    with tf.variable_scope(name) as scope:
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        bias = tf.get_variable("bias", [output_size], tf.float64,
                               initializer=tf.constant_initializer(bias_start))

        return tf.matmul(input_, matrix) + bias


def leaky_relu(input_, alpha=0.02):
    return tf.maximum(input_, alpha * input_)


def batch_norm_wrapper(inputs, name='batch_norm', is_training=False, decay=0.9, epsilon=1e-5):
    with tf.variable_scope(name) as scope:
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        if is_training:
            scale = tf.get_variable('scale', dtype=tf.float64, trainable=True,
                                    initializer=tf.ones([inputs.get_shape()[-1]], dtype=tf.float64))
            beta = tf.get_variable('beta', dtype=tf.float64, trainable=True,
                                   initializer=tf.zeros([inputs.get_shape()[-1]], dtype=tf.float64))
            pop_mean = tf.get_variable('overallmean', dtype=tf.float64, trainable=False,
                                       initializer=tf.zeros([inputs.get_shape()[-1]], dtype=tf.float64))
            pop_var = tf.get_variable('overallvar', dtype=tf.float64, trainable=False,
                                      initializer=tf.ones([inputs.get_shape()[-1]], dtype=tf.float64))
        else:
            scope.reuse_variables()
            scale = tf.get_variable('scale', dtype=tf.float64, trainable=True)
            beta = tf.get_variable('beta', dtype=tf.float64, trainable=True)
            pop_mean = tf.get_variable('overallmean', dtype=tf.float64, trainable=False)
            pop_var = tf.get_variable('overallvar', dtype=tf.float64, trainable=False)

        if is_training:
            # Normalize with the batch statistics and update the running
            # (population) statistics with an exponential moving average.
            axis = list(range(len(inputs.get_shape()) - 1))
            batch_mean, batch_var = tf.nn.moments(inputs, axis)
            train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
            train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))
            with tf.control_dependencies([train_mean, train_var]):
                return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, scale, epsilon)
        else:
            # At test time, normalize with the frozen population statistics.
            return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, epsilon)

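# The exponential-moving-average update used by batch_norm_wrapper above,
# reduced to a scalar: pop = pop * decay + batch * (1 - decay). After k
# identical batches the running mean approaches the batch statistic as
# 1 - decay**k. The values below are illustrative.
pop_mean, decay = 0.0, 0.9
for batch_mean in [1.0, 1.0, 1.0]:
    pop_mean = pop_mean * decay + batch_mean * (1 - decay)
assert abs(pop_mean - (1 - 0.9 ** 3)) < 1e-12
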
# -----------------------------------------------------graph conv--------------------------------


def Laplacian(W, normalized=True):
    """Return the Laplacian of the weight matrix."""

    # Degree vector.
    d = W.sum(axis=0)
    if not normalized:
        # Combinatorial Laplacian L = D - W.
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        L = D - W
    else:
        # Symmetric normalized Laplacian L = I - D^{-1/2} W D^{-1/2}.
        d += np.spacing(np.array(0.0, W.dtype))
        d = 1 / np.sqrt(d)
        D = scipy.sparse.diags(d.A.squeeze(), 0)
        I = scipy.sparse.identity(d.size, dtype=W.dtype)
        L = I - D * W * D

    assert type(L) is scipy.sparse.csr_matrix
    return L


def rescale_L(L, lmax=2):
    """Rescale the Laplacian eigenvalues to [-1, 1]."""
    M = L.shape[0]
    I = scipy.sparse.identity(M, format='csr', dtype=L.dtype)
    L /= lmax / 2
    L -= I
    return L


def chebyshev(L, X, K):
    """Return T_k X where T_k are the Chebyshev polynomials of orders 0..K-1.
    Complexity is O(KMN)."""
    M, N = X.shape

    # Xt = T @ X: MxM @ MxN.
    Xt = np.empty((K, M, N), L.dtype)
    # Xt_0 = T_0 X = I X = X.
    Xt[0, ...] = X
    # Xt_1 = T_1 X = L X.
    if K > 1:
        Xt[1, ...] = L.dot(X)
    # Xt_k = 2 L Xt_k-1 - Xt_k-2.
    for k in range(2, K):
        Xt[k, ...] = 2 * L.dot(Xt[k - 1, ...]) - Xt[k - 2, ...]
    return Xt


def graph_conv2(x, L, Fout, W, K, name='graph_conv', training=True, special_activation=True, no_activation=False, bn=True):
    with tf.variable_scope(name) as scope:
        scope.set_regularizer(tf.contrib.layers.l2_regularizer(scale=1.0))
        N, M, Fin = x.get_shape()
        M, Fin = int(M), int(Fin)
        L = L.tocoo()
        indices = np.column_stack((L.row, L.col))
        L = tf.SparseTensor(indices, L.data, L.shape)
        L = tf.sparse_reorder(L)
        # Transform to Chebyshev basis.
        x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
        x0 = tf.reshape(x0, [M, -1])  # M x Fin*N
        x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N

        def concat(x, x_):
            x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
            return tf.concat([x, x_], axis=0)  # K x M x Fin*N

        if K > 1:
            x1 = tf.sparse_tensor_dense_matmul(L, x0)
            x = concat(x, x1)
        for k in range(2, K):
            x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
            x = concat(x, x2)
            x0, x1 = x1, x2
        x = tf.reshape(x, [K, M, Fin, -1])  # K x M x Fin x N
        x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
        x = tf.reshape(x, [-1, Fin * K])  # N*M x Fin*K
        # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
        x = tf.matmul(x, W)  # N*M x Fout
        x = tf.reshape(x, [-1, M, Fout])  # N x M x Fout

        if not bn:
            fb = x
        else:
            fb = batch_norm_wrapper(x, is_training=training)

        if no_activation:
            fa = fb
        elif not special_activation:
            fa = leaky_relu(fb)
        else:
            fa = tf.nn.tanh(fb)

        return fa
--------------------------------------------------------------------------------
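# A quick numerical check of the Chebyshev basis used by chebyshev() and
# graph_conv2 above: T_0 = I, T_1 = L, T_2 = 2 L^2 - I. The 3-node path
# graph below is an illustrative assumption.
import numpy as np
import scipy.sparse

W = scipy.sparse.csr_matrix(np.array([[0., 1., 0.],
                                      [1., 0., 1.],
                                      [0., 1., 0.]]))
L = rescale_L(Laplacian(W))   # normalized Laplacian rescaled to [-1, 1]
X = np.eye(3)
Xt = chebyshev(L, X, 3)       # K x M x N stack of T_k X
assert np.allclose(Xt[2], 2 * L.dot(L.dot(X)) - X)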