├── README.md ├── cpp_GI_generation ├── CMakeLists.txt ├── config.ini ├── include │ ├── GI.h │ ├── GPC.h │ ├── Generator.h │ ├── Heap.h │ ├── Ini.h │ ├── IniFile.h │ ├── Mesh_C.h │ ├── Vector3.h │ ├── dirent.h │ ├── libcompcur.h │ ├── nanoflann.hpp │ └── utils.h ├── lib │ └── libcompcur.lib ├── libcompcur.dll ├── readme.md └── src │ └── gigen.cpp ├── keypoints_faust_all.kpi ├── matlab_LPS ├── Laplacian_Energy_Gen.m ├── check_face_vertex.m ├── clamp.m ├── comp_geodesics_to_all.mexw64 ├── compute_boundary_all.m ├── compute_curvature.m ├── compute_mesh_laplacian_plusA_half.m ├── compute_mesh_weight_plusA_half.m ├── compute_normal.m ├── compute_vertex_face_ring.m ├── dijkstra_to_all.m ├── getoptions.m ├── libcompcur.dll ├── libcompcur.h ├── libcompcur.lib ├── perform_mesh_smoothing.m ├── read_off.m ├── read_shape.m ├── shape_diameter.m ├── symmshlp_matrix.m ├── symmshlpmatrix.mexw64 └── triangulation2adjacency.m └── python_learning ├── classify_gi_by_pidx_and_split.py ├── descGen.py ├── tfr_gen.py ├── train_mincv_perloss6890_256.py └── train_softmax6890.py /README.md: -------------------------------------------------------------------------------- 1 | # LPS 2 | 3 | This code implements a deep learning method on **Local Point Signature** for 3D surface shape matching described in our CVPR 2019 paper: 4 | 5 | ["A Robust Local Spectral Descriptor for Matching Non-Rigid Shapes with Incompatible Shape Structures"](http://openaccess.thecvf.com/content_CVPR_2019/html/Wang_A_Robust_Local_Spectral_Descriptor_for_Matching_Non-Rigid_Shapes_With_CVPR_2019_paper.html) 6 | 7 | by Yiqun Wang, Jianwei Guo, Dong-Ming Yan, Kai Wang, Xiaopeng Zhang. 8 | 9 | [Project Page](http://www.nlpr.ia.ac.cn/ivc/project/specmathcing/) 10 | 11 | Please consider citing the above paper if this code/program (or part of it) benefits your project. 12 | 13 | 14 | ## Usage 15 | 16 | There are three folders here. The "cpp" calls the "matlab" for GI generation. 
The "python" is used for network training and testing. 17 | 18 | The usage is as follows: 19 | 20 | 1. Compile matlab project: 21 | 22 | MCC matlab "mcc -W cpplib:libcompcur -T link:lib compute_curvature.m". This produces the libcompcur.dll that will be added to the CPP project. 23 | 24 | 25 | 2. Build cpp solution: this code is to generate geometry images. You can run this step on your local desktop. 26 | 27 | Modify CMakeLists: 28 | add include_directories and link_directories for openmesh and matlab runtime 29 | Run CMake 30 | Build solution 31 | Modify config.ini for mesh_dir(directory of OFF models) gi_dir(directory of geometry images) and kpi_dir(directory of key points, you can skip it for dense matching) 32 | edit other parameters such as gi_size(NxN of gi), rotation_num and radius_list_p(the ratio of geodesic diameter). 33 | Add "libcompcur.dll" to folder with GIGen.exe 34 | Run "GIGen.exe config.ini" to generate GI 35 | 36 | 37 | 3. Python project: this code is to train and test the network. You should copy the geometry images generated in the last step into the server. 38 | 39 | 3.1 Train network: 40 | 41 | run "classify_gi_by_pidx_and_split.py" to classify the dataset by point index for generating Tfrecord 42 | source_dir is the folder of geometry images(rotation_num=12) for training, destination_dir is the generated folder after splitting 43 | run "tfr_gen.py" to generate Tfrecord 44 | gi_dir is the same as destination_dir, tfr_dir is the generated folder of Tfrecord. 45 | run "train_softmax6890.py" to pretrain a classification network 46 | tfr_dir needs to be specified 47 | run "train_mincv_perloss6890_256.py" to train the triplet network by restoring a pre-trained classification model 48 | tfr_dir needs to be specified 49 | 50 | 3.2 Test to generate descriptor: 51 | 52 | run "descGen.py" to generate descriptor using the test dataset 53 | gi_dir is the folder of geometry images(rotation_num=1) for testing, desc_dir is the generated folder of descriptor. 
54 | 55 | 56 | ## License 57 | 58 | This program is free software; you can redistribute it and/or modify it under the terms of the 59 | GNU General Public License as published by the Free Software Foundation; either version 2 of 60 | the License, or (at your option) any later version. 61 | -------------------------------------------------------------------------------- /cpp_GI_generation/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.6) 2 | 3 | project(GIGen) 4 | 5 | # Note, replace these directories with the correct paths on your system 6 | include_directories( ${CMAKE_CURRENT_LIST_DIR}\\include ) 7 | include_directories( E:\\Program\ Files\\OpenMesh\ 7.1\\include ) # OpenMesh head files directory. 8 | include_directories( E:\\Program\ Files\\MATLAB\\R2016a\\extern\\include ) # MATLAB Runtime head files directory. 9 | 10 | link_directories( ${CMAKE_CURRENT_LIST_DIR}\\lib ) 11 | link_directories( E:\\Program\ Files\\OpenMesh\ 7.1\\lib ) # OpenMesh lib files directory. 12 | link_directories( E:\\Program\ Files\\MATLAB\\R2016a\\extern\\lib\\win64\\microsoft ) # MATLAB Runtime lib files directory. 
13 | 14 | 15 | aux_source_directory( ${CMAKE_CURRENT_LIST_DIR}/src src ) 16 | 17 | add_executable( ${PROJECT_NAME} ${src} ) 18 | 19 | set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT ${PROJECT_NAME}) 20 | 21 | 22 | target_link_libraries( ${PROJECT_NAME} debug OpenMeshCored ) 23 | target_link_libraries( ${PROJECT_NAME} debug OpenMeshToolsd ) 24 | 25 | target_link_libraries( ${PROJECT_NAME} optimized OpenMeshCore ) 26 | target_link_libraries( ${PROJECT_NAME} optimized OpenMeshTools ) 27 | 28 | target_link_libraries( ${PROJECT_NAME} libeng ) 29 | target_link_libraries( ${PROJECT_NAME} libmx ) 30 | target_link_libraries( ${PROJECT_NAME} libmat ) 31 | target_link_libraries( ${PROJECT_NAME} mclmcr ) 32 | target_link_libraries( ${PROJECT_NAME} mclmcrrt ) 33 | target_link_libraries( ${PROJECT_NAME} libcompcur ) 34 | 35 | 36 | add_definitions( -D_USE_MATH_DEFINES ) 37 | 38 | # set(CMAKE_CXX_FLAGS_DEBUG "-O0" ) 39 | # set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG " ) -------------------------------------------------------------------------------- /cpp_GI_generation/config.ini: -------------------------------------------------------------------------------- 1 | ; Assuming that all meshes in the mesh_dir have the same topology. 
2 | [dirs] 3 | mesh_dir=E:\\Academic_yqW\\Projects\\LPS\\3ddata\\faust_all 4 | gi_dir=F:\\Academic_Data\\Projects\\LPS\\3ddata\\faust_all_cb_32 5 | kpi_dir=E:\\Academic_yqW\\Projects\\LPS\\keypoints_faust_all.kpi 6 | 7 | [settings] 8 | gi_size=8 9 | hks_len=16 10 | rotation_num=12 11 | radius_list_p={0.035} 12 | ;radius_list_p={0.021, 0.035} 13 | ; radius_list_p={0.021, 0.028, 0.035} 14 | ; radius_list=default 15 | 16 | using_all_points=false 17 | ; if using_all_points == false, read kpi from kpi_dir -------------------------------------------------------------------------------- /cpp_GI_generation/include/GI.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is MODIFIED from a part of the 3D descriptor 3 | * learning framework by Hanyu Wang(王涵玉) 4 | * https://github.com/jianweiguo/local3Ddescriptorlearning 5 | * 6 | * Author: Yiqun Wang(王逸群) 7 | * https://github.com/yiqun-wang/LPS 8 | ************************************************************/ 9 | 10 | #ifndef GI_H 11 | #define GI_H 12 | 13 | #include 14 | #include 15 | #include "GPC.h" 16 | #include "Vector3.h" 17 | #include "utils.h" 18 | 19 | namespace GIGen 20 | { 21 | //template 22 | typedef MeshOM Mesh; 23 | class GI 24 | { 25 | private: 26 | const GPC &gpc; 27 | const int gi_size, rotation_num, used_attr_num; 28 | std::vector>>> geo_img_all_rots; // Geometry images of all rotation angles. 
29 | 30 | 31 | inline double triangle_area(const Point_2& p1, const Point_2& p2, const Point_2& p3) 32 | { 33 | double ax = p2.x() - p1.x(); 34 | double ay = p2.y() - p1.y(); 35 | double bx = p3.x() - p1.x(); 36 | double by = p3.y() - p1.y(); 37 | return fabs(0.5 * (ax * by - ay * bx)); 38 | } 39 | 40 | 41 | bool append_features(const std::vector>& sampling_points, const unsigned int &gi_idx, bool with_hks=false) 42 | { 43 | auto &gi = geo_img_all_rots[gi_idx]; 44 | 45 | for (int r = 0; r < gi_size; r++) 46 | { 47 | for (int c = 0; c < gi_size; c++) 48 | { 49 | auto &this_point = sampling_points[r][c]; 50 | auto idx_vec = gpc.find_triangle_vertices(this_point); 51 | 52 | auto &&p0 = gpc.access_vertex_by_mesh_idx(idx_vec[0]); 53 | auto &&p1 = gpc.access_vertex_by_mesh_idx(idx_vec[1]); 54 | auto &&p2 = gpc.access_vertex_by_mesh_idx(idx_vec[2]); 55 | 56 | double s0 = triangle_area(this_point, p1, p2); 57 | double s1 = triangle_area(p0, this_point, p2); 58 | double s2 = triangle_area(p0, p1, this_point); 59 | double s = s0 + s1 + s2; 60 | 61 | double p0_c_max, p1_c_max, p2_c_max; 62 | double p0_c_min, p1_c_min, p2_c_min; 63 | Point p0_normal, p1_normal, p2_normal; 64 | Point p0_resp, p1_resp, p2_resp; 65 | Point p0_point, p1_point, p2_point; 66 | 67 | std::vector p0_le, p1_le, p2_le; 68 | p0.get_attr("le", p0_le); 69 | p1.get_attr("le", p1_le); 70 | p2.get_attr("le", p2_le); 71 | std::vector p0_cf, p1_cf, p2_cf; 72 | p0.get_attr("cf", p0_cf); 73 | p1.get_attr("cf", p1_cf); 74 | p2.get_attr("cf", p2_cf); 75 | 76 | for (int i = 0; i < p0_cf.size(); i++) 77 | { 78 | gi[r][c].emplace_back((p0_cf[i] * s0 + p1_cf[i] * s1 + p2_cf[i] * s2) / s); 79 | } 80 | 81 | with_hks = false; 82 | if (with_hks) 83 | { 84 | std::vector p0_hks, p1_hks, p2_hks; 85 | p0.get_attr("hks", p0_hks); 86 | p1.get_attr("hks", p1_hks); 87 | p2.get_attr("hks", p2_hks); 88 | 89 | for (int i = 0; i < p0_hks.size(); i++) 90 | { 91 | gi[r][c].emplace_back((p0_hks[i] * s0 + p1_hks[i] * s1 + p2_hks[i] * s2) / 
s); 92 | } 93 | } 94 | 95 | } 96 | } 97 | 98 | return true; 99 | 100 | } 101 | 102 | 103 | 104 | public: 105 | 106 | GI(const GPC &gpc, const std::vector &max_radius, const int &gi_size, const int &rotation_num, const int &used_attr_num = 5) : 107 | gpc(gpc), 108 | geo_img_all_rots(rotation_num, std::vector>>(gi_size, std::vector>(gi_size))), 109 | gi_size(gi_size), rotation_num(rotation_num), used_attr_num(used_attr_num) 110 | { 111 | if (!gpc.point_num()) 112 | return; 113 | 114 | // Initialization of the geometry image; 115 | for (auto &gi : this->geo_img_all_rots) 116 | { 117 | for (auto &c : gi) 118 | { 119 | for (auto &p : c) 120 | { 121 | p.reserve(used_attr_num * max_radius.size()); 122 | } 123 | } 124 | } 125 | 126 | 127 | double start_x = -sqrt(2) / 2 + sqrt(2) / (2 * double(gi_size)); 128 | double start_y = sqrt(2) / 2 - sqrt(2) / (2 * double(gi_size)); 129 | double delta = sqrt(2) / (double(gi_size)); 130 | 131 | 132 | //Sampling points 133 | std::vector> generic_sampling_points(gi_size, std::vector(gi_size)); 134 | for (int r = 0; r < gi_size; r++) 135 | { 136 | for (int c = 0; c < gi_size; c++) 137 | { 138 | generic_sampling_points[r][c] = Point_2(start_x + c * delta, start_y - r * delta); 139 | } 140 | } 141 | 142 | 143 | 144 | double rotation_rad = 2 * M_PI / rotation_num; 145 | for (unsigned int i = 0; i < rotation_num; i++) 146 | { 147 | double rad = rotation_rad * i; 148 | 149 | for (double radius : max_radius) 150 | { 151 | std::vector> sampling_points = generic_sampling_points; 152 | 153 | for (auto& row : sampling_points) 154 | { 155 | for (auto& point : row) 156 | { 157 | double x = point.x() * cos(rad) - point.y() * sin(rad); // Rotate the sampling points. 158 | double y = point.x() * sin(rad) + point.y() * cos(rad); 159 | point = Point_2(x * radius, y * radius); // Scale the sampling points to fit parameterization radius. 
160 | } 161 | } 162 | 163 | 164 | if (radius == max_radius[max_radius.size() - 1]) 165 | { 166 | this->append_features(sampling_points, i, true); 167 | } 168 | else 169 | { 170 | this->append_features(sampling_points, i, false); 171 | } 172 | } 173 | 174 | 175 | } 176 | } 177 | 178 | 179 | bool save_all(const std::string &geo_img_dir, const std::string& name_prefix) const 180 | { 181 | return this->save_all(Dir(geo_img_dir), name_prefix); 182 | 183 | } 184 | 185 | bool save_all(const Dir &geo_img_dir, const std::string& name_prefix) const 186 | { 187 | for (unsigned int i = 0; i < this->rotation_num; i++) 188 | { 189 | auto geo_img_path = geo_img_dir + name_prefix + "_rot_" + to_string_f("%02d", i) + ".gi"; 190 | 191 | std::ofstream out(geo_img_path); 192 | 193 | int count = 100; 194 | while (!out && count > 0) 195 | { 196 | std::this_thread::sleep_for(std::chrono::milliseconds(100)); 197 | out = std::ofstream(geo_img_path); 198 | count--; 199 | } 200 | if (!out) return false; 201 | 202 | for (int ch = 0; ch < geo_img_all_rots[i][0][0].size(); ch++) 203 | { 204 | for (auto &r_vec : geo_img_all_rots[i]) 205 | { 206 | for (auto &val : r_vec) 207 | out << std::fixed << val[ch] << " "; 208 | out << std::endl; 209 | } 210 | out << std::endl; 211 | } 212 | 213 | out.close(); 214 | } 215 | return true; 216 | } 217 | 218 | bool save_all_rotation_in_one(const std::string &geo_img_dir, const std::string& name_prefix) const 219 | { 220 | auto geo_img_path = geo_img_dir + name_prefix + ".gi"; 221 | 222 | std::ofstream out(geo_img_path); 223 | int count = 100; 224 | while (!out && count > 0) 225 | { 226 | std::this_thread::sleep_for(std::chrono::milliseconds(100)); 227 | out = std::ofstream(geo_img_path); 228 | count--; 229 | } 230 | if (!out) return false; 231 | 232 | 233 | // 12*8*8*32-> 12*2*32*32 234 | for (unsigned int i = 0; i < this->rotation_num; i++) 235 | { 236 | int num = geo_img_all_rots[i][0][0].size() / 4; 237 | for (int ch = 0; ch < num; ch++) 238 | { 239 | for 
(auto &r_vec : geo_img_all_rots[i]) 240 | { 241 | for (unsigned int n = 0; n < 4; n++) 242 | { 243 | for (auto &val : r_vec) 244 | out << std::fixed << val[4*ch+n] << " "; 245 | } 246 | out << std::endl; 247 | } 248 | if ((ch+1) % 4 == 0) { out << std::endl; } 249 | } 250 | 251 | out << std::endl << std::endl; 252 | 253 | } 254 | 255 | out.close(); 256 | return true; 257 | } 258 | 259 | 260 | }; 261 | 262 | 263 | }; // End namespace GIGen 264 | 265 | 266 | #endif // !GI_H 267 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/GPC.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is MODIFIED from a part of the 3D descriptor 3 | * learning framework by Hanyu Wang(王涵玉) 4 | * https://github.com/jianweiguo/local3Ddescriptorlearning 5 | * 6 | * Author: Yiqun Wang(王逸群) 7 | * https://github.com/yiqun-wang/LPS 8 | ************************************************************/ 9 | 10 | #ifndef GPC_H 11 | #define GPC_H 12 | 13 | #include 14 | #include 15 | #include "Generator.h" 16 | #include "Mesh_C.h" 17 | #include "Vector3.h" 18 | #include "nanoflann.hpp" 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | 27 | namespace GIGen 28 | { 29 | using Point = Vector3; 30 | 31 | // Basic 2D point, default initialized by rectangular coordinate. 
32 | class Point_2 33 | { 34 | private: 35 | double x_, y_, r_, theta_; 36 | 37 | public: 38 | Point_2(const double &coor_0, const double &coor_1, const bool &use_polar_coor = false) // (x, y) or (r, theta) 39 | { 40 | if (!use_polar_coor) 41 | { 42 | x_ = coor_0; 43 | y_ = coor_1; 44 | r_ = std::sqrt(coor_0 * coor_0 + coor_1 * coor_1); 45 | theta_ = std::atan2(coor_1, coor_0); 46 | } 47 | else 48 | { 49 | x_ = coor_0 * std::cos(coor_1); 50 | y_ = coor_0 * std::sin(coor_1); 51 | r_ = coor_0; 52 | theta_ = coor_1; 53 | } 54 | } 55 | 56 | Point_2() {} 57 | 58 | double x() const 59 | { 60 | return x_; 61 | } 62 | 63 | double y() const 64 | { 65 | return y_; 66 | } 67 | 68 | double r() const 69 | { 70 | return r_; 71 | } 72 | 73 | double theta() const 74 | { 75 | return theta_; 76 | } 77 | }; 78 | 79 | 80 | // 2D Point with attributes, default initialized by polar coordinate. 81 | class Attr_Point_2 : public Point_2 82 | { 83 | private: 84 | const unsigned int idx_; 85 | 86 | std::map scalar_attr_; 87 | std::map vec3_attr_; 88 | std::map> stdvec_attr_; 89 | public: 90 | Attr_Point_2(const unsigned int &idx_, const double &coor_0, const double &coor_1, const bool &use_polar_coor = true) : 91 | idx_(idx_), Point_2(coor_0, coor_1, use_polar_coor) {} 92 | 93 | 94 | unsigned int idx() const 95 | { 96 | return this->idx_; 97 | } 98 | 99 | auto scalar_attr() const 100 | { 101 | return this->scalar_attr_; 102 | } 103 | 104 | auto vec3_attr() const 105 | { 106 | return this->vec3_attr_; 107 | } 108 | 109 | auto stdvec_attr() const 110 | { 111 | return this->stdvec_attr_; 112 | } 113 | 114 | //int attr_vec_length() const 115 | //{ 116 | // return scalar_attr_.size() + vec3_attr_.size() * 3; 117 | //} 118 | 119 | void get_attr(const std::string &attr_name, double &attr_val) const 120 | { 121 | attr_val = this->scalar_attr_.at(attr_name); 122 | } 123 | 124 | void get_attr(const std::string &attr_name, Point &attr_val) const 125 | { 126 | attr_val = this->vec3_attr_.at(attr_name); 
127 | } 128 | 129 | void get_attr(const std::string &attr_name, std::vector &attr_val) const 130 | { 131 | attr_val = this->stdvec_attr_.at(attr_name); 132 | } 133 | 134 | void set_attr(const std::string &attr_name, const double &attr_val) 135 | { 136 | this->scalar_attr_[attr_name] = attr_val; 137 | } 138 | 139 | void set_attr(const std::string &attr_name, const Point &attr_val) 140 | { 141 | this->vec3_attr_[attr_name] = attr_val; 142 | } 143 | 144 | void set_attr(const std::string &attr_name, const std::vector &attr_val) 145 | { 146 | this->stdvec_attr_[attr_name] = attr_val; 147 | } 148 | 149 | std::string to_string_all() const 150 | { 151 | std::ostringstream oss; 152 | oss << "idx: " << std::setw(5) << this->idx_ << std::setprecision(6) 153 | << "\tr: " << this->r() << "\ttheta: " << this->theta() 154 | << "\tx: " << this->x() << "\ty: " << this->y(); 155 | 156 | oss << std::endl; 157 | 158 | for (auto &p : this->scalar_attr_) 159 | { 160 | oss << "\t" << p.first << ": " << p.second; 161 | } 162 | 163 | oss << std::endl; 164 | 165 | for (auto &p : this->vec3_attr_) 166 | { 167 | oss << "\t" << p.first << ": " << p.second.to_string() << std::endl; 168 | } 169 | 170 | for (auto &p : this->stdvec_attr_) 171 | { 172 | oss << "\t" << p.first << ": " << std::endl << "\t"; 173 | for (auto &item : p.second) 174 | { 175 | oss << item << ", "; 176 | } 177 | } 178 | 179 | oss << std::endl << "--------------------------------------------------------------------"; 180 | 181 | return oss.str(); 182 | } 183 | 184 | // Overload operator== for Attr_Point_2. 
185 | bool operator==(const Attr_Point_2 &p) const 186 | { 187 | return this->idx() == p.idx() && this->x() == p.r() && this->y() == p.theta(); 188 | } 189 | 190 | 191 | }; 192 | 193 | 194 | //Overload operator<< for Attr_Point_2 195 | ostream& operator<<(ostream& out, const Attr_Point_2 &p) 196 | { 197 | std::cout.setf(ios::fixed); 198 | out << "idx: " << std::setw(5) << p.idx() << std::setprecision(6) 199 | << "\tr: " << p.r() << "\ttheta: " << p.theta() 200 | << "\tx: " << p.x() << "\ty: " << p.y(); 201 | return out; 202 | } 203 | 204 | 205 | //// Hash functor for Attr_Point_2 206 | //struct hash_Attr_Point 207 | //{ 208 | // size_t operator()(const Attr_Point_2 &p) const 209 | // { 210 | // return std::hash()(p.idx()) ^ std::hash()(p.r()) ^ std::hash()(p.theta()); 211 | // } 212 | //}; 213 | 214 | 215 | 216 | class Attr_Point_Set : public std::vector 217 | { 218 | private: 219 | std::map mesh_idx2aps_idx; 220 | 221 | 222 | public: 223 | auto emplace_back(Attr_Point_2 &ap) 224 | { 225 | this->mesh_idx2aps_idx[ap.idx()] = std::vector::size(); 226 | return std::vector::emplace_back(ap); 227 | } 228 | 229 | auto &access_by_mesh_idx(const unsigned int &idx) 230 | { 231 | return (*this)[mesh_idx2aps_idx[idx]]; 232 | } 233 | 234 | auto &const_access_by_mesh_idx(const unsigned int &idx) const 235 | { 236 | return (*this)[mesh_idx2aps_idx.at(idx)]; 237 | } 238 | 239 | auto count(const unsigned int &key) const 240 | { 241 | return mesh_idx2aps_idx.count(key); 242 | } 243 | 244 | auto clear() 245 | { 246 | mesh_idx2aps_idx.clear(); 247 | return std::vector::clear(); 248 | } 249 | 250 | }; 251 | 252 | 253 | // 2D PointCloud for kd_tree 254 | template 255 | struct PointCloud_2 256 | { 257 | struct PC_2_Point 258 | { 259 | T x, y; 260 | }; 261 | 262 | std::vector pts; 263 | std::vector> tri_vertices_idx; 264 | 265 | // Must return the number of data points 266 | inline unsigned int kdtree_get_point_count() const { return pts.size(); } 267 | 268 | // Returns the dim'th 
component of the idx'th point in the class: 269 | // Since this is inlined and the "dim" argument is typically an immediate value, the 270 | // "if/else's" are actually solved at compile time. 271 | inline T kdtree_get_pt(const unsigned int idx, int dim) const 272 | { 273 | if (dim == 0) return pts[idx].x; 274 | else return pts[idx].y; 275 | } 276 | 277 | inline std::vector kdtree_get_tri_vertices(const unsigned int &idx) const 278 | { 279 | return tri_vertices_idx.at(idx); 280 | } 281 | 282 | // Optional bounding-box computation: return false to default to a standard bbox computation loop. 283 | // Return true if the BBOX was already computed by the class and returned in "bb" so it can be avoided to redo it again. 284 | // Look at bb.size() to find out the expected dimensionality (e.g. 2 or 3 for point clouds) 285 | template 286 | bool kdtree_get_bbox(BBOX& /* bb */) const { return false; } 287 | 288 | inline void add_point(const T &x, const T &y, const unsigned int &p0_idx, const unsigned int &p1_idx, const unsigned int &p2_idx) 289 | { 290 | pts.emplace_back(PC_2_Point{ x, y }); 291 | tri_vertices_idx.emplace_back(std::vector{p0_idx, p1_idx, p2_idx}); 292 | } 293 | 294 | }; 295 | 296 | 297 | 298 | using namespace nanoflann; 299 | 300 | // GPC computation class. 301 | template 302 | class GPC 303 | { 304 | 305 | private: 306 | Mesh &mesh; 307 | const unsigned int source_idx; 308 | const double computation_max_radius; 309 | const std::vector computation_radius_list; 310 | 311 | Generator gpc_gen; 312 | Attr_Point_Set all_computed_points; // Info of parameterized 2D points in the local patch. 
313 | PointCloud_2 centroids; // Centroids 314 | Point mean_normal, mean_u_max; 315 | double mean_c_max, mean_c_min; 316 | 317 | 318 | // construct a kd-tree index: 319 | typedef KDTreeSingleIndexAdaptor< 320 | L2_Simple_Adaptor >, 321 | PointCloud_2, 322 | 2 /* dim */ 323 | > my_kd_tree_t; 324 | 325 | //my_kd_tree_t *index; 326 | 327 | std::shared_ptr index; 328 | 329 | 330 | // Compute the mean normal and mean u_max of all points in this GPC. 331 | void compute_mean_vals() 332 | { 333 | if (this->all_computed_points.size()) 334 | { 335 | 336 | Point normal_accumelater(0, 0, 0), u_max_accumelater(0, 0, 0); 337 | 338 | for (auto &p : this->all_computed_points) 339 | { 340 | Point normal_p, u_max_p; 341 | 342 | p.get_attr("normal", normal_p); 343 | normal_accumelater = normal_accumelater + normal_p; 344 | 345 | p.get_attr("u_max", u_max_p); 346 | u_max_accumelater = u_max_accumelater + u_max_p; 347 | } 348 | 349 | this->mean_normal = normal_accumelater / this->all_computed_points.size(); 350 | this->mean_u_max = u_max_accumelater / this->all_computed_points.size(); 351 | 352 | this->mean_normal = this->mean_normal / this->mean_normal.length(); 353 | this->mean_u_max = this->mean_u_max / this->mean_u_max.length(); 354 | } 355 | else 356 | { 357 | this->mean_normal = this->mean_u_max = Point(0, 0, 0); 358 | } 359 | } 360 | 361 | void compute_mean_curvature() 362 | { 363 | if (this->all_computed_points.size()) 364 | { 365 | 366 | double c_max_accumelater(0), c_min_accumelater(0); 367 | 368 | for (auto &p : this->all_computed_points) 369 | { 370 | double c_max_p, c_min_p; 371 | 372 | p.get_attr("c_max", c_max_p); 373 | c_max_accumelater += c_max_p; 374 | 375 | p.get_attr("c_min", c_min_p); 376 | c_min_accumelater += c_min_p; 377 | } 378 | 379 | this->mean_c_max = c_max_accumelater / this->all_computed_points.size(); 380 | this->mean_c_min = c_min_accumelater / this->all_computed_points.size(); 381 | 382 | } 383 | else 384 | { 385 | this->mean_c_max = this->mean_c_min = 
0; 386 | } 387 | } 388 | 389 | inline auto append_centroid(const unsigned int &p0_idx, const unsigned int &p1_idx, const unsigned int &p2_idx) 390 | { 391 | auto p0 = all_computed_points.access_by_mesh_idx(p0_idx); 392 | auto p1 = all_computed_points.access_by_mesh_idx(p1_idx); 393 | auto p2 = all_computed_points.access_by_mesh_idx(p2_idx); 394 | 395 | auto &¢roid_x = (p0.x() + p1.x() + p2.x()) / 3; 396 | auto &¢roid_y = (p0.y() + p1.y() + p2.y()) / 3; 397 | 398 | centroids.add_point(centroid_x, centroid_y, p0_idx, p1_idx, p2_idx); 399 | } 400 | 401 | 402 | public: 403 | GPC(Mesh &mesh_, const unsigned int &source_idx_, const double &computation_max_radius_, const std::vector &computation_radius_list_) : 404 | mesh(mesh_), source_idx(source_idx_), computation_max_radius(computation_max_radius_), computation_radius_list(computation_radius_list_), gpc_gen(mesh) 405 | { 406 | 407 | } 408 | 409 | 410 | //~GPC() 411 | //{ 412 | // delete index; 413 | //} 414 | 415 | void test() 416 | { 417 | std::cout << "i r theta" << std::endl; 418 | std::cout << "-------------------" << std::endl; 419 | 420 | auto INF = (std::numeric_limits::max)(); 421 | 422 | for (int i = 0; i < mesh.n_vertices(); i++) 423 | { 424 | const double r = gpc_gen.getDistance(i); 425 | if (r < INF) 426 | { 427 | const double theta = gpc_gen.getAngle(i); 428 | std::cout << i << " " << r << " " << theta << std::endl; 429 | } 430 | } 431 | } 432 | 433 | void compute_cf(int number, string base) 434 | { 435 | auto &vphs = this->mesh.get_vphs(); 436 | 437 | int mul = 1; 438 | Mesh patch = mesh; 439 | OpenMesh::VertexHandle vertex(source_idx); 440 | gpc_gen.setStopDist(computation_max_radius * 2.0); 441 | gpc_gen.setNodeSource(source_idx); 442 | gpc_gen.run(); 443 | for (size_t j = 0; j < mesh.n_vertices(); j++) 444 | { 445 | OpenMesh::VertexHandle vertex_del(j); 446 | const double r = gpc_gen.getDistance(j); 447 | if (r > computation_max_radius * 1.5 ) 448 | { 449 | patch.delete_vertex(vertex_del, true); 450 | 
for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 451 | { 452 | patch.delete_face(*vf_it, true); 453 | } 454 | } 455 | } 456 | patch.garbage_collection(); 457 | std::cout << patch.n_vertices() << " "; 458 | 459 | mwArray cf = patch.compute_vertex_cf(mul); 460 | 461 | size_t cf_len = 3 * mesh.get_le_len() + 1; 462 | mesh.property(vphs.cf, vertex).resize(2 * cf_len); 463 | for (size_t i = 0; i < mul * cf_len; i++) 464 | { 465 | mesh.property(vphs.cf, vertex)[i] = cf(1, i + 1); 466 | } 467 | 468 | //////////////////add 469 | double rlarge = computation_max_radius * 2.0; // note: semicolon was missing (cf. the same statement in compute_cf_45 / compute_cf_90) 470 | double rsmall = computation_max_radius; 471 | patch = mesh; 472 | for (size_t j = 0; j < mesh.n_vertices(); j++) 473 | { 474 | OpenMesh::VertexHandle vertex_del(j); 475 | const double r = gpc_gen.getDistance(j); 476 | if (r > rlarge) 477 | { 478 | patch.delete_vertex(vertex_del, true); 479 | for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 480 | { 481 | patch.delete_face(*vf_it, true); 482 | } 483 | } 484 | } 485 | patch.garbage_collection(); 486 | std::cout << patch.n_vertices() << " "; 487 | mwArray cf2 = patch.compute_vertex_cf(mul); 488 | 489 | patch = mesh; 490 | for (size_t i = 0; i < mul * cf_len; i++) 491 | { 492 | mesh.property(vphs.cf, vertex)[i + cf_len] = cf2(1, i + 1); 493 | } 494 | 495 | } 496 | 497 | void compute_cf_45(int number, string base) 498 | { 499 | auto &vphs = this->mesh.get_vphs(); 500 | 501 | int mul = 1; 502 | Mesh patch = mesh; //add 503 | OpenMesh::VertexHandle vertex(source_idx); 504 | gpc_gen.setStopDist(computation_max_radius * 3.0); 505 | gpc_gen.setNodeSource(source_idx); 506 | gpc_gen.run(); 507 | for (size_t j = 0; j < mesh.n_vertices(); j++) 508 | { 509 | OpenMesh::VertexHandle vertex_del(j); 510 | const double r = gpc_gen.getDistance(j); 511 | if (r > computation_max_radius * 3.0) 512 | { 513 | patch.delete_vertex(vertex_del, true); 514 | for (Mesh::VertexFaceIter vf_it = 
// --- Review notes (code kept byte-identical; "NNN |" markers are dump artifacts) ---
// Tail of a compute_cf-style method plus compute_cf_90: each carves geodesic sub-patches
// around source_idx at radii 3r / 2r / 1r (r = computation_max_radius) by deleting all
// vertices/faces beyond the radius, then stores bands of spectral coefficients returned by
// patch.compute_vertex_cf(mul) into the per-vertex "cf" property.
// NOTE(review): in the first method cf_len = 3 * le_len + 1 and the property is resized to
// 3*cf_len, yet the second and third copy loops index up to (mul*cf_len - 1) + 2*cf_len =
// 5*cf_len - 1 with mul == 3 -- apparent out-of-bounds write. compute_cf_90 below resizes to
// (1+2+3)*cf_len and decrements mul before each band, which is self-consistent; TODO confirm
// the first method should do the same.
// NOTE(review): debug mesh dumps are hard-wired to number == 4478 ("outputxyz.off") and
// number == 456 ("outputxyz3.off"), while the error text says 'output.off' -- mismatch.
patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 515 | { 516 | patch.delete_face(*vf_it, true); 517 | } 518 | } 519 | } 520 | patch.garbage_collection(); 521 | // write mesh to output.obj 522 | if (number == 4478) 523 | { 524 | try { 525 | if (!OpenMesh::IO::write_mesh(patch, "outputxyz.off")) { //"+ std::to_string(number)+" " + base + " 526 | std::cerr << "Cannot write mesh to file 'output.off'" << std::endl; 527 | } 528 | } 529 | catch (std::exception& x) 530 | { 531 | std::cerr << x.what() << std::endl; 532 | } 533 | } 534 | std::cout << patch.n_vertices() << " "; 535 | mwArray cf = patch.compute_vertex_cf(mul); 536 | 537 | size_t cf_len = 3 * mesh.get_le_len() +1; 538 | mesh.property(vphs.cf, vertex).resize(3 * cf_len); 539 | for (size_t i = 0; i < mul * cf_len; i++) 540 | { 541 | mesh.property(vphs.cf, vertex)[i] = cf(1, i + 1); 542 | } 543 | 544 | ////////////////add 545 | double rlarge = computation_max_radius * 2.0; 546 | double rsmall = computation_max_radius; 547 | patch = mesh; 548 | for (size_t j = 0; j < mesh.n_vertices(); j++) 549 | { 550 | OpenMesh::VertexHandle vertex_del(j); 551 | const double r = gpc_gen.getDistance(j); 552 | if (r > rlarge) 553 | { 554 | patch.delete_vertex(vertex_del, true); 555 | for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 556 | { 557 | patch.delete_face(*vf_it, true); 558 | } 559 | } 560 | } 561 | patch.garbage_collection(); 562 | std::cout << patch.n_vertices() << " "; 563 | mwArray cf2 = patch.compute_vertex_cf(mul); 564 | 565 | patch = mesh; 566 | for (size_t i = 0; i < mul * cf_len; i++) 567 | { 568 | mesh.property(vphs.cf, vertex)[i + cf_len] = cf2(1, i + 1); 569 | } 570 | 571 | //////////////////////////////////// 572 | for (size_t j = 0; j < mesh.n_vertices(); j++) 573 | { 574 | OpenMesh::VertexHandle vertex_del(j); 575 | const double r = gpc_gen.getDistance(j); 576 | if (r > rsmall) 577 | { 578 | patch.delete_vertex(vertex_del, true); 579 | for (Mesh::VertexFaceIter
vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 580 | { 581 | patch.delete_face(*vf_it, true); 582 | } 583 | } 584 | } 585 | patch.garbage_collection(); 586 | std::cout << patch.n_vertices() << " "; 587 | mwArray cf3 = patch.compute_vertex_cf(mul); 588 | for (size_t i = 0; i < mul * cf_len; i++) 589 | { 590 | mesh.property(vphs.cf, vertex)[i + 2 * cf_len] = cf3(1, i + 1); 591 | } 592 | } 593 |
// compute_cf_90: same three-band extraction, but cf_len = 3 * le_len (no +1), the buffer is
// sized (1+2+3)*cf_len up front, and mul is decremented (3, 2, 1) before each smaller-radius
// band; band offsets are 0, (mul+1)*cf_len and (2*mul+3)*cf_len respectively.
594 | void compute_cf_90(int number, string base) 595 | { 596 | auto &vphs = this->mesh.get_vphs(); 597 | 598 | int mul = 3; 599 | Mesh patch = mesh; //add 600 | OpenMesh::VertexHandle vertex(source_idx); 601 | gpc_gen.setStopDist(computation_max_radius * 3.0); 602 | gpc_gen.setNodeSource(source_idx); 603 | gpc_gen.run(); 604 | for (size_t j = 0; j < mesh.n_vertices(); j++) 605 | { 606 | OpenMesh::VertexHandle vertex_del(j); 607 | const double r = gpc_gen.getDistance(j); 608 | if (r > computation_max_radius * 3.0) 609 | { 610 | patch.delete_vertex(vertex_del, true); 611 | for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 612 | { 613 | patch.delete_face(*vf_it, true); 614 | } 615 | } 616 | } 617 | patch.garbage_collection(); 618 | std::cout << patch.n_vertices() << " "; 619 | mwArray cf = patch.compute_vertex_cf(mul); 620 | 621 | 622 | size_t cf_len = 3 * mesh.get_le_len(); 623 | mesh.property(vphs.cf, vertex).resize((1 + 2 + 3) * cf_len); 624 | for (size_t i = 0; i < mul * cf_len; i++) 625 | { 626 | mesh.property(vphs.cf, vertex)[i] = cf(1, i + 1); 627 | } 628 | mul--; 629 | 630 | double rlarge = computation_max_radius * 2.0; 631 | double rsmall = computation_max_radius; 632 | patch = mesh; 633 | for (size_t j = 0; j < mesh.n_vertices(); j++) 634 | { 635 | OpenMesh::VertexHandle vertex_del(j); 636 | const double r = gpc_gen.getDistance(j); 637 | if (r > rlarge) 638 | { 639 | patch.delete_vertex(vertex_del, true); 640 | for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 641 | { 642 |
patch.delete_face(*vf_it, true); 643 | } 644 | } 645 | } 646 | patch.garbage_collection(); 647 | std::cout << patch.n_vertices() << " "; 648 | mwArray cf2 = patch.compute_vertex_cf(mul); 649 | 650 | patch = mesh; 651 | for (size_t i = 0; i < mul * cf_len; i++) 652 | { 653 | mesh.property(vphs.cf, vertex)[i + (mul + 1)*cf_len] = cf2(1, i + 1); 654 | } 655 | mul--; 656 | //////////////////////////////////// 657 | for (size_t j = 0; j < mesh.n_vertices(); j++) 658 | { 659 | OpenMesh::VertexHandle vertex_del(j); 660 | const double r = gpc_gen.getDistance(j); 661 | if (r > rsmall) 662 | { 663 | patch.delete_vertex(vertex_del, true); 664 | for (Mesh::VertexFaceIter vf_it = patch.vf_iter(vertex_del); vf_it.is_valid(); ++vf_it) 665 | { 666 | patch.delete_face(*vf_it, true); 667 | } 668 | } 669 | } 670 | patch.garbage_collection(); 671 | if (number == 456) 672 | { 673 | try { 674 | if (!OpenMesh::IO::write_mesh(patch, "outputxyz3.off")) { 675 | std::cerr << "Cannot write mesh to file 'output.off'" << std::endl; 676 | } 677 | } 678 | catch (std::exception& x) 679 | { 680 | std::cerr << x.what() << std::endl; 681 | } 682 | } 683 | std::cout << patch.n_vertices() << " "; 684 | mwArray cf3 = patch.compute_vertex_cf(mul); 685 | for (size_t i = 0; i < mul * cf_len; i++) 686 | { 687 | mesh.property(vphs.cf, vertex)[i + (2 * mul + 3)*cf_len] = cf3(1, i + 1); 688 | } 689 | } 690 | 691 | void compute_GPC() 692 | { 693 | auto &vphs = this->mesh.get_vphs(); 694 | 695 | gpc_gen.setStopDist(computation_max_radius); 696 | gpc_gen.setNodeSource(source_idx); 697 | gpc_gen.run(); 698 | 699 | for (size_t i = 0; i < mesh.n_vertices(); i++) 700 | { 701 | const double r = gpc_gen.getDistance(i); 702 | OpenMesh::VertexHandle vertex(i); 703 | //if (r < INF) 704 | if (r < computation_max_radius) 705 | { 706 | const double theta = gpc_gen.getAngle(i); 707 | 708 | Attr_Point_2 ap(i, r, theta); 709 | ap.set_attr("u_max", mesh.property(vphs.u_max, vertex)); 710 | ap.set_attr("u_min",
// --- Review notes (code kept byte-identical) ---
// compute_GPC tail: collects every vertex with geodesic distance < computation_max_radius
// into all_computed_points, copying per-vertex attributes (u_max/u_min, c_max/c_min, normal,
// hks, resp, le, cf, point); aligns the patch via auto_rotate; records each triangle whose
// three corners are all inside the patch via append_centroid; then builds a 2-D kd-tree over
// the centroids for later point-to-triangle lookup.
// applied_rotation rotates only "normal" and "u_max" (u_min deliberately ignored per the
// original comment) and recomputes the mean values.
// auto_rotate aligns mean_normal to +Z, then the XY-projection of mean_u_max to +X.
// NOTE(review): if mean_normal is already (anti)parallel to Z, crossProd yields a zero
// rotation axis and the acos argument may fall marginally outside [-1, 1]; presumably
// Rot_mat tolerates this -- TODO confirm.
// find_triangle_vertices returns the vertices of the triangle whose centroid is nearest the
// query point (nanoflann KNN with k = 1) -- an approximation, as the trailing original
// comment itself notes.
mesh.property(vphs.u_min, vertex)); 711 | ap.set_attr("c_max", mesh.property(vphs.c_max, vertex)); 712 | ap.set_attr("c_min", mesh.property(vphs.c_min, vertex)); 713 | ap.set_attr("normal", mesh.property(vphs.normal, vertex)); 714 | ap.set_attr("hks", mesh.property(vphs.hks, vertex)); 715 | ap.set_attr("resp", mesh.property(vphs.resp, vertex)); 716 | ap.set_attr("le", mesh.property(vphs.le, vertex)); 717 | ap.set_attr("cf", mesh.property(vphs.cf, vertex)); 718 | ap.set_attr("point", mesh.property(vphs.point, vertex)); 719 | 720 | all_computed_points.emplace_back(ap); 721 | } 722 | } 723 | 724 | this->compute_mean_vals(); 725 | 726 | this->auto_rotate(); 727 | 728 | this->compute_mean_vals(); 729 | 730 | this->compute_mean_curvature(); 731 | 732 | vector tmp; 733 | tmp.reserve(3); 734 | 735 | for (auto &face : mesh.faces()) 736 | { 737 | tmp.clear(); 738 | bool flag = true; 739 | 740 | for (auto fv_iter = mesh.fv_begin(face); fv_iter.is_valid(); fv_iter++) // Select the faces in the local patch. 741 | { 742 | //cout << fv_iter->idx() << endl; 743 | if (!all_computed_points.count(fv_iter->idx())) 744 | { 745 | flag = false; 746 | break; 747 | } 748 | tmp.emplace_back(fv_iter->idx()); 749 | } 750 | 751 | if (flag) // It's the triangle in the local patch. 752 | { 753 | this->append_centroid(tmp[0], tmp[1], tmp[2]); 754 | } 755 | } 756 | 757 | 758 | this->index = std::make_shared(2 /*dim*/, centroids, KDTreeSingleIndexAdaptorParams(10 /* max leaf */)); 759 | this->index->buildIndex(); 760 | } 761 | 762 | // Rotate the GPC patch according to rotation_mat and recompute mean vals. 763 | void applied_rotation(Rot_mat rotation_mat) 764 | { 765 | for (auto &p : this->all_computed_points) 766 | { 767 | Point normal_p, u_max_p; 768 | 769 | p.get_attr("normal", normal_p); 770 | p.set_attr("normal", rotation_mat * normal_p); 771 | 772 | p.get_attr("u_max", u_max_p); 773 | p.set_attr("u_max", rotation_mat * u_max_p); 774 | 775 | // Ignore u_min.
776 | } 777 | this->compute_mean_vals(); 778 | } 779 | 780 | 781 | // Automatically rotate the GPC patch to align mean_normal to (0, 0, 1), mean_u_max to x-z plane(norm: (0, 1, 0)). 782 | void auto_rotate() 783 | { 784 | Point z_axis(0, 0, 1), x_axis(1, 0, 0); 785 | Point rot_axis; 786 | double theta; 787 | 788 | 789 | rot_axis = this->mean_normal.crossProd(z_axis); 790 | theta = std::acos(this->mean_normal * z_axis / this->mean_normal.length()); 791 | this->applied_rotation(Rot_mat(rot_axis, theta)); 792 | 793 | Point curr_mean_u_max = this->mean_u_max; 794 | Point curr_mean_u_max_xy_proj(curr_mean_u_max.x(), curr_mean_u_max.y(), 0); 795 | Point target_vec_xy_proj(1, 0, 0); 796 | rot_axis = z_axis; 797 | theta = std::acos(curr_mean_u_max_xy_proj * target_vec_xy_proj / curr_mean_u_max_xy_proj.length()); 798 | this->applied_rotation(Rot_mat(rot_axis, theta)); 799 | } 800 | 801 | 802 | // Warning: r_set will be cleared firstly. 803 | void get_patch_vertex_set(Attr_Point_Set &r_set, const double &max_radius) const 804 | { 805 | if (!r_set.empty()) 806 | r_set.clear(); 807 | 808 | for (const auto &p : this->all_computed_points) 809 | { 810 | if (p.r() <= max_radius) 811 | { 812 | Attr_Point_2 p_in = p; 813 | r_set.emplace_back(p_in); 814 | } 815 | } 816 | } 817 | 818 | 819 | const Attr_Point_2 access_vertex_by_mesh_idx(const unsigned int &idx) const 820 | { 821 | const auto &r_val = this->all_computed_points.const_access_by_mesh_idx(idx); 822 | return r_val; 823 | } 824 | 825 | 826 | //inline std::vector& find_triangle_vertices(const Point_2 &p) const 827 | //{ 828 | // return find_triangle_vertices(p.x(), p.y()); 829 | //} 830 | 831 | 832 | // This function is modified from nanoflann/pointcloud_example.cpp 833 | std::vector find_triangle_vertices(const Point_2 &p) const 834 | { 835 | //using namespace nanoflann; 836 | 837 | double query_pt[2] = { p.x(), p.y() }; 838 | 839 | //// construct a kd-tree index: 840 | //typedef KDTreeSingleIndexAdaptor< 841 | //
L2_Simple_Adaptor >, 842 | // PointCloud_2, 843 | // 2 /* dim */ 844 | //> my_kd_tree_t; 845 | 846 | //my_kd_tree_t index(2 /*dim*/, centroids, KDTreeSingleIndexAdaptorParams(10 /* max leaf */)); 847 | //index.buildIndex(); 848 | 849 | const size_t num_results = 1; 850 | size_t ret_index; 851 | double out_dist_sqr; 852 | nanoflann::KNNResultSet resultSet(num_results); 853 | resultSet.init(&ret_index, &out_dist_sqr); 854 | this->index->findNeighbors(resultSet, &query_pt[0], nanoflann::SearchParams(10)); 855 | 856 | return centroids.kdtree_get_tri_vertices(ret_index); // Here we simply assume that the given point (x, y) belongs to the triangle whose centroid is cloest to it. 857 | } 858 | 859 | 860 | 861 | Point get_mean_normal() const 862 | { 863 | return this->mean_normal; 864 | } 865 | 866 | 867 | Point get_mean_u_max() const 868 | { 869 | return this->mean_u_max; 870 | } 871 | 872 | size_t point_num() const 873 | { 874 | return this->all_computed_points.size(); 875 | } 876 | 877 | }; 878 | 879 | }; // End namespace GIGen 880 | 881 | #endif // !GPC_H 882 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/Generator.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is part of the DGPC library. The library computes 3 | * Discrete Geodesic Polar Coordinates on a polygonal mesh.
// --- Review notes (code kept byte-identical) ---
// Generator<Mesh>: front-end of the DGPC (Discrete Geodesic Polar Coordinates) library by
// Melvaer & Reimers. Templated over a halfedge mesh (tested with OpenMesh per the original
// doc comment). Public API: setEps/setStopDist, three source setters (setSource snaps to a
// node within 10e-5 of a face corner, else setFaceSource), run(), and getters for per-node
// polar distance/angle. State: distances_ and angles_ sized to n_vertices, gamma_ (the ring
// of nodes around the source) and a distance-keyed Heap for the propagation front.
4 | * 5 | * More info: 6 | * http://folk.uio.no/eivindlm/dgpc/ 7 | * 8 | * Authors: Eivind Lyche Melvær and Martin Reimers 9 | * Centre of Mathematics and Department of Informatics 10 | * University of Oslo, Norway, 2012 11 | ************************************************************/ 12 | #ifndef DGPC_GENERATOR_H 13 | #define DGPC_GENERATOR_H 14 | 15 | #include "Heap.h" 16 | #include "Mesh_C.h" 17 | 18 | #include 19 | #include 20 | 21 | namespace GIGen { 22 | 23 | /** 24 | * A class to generate DGPC: Discrete Geodesic Polar Coordinates on 25 | * polygonal meshes. The class is templetized with a halfedge mesh 26 | * datastructure, and has been tested with OpenMesh. It should 27 | * probably also work on a CGAL::Polyhedron_3 without too much 28 | * effort. 29 | */ 30 | template 31 | class Generator { 32 | 33 | typedef typename Mesh::point_type Point; 34 | typedef typename Point::value_type real; 35 | 36 | typedef typename Mesh::FaceHandle FaceHandle; 37 | typedef typename Mesh::HalfedgeHandle HalfedgeHandle; 38 | typedef typename Mesh::VertexHandle VertexHandle; 39 | 40 | public: 41 | /** 42 | * Construct a Generator for a Mesh. 43 | */ 44 | Generator(const Mesh& mesh) : 45 | mesh_(mesh) { 46 | eps_ = 1e-12; 47 | stopdist_ = (std::numeric_limits::max)(); 48 | const int n = mesh_.n_vertices(); 49 | distances_.resize(n); 50 | angles_.resize(n); 51 | }; 52 | 53 | /** 54 | * Set epsilon. The algorithm will skip iterations which are not 55 | * significant for accuracy less than the given epsilon. 56 | */ 57 | void setEps(real eps) { eps_ = eps; }; 58 | /** 59 | * Set stop distance, geodesic radius of patch to compute. 60 | */ 61 | void setStopDist(real d) { stopdist_ = d; }; 62 | 63 | /** 64 | * Set source point. The point is assumed to be either one of the 65 | * nodes of face_idx, or on one of the faces. 66 | */ 67 | void setSource(const Point& source, int face_idx); 68 | 69 | /** 70 | * Set source point on node node_idx.
71 | */ 72 | void setNodeSource(int node_idx); 73 | 74 | /** 75 | * Set source on point, which lies on the face face_idx. 76 | */ 77 | void setFaceSource(const Point& point, int face_idx); 78 | 79 | /** 80 | * Start generation of DGPC. When complete, distances and angles 81 | * for the nodes in a geodesic disk with radius stopdist_ will be 82 | * available with getDistance(ni) and getAngle(ni). 83 | */ 84 | int run(); 85 | 86 | /** 87 | * Get Gamma, the smallest ring of nodes connected by edges 88 | * surrounding the source point. These nodes are initialized with 89 | * angles and distance after setSource is called. 90 | */ 91 | const std::vector& getGamma() { return gamma_; }; 92 | 93 | /** 94 | * Get DGPC polar distance for node ni 95 | */ 96 | real getDistance(int ni) { return distances_[ni]; }; 97 | 98 | /** 99 | * Get DGPC polar angle for node ni 100 | */ 101 | real getAngle(int ni) { return angles_[ni]; }; 102 | 103 | /** 104 | * Get DGPC polar distances 105 | */ 106 | const std::vector& getDistances() { return distances_; }; 107 | 108 | /** 109 | * Get DGPC polar angles 110 | */ 111 | const std::vector& getAngles() { return angles_; }; 112 | 113 | 114 | protected: 115 | const Mesh& mesh_; 116 | real eps_; 117 | real stopdist_; 118 | 119 | GIGen::Heap heap_; 120 | 121 | std::vector distances_; 122 | std::vector angles_; 123 | 124 | std::vector gamma_; 125 | 126 | void initialize(); 127 | 128 | real initializeGamma(const Point& point); 129 | 130 | bool tryComputeNodeFromEdge(int node, int edge[2]); 131 | real computeDistance(const Point& pt, int edge[2], real& alpha); 132 | real computeAngle(int node, int edge[2], real alpha); 133 | 134 | }; 135 | 136 | //Implementation of setSource 137 | template 138 | void 139 | Generator::setSource(const Point& point, int face_idx) 140 | { 141 | 142 | const real proximity_threshold = 10e-5; 143 | 144 | //Fetch nodes of the face 145 | std::vector nodes; 146 | FaceHandle face = mesh_.face_handle(face_idx); 147 |
// --- Review notes (code kept byte-identical) ---
// setSource tail: walks the face's halfedges to list its nodes, snaps to a node if the point
// is within proximity_threshold, otherwise delegates to setFaceSource.
// setNodeSource: resets state, seeds the source with distance 0 / angle 0, builds gamma_ by
// walking the faces around the source (with special-casing for boundary sources), then for
// closed 1-rings rescales the initial angles so they sum to 2*pi.
// setFaceSource: gamma_ = the nodes of the containing face; no angle rescaling.
// run(): Dijkstra-like propagation -- repeatedly pops the nearest candidate from the heap
// and, for each adjacent non-boundary face, tries to improve every other node of that face
// from the two edges incident to the popped vertex; returns the last finished node index
// (-1 if the heap was empty). initialize(): distances to +inf, reseed heap, clear gamma_.
// NOTE(review): setNodeSource's inner loop compares `unsigned int i` against `const int num`
// (signed/unsigned mismatch), and run() reuses the name `face` for a HalfedgeHandle --
// legal but confusing; left untouched here.
HalfedgeHandle heh = mesh_.halfedge_handle(face); 148 | HalfedgeHandle start = heh; 149 | do { 150 | VertexHandle vh = mesh_.to_vertex_handle(heh); 151 | nodes.push_back(vh.idx()); 152 | heh = mesh_.next_halfedge_handle(heh); 153 | } while(heh != start); 154 | 155 | //Is the source on a node? 156 | for(int i = 0; i < nodes.size(); i++) { 157 | VertexHandle vh = mesh_.vertex_handle(nodes[i]); 158 | const Point& np = mesh_.point(vh); 159 | 160 | if(np.dist(point) < proximity_threshold) { 161 | setNodeSource(nodes[i]); 162 | return; 163 | } 164 | } 165 | 166 | //Assume the source is on the face 167 | setFaceSource(point, face_idx); 168 | return; 169 | 170 | } 171 | 172 | //Implementation of setNodeSource 173 | template 174 | void 175 | Generator::setNodeSource(int node_idx) 176 | { 177 | //Clear distances, angles and gamma 178 | initialize(); 179 | 180 | //Initialize source node 181 | distances_[node_idx] = 0; 182 | angles_[node_idx] = 0; 183 | 184 | //Find gamma, walk along the 1-ring around source 185 | VertexHandle source = mesh_.vertex_handle(node_idx); 186 | HalfedgeHandle heh = mesh_.halfedge_handle(source); 187 | 188 | if(mesh_.is_boundary(source)) { 189 | //Skip anticlockwise around source until heh is the last non-boundary halfedge 190 | HalfedgeHandle b = mesh_.opposite_halfedge_handle(heh); 191 | while(!mesh_.is_boundary(b)) { 192 | heh = mesh_.next_halfedge_handle(b); 193 | b = mesh_.opposite_halfedge_handle(heh); 194 | } 195 | } 196 | 197 | HalfedgeHandle start = heh; 198 | VertexHandle to; 199 | 200 | //Traverse all halfedges pointing into source 201 | do { 202 | heh = mesh_.next_halfedge_handle(heh); 203 | to = mesh_.to_vertex_handle(heh); 204 | 205 | //Traverse all nodes on the edge of this face, except source and first anticlockwise neighhbour 206 | while (to != source) { 207 | gamma_.push_back(to.idx()); 208 | heh = mesh_.next_halfedge_handle(heh); 209 | to = mesh_.to_vertex_handle(heh); 210 | } 211 | //heh is now pointing to source 212 | heh =
mesh_.opposite_halfedge_handle(heh); 213 | 214 | } while(heh != start); 215 | 216 | Point source_pt = mesh_.point(source); 217 | 218 | //Initialize gamma with distances and angles 219 | real phitot = initializeGamma(source_pt); 220 | 221 | if(!mesh_.is_boundary(source)) { 222 | //Scale angles to sum to 2pi 223 | const real alpha = (2*M_PI)/phitot; 224 | const int num = gamma_.size(); 225 | for(unsigned int i = 0; i < num; i++) { 226 | //Store the angle for this node 227 | angles_[gamma_[i]] *= alpha; 228 | } 229 | } 230 | 231 | } 232 | 233 | //Implementation of setFaceSource 234 | template 235 | void 236 | Generator::setFaceSource(const Point& point, int face_idx) 237 | { 238 | 239 | //Clear distances, angles and gamma 240 | initialize(); 241 | 242 | //Find gamma, the nodes of this face. 243 | FaceHandle face = mesh_.face_handle(face_idx); 244 | HalfedgeHandle heh = mesh_.halfedge_handle(face); 245 | HalfedgeHandle start = heh; 246 | do { 247 | VertexHandle vh = mesh_.to_vertex_handle(heh); 248 | int ni = vh.idx(); 249 | gamma_.push_back(ni); 250 | heh = mesh_.next_halfedge_handle(heh); 251 | } while(heh != start); 252 | 253 | //Initialize gamma with distances and angles 254 | initializeGamma(point); 255 | } 256 | 257 | 258 | //Implementation of run 259 | template 260 | int 261 | Generator::run() 262 | { 263 | 264 | int last_finished = -1; 265 | 266 | int edges[3]; 267 | std::vector next; 268 | 269 | HalfedgeHandle heh, end; 270 | 271 | while (!heap_.empty()) { 272 | 273 | int curr = heap_.getCandidate(); 274 | if (curr == -1) break; 275 | 276 | last_finished = curr; 277 | 278 | VertexHandle curr_vertex = mesh_.vertex_handle(curr); 279 | 280 | //Iterate halfedges pointing into current vertex (one for each 281 | //face adjacent to curr) 282 | HalfedgeHandle face_start = mesh_.halfedge_handle(curr_vertex); 283 | HalfedgeHandle face = face_start; 284 | 285 | do { 286 | face = mesh_.opposite_halfedge_handle(face); 287 | if(!mesh_.is_boundary(face)) { 288 | heh =
mesh_.prev_halfedge_handle(face); 289 | end = heh; 290 | 291 | //For this face, we will attempt to compute DGPC from each 292 | //of the two edges connected to source 293 | edges[0] = mesh_.to_vertex_handle(heh).idx(); 294 | heh = mesh_.next_halfedge_handle(heh); 295 | edges[1] = mesh_.to_vertex_handle(heh).idx(); 296 | heh = mesh_.next_halfedge_handle(heh); 297 | edges[2] = mesh_.to_vertex_handle(heh).idx(); 298 | heh = mesh_.next_halfedge_handle(heh); 299 | 300 | assert(edges[1] == curr); 301 | 302 | //We can now attempt to compute DGPC from the two edges 303 | // [edges[0], edges[1]] and [edges[1], edges[2]] 304 | 305 | //We will attempt to compute DGPC for all nodes in this 306 | //face (except source). Build a list of the nodes in "next". 307 | next.clear(); 308 | next.push_back(edges[2]); 309 | 310 | while(heh != end) { 311 | next.push_back(mesh_.to_vertex_handle(heh).idx()); 312 | heh = mesh_.next_halfedge_handle(heh); 313 | } 314 | 315 | next.push_back(edges[0]); 316 | 317 | for(int i = 0; i < next.size(); i++) { 318 | 319 | int n = next[i]; 320 | 321 | if( n != edges[0] ) { 322 | //Compute distance to n over the edge [edges[0], edges[1]] 323 | tryComputeNodeFromEdge(n, &edges[0]); 324 | } 325 | 326 | if( n != edges[2] ) { 327 | //Compute distance to n over the edge [edges[1], edges[2]] 328 | tryComputeNodeFromEdge(n, &edges[1]); 329 | } 330 | } 331 | } 332 | face = mesh_.next_halfedge_handle(face); 333 | } while(face != face_start); 334 | } 335 | 336 | return last_finished; 337 | } 338 | 339 | 340 | //////////////////////////////////////////// 341 | // Implementation of protected methods below 342 | //////////////////////////////////////////// 343 | 344 | // Implementation of initialize 345 | template 346 | void 347 | Generator::initialize() 348 | { 349 | std::fill(distances_.begin(), distances_.end(), (std::numeric_limits::max)()); 350 | heap_.initialize(&distances_); 351 | gamma_.clear(); 352 | } 353 | 354 | template 355 | typename Generator::real 356 |
// --- Review notes (code kept byte-identical) ---
// initializeGamma: for each node of gamma_, stores its Euclidean distance from the source
// point, assigns its angle as the running sum of corner angles spanned at the source, and
// pushes it onto the heap; returns the total spanned angle (used by setNodeSource to rescale
// to 2*pi).
// tryComputeNodeFromEdge: accepts a new distance only if old/new > 1 + eps_ (relative
// improvement), stores the interpolated angle, and re-pushes the node while within stopdist_.
// computeDistance: the DGPC distance update across edge [Nk, Nj], using a numerically stable
// evaluation of Heron's formula (operands sorted a >= b >= c); falls back to plain Dijkstra
// (alpha forced to 0 or 1) when the triangle inequality fails, the edge is degenerate
// (djk < 10e-12), or the geodesic would come from outside the triangle (xj < 0 || xk < 0).
// computeAngle: interpolates the two endpoint angles by alpha, wrapping modulo 2*pi when the
// angular gap exceeds pi.
// NOTE(review): in computeDistance `real b;` is only assigned inside the if/else-if chain --
// one branch always holds because `a` equals one of {djk, Uj, Uk}, but a trailing else or an
// initializer would silence uninitialized-use warnings. `djpt`, `dkpt` and `f3` are computed
// but never used.
Generator::initializeGamma(const Point& point) 357 | { 358 | 359 | const int num = gamma_.size(); 360 | real phitot = 0; 361 | 362 | //For each node in gamma_ 363 | // * Compute distances from point, store in distances_ 364 | // * Compute angles spanned in point, store in angles_ 365 | // * Insert node in heap 366 | // return total sum of angles spanned in point. 367 | for(int i = 0; i < num; i++) { 368 | 369 | int ni = gamma_[i]; 370 | 371 | VertexHandle nbvh = mesh_.vertex_handle(ni); 372 | const Point& nb = mesh_.point(nbvh); 373 | real dist = (point-nb).length(); 374 | distances_[ni] = dist; 375 | 376 | int ip = i+1; 377 | if(ip >= num) ip = 0; 378 | int nip = gamma_[ip]; 379 | VertexHandle nbvhp = mesh_.vertex_handle(nip); 380 | const Point& nbp = mesh_.point(nbvhp); 381 | 382 | const Point nb_t = (nb - point).normalize(); 383 | const Point nbp_t = (nbp - point).normalize(); 384 | real cos_phi = nb_t * nbp_t; 385 | real phi = acos(cos_phi); 386 | 387 | angles_[ni] = phitot; 388 | 389 | heap_.push(ni); 390 | 391 | phitot += phi; 392 | 393 | } 394 | 395 | return phitot; 396 | } 397 | 398 | 399 | //Implementation of tryComputeNodeFromEdge 400 | template 401 | bool 402 | Generator::tryComputeNodeFromEdge(int node, int edge[2]) 403 | { 404 | real thresh = 1.0+eps_; 405 | real alpha; 406 | 407 | VertexHandle h = mesh_.vertex_handle(node); 408 | const Point& pt = mesh_.point(h); 409 | 410 | real newdist = computeDistance(pt, edge, alpha); 411 | 412 | if (distances_[node]/newdist > thresh) { 413 | //Store new distance, and compute angle 414 | distances_[node] = newdist; 415 | angles_[node] = computeAngle(node, edge, alpha); 416 | 417 | if(newdist < stopdist_) { 418 | heap_.push(node); 419 | } 420 | return true; 421 | } 422 | return false; 423 | } 424 | 425 | //Implementation of computeNodeFromEdge 426 | template 427 | typename Generator::real 428 | Generator::computeDistance(const Point& pt, int edge[2], real& alpha) 429 | { 430 | 431 | const Point& Nk =
mesh_.point(mesh_.vertex_handle(edge[0])); 432 | const Point& Nj = mesh_.point(mesh_.vertex_handle(edge[1])); 433 | 434 | const real Uk = distances_[edge[0]]; 435 | const real Uj = distances_[edge[1]]; 436 | 437 | const real djptsq = Nj.dist2(pt); 438 | const real djpt = sqrt(djptsq); 439 | const real dkptsq = Nk.dist2(pt); 440 | const real dkpt = sqrt(dkptsq); 441 | 442 | const Point ekj = Nk-Nj; 443 | const real djksq = ekj.length2(); 444 | const real djk = sqrt(djksq); 445 | 446 | //Stable evaluation of Herons formula 447 | //using namespace std; //For max and min 448 | const real a = (std::max)(djk, (std::max)(Uj, Uk)); 449 | const real c = (std::min)(djk, (std::min)(Uj, Uk)); 450 | real b; 451 | if(a == djk) { 452 | b = (std::max)(Uj, Uk); 453 | } else if(a == Uj) { 454 | b = (std::max)(djk, Uk); 455 | } else if(a == Uk) { 456 | b = (std::max)(djk, Uj); 457 | } 458 | 459 | const real H_under_root = ( (a + (b+c)) * 460 | (c - (a-b)) * 461 | (c + (a-b)) * 462 | (a + (b-c)) ); 463 | 464 | if(H_under_root < 0 || djk < 10e-12) { 465 | // Triangle inequality fails, return Dijkstra instead 466 | const real dijkstra_j = Uj + Nj.dist(pt); 467 | const real dijkstra_k = Uk + Nk.dist(pt); 468 | if(dijkstra_j < dijkstra_k) { 469 | alpha = 0; 470 | return dijkstra_j; 471 | } else { 472 | alpha = 1; 473 | return dijkstra_k; 474 | } 475 | } 476 | 477 | 478 | const real H = sqrt( H_under_root ); 479 | 480 | const Point ej = Nj-pt; 481 | const Point ek = Nk-pt; 482 | 483 | const real A2 = ej.crossProd(ek).length(); 484 | 485 | const real ej_ekj = ej * ekj; 486 | const real ek_ekj = ek * ekj; 487 | 488 | const real f31 = djksq - (Uj-Uk)*(Uj-Uk); 489 | const real f32 = (Uj+Uk)*(Uj+Uk) - djksq; 490 | 491 | const real f3 = f31*f32; // ( djksq - (Uj-Uk)*(Uj-Uk) ) * ( (Uj+Uk)*(Uj+Uk) - djksq ); 492 | 493 | const real Ujsq = Uj*Uj; 494 | const real Uksq = Uk*Uk; 495 | 496 | const real f1_j = A2 * (djksq + Uksq - Ujsq); 497 | const real f1_k = A2 * (djksq + Ujsq - Uksq); 498 | 499 |
const real xj = (f1_j + ek_ekj*H); 500 | const real xk = (f1_k - ej_ekj*H); 501 | 502 | if(xj < 0 || xk < 0) { 503 | // Update from outside triangle, return Dijkstra instead 504 | const real dijkstra_j = Uj + Nj.dist(pt); 505 | const real dijkstra_k = Uk + Nk.dist(pt); 506 | if(dijkstra_j < dijkstra_k) { 507 | alpha = 0; 508 | return dijkstra_j; 509 | } else { 510 | alpha = 1; 511 | return dijkstra_k; 512 | } 513 | } 514 | 515 | const real f4 = 2*A2*djksq; 516 | 517 | real Ui = sqrt(xj*xj*djptsq + 2*xj*xk*ej*ek + xk*xk*dkptsq)/f4; 518 | 519 | const real cos_jk = ( Ujsq+Uksq - djksq)/(2*Uj*Uk); 520 | const real cos_ji = ( Ujsq+Ui*Ui - djptsq)/(2*Uj*Ui); 521 | alpha = acos(cos_ji) / acos (cos_jk); 522 | 523 | return Ui; 524 | 525 | } 526 | 527 | template 528 | typename Generator::real 529 | Generator::computeAngle(int node, int edge[2], real alpha) 530 | { 531 | real nkphi = angles_[edge[0]]; 532 | real njphi = angles_[edge[1]]; 533 | 534 | const real diff = fabs(njphi-nkphi); 535 | 536 | if(diff < eps_) 537 | return njphi; 538 | 539 | if(diff > M_PI) { 540 | //Make the interpolation modulo 2pi 541 | if(njphi < nkphi) njphi += 2*M_PI; 542 | else nkphi += 2*M_PI; 543 | } 544 | 545 | real angle = (1-alpha) * njphi + alpha * nkphi; 546 | 547 | if(angle > 2*M_PI) 548 | angle -= 2*M_PI; 549 | 550 | return angle; 551 | } 552 | 553 | }; //End namespace GIGen 554 | 555 | #endif //DGPC_GENERATOR_H 556 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/Heap.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is part of the DGPC library. The library computes 3 | * Discrete Geodesic Polar Coordinates on a polygonal mesh.
// --- Review notes (code kept byte-identical) ---
// Heap.h: lazy-deletion min-heap used by the DGPC generator. HeapNode pairs a node index
// with its key and defines the comparison operators; Heap wraps std::priority_queue ordered
// via std::greater (min-heap) plus a flags_ vector marking which entries are "live".
// removeFromHeap only clears the flag; pop()/top() discard stale entries on the fly, so a
// node may legitimately appear in heap_ several times with different keys after re-pushes.
// Note: empty() is defined as top() == -1, and top() itself prunes stale entries, so both
// mutate heap_ (hence non-const). keys_ points at the caller-owned distances vector.
// This span also contains the start of Ini.h (a standalone INI read/write helper class,
// continued in the next span).
4 | * 5 | * More info: 6 | * http://folk.uio.no/eivindlm/dgpc/ 7 | * 8 | * Authors: Eivind Lyche Melvær and Martin Reimers 9 | * Centre of Mathematics and Department of Informatics 10 | * University of Oslo, Norway, 2012 11 | ************************************************************/ 12 | #ifndef DGPC_HEAP_H 13 | #define DGPC_HEAP_H 14 | 15 | #include 16 | #include 17 | 18 | namespace GIGen { 19 | 20 | template 21 | class HeapNode { 22 | 23 | public: 24 | int idx_; 25 | real key_; 26 | 27 | HeapNode( int idx, real key) { idx_ = idx; key_ = key;} 28 | ~HeapNode(){} 29 | 30 | bool operator > ( const HeapNode& x) const { return (this->key_ > x.key_);} 31 | bool operator >= ( const HeapNode& x) const { return (this->key_ >= x.key_);} 32 | bool operator < ( const HeapNode& x) const { return (this->key_ < x.key_);} 33 | bool operator <= ( const HeapNode& x) const { return (this->key_ <= x.key_);} 34 | }; 35 | 36 | template 37 | class Heap { 38 | 39 | typedef std::priority_queue< HeapNode, std::vector< HeapNode >, std::greater > > Heap_t; 40 | 41 | Heap_t heap_; 42 | std::vector flags_; 43 | std::vector* keys_; 44 | 45 | public: 46 | Heap( ) { keys_ = 0; } 47 | Heap( std::vector* keys) { initialize(keys);} 48 | ~Heap() {} 49 | 50 | void initialize( std::vector* keys) { keys_ = keys; resize (keys_->size()); std::fill(flags_.begin(),flags_.end(),false);} 51 | void resize( int size) { flags_.resize(size); } 52 | bool isInHeap( int idx) const { return flags_[idx];} 53 | void removeFromHeap( int idx) { flags_[idx]=false;} 54 | void push( int idx) { heap_.push(HeapNode(idx,(*keys_)[idx])); flags_[idx]=true; } 55 | bool empty () { return (top()==-1);} 56 | int size () const { return heap_.size();} 57 | 58 | int getCandidate() { 59 | if (!heap_.empty()) { 60 | int ret = top(); 61 | pop(); 62 | return ret; 63 | } else 64 | return -1; 65 | } 66 | 67 | void pop() { 68 | while (!heap_.empty() && !isInHeap(heap_.top().idx_)) 69 | heap_.pop(); 70 | if (!heap_.empty()) { 71 |
flags_[heap_.top().idx_]=false; 72 | heap_.pop(); 73 | } 74 | } 75 | 76 | int top() { 77 | while (!heap_.empty() && !isInHeap(heap_.top().idx_)) 78 | heap_.pop(); 79 | if (!heap_.empty()) 80 | return heap_.top().idx_; 81 | else 82 | return -1; 83 | } 84 | 85 | }; 86 | 87 | }; //End namespace GIGen 88 | 89 | #endif // DGPC_HEAP_H 90 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/Ini.h: -------------------------------------------------------------------------------- 1 | /** 2 | * @file 3 | * @brief initialization file read and write API 4 | * @author Deng Yangjun 5 | * @date 2007-12-9 6 | * @version 0.2 7 | * (C)2007 Midapex 8 | * This program is free software; you can redistribute it and/or modify it 9 | * under the terms of the GNU Library General Public License as published 10 | * by the Free Software Foundation; either version 2 of the License, 11 | * or (at your option) any later version. 12 | */ 13 | #ifndef INI_FILE_CPP_H_ 14 | #define INI_FILE_CPP_H_ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #define MAX_INI_FILE_SIZE 1024*16 22 | using namespace std; 23 | 24 | class IniFile 25 | { 26 | public: 27 | IniFile(const string & fileName) 28 | { 29 | m_fileName = fileName; 30 | } 31 | 32 | public: 33 | virtual ~IniFile(void) {} 34 | 35 | const string & getFileName() const 36 | { 37 | return m_fileName; 38 | } 39 | 40 | const string &getSection() const 41 | { 42 | return m_section; 43 | } 44 | void setSection(const string &section) 45 | { 46 | m_section = section; 47 | } 48 | 49 | bool write(const string &key, const string & value) const 50 | { 51 | return write_profile_string(m_section.c_str(), key.c_str(), value.c_str(), m_fileName.c_str()) == 1 ?
// --- Review notes (code kept byte-identical) ---
// Ini.h IniFile continued: readStr/readInt wrap the static C-style profile functions below.
// read_profile_string loads the whole INI file into a 16 KiB stack buffer
// (MAX_INI_FILE_SIZE), locates [section]/key via parse_file, and copies the value out
// (truncated to size-1 and NUL-terminated); write_profile_string rewrites the entire file
// with the section/key appended or the value replaced in place.
// NOTE(review): strncpy(value, default_value, size) does NOT NUL-terminate when
// default_value has >= size characters. In write_profile_string, the line
// `sprintf(w_buf + sec_e + strlen(key) + strlen(value) + 2, buf + sec_e, ...)` passes file
// content as the printf FORMAT string -- any '%' in the INI corrupts output (classic
// format-string hazard); a length-bounded memcpy looks intended. load_ini_file's
// fgetc/(char)EOF comparison also terminates early on a legitimate 0xFF byte in the file.
// parse_file is a hand-rolled scanner for "[section]" headers and "key=" lines (lines
// starting with ';' are comments); note the misspelled helper name left_barce.
true : false; 52 | } 53 | bool write(const string &key, int value) const 54 | { 55 | char tmp[64]; 56 | sprintf(tmp, "%d", value); 57 | return write(key, tmp); 58 | } 59 | 60 | string readStr(const string &key, const string &default_value) const 61 | { 62 | char buf[4096]; 63 | read_profile_string(m_section.c_str(), key.c_str(), buf, sizeof(buf), default_value.c_str(), m_fileName.c_str()); 64 | return buf; 65 | } 66 | int readInt(const string &key, int default_value) const 67 | { 68 | return read_profile_int(m_section.c_str(), key.c_str(), default_value, m_fileName.c_str()); 69 | } 70 | 71 | public: 72 | static int read_profile_string(const char *section, const char *key, char *value, 73 | int size, const char *default_value, const char *file) 74 | { 75 | char buf[MAX_INI_FILE_SIZE] = { 0 }; 76 | int file_size; 77 | int sec_s, sec_e, key_s, key_e, value_s, value_e; 78 | 79 | //check parameters 80 | assert(section != NULL && strlen(section)); 81 | assert(key != NULL && strlen(key)); 82 | assert(value != NULL); 83 | assert(size > 0); 84 | assert(file != NULL &&strlen(key)); 85 | 86 | if (!load_ini_file(file, buf, &file_size)) 87 | { 88 | if (default_value != NULL) 89 | { 90 | strncpy(value, default_value, size); 91 | } 92 | return 0; 93 | } 94 | 95 | if (!parse_file(section, key, buf, &sec_s, &sec_e, &key_s, &key_e, &value_s, &value_e)) 96 | { 97 | if (default_value != NULL) 98 | { 99 | strncpy(value, default_value, size); 100 | } 101 | return 0; //not find the key 102 | } 103 | else 104 | { 105 | int cpcount = value_e - value_s; 106 | 107 | if (size - 1 < cpcount) 108 | { 109 | cpcount = size - 1; 110 | } 111 | 112 | memset(value, 0, size); 113 | memcpy(value, buf + value_s, cpcount); 114 | value[cpcount] = '\0'; 115 | 116 | return 1; 117 | } 118 | } 119 | static int read_profile_int(const char *section, const char *key, int default_value, 120 | const char *file) 121 | { 122 | char value[32] = { 0 }; 123 | if (!read_profile_string(section, key, value, sizeof(value),
NULL, file)) 124 | { 125 | return default_value; 126 | } 127 | else 128 | { 129 | return atoi(value); 130 | } 131 | } 132 | static int write_profile_string(const char *section, const char *key, 133 | const char *value, const char *file) 134 | { 135 | char buf[MAX_INI_FILE_SIZE] = { 0 }; 136 | char w_buf[MAX_INI_FILE_SIZE] = { 0 }; 137 | int sec_s, sec_e, key_s, key_e, value_s, value_e; 138 | int value_len = (int)strlen(value); 139 | int file_size; 140 | FILE *out; 141 | 142 | //check parameters 143 | assert(section != NULL && strlen(section)); 144 | assert(key != NULL && strlen(key)); 145 | assert(value != NULL); 146 | assert(file != NULL &&strlen(key)); 147 | 148 | if (!load_ini_file(file, buf, &file_size)) 149 | { 150 | sec_s = -1; 151 | } 152 | else 153 | { 154 | parse_file(section, key, buf, &sec_s, &sec_e, &key_s, &key_e, &value_s, &value_e); 155 | } 156 | 157 | if (-1 == sec_s) 158 | { 159 | if (0 == file_size) 160 | { 161 | sprintf(w_buf + file_size, "[%s]\n%s=%s\n", section, key, value); 162 | } 163 | else 164 | { 165 | //not find the section, then add the new section at end of the file 166 | memcpy(w_buf, buf, file_size); 167 | sprintf(w_buf + file_size, "\n[%s]\n%s=%s\n", section, key, value); 168 | } 169 | } 170 | else if (-1 == key_s) 171 | { 172 | //not find the key, then add the new key=value at end of the section 173 | memcpy(w_buf, buf, sec_e); 174 | sprintf(w_buf + sec_e, "%s=%s\n", key, value); 175 | sprintf(w_buf + sec_e + strlen(key) + strlen(value) + 2, buf + sec_e, file_size - sec_e); 176 | } 177 | else 178 | { 179 | //update value with new value 180 | memcpy(w_buf, buf, value_s); 181 | memcpy(w_buf + value_s, value, value_len); 182 | memcpy(w_buf + value_s + value_len, buf + value_e, file_size - value_e); 183 | } 184 | 185 | out = fopen(file, "w"); 186 | if (NULL == out) 187 | { 188 | return 0; 189 | } 190 | 191 | if (-1 == fputs(w_buf, out)) 192 | { 193 | fclose(out); 194 | return 0; 195 | } 196 | 197 | fclose(out); 198 | return 1; 199 | }
200 | 201 | private: 202 | static int load_ini_file(const char *file, char *buf, int *file_size) 203 | { 204 | FILE *in = NULL; 205 | int i = 0; 206 | *file_size = 0; 207 | 208 | assert(file != NULL); 209 | assert(buf != NULL); 210 | 211 | in = fopen(file, "r"); 212 | if (NULL == in) { 213 | return 0; 214 | } 215 | 216 | buf[i] = fgetc(in); 217 | 218 | //load initialization file 219 | while (buf[i] != (char)EOF) { 220 | i++; 221 | assert(i < MAX_INI_FILE_SIZE); //file too big, you can redefine MAX_INI_FILE_SIZE to fit the big file 222 | buf[i] = fgetc(in); 223 | } 224 | 225 | buf[i] = '\0'; 226 | *file_size = i; 227 | 228 | fclose(in); 229 | return 1; 230 | } 231 | 232 | static int newline(char c) 233 | { 234 | return ('\n' == c || '\r' == c) ? 1 : 0; 235 | } 236 | 237 | static int end_of_string(char c) 238 | { 239 | return '\0' == c ? 1 : 0; 240 | } 241 | static int left_barce(char c) 242 | { 243 | return '[' == c ? 1 : 0; 244 | } 245 | static int right_brace(char c) 246 | { 247 | return ']' == c ?
1 : 0; 248 | } 249 | static int parse_file(const char *section, const char *key, const char *buf, int *sec_s, int *sec_e, 250 | int *key_s, int *key_e, int *value_s, int *value_e) 251 | { 252 | const char *p = buf; 253 | int i = 0; 254 | 255 | assert(buf != NULL); 256 | assert(section != NULL && strlen(section)); 257 | assert(key != NULL && strlen(key)); 258 | 259 | *sec_e = *sec_s = *key_e = *key_s = *value_s = *value_e = -1; 260 | 261 | while (!end_of_string(p[i])) { 262 | //find the section 263 | if ((0 == i || newline(p[i - 1])) && left_barce(p[i])) 264 | { 265 | int section_start = i + 1; 266 | 267 | //find the ']' 268 | do { 269 | i++; 270 | } while (!right_brace(p[i]) && !end_of_string(p[i])); 271 | 272 | if (0 == strncmp(p + section_start, section, i - section_start)) { 273 | int newline_start = 0; 274 | 275 | i++; 276 | 277 | //Skip over space char after ']' 278 | while (isspace(p[i])) { 279 | i++; 280 | } 281 | 282 | //find the section 283 | *sec_s = section_start; 284 | *sec_e = i; 285 | 286 | while (!(newline(p[i - 1]) && left_barce(p[i])) 287 | && !end_of_string(p[i])) { 288 | int j = 0; 289 | //get a new line 290 | newline_start = i; 291 | 292 | while (!newline(p[i]) && !end_of_string(p[i])) { 293 | i++; 294 | } 295 | 296 | //now i is equal to end of the line 297 | j = newline_start; 298 | 299 | if (';' != p[j]) //skip over comment 300 | { 301 | while (j < i && p[j] != '=') { 302 | j++; 303 | if ('=' == p[j]) { 304 | if (strncmp(key, p + newline_start, j - newline_start) == 0) 305 | { 306 | //find the key ok 307 | *key_s = newline_start; 308 | *key_e = j - 1; 309 | 310 | *value_s = j + 1; 311 | *value_e = i; 312 | 313 | return 1; 314 | } 315 | } 316 | } 317 | } 318 | 319 | i++; 320 | } 321 | } 322 | } 323 | else 324 | { 325 | i++; 326 | } 327 | } 328 | return 0; 329 | } 330 | 331 | private: 332 | string m_fileName; 333 | string m_section; 334 | }; 335 | 336 | #endif 337 | 338 |
-------------------------------------------------------------------------------- /cpp_GI_generation/include/IniFile.h: -------------------------------------------------------------------------------- 1 | /** 2 | * @file 3 | * @brief initialization file read and write API 4 | * @author Deng Yangjun 5 | * @date 2007-12-9 6 | * @version 0.2 7 | * (C)2007 Midapex 8 | * This program is free software; you can redistribute it and/or modify it 9 | * under the terms of the GNU Library General Public License as published 10 | * by the Free Software Foundation; either version 2 of the License, 11 | * or (at your option) any later version. 12 | */ 13 | #ifndef INI_FILE_CPP_H_ 14 | #define INI_FILE_CPP_H_ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #define MAX_INI_FILE_SIZE 1024*16 22 | using namespace std; 23 | 24 | class IniFile 25 | { 26 | public: 27 | IniFile(const string & fileName) 28 | { 29 | m_fileName = fileName; 30 | } 31 | 32 | 33 | public: 34 | virtual ~IniFile(void) {} 35 | 36 | const string & getFileName() const 37 | { 38 | return m_fileName; 39 | } 40 | 41 | const string &getSection() const 42 | { 43 | return m_section; 44 | } 45 | 46 | void setSection(const string §ion) 47 | { 48 | m_section = section; 49 | } 50 | 51 | bool write(const string &key, const string & value) const 52 | { 53 | return write_profile_string(m_section.c_str(), key.c_str(), value.c_str(), m_fileName.c_str()) == 1 ? 
true : false; 54 | } 55 | 56 | bool write(const string &key, int value) const 57 | { 58 | char tmp[64]; 59 | sprintf(tmp, "%d", value); 60 | return write(key, tmp); 61 | } 62 | 63 | string readStr(const string &key, const string &default_value = "") const 64 | { 65 | char buf[4096]; 66 | read_profile_string(m_section.c_str(), key.c_str(), buf, sizeof(buf), default_value.c_str(), m_fileName.c_str()); 67 | return buf; 68 | } 69 | 70 | int readInt(const string &key, int default_value = -1) const 71 | { 72 | return read_profile_int(m_section.c_str(), key.c_str(), default_value, m_fileName.c_str()); 73 | } 74 | 75 | 76 | public: 77 | static int read_profile_string(const char *section, const char *key, char *value, 78 | int size, const char *default_value, const char *file) 79 | { 80 | char buf[MAX_INI_FILE_SIZE] = { 0 }; 81 | int file_size; 82 | int sec_s, sec_e, key_s, key_e, value_s, value_e; 83 | 84 | //check parameters 85 | assert(section != NULL && strlen(section)); 86 | assert(key != NULL && strlen(key)); 87 | assert(value != NULL); 88 | assert(size > 0); 89 | assert(file != NULL &&strlen(key)); 90 | 91 | if (!load_ini_file(file, buf, &file_size)) 92 | { 93 | if (default_value != NULL) 94 | { 95 | strncpy(value, default_value, size); 96 | } 97 | return 0; 98 | } 99 | 100 | if (!parse_file(section, key, buf, &sec_s, &sec_e, &key_s, &key_e, &value_s, &value_e)) 101 | { 102 | if (default_value != NULL) 103 | { 104 | strncpy(value, default_value, size); 105 | } 106 | return 0; //not find the key 107 | } 108 | else 109 | { 110 | int cpcount = value_e - value_s; 111 | 112 | if (size - 1 < cpcount) 113 | { 114 | cpcount = size - 1; 115 | } 116 | 117 | memset(value, 0, size); 118 | memcpy(value, buf + value_s, cpcount); 119 | value[cpcount] = '\0'; 120 | 121 | return 1; 122 | } 123 | } 124 | 125 | static int read_profile_int(const char *section, const char *key, int default_value, 126 | const char *file) 127 | { 128 | char value[32] = { 0 }; 129 | if 
(!read_profile_string(section, key, value, sizeof(value), NULL, file)) 130 | { 131 | return default_value; 132 | } 133 | else 134 | { 135 | return atoi(value); 136 | } 137 | } 138 | 139 | static int write_profile_string(const char *section, const char *key, 140 | const char *value, const char *file) 141 | { 142 | char buf[MAX_INI_FILE_SIZE] = { 0 }; 143 | char w_buf[MAX_INI_FILE_SIZE] = { 0 }; 144 | int sec_s, sec_e, key_s, key_e, value_s, value_e; 145 | int value_len = (int)strlen(value); 146 | int file_size; 147 | FILE *out; 148 | 149 | //check parameters 150 | assert(section != NULL && strlen(section)); 151 | assert(key != NULL && strlen(key)); 152 | assert(value != NULL); 153 | assert(file != NULL &&strlen(key)); 154 | 155 | if (!load_ini_file(file, buf, &file_size)) 156 | { 157 | sec_s = -1; 158 | } 159 | else 160 | { 161 | parse_file(section, key, buf, &sec_s, &sec_e, &key_s, &key_e, &value_s, &value_e); 162 | } 163 | 164 | if (-1 == sec_s) 165 | { 166 | if (0 == file_size) 167 | { 168 | sprintf(w_buf + file_size, "[%s]\n%s=%s\n", section, key, value); 169 | } 170 | else 171 | { 172 | //not find the section, then add the new section at end of the file 173 | memcpy(w_buf, buf, file_size); 174 | sprintf(w_buf + file_size, "\n[%s]\n%s=%s\n", section, key, value); 175 | } 176 | } 177 | else if (-1 == key_s) 178 | { 179 | //not find the key, then add the new key=value at end of the section 180 | memcpy(w_buf, buf, sec_e); 181 | sprintf(w_buf + sec_e, "%s=%s\n", key, value); 182 | sprintf(w_buf + sec_e + strlen(key) + strlen(value) + 2, buf + sec_e, file_size - sec_e); 183 | } 184 | else 185 | { 186 | //update value with new value 187 | memcpy(w_buf, buf, value_s); 188 | memcpy(w_buf + value_s, value, value_len); 189 | memcpy(w_buf + value_s + value_len, buf + value_e, file_size - value_e); 190 | } 191 | 192 | out = fopen(file, "w"); 193 | if (NULL == out) 194 | { 195 | return 0; 196 | } 197 | 198 | if (-1 == fputs(w_buf, out)) 199 | { 200 | fclose(out); 201 | 
return 0; 202 | } 203 | 204 | fclose(out); 205 | return 1; 206 | } 207 | 208 | 209 | private: 210 | static int load_ini_file(const char *file, char *buf, int *file_size) 211 | { 212 | FILE *in = NULL; 213 | int i = 0; 214 | *file_size = 0; 215 | 216 | assert(file != NULL); 217 | assert(buf != NULL); 218 | 219 | in = fopen(file, "r"); 220 | if (NULL == in) { 221 | return 0; 222 | } 223 | 224 | buf[i] = fgetc(in); 225 | 226 | //load initialization file 227 | while (buf[i] != (char)EOF) { 228 | i++; 229 | assert(i < MAX_INI_FILE_SIZE); //file too big, you can redefine MAX_INI_FILE_SIZE to fit the big file 230 | buf[i] = fgetc(in); 231 | } 232 | 233 | buf[i] = '\0'; 234 | *file_size = i; 235 | 236 | fclose(in); 237 | return 1; 238 | } 239 | 240 | static int newline(char c) 241 | { 242 | return ('\n' == c || '\r' == c) ? 1 : 0; 243 | } 244 | 245 | static int end_of_string(char c) 246 | { 247 | return '\0' == c ? 1 : 0; 248 | } 249 | 250 | static int left_barce(char c) 251 | { 252 | return '[' == c ? 1 : 0; 253 | } 254 | 255 | static int right_brace(char c) 256 | { 257 | return ']' == c ? 
1 : 0; 258 | } 259 | 260 | static int parse_file(const char *section, const char *key, const char *buf, int *sec_s, int *sec_e, 261 | int *key_s, int *key_e, int *value_s, int *value_e) 262 | { 263 | const char *p = buf; 264 | int i = 0; 265 | 266 | assert(buf != NULL); 267 | assert(section != NULL && strlen(section)); 268 | assert(key != NULL && strlen(key)); 269 | 270 | *sec_e = *sec_s = *key_e = *key_s = *value_s = *value_e = -1; 271 | 272 | while (!end_of_string(p[i])) { 273 | //find the section 274 | if ((0 == i || newline(p[i - 1])) && left_barce(p[i])) 275 | { 276 | int section_start = i + 1; 277 | 278 | //find the ']' 279 | do { 280 | i++; 281 | } while (!right_brace(p[i]) && !end_of_string(p[i])); 282 | 283 | if (0 == strncmp(p + section_start, section, i - section_start)) { 284 | int newline_start = 0; 285 | 286 | i++; 287 | 288 | //Skip over space char after ']' 289 | while (isspace(p[i])) { 290 | i++; 291 | } 292 | 293 | //find the section 294 | *sec_s = section_start; 295 | *sec_e = i; 296 | 297 | while (!(newline(p[i - 1]) && left_barce(p[i])) 298 | && !end_of_string(p[i])) { 299 | int j = 0; 300 | //get a new line 301 | newline_start = i; 302 | 303 | while (!newline(p[i]) && !end_of_string(p[i])) { 304 | i++; 305 | } 306 | 307 | //now i is equal to end of the line 308 | j = newline_start; 309 | 310 | if (';' != p[j]) //skip over comment 311 | { 312 | while (j < i && p[j] != '=') { 313 | j++; 314 | if ('=' == p[j]) { 315 | if (strncmp(key, p + newline_start, j - newline_start) == 0) 316 | { 317 | //find the key ok 318 | *key_s = newline_start; 319 | *key_e = j - 1; 320 | 321 | *value_s = j + 1; 322 | *value_e = i; 323 | 324 | return 1; 325 | } 326 | } 327 | } 328 | } 329 | 330 | i++; 331 | } 332 | } 333 | } 334 | else 335 | { 336 | i++; 337 | } 338 | } 339 | return 0; 340 | } 341 | 342 | 343 | private: 344 | string m_fileName; 345 | string m_section; 346 | }; 347 | 348 | #endif 349 | 350 | 
-------------------------------------------------------------------------------- /cpp_GI_generation/include/Mesh_C.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is MODIFIED from a part of the 3D descriptor 3 | * learning framework by Hanyu Wang(王涵玉) 4 | * https://github.com/jianweiguo/local3Ddescriptorlearning 5 | * The library computes Discrete Geodesic Polar Coordinates 6 | * on a polygonal mesh. 7 | * DGPC file's authors: Eivind Lyche Melvær and Martin Reimers 8 | * 9 | * Author: Yiqun Wang(王逸群) 10 | * https://github.com/yiqun-wang/LPS 11 | ************************************************************/ 12 | 13 | #ifndef DGPC_MESH_H 14 | #define DGPC_MESH_H 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include "Vector3.h" 22 | #include "libcompcur.h" 23 | #include "mclmcr.h" 24 | #include "matrix.h" 25 | #include "mclcppclass.h" 26 | 27 | 28 | using namespace std; 29 | 30 | namespace GIGen 31 | { 32 | 33 | // Redefine default Point class in OpenMesh 34 | template 35 | struct OpenMeshTraits : OpenMesh::DefaultTraits 36 | { 37 | typedef P Point; 38 | VertexAttributes(OpenMesh::Attributes::Status); 39 | FaceAttributes(OpenMesh::Attributes::Status); 40 | EdgeAttributes(OpenMesh::Attributes::Status); 41 | }; 42 | 43 | using Point = GIGen::Vector3; 44 | 45 | struct VPropHandles 46 | { 47 | OpenMesh::VPropHandleT c_max, c_min; 48 | OpenMesh::VPropHandleT u_max, u_min, normal, resp, point; 49 | OpenMesh::VPropHandleT> hks, le, cf; 50 | }; 51 | 52 | // Main mesh class 53 | class MeshOM : public OpenMesh::TriMesh_ArrayKernelT> 54 | { 55 | private: 56 | 57 | bool is_vertex_info_computed_ = false; 58 | int hks_len, le_len; 59 | string filename; 60 | VPropHandles vphs; 61 | 62 | bool to_VF_mwArray(mwArray &vertices, mwArray &faces) 63 | { 64 | // Dimension check. 
65 | mwArray &v_dims = vertices.GetDimensions(); 66 | mwArray &dim_v_dims = v_dims.GetDimensions(); 67 | 68 | mwArray &f_dims = faces.GetDimensions(); 69 | mwArray &dim_f_dims = f_dims.GetDimensions(); 70 | 71 | int &&dim_v_dims_1_2 = dim_v_dims(1, 2); 72 | int &&v_dims_1_1 = v_dims(1, 1); 73 | int &&v_dims_1_2 = v_dims(1, 2); 74 | 75 | int &&dim_f_dims_1_2 = dim_f_dims(1, 2); 76 | int &&f_dims_1_1 = f_dims(1, 1); 77 | int &&f_dims_1_2 = f_dims(1, 2); 78 | 79 | if (!(dim_v_dims_1_2 == 2 && v_dims_1_1 == 3 && v_dims_1_2 == this->n_vertices()) || 80 | !(dim_f_dims_1_2 == 2 && f_dims_1_1 == 3 && f_dims_1_2 == this->n_faces())) 81 | { 82 | cerr << "Filed to call function to_VF_mwArray(*), dimensions do mot match." << endl; 83 | exit(0); 84 | } 85 | 86 | 87 | //for (auto &vertex : MeshBase::vertices()) 88 | for (auto &vertex : this->vertices()) 89 | { 90 | auto &v = this->point(vertex); 91 | for (size_t i = 0; i < 3; i++) 92 | { 93 | vertices(i + 1, vertex.idx() + 1) = v[i]; 94 | } 95 | } 96 | 97 | for (auto &face : this->faces()) 98 | { 99 | size_t i = 0; 100 | 101 | // fv_iter belongs to MeshBase::FaceVertexIter 102 | for (auto fv_iter = this->fv_begin(face); fv_iter.is_valid(); fv_iter++) 103 | { 104 | faces(i + 1, face.idx() + 1) = fv_iter->idx() + 1; 105 | i += 1; 106 | } 107 | } 108 | 109 | return true; 110 | 111 | } 112 | 113 | 114 | public: 115 | double diameter; 116 | 117 | // For compatibility to Generator.h 118 | typedef Point point_type; 119 | 120 | MeshOM(const std::string& filename, const int &hks_len = 16, const int &le_len = 5) 121 | { 122 | this->hks_len = hks_len; 123 | this->le_len = le_len; 124 | this->read_mesh(filename); 125 | } 126 | 127 | MeshOM() {} 128 | 129 | bool read_mesh(const std::string& filename) 130 | { 131 | bool is_succeeded = OpenMesh::IO::read_mesh(*this, filename); 132 | 133 | if (is_succeeded) 134 | { 135 | this->filename = filename; 136 | this->compute_vertex_info(); 137 | } 138 | else 139 | return false; 140 | 141 | return 
is_succeeded; 142 | } 143 | 144 | void applied_rotation(Rot_mat rotation_mat) 145 | { 146 | for (auto &vertex : this->vertices()) 147 | { 148 | auto &v = this->point(vertex); 149 | this->set_point(vertex, rotation_mat * v); 150 | } 151 | } 152 | 153 | bool compute_vertex_info() 154 | { 155 | 156 | mwArray vertices(3, this->n_vertices(), mxDOUBLE_CLASS); 157 | mwArray faces(3, this->n_faces(), mxDOUBLE_CLASS); 158 | 159 | if (!this->to_VF_mwArray(vertices, faces)) 160 | { 161 | cerr << "to_VF_mwArray failed!" << endl; 162 | return false; 163 | } 164 | 165 | mwArray u_max(3, this->n_vertices(), mxDOUBLE_CLASS); 166 | mwArray u_min(3, this->n_vertices(), mxDOUBLE_CLASS); 167 | mwArray curvature_max(1, this->n_vertices(), mxDOUBLE_CLASS); 168 | mwArray curvature_min(1, this->n_vertices(), mxDOUBLE_CLASS); 169 | mwArray normal(3, this->n_vertices(), mxDOUBLE_CLASS); 170 | mwArray hks(this->hks_len, this->n_vertices(), mxDOUBLE_CLASS); 171 | mwArray le(this->le_len, this->n_vertices(), mxDOUBLE_CLASS); 172 | mwArray cf(1, this->le_len * 3 + 1, mxDOUBLE_CLASS); 173 | mwArray filenameArray(this->filename.c_str()); 174 | mwArray desc_len(1, 1, mxDOUBLE_CLASS); 175 | mwArray le_len(1, 1, mxDOUBLE_CLASS); 176 | mwArray diameter(1, 1, mxDOUBLE_CLASS); 177 | mwArray resp(3, this->n_vertices(), mxDOUBLE_CLASS); 178 | desc_len(1, 1) = this->hks_len; 179 | le_len(1, 1) = (this->le_len * 3.0 + 1) / 3.0; 180 | mwArray cf_flag(1, 1, mxLOGICAL_CLASS); 181 | cf_flag(1, 1) = false; 182 | 183 | compute_curvature(10, u_max, u_min, curvature_max, curvature_min, normal, hks, diameter, resp, le, cf, vertices, faces, filenameArray, desc_len, le_len, cf_flag); //number output 184 | 185 | this->diameter = diameter(1, 1); 186 | this->add_property(vphs.u_max); 187 | this->add_property(vphs.u_min); 188 | this->add_property(vphs.c_max); 189 | this->add_property(vphs.c_min); 190 | this->add_property(vphs.normal); 191 | this->add_property(vphs.hks); 192 | this->add_property(vphs.resp); 193 | 
this->add_property(vphs.le); 194 | this->add_property(vphs.cf); 195 | this->add_property(vphs.point); 196 | 197 | for (auto &vertex : this->vertices()) 198 | { 199 | this->property(vphs.c_max, vertex) = curvature_max(1, vertex.idx() + 1); 200 | this->property(vphs.c_min, vertex) = curvature_min(1, vertex.idx() + 1); 201 | this->property(vphs.hks, vertex).resize(this->hks_len); 202 | this->property(vphs.le, vertex).resize(this->le_len); //need ini.. 203 | for (size_t i = 0; i < 3; i++) 204 | { 205 | this->property(vphs.u_max, vertex)[i] = u_max(i + 1, vertex.idx() + 1); 206 | this->property(vphs.u_min, vertex)[i] = u_min(i + 1, vertex.idx() + 1); 207 | this->property(vphs.normal, vertex)[i] = normal(i + 1, vertex.idx() + 1); 208 | this->property(vphs.resp, vertex)[i] = resp(i + 1, vertex.idx() + 1); 209 | this->property(vphs.point, vertex)[i] = vertices(i + 1, vertex.idx() + 1); 210 | } 211 | 212 | for (size_t i = 0; i < this->hks_len; i++) 213 | { 214 | this->property(vphs.hks, vertex)[i] = hks(i + 1, vertex.idx() + 1); 215 | } 216 | 217 | for (size_t i = 0; i < this->le_len; i++) 218 | { 219 | this->property(vphs.le, vertex)[i] = le(i + 1, vertex.idx() + 1); 220 | } 221 | } 222 | 223 | this->is_vertex_info_computed_ = true; 224 | return true; 225 | 226 | } 227 | 228 | mwArray compute_vertex_cf(int mul) 229 | { 230 | 231 | mwArray vertices(3, this->n_vertices(), mxDOUBLE_CLASS); 232 | mwArray faces(3, this->n_faces(), mxDOUBLE_CLASS); 233 | 234 | if (!this->to_VF_mwArray(vertices, faces)) 235 | { 236 | cerr << "to_VF_mwArray failed!" 
<< endl; 237 | return vertices; 238 | } 239 | 240 | mwArray u_max(3, this->n_vertices(), mxDOUBLE_CLASS); 241 | mwArray u_min(3, this->n_vertices(), mxDOUBLE_CLASS); 242 | mwArray curvature_max(1, this->n_vertices(), mxDOUBLE_CLASS); 243 | mwArray curvature_min(1, this->n_vertices(), mxDOUBLE_CLASS); 244 | mwArray normal(3, this->n_vertices(), mxDOUBLE_CLASS); 245 | mwArray hks(this->hks_len, this->n_vertices(), mxDOUBLE_CLASS); 246 | mwArray le(this->le_len * mul, this->n_vertices(), mxDOUBLE_CLASS); 247 | mwArray cf(1, (this->le_len * 3 + 1) * mul, mxDOUBLE_CLASS); 248 | mwArray filenameArray(this->filename.c_str()); 249 | mwArray desc_len(1, 1, mxDOUBLE_CLASS); 250 | mwArray le_len(1, 1, mxDOUBLE_CLASS); 251 | mwArray diameter(1, 1, mxDOUBLE_CLASS); 252 | mwArray resp(3, this->n_vertices(), mxDOUBLE_CLASS); 253 | desc_len(1, 1) = this->hks_len; 254 | le_len(1, 1) = (this->le_len * 3.0 + 1) * mul / 3.0; 255 | mwArray cf_flag(1, 1, mxLOGICAL_CLASS); 256 | cf_flag(1, 1) = true; 257 | 258 | compute_curvature(10, u_max, u_min, curvature_max, curvature_min, normal, hks, diameter, resp, le, cf, vertices, faces, filenameArray, desc_len, le_len, cf_flag); //number output 259 | 260 | return cf; 261 | 262 | } 263 | 264 | 265 | bool is_vertex_info_computed() const 266 | { 267 | return is_vertex_info_computed_; 268 | } 269 | 270 | 271 | const VPropHandles &get_vphs() const 272 | { 273 | return vphs; 274 | } 275 | 276 | double get_le_len() const 277 | { 278 | return le_len; 279 | } 280 | 281 | double correct_calc_edge_length(EdgeHandle _eh) const 282 | { 283 | auto _heh = this->halfedge_handle(_eh, 0); 284 | auto v = this->point(this->to_vertex_handle(_heh)) - this->point(this->from_vertex_handle(_heh)); 285 | return v.length(); 286 | } 287 | 288 | double mean_edge_length() 289 | { 290 | double sum = 0; 291 | for (auto &edge : this->edges()) 292 | { 293 | sum += this->correct_calc_edge_length(edge); 294 | } 295 | 296 | return sum / this->n_edges(); 297 | } 298 | 299 | 300 | 
}; 301 | 302 | } //end namespace GIGen 303 | 304 | #endif //DGPC_MESH_H 305 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/Vector3.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is MODIFIED from a part of the DGPC library to 3 | * help it support to read off model files. 4 | * 5 | * The library computes Discrete Geodesic Polar Coordinates 6 | * on a polygonal mesh. 7 | * 8 | * Author: Hanyu Wang(王涵玉) 9 | * DGPC file's authors: Eivind Lyche Melvær and Martin Reimers 10 | ************************************************************/ 11 | 12 | #ifndef VECTOR3_H 13 | #define VECTOR3_H 14 | #include 15 | #include 16 | #include 17 | 18 | namespace GIGen { 19 | template 20 | class Vector3 { 21 | real_t p_[3]; 22 | void set(real_t x, real_t y, real_t z) { 23 | p_[0] = x; 24 | p_[1] = y; 25 | p_[2] = z; 26 | } 27 | public: 28 | typedef real_t value_type; 29 | typedef Vector3 vector_type; 30 | 31 | static const size_t size_ = 3; 32 | 33 | Vector3() { 34 | p_[0] = p_[1] = p_[2] = std::numeric_limits::max(); 35 | } 36 | 37 | Vector3(real_t x, real_t y, real_t z) { 38 | set(x, y, z); 39 | } 40 | 41 | Vector3(const real_t v[]) { 42 | set(v[0], v[1], v[2]); 43 | } 44 | 45 | Vector3(const Vector3& p) { 46 | set(p.x(), p.y(), p.z()); 47 | } 48 | 49 | const real_t& x() const { return p_[0]; }; 50 | real_t& x() { return p_[0]; }; 51 | const real_t& y() const { return p_[1]; }; 52 | real_t& y() { return p_[1]; }; 53 | const real_t& z() const { return p_[2]; }; 54 | real_t& z() { return p_[2]; }; 55 | 56 | const real_t& operator [] (int i) const { return p_[i]; }; 57 | real_t& operator [] (int i) { return p_[i]; }; 58 | 59 | Vector3& operator= (const Vector3& p) { 60 | set(p.x(), p.y(), p.z()); 61 | return *this; 62 | } 63 | 64 | Vector3 operator- (const Vector3& v) const { 65 | return Vector3(x() - v.x(), 
y() - v.y(), z() - v.z()); 66 | } 67 | 68 | Vector3 operator+ (const Vector3& v) const { 69 | return Vector3(x() + v.x(), y() + v.y(), z() + v.z()); 70 | } 71 | 72 | real_t operator* (const Vector3& v) const { 73 | return x()*v.x() + y()*v.y() + z()*v.z(); 74 | } 75 | 76 | Vector3 operator* (real_t d) const { 77 | return Vector3(x()*d, y()*d, z()*d); 78 | } 79 | 80 | // Hanyu's code 81 | Vector3 operator/ (real_t d) const 82 | { 83 | return Vector3(x() / d, y() / d, z() / d); 84 | } 85 | 86 | real_t dist(const Vector3& v) const { 87 | return std::sqrt(dist2(v)); 88 | } 89 | 90 | real_t dist2(const Vector3& v) const { 91 | real_t dx = x() - v.x(); 92 | real_t dy = y() - v.y(); 93 | real_t dz = z() - v.z(); 94 | return dx*dx + dy*dy + dz*dz; 95 | } 96 | 97 | real_t length() const { 98 | return std::sqrt(length2()); 99 | } 100 | 101 | real_t length2() const { 102 | return x()*x() + y()*y() + z()*z(); 103 | } 104 | 105 | Vector3 crossProd(const Vector3& v) const { 106 | return Vector3(y()*v.z() - z()*v.y(), 107 | z()*v.x() - x()*v.z(), 108 | x()*v.y() - y()*v.x()); 109 | } 110 | 111 | Vector3& normalize() { 112 | const real_t len2 = length2(); 113 | if (len2) { 114 | const real_t len = std::sqrt(len2); 115 | p_[0] /= len; 116 | p_[1] /= len; 117 | p_[2] /= len; 118 | } 119 | return *this; 120 | } 121 | 122 | //// Hanyu's code 123 | //template 124 | //friend ostream& operator<< (ostream& out, const Vector3& s); 125 | 126 | // Hanyu's code 127 | std::string to_string() const 128 | { 129 | std::ostringstream oss; 130 | oss << "x: " << p_[0] << ", y: " << p_[1] << ", z: " << p_[2]; 131 | return oss.str(); 132 | } 133 | 134 | }; 135 | 136 | template 137 | Vector3 operator* (real_t d, const Vector3& v) 138 | { 139 | return v*d; 140 | } 141 | 142 | 143 | //// Hanyu's code 144 | //template 145 | //ostream& operator<< (ostream& out, const Vector3& s) 146 | //{ 147 | // out << "x: " << s[0] << ", y: " << s[1] << ", z: " << s[2]; 148 | // return out; 149 | //} 150 | 151 | 152 | 
// Hanyu's code 153 | template 154 | class Rot_mat 155 | { 156 | private: 157 | real_t r_m[3][3]; 158 | 159 | public: 160 | 161 | Rot_mat(const real_t &rot_x, const real_t &rot_y, const real_t &rot_z, const real_t &rot_theta) 162 | { 163 | double &&vec_norm = std::sqrt(rot_x * rot_x + rot_y * rot_y + rot_z * rot_z); 164 | 165 | real_t th = rot_theta; 166 | 167 | real_t &&x = rot_x / vec_norm; 168 | real_t &&y = rot_y / vec_norm; 169 | real_t &&z = rot_z / vec_norm; 170 | 171 | this->r_m[0][0] = std::cos(th) + (1 - std::cos(th)) * x * x; 172 | this->r_m[0][1] = (1 - std::cos(th)) * x * y - std::sin(th) * z; 173 | this->r_m[0][2] = (1 - std::cos(th)) * x * z + std::sin(th) * y; 174 | 175 | this->r_m[1][0] = (1 - std::cos(th)) * y * z + std::sin(th) * z; 176 | this->r_m[1][1] = std::cos(th) + (1 - std::cos(th)) * y * y; 177 | this->r_m[1][2] = (1 - std::cos(th)) * y * z - std::sin(th) * x; 178 | 179 | this->r_m[2][0] = (1 - std::cos(th)) * z * x - std::sin(th) * y; 180 | this->r_m[2][1] = (1 - std::cos(th)) * z * y + std::sin(th) * x; 181 | this->r_m[2][2] = std::cos(th) + (1 - std::cos(th)) * z * z; 182 | } 183 | 184 | Rot_mat(const Vector3 &rot_axis, const real_t &rot_theta) : 185 | Rot_mat(rot_axis.x(), rot_axis.y(), rot_axis.z(), rot_theta) {} 186 | 187 | Vector3 operator* (const Vector3& v) const 188 | { 189 | return Vector3 190 | ( 191 | this->r_m[0][0] * v.x() + this->r_m[0][1] * v.y() + this->r_m[0][2] * v.z(), 192 | this->r_m[1][0] * v.x() + this->r_m[1][1] * v.y() + this->r_m[1][2] * v.z(), 193 | this->r_m[2][0] * v.x() + this->r_m[2][1] * v.y() + this->r_m[2][2] * v.z() 194 | ); 195 | } 196 | 197 | }; 198 | 199 | 200 | } //End namespace GIGen 201 | 202 | #endif //VECTOR3_H 203 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/libcompcur.h: -------------------------------------------------------------------------------- 1 | // 2 | // MATLAB Compiler: 6.2 (R2016a) 3 | // Date: Mon Nov 12 17:47:20 
2018 4 | // Arguments: "-B" "macro_default" "-W" "cpplib:libcompcur" "-T" "link:lib" 5 | // "compute_curvature.m" 6 | // 7 | 8 | #ifndef __libcompcur_h 9 | #define __libcompcur_h 1 10 | 11 | #if defined(__cplusplus) && !defined(mclmcrrt_h) && defined(__linux__) 12 | # pragma implementation "mclmcrrt.h" 13 | #endif 14 | #include "mclmcrrt.h" 15 | #include "mclcppclass.h" 16 | #ifdef __cplusplus 17 | extern "C" { 18 | #endif 19 | 20 | #if defined(__SUNPRO_CC) 21 | /* Solaris shared libraries use __global, rather than mapfiles 22 | * to define the API exported from a shared library. __global is 23 | * only necessary when building the library -- files including 24 | * this header file to use the library do not need the __global 25 | * declaration; hence the EXPORTING_ logic. 26 | */ 27 | 28 | #ifdef EXPORTING_libcompcur 29 | #define PUBLIC_libcompcur_C_API __global 30 | #else 31 | #define PUBLIC_libcompcur_C_API /* No import statement needed. */ 32 | #endif 33 | 34 | #define LIB_libcompcur_C_API PUBLIC_libcompcur_C_API 35 | 36 | #elif defined(_HPUX_SOURCE) 37 | 38 | #ifdef EXPORTING_libcompcur 39 | #define PUBLIC_libcompcur_C_API __declspec(dllexport) 40 | #else 41 | #define PUBLIC_libcompcur_C_API __declspec(dllimport) 42 | #endif 43 | 44 | #define LIB_libcompcur_C_API PUBLIC_libcompcur_C_API 45 | 46 | 47 | #else 48 | 49 | #define LIB_libcompcur_C_API 50 | 51 | #endif 52 | 53 | /* This symbol is defined in shared libraries. Define it here 54 | * (to nothing) in case this isn't a shared library. 
55 | */ 56 | #ifndef LIB_libcompcur_C_API 57 | #define LIB_libcompcur_C_API /* No special import/export declaration */ 58 | #endif 59 | 60 | extern LIB_libcompcur_C_API 61 | bool MW_CALL_CONV libcompcurInitializeWithHandlers( 62 | mclOutputHandlerFcn error_handler, 63 | mclOutputHandlerFcn print_handler); 64 | 65 | extern LIB_libcompcur_C_API 66 | bool MW_CALL_CONV libcompcurInitialize(void); 67 | 68 | extern LIB_libcompcur_C_API 69 | void MW_CALL_CONV libcompcurTerminate(void); 70 | 71 | 72 | 73 | extern LIB_libcompcur_C_API 74 | void MW_CALL_CONV libcompcurPrintStackTrace(void); 75 | 76 | extern LIB_libcompcur_C_API 77 | bool MW_CALL_CONV mlxCompute_curvature(int nlhs, mxArray *plhs[], int nrhs, mxArray 78 | *prhs[]); 79 | 80 | 81 | #ifdef __cplusplus 82 | } 83 | #endif 84 | 85 | #ifdef __cplusplus 86 | 87 | /* On Windows, use __declspec to control the exported API */ 88 | #if defined(_MSC_VER) || defined(__BORLANDC__) 89 | 90 | #ifdef EXPORTING_libcompcur 91 | #define PUBLIC_libcompcur_CPP_API __declspec(dllexport) 92 | #else 93 | #define PUBLIC_libcompcur_CPP_API __declspec(dllimport) 94 | #endif 95 | 96 | #define LIB_libcompcur_CPP_API PUBLIC_libcompcur_CPP_API 97 | 98 | #else 99 | 100 | #if !defined(LIB_libcompcur_CPP_API) 101 | #if defined(LIB_libcompcur_C_API) 102 | #define LIB_libcompcur_CPP_API LIB_libcompcur_C_API 103 | #else 104 | #define LIB_libcompcur_CPP_API /* empty! 
*/ 105 | #endif 106 | #endif 107 | 108 | #endif 109 | 110 | extern LIB_libcompcur_CPP_API void MW_CALL_CONV compute_curvature(int nargout, mwArray& Umax, mwArray& Umin, mwArray& Cmax, mwArray& Cmin, mwArray& Normal, mwArray& hks, mwArray& diameter, mwArray& resp, mwArray& le, mwArray& cf, const mwArray& V, const mwArray& F, const mwArray& off_filename, const mwArray& hks_len, const mwArray& le_len, const mwArray& cf_flag); 111 | 112 | #endif 113 | #endif 114 | -------------------------------------------------------------------------------- /cpp_GI_generation/include/utils.h: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * Author: Hanyu Wang(王涵玉) 3 | ************************************************************/ 4 | 5 | #ifndef Path_H 6 | #define Path_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include "dirent.h" 14 | 15 | 16 | namespace GIGen 17 | { 18 | class Path : public std::string 19 | { 20 | public: 21 | Path(const std::string &s) 22 | { 23 | this->assign(s); 24 | } 25 | 26 | Path() {} 27 | 28 | inline auto &pythonic_replace(const std::string& src, const std::string& dest) // Replace all src occured in *this into dest. 29 | { 30 | std::string::size_type pos = 0; 31 | while ((pos = this->find(src, pos)) != std::string::npos) 32 | { 33 | this->replace(pos, src.size(), dest); 34 | pos += dest.size(); 35 | } 36 | return *this; 37 | } 38 | 39 | auto &operator=(const std::string &s) // Value assigning method. 40 | { 41 | return this->assign(s); 42 | } 43 | 44 | }; 45 | 46 | 47 | class Dir : public Path 48 | { 49 | public: 50 | Dir(const std::string &s) 51 | { 52 | this->assign(s); 53 | if (this->size()) 54 | this->append(this->at(this->size() - 1) != '/' ? "/" : ""); // use linux style path 55 | } 56 | Dir() {} 57 | 58 | bool ls_to_vector(std::vector& filenames) const // List folders/files in this dir into filenames. 
59 | { 60 | const char *path = this->c_str(); 61 | struct dirent* ent = NULL; 62 | DIR *pDir; 63 | pDir = opendir(path); 64 | 65 | if (pDir == NULL) 66 | return false; 67 | 68 | Path tmp; 69 | while (NULL != (ent = readdir(pDir))) 70 | { 71 | tmp = ent->d_name; 72 | if (tmp != "." && tmp != "..") 73 | filenames.push_back(tmp); 74 | } 75 | return true; 76 | } 77 | 78 | //std::string &join(const std::string &p) 79 | //{ 80 | // return this->append(p); 81 | //} 82 | 83 | static Path join(const std::string &p0, const std::string &p1) // Join two path together (assuming the first one belongs to Dir). 84 | { 85 | return Path(Dir(p0).append(p1)); 86 | } 87 | 88 | }; 89 | 90 | 91 | template 92 | std::string to_string_f(const char* format, const _Printable p) 93 | { 94 | char buffer[20]; 95 | sprintf(buffer, format, p); 96 | return std::string(buffer); 97 | } 98 | 99 | 100 | template 101 | std::vector<_ItemType> read_vector(const std::string &filepath) 102 | { 103 | _ItemType a; 104 | std::vector<_ItemType> r_vec; 105 | std::ifstream file(filepath, std::ifstream::in); 106 | while (file >> a) 107 | { 108 | r_vec.emplace_back(a); 109 | } 110 | return r_vec; 111 | } 112 | 113 | 114 | bool is_useless_char(int ch) 115 | { 116 | return (ch == '[') || (ch == ']') || (ch == '{') || (ch == '}') || (ch == '(') || (ch == ')') || std::isspace(ch); 117 | } 118 | 119 | 120 | inline std::string &trim(std::string &s) 121 | { 122 | s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int ch) { return !is_useless_char(ch); })); 123 | s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end()); 124 | 125 | return s; 126 | } 127 | 128 | 129 | std::vector parse_list(std::string s, const std::string &delimiter = ",") 130 | { 131 | trim(s); 132 | std::vector r_vec; 133 | size_t pos = 0; 134 | std::string token; 135 | 136 | while ((pos = s.find(delimiter)) != std::string::npos) 137 | { 138 | token = s.substr(0, pos); 139 | 
r_vec.emplace_back(std::stod(token)); 140 | s.erase(0, pos + delimiter.length()); 141 | } 142 | 143 | if (trim(s) == "") 144 | return r_vec; 145 | r_vec.emplace_back(std::stod(s)); 146 | 147 | return r_vec; 148 | } 149 | 150 | void show_progress_bar(const volatile int ¤t, const int &total, const time_t &begin_time) 151 | { 152 | const int total_progerss = 50; 153 | auto progress = total_progerss*(current) / total; 154 | std::cout << '\r' << "[" << std::string(progress, '#') << std::string(total_progerss - progress, ' '); 155 | std::cout << "]" << " " << current << "/" << total; 156 | std::cout << " total time cost: " << time(0) - begin_time << 's' << std::flush; 157 | } 158 | 159 | } // End namespace GIGen 160 | 161 | #endif // !Path_H -------------------------------------------------------------------------------- /cpp_GI_generation/lib/libcompcur.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/cpp_GI_generation/lib/libcompcur.lib -------------------------------------------------------------------------------- /cpp_GI_generation/libcompcur.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/cpp_GI_generation/libcompcur.dll -------------------------------------------------------------------------------- /cpp_GI_generation/readme.md: -------------------------------------------------------------------------------- 1 | # Caution 2 | 3 | - You need to manually copy *libcompcur.dll* to the correct path to run the binary. 4 | 5 | - When creating geometry images, GIGen will automatically skip existed geometry image files in the output directory. However, GIGen will **NOT** check the integrity of them. Therefore, it is the user's responsibility to make sure that all existed files are correct and integrated. 
-------------------------------------------------------------------------------- /cpp_GI_generation/src/gigen.cpp: -------------------------------------------------------------------------------- 1 | /************************************************************ 2 | * This file is MODIFIED from a part of the 3D descriptor 3 | * learning framework by Hanyu Wang(王涵玉) 4 | * https://github.com/jianweiguo/local3Ddescriptorlearning 5 | * 6 | * Author: Yiqun Wang(王逸群) 7 | * https://github.com/yiqun-wang/LPS 8 | ************************************************************/ 9 | 10 | #ifdef _MSC_VER 11 | #ifndef _CRT_SECURE_NO_WARNINGS 12 | #define _CRT_SECURE_NO_WARNINGS 13 | #endif // !_CRT_SECURE_NO_WARNINGS 14 | #endif 15 | 16 | #include 17 | #include "Generator.h" 18 | #include "Mesh_C.h" 19 | #include "GPC.h" 20 | #include "utils.h" 21 | #include 22 | #include 23 | #include 24 | #include 25 | #include 26 | #include 27 | #include 28 | #include "dirent.h" 29 | #include "IniFile.h" 30 | #include "libcompcur.h" 31 | #include "mclmcr.h" 32 | #include "matrix.h" 33 | #include "mclcppclass.h" 34 | 35 | 36 | using namespace OpenMesh; 37 | using namespace GIGen; 38 | using namespace std; 39 | 40 | 41 | // mutex to control global_finished_count_ptr 42 | static mutex mtx; 43 | 44 | void usage_error(const char* progname) 45 | { 46 | cout << "Usage: " << progname << " config_filename " << endl; 47 | cout << endl; 48 | cout << "Examples: " << endl; 49 | cout << endl; 50 | cout << " " << "Compute geometry images using config.ini" << endl; 51 | cout << " " << progname << " config.ini" << endl; 52 | cout << endl; 53 | exit(-1); 54 | } 55 | 56 | 57 | // Geometry image generation function 58 | void process_one_point(Mesh &curr_mesh, 59 | const unsigned int &source_idx_, 60 | const double &max_r, 61 | const std::vector &radius_list, 62 | const int &gi_size, 63 | const int &rotation_num, 64 | const Dir &gi_dir, 65 | const std::string& name_prefix, 66 | volatile int 
*global_finished_count_ptr) 67 | { 68 | GPC gpc(curr_mesh, source_idx_, max_r, radius_list); 69 | gpc.compute_GPC(); 70 | GI gi(gpc, radius_list, gi_size, rotation_num); 71 | 72 | if (!gi.save_all_rotation_in_one(gi_dir, name_prefix)) 73 | { 74 | cerr << "Failed to save geometry images." << endl; 75 | cerr << gi_dir + name_prefix + "_rot_x.gi" << endl; 76 | } 77 | 78 | while (!mtx.try_lock()); 79 | (*global_finished_count_ptr)++; 80 | mtx.unlock(); 81 | 82 | } 83 | 84 | 85 | int main(int argc, char** argv) 86 | { 87 | time_t begin_time = time(0); 88 | 89 | // Parse options. 90 | if (argc != 2) usage_error(argv[0]); 91 | const std::string& config_filepath = argv[1]; 92 | 93 | // Initialize MATLAB Computation Library. 94 | if (!libcompcurInitialize()) 95 | { 96 | cerr << "Could not initialize libcompcur!" << endl; 97 | exit(-1); 98 | } 99 | 100 | cout << "The libcompcur Initialization Success!" << endl; 101 | 102 | 103 | // Get configurations from config file. 104 | bool got_kpis = false; 105 | bool got_radius_list = false; 106 | vector kpi_set; 107 | vector radius_list_p, radius_list; 108 | 109 | IniFile ini(config_filepath); 110 | 111 | ini.setSection("dirs"); 112 | auto mesh_dir_str = ini.readStr("mesh_dir"); 113 | auto gi_dir_str = ini.readStr("gi_dir"); 114 | auto kpi_dir_str = ini.readStr("kpi_dir"); 115 | 116 | ini.setSection("settings"); 117 | int gi_size = ini.readInt("gi_size"); 118 | int hks_len = ini.readInt("hks_len"); 119 | int rotation_num = ini.readInt("rotation_num"); 120 | auto radius_list_str = ini.readStr("radius_list_p"); 121 | 122 | if (radius_list_str == "default") 123 | { 124 | radius_list_p = { 0.021, 0.028, 0.035 }; 125 | } 126 | else 127 | { 128 | radius_list_p = parse_list(radius_list_str); 129 | } 130 | 131 | auto max_r_iter = max_element(radius_list_p.begin(), radius_list_p.end()); 132 | double max_r = (*max_r_iter) * 1.33333333; 133 | 134 | if (ini.readStr("using_all_points") != "true") 135 | { 136 | kpi_set = read_vector(kpi_dir_str); 
137 | if (!kpi_set.size()) 138 | { 139 | cerr << "Cannot read keypoint indices!" << endl; 140 | exit(-1); 141 | } 142 | got_kpis = true; 143 | } 144 | 145 | 146 | const Dir mesh_dir(mesh_dir_str); 147 | const Dir gi_dir(gi_dir_str); 148 | 149 | vector filenames, off_filenames; 150 | if (!mesh_dir.ls_to_vector(filenames)) 151 | { 152 | std::cerr << "Fail to list files." << std::endl; 153 | return 1; 154 | } 155 | 156 | if (!(_access(gi_dir.c_str(), 00/*check if it exists*/) == 0)/*not exist!*/) 157 | { 158 | if (_mkdir(gi_dir.c_str()) == -1) 159 | { 160 | cerr << "Failed to make directory." << endl; 161 | exit(-1); 162 | } 163 | } 164 | 165 | // Extract off files. 166 | for (const auto &filename : filenames) 167 | { 168 | std::string&& ext = filename.substr(filename.rfind('.') == std::string::npos ? filename.length() : filename.rfind('.') + 1); 169 | transform(ext.begin(), ext.end(), ext.begin(), ::tolower); 170 | if (ext == "off") 171 | { 172 | off_filenames.emplace_back(filename); 173 | } 174 | } 175 | 176 | 177 | for (size_t filename_i = 0; filename_i < off_filenames.size(); filename_i++) 178 | { 179 | const auto &filename = off_filenames[filename_i]; 180 | cout << "Processing " << filename << ". 
" << filename_i + 1 << "/" << off_filenames.size() << endl; 181 | 182 | auto name_prefix_base = Path(filename).pythonic_replace(".off", ""); 183 | auto &&fullpath = Dir::join(mesh_dir, filename); 184 | Mesh curr_mesh(fullpath); 185 | 186 | //if (!got_kpis) 187 | { 188 | int n = curr_mesh.n_vertices(); 189 | kpi_set.resize(n); 190 | for (size_t i = 0; i < n; i++) 191 | { 192 | kpi_set[i] = i; 193 | } 194 | got_kpis = true; 195 | } 196 | 197 | //modified list 198 | //if (!got_radius_list) 199 | { 200 | double tmp = curr_mesh.diameter; 201 | radius_list.clear(); 202 | for (auto &i : radius_list_p) 203 | { 204 | radius_list.emplace_back(i * tmp); 205 | } 206 | max_r = (*max_r_iter) * 1.33333333; 207 | max_r *= tmp; 208 | 209 | got_radius_list = true; 210 | //continue; 211 | } 212 | 213 | 214 | auto curr_gi_dir = gi_dir + Dir(name_prefix_base); 215 | 216 | if (!(_access(curr_gi_dir.c_str(), 00/*check if it exists*/) == 0)/*not exist!*/) 217 | { 218 | if (mkdir(curr_gi_dir.c_str()) == -1) 219 | { 220 | cerr << "Failed to make directory." 
<< endl; 221 | exit(-1); 222 | } 223 | } 224 | 225 | ////////compute_cf 226 | for (size_t i = 0; i < /*kpi_n*/ curr_mesh.n_vertices(); i++) 227 | { 228 | GPC gpc(curr_mesh, /*kpi_set[i]*/ i, max_r, radius_list); 229 | gpc.compute_cf(i, name_prefix_base); 230 | std::cout << "iterations " << i + 1 << std::endl; 231 | } 232 | 233 | 234 | ////////compute_cf45 for keypoint 235 | //size_t kpi_n = kpi_set.size(); 236 | //for (size_t i = 0; i < /*kpi_n*/ curr_mesh.n_vertices(); i++) 237 | //{ 238 | // GPC gpc(curr_mesh, /*kpi_set[i]*/ i, max_r, radius_list); 239 | // gpc.compute_cf_45(/*kpi_set[i]*/ i, name_prefix_base); 240 | // std::cout << "iterations " << i + 1 << std::endl; 241 | //} 242 | 243 | 244 | size_t kpi_num = kpi_set.size(); 245 | 246 | // global_finished_count is shared by threads 247 | volatile int global_finished_count = 0; 248 | volatile int *global_finished_count_ptr = &global_finished_count; 249 | for (size_t i = 0; i < kpi_num; i++) 250 | { 251 | std::string name_prefix = name_prefix_base + "_pidx_" + to_string_f("%04d", i); 252 | 253 | 254 | 255 | auto geo_img_path = curr_gi_dir + name_prefix + ".gi"; 256 | 257 | if (_access(geo_img_path.c_str(), 00/*check if it exists*/) == 0 /*exist!*/) 258 | { 259 | while (!mtx.try_lock()); 260 | global_finished_count++; 261 | mtx.unlock(); 262 | //fin.close(); 263 | continue; 264 | } 265 | 266 | // Multithread geometry image generation. 267 | thread t(process_one_point, curr_mesh, kpi_set[i], max_r, radius_list, gi_size, rotation_num, curr_gi_dir, name_prefix, global_finished_count_ptr); 268 | t.detach(); 269 | 270 | // Single thread version 271 | //process_one_point(curr_mesh, kpi_set[i], max_r, radius_list, gi_size, rotation_num, curr_gi_dir, name_prefix, global_finished_count_ptr); 272 | 273 | show_progress_bar(global_finished_count, kpi_num, begin_time); 274 | 275 | } 276 | 277 | // Waiting for all threads to finish. 278 | while (global_finished_count < kpi_num) 279 | { 280 | cout << "waiting... 
finished count: " << global_finished_count << std::flush; 281 | std::this_thread::sleep_for(std::chrono::milliseconds(200)); 282 | } 283 | 284 | show_progress_bar(global_finished_count, kpi_num, begin_time); 285 | 286 | cout << endl; 287 | 288 | } 289 | 290 | libcompcurTerminate(); 291 | return 0; 292 | } 293 | 294 | 295 | -------------------------------------------------------------------------------- /matlab_LPS/Laplacian_Energy_Gen.m: -------------------------------------------------------------------------------- 1 | % ********************************************************** 2 | % Author: Yiqun Wang(ÍõÒÝȺ) 3 | % https://github.com/yiqun-wang/LPS 4 | % ********************************************************** 5 | function [E, Cf] = Laplacian_Energy_Gen(V, F, k) 6 | 7 | shape.VERT = V';shape.TRIV = F'; 8 | [~, shape.n] = size(V); 9 | options.symmetrize = 1; 10 | options.normalize = 0; 11 | type = 'conformal'; 12 | stitching = false; 13 | K = 3*k; 14 | 15 | [L,A] = compute_mesh_laplacian_plusA_half(shape.VERT',shape.TRIV',type,options); 16 | 17 | if stitching 18 | boundary_edge = compute_boundary_all(shape.TRIV'); % for boundary 19 | % A=full(A); 20 | % L=full(L); 21 | W=diag(diag(L))-L; 22 | inner = 1:shape.n; 23 | inner(boundary_edge) = []; 24 | boundary = sort(boundary_edge); 25 | bs = size(boundary, 2); 26 | is = size(inner, 2); 27 | AA = zeros(size(A) + is); 28 | AA(1:shape.n, 1:shape.n) = A; 29 | AA(shape.n+1:end, shape.n+1:end) = A(inner,inner); 30 | LL = zeros(size(W) + is); 31 | LL(1:shape.n, 1:shape.n) = W; 32 | LL(shape.n+1:end,boundary)=W(inner,boundary); 33 | LL(boundary, shape.n+1:end)=W(boundary, inner); 34 | LL(shape.n+1:end, shape.n+1:end)=W(inner,inner); 35 | LL = diag(sum(LL,2)) - LL; 36 | A=sparse(AA); 37 | L=sparse(LL); 38 | VERT = zeros(shape.n+is, 3); 39 | VERT(1:shape.n, :) = shape.VERT; 40 | VERT(shape.n+1:end, :) = shape.VERT(inner, :); 41 | shape.VERT = VERT; 42 | shape.n = size(LL, 1); 43 | end 44 | 45 | [V,D] = eigs(L,A,K+1,-1); 46 
| %ascending sort and eliminate DC. 47 | V=fliplr(V(:,1:end-1)); D=rot90(D(1:end-1,1:end-1),2); 48 | C = V' * A* shape.VERT; 49 | 50 | Cf = D * sqrt(sum(C.^2,2)); 51 | Cf = Cf'; 52 | E = zeros(floor(k),shape.n); 53 | end 54 | 55 | 56 | -------------------------------------------------------------------------------- /matlab_LPS/check_face_vertex.m: -------------------------------------------------------------------------------- 1 | function [vertex,face] = check_face_vertex(vertex,face, options) 2 | 3 | % check_face_vertex - check that vertices and faces have the correct size 4 | % 5 | % [vertex,face] = check_face_vertex(vertex,face); 6 | % 7 | % Copyright (c) 2007 Gabriel Peyre 8 | 9 | vertex = check_size(vertex,2,4); 10 | face = check_size(face,3,4); 11 | 12 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%% 13 | function a = check_size(a,vmin,vmax) 14 | if isempty(a) 15 | return; 16 | end 17 | if size(a,1)>size(a,2) 18 | a = a'; 19 | end 20 | if size(a,1)<3 && size(a,2)==3 21 | a = a'; 22 | end 23 | if size(a,1)<=3 && size(a,2)>=3 && sum(abs(a(:,3)))==0 24 | % for flat triangles 25 | % a = a'; 26 | end 27 | if size(a,1)vmax 28 | error('face or vertex is not of correct size'); 29 | end 30 | -------------------------------------------------------------------------------- /matlab_LPS/clamp.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/clamp.m -------------------------------------------------------------------------------- /matlab_LPS/comp_geodesics_to_all.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/comp_geodesics_to_all.mexw64 -------------------------------------------------------------------------------- /matlab_LPS/compute_boundary_all.m: 
-------------------------------------------------------------------------------- 1 | function boundary=compute_boundary_all(face, options) 2 | 3 | % compute_boundary - compute the vertices on the boundary of a 3D mesh 4 | % 5 | % boundary=compute_boundary(face); 6 | % 7 | % Copyright (c) 2007 Gabriel Peyre 8 | 9 | if size(face,1)nvert 103 | warning('problem in boundary'); 104 | end 105 | 106 | 107 | %%% OLD %%% 108 | function v = compute_boundary_old(faces) 109 | 110 | nvert = max(face(:)); 111 | ring = compute_vertex_ring( face ); 112 | 113 | % compute boundary 114 | v = -1; 115 | for i=1:nvert % first find a starting vertex 116 | f = ring{i}; 117 | if f(end)<0 118 | v = i; 119 | break; 120 | end 121 | end 122 | if v<0 123 | error('No boundary found.'); 124 | end 125 | boundary = [v]; 126 | prev = -1; 127 | while true 128 | f = ring{v}; 129 | if f(end)>=0 130 | error('Problem in boundary'); 131 | end 132 | if f(1)~=prev 133 | prev = v; 134 | v = f(1); 135 | else 136 | prev = v; 137 | v = f(end-1); 138 | end 139 | if ~isempty( find(boundary==v) ) 140 | % we have reach the begining of the boundary 141 | if v~=boundary(1) 142 | warning('Begining and end of boundary doesn''t match.'); 143 | else 144 | break; 145 | end 146 | end 147 | boundary = [boundary,v]; 148 | end -------------------------------------------------------------------------------- /matlab_LPS/compute_curvature.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/compute_curvature.m -------------------------------------------------------------------------------- /matlab_LPS/compute_mesh_laplacian_plusA_half.m: -------------------------------------------------------------------------------- 1 | function [L,A] = compute_mesh_laplacian_plusA_half(vertex,face,type,options) 2 | 3 | % compute_mesh_laplacian - compute a laplacian matrix 4 | % 5 | % L = 
compute_mesh_laplacian(vertex,face,type,options); 6 | % 7 | % If options.symmetrize=1 and options.normalize=0 then 8 | % L = D-W 9 | % If options.symmetrize=1 and options.normalize=1 then 10 | % L = eye(n)-D^{-1/2}*W*D^{-1/2} 11 | % If options.symmetrize=0 and options.normalize=1 then 12 | % L = eye(n)-D^{-1}*W. 13 | % where D=diag(sum(W,2)) and W is the unormalized weight matrix 14 | % (see compute_mesh_weight). 15 | % 16 | % type can 'combinatorial', 'distance', 'conformal'. 17 | % 18 | % See also compute_mesh_weight. 19 | % 20 | % Copyright (c) 2007 Gabriel Peyre 21 | 22 | options.null = 0; 23 | if isfield(options, 'normalize') 24 | normalize = options.normalize; 25 | else 26 | normalize = 1; 27 | end 28 | if isfield(options, 'symmetrize') 29 | symmetrize = options.symmetrize; 30 | else 31 | symmetrize = 1; 32 | end 33 | 34 | options.normalize = 0; 35 | [W,A] = compute_mesh_weight_plusA_half(vertex,face,type,options); 36 | n = size(W,1); 37 | if symmetrize==1 && normalize==0 38 | L = diag(sum(W,2)) - W; 39 | elseif symmetrize==1 && normalize==1 40 | L = speye(n) - diag(sum(W,2).^(-1/2)) * W * diag(sum(W,2).^(-1/2)); 41 | elseif symmetrize==0 && normalize==1 42 | L = speye(n) - diag(sum(W,2).^(-1)) * W; 43 | else 44 | error('Does not work with symmetrize=0 and normalize=0'); 45 | end 46 | -------------------------------------------------------------------------------- /matlab_LPS/compute_mesh_weight_plusA_half.m: -------------------------------------------------------------------------------- 1 | function [W, A] = compute_mesh_weight_plusA_half(vertex,face,type,options) 2 | 3 | % compute_mesh_weight - compute a weight matrix 4 | % 5 | % W = compute_mesh_weight(vertex,face,type,options); 6 | % 7 | % W is sparse weight matrix and W(i,j)=0 is vertex i and vertex j are not 8 | % connected in the mesh. 9 | % 10 | % type is either 11 | % 'combinatorial': W(i,j)=1 is vertex i is conntected to vertex j. 
12 | % 'distance': W(i,j) = 1/d_ij^2 where d_ij is distance between vertex 13 | % i and j. 14 | % 'conformal': W(i,j) = cot(alpha_ij)+cot(beta_ij) where alpha_ij and 15 | % beta_ij are the adjacent angle to edge (i,j) 16 | % 17 | % If options.normalize=1, the the rows of W are normalize to sum to 1. 18 | % 19 | % Copyright (c) 2007 Gabriel Peyre 20 | 21 | options.null = 0; 22 | [vertex,face] = check_face_vertex(vertex,face); 23 | 24 | nface = size(face,1); 25 | n = max(max(face)); 26 | 27 | verb = getoptions(options, 'verb', n>5000); 28 | 29 | if nargin<3 30 | type = 'conformal'; 31 | end 32 | 33 | switch lower(type) 34 | case 'combinatorial' 35 | W = triangulation2adjacency(face); 36 | case 'distance' 37 | W = my_euclidean_distance(triangulation2adjacency(face),vertex); 38 | W(W>0) = 1./W(W>0); 39 | W = (W+W')/2; 40 | A = []; %add 41 | case 'conformal' 42 | % conformal laplacian 43 | W = sparse(n,n); 44 | A = sparse(n,n); 45 | for i=1:3 46 | i1 = mod(i-1,3)+1; 47 | i2 = mod(i ,3)+1; 48 | i3 = mod(i+1,3)+1; 49 | pp = vertex(:,face(i2,:)) - vertex(:,face(i1,:)); 50 | qq = vertex(:,face(i3,:)) - vertex(:,face(i1,:)); 51 | % normalize the vectors 52 | lpp = sqrt(sum(pp.^2,1)); lqq = sqrt(sum(qq.^2,1)); 53 | pp = pp ./ repmat( sqrt(sum(pp.^2,1)), [3 1] ); 54 | qq = qq ./ repmat( sqrt(sum(qq.^2,1)), [3 1] ); 55 | % compute angles 56 | ang = acos(sum(pp.*qq,1)); 57 | area = (sin(ang)/6.0).*(lpp.*lqq); 58 | W = W + sparse(face(i2,:),face(i3,:),0.5*cot(ang),n,n); 59 | W = W + sparse(face(i3,:),face(i2,:),0.5*cot(ang),n,n); 60 | A = A + sparse(face(i1,:),face(i1,:),area,n,n); 61 | end 62 | if 0 63 | %% OLD CODE 64 | W = sparse(n,n); 65 | ring = compute_vertex_face_ring(face); 66 | for i = 1:n 67 | if verb 68 | progressbar(i,n); 69 | end 70 | for b = ring{i} 71 | % b is a face adjacent to a 72 | bf = face(:,b); 73 | % compute complementary vertices 74 | if bf(1)==i 75 | v = bf(2:3); 76 | elseif bf(2)==i 77 | v = bf([1 3]); 78 | elseif bf(3)==i 79 | v = bf(1:2); 80 | else 81 
| error('Problem in face ring.'); 82 | end 83 | j = v(1); k = v(2); 84 | vi = vertex(:,i); 85 | vj = vertex(:,j); 86 | vk = vertex(:,k); 87 | % angles 88 | alpha = myangle(vk-vi,vk-vj); 89 | beta = myangle(vj-vi,vj-vk); 90 | % add weight 91 | W(i,j) = W(i,j) + cot( alpha ); 92 | W(i,k) = W(i,k) + cot( beta ); 93 | end 94 | end 95 | end 96 | otherwise 97 | error('Unknown type.') 98 | end 99 | 100 | if isfield(options, 'normalize') && options.normalize==1 101 | W = diag(sum(W,2).^(-1)) * W; 102 | end 103 | 104 | 105 | 106 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 107 | function beta = myangle(u,v); 108 | 109 | du = sqrt( sum(u.^2) ); 110 | dv = sqrt( sum(v.^2) ); 111 | du = max(du,eps); dv = max(dv,eps); 112 | beta = acos( sum(u.*v) / (du*dv) ); 113 | 114 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 115 | function W = my_euclidean_distance(A,vertex) 116 | 117 | if size(vertex,1) size(sources,1)) 5 | sources = sources'; 6 | end 7 | 8 | if(size(sources,2) > 1) 9 | error('sources must be stored in a vector'); 10 | end 11 | 12 | D = comp_geodesics_to_all(double(S.X), double(S.Y), double(S.Z), ... 13 | double(S.TRIV'), sources, 1); 14 | end -------------------------------------------------------------------------------- /matlab_LPS/getoptions.m: -------------------------------------------------------------------------------- 1 | function v = getoptions(options, name, v, mendatory) 2 | 3 | % getoptions - retrieve options parameter 4 | % 5 | % v = getoptions(options, 'entry', v0); 6 | % is equivalent to the code: 7 | % if isfield(options, 'entry') 8 | % v = options.entry; 9 | % else 10 | % v = v0; 11 | % end 12 | % 13 | % Copyright (c) 2007 Gabriel Peyre 14 | 15 | if nargin<4 16 | mendatory = 0; 17 | end 18 | 19 | if isfield(options, name) 20 | v = eval(['options.' name ';']); 21 | elseif mendatory 22 | error(['You have to provide options.' 
name '.']); 23 | end -------------------------------------------------------------------------------- /matlab_LPS/libcompcur.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/libcompcur.dll -------------------------------------------------------------------------------- /matlab_LPS/libcompcur.h: -------------------------------------------------------------------------------- 1 | // 2 | // MATLAB Compiler: 6.2 (R2016a) 3 | // Date: Mon Nov 12 17:47:20 2018 4 | // Arguments: "-B" "macro_default" "-W" "cpplib:libcompcur" "-T" "link:lib" 5 | // "compute_curvature.m" 6 | // 7 | 8 | #ifndef __libcompcur_h 9 | #define __libcompcur_h 1 10 | 11 | #if defined(__cplusplus) && !defined(mclmcrrt_h) && defined(__linux__) 12 | # pragma implementation "mclmcrrt.h" 13 | #endif 14 | #include "mclmcrrt.h" 15 | #include "mclcppclass.h" 16 | #ifdef __cplusplus 17 | extern "C" { 18 | #endif 19 | 20 | #if defined(__SUNPRO_CC) 21 | /* Solaris shared libraries use __global, rather than mapfiles 22 | * to define the API exported from a shared library. __global is 23 | * only necessary when building the library -- files including 24 | * this header file to use the library do not need the __global 25 | * declaration; hence the EXPORTING_ logic. 26 | */ 27 | 28 | #ifdef EXPORTING_libcompcur 29 | #define PUBLIC_libcompcur_C_API __global 30 | #else 31 | #define PUBLIC_libcompcur_C_API /* No import statement needed. 
*/ 32 | #endif 33 | 34 | #define LIB_libcompcur_C_API PUBLIC_libcompcur_C_API 35 | 36 | #elif defined(_HPUX_SOURCE) 37 | 38 | #ifdef EXPORTING_libcompcur 39 | #define PUBLIC_libcompcur_C_API __declspec(dllexport) 40 | #else 41 | #define PUBLIC_libcompcur_C_API __declspec(dllimport) 42 | #endif 43 | 44 | #define LIB_libcompcur_C_API PUBLIC_libcompcur_C_API 45 | 46 | 47 | #else 48 | 49 | #define LIB_libcompcur_C_API 50 | 51 | #endif 52 | 53 | /* This symbol is defined in shared libraries. Define it here 54 | * (to nothing) in case this isn't a shared library. 55 | */ 56 | #ifndef LIB_libcompcur_C_API 57 | #define LIB_libcompcur_C_API /* No special import/export declaration */ 58 | #endif 59 | 60 | extern LIB_libcompcur_C_API 61 | bool MW_CALL_CONV libcompcurInitializeWithHandlers( 62 | mclOutputHandlerFcn error_handler, 63 | mclOutputHandlerFcn print_handler); 64 | 65 | extern LIB_libcompcur_C_API 66 | bool MW_CALL_CONV libcompcurInitialize(void); 67 | 68 | extern LIB_libcompcur_C_API 69 | void MW_CALL_CONV libcompcurTerminate(void); 70 | 71 | 72 | 73 | extern LIB_libcompcur_C_API 74 | void MW_CALL_CONV libcompcurPrintStackTrace(void); 75 | 76 | extern LIB_libcompcur_C_API 77 | bool MW_CALL_CONV mlxCompute_curvature(int nlhs, mxArray *plhs[], int nrhs, mxArray 78 | *prhs[]); 79 | 80 | 81 | #ifdef __cplusplus 82 | } 83 | #endif 84 | 85 | #ifdef __cplusplus 86 | 87 | /* On Windows, use __declspec to control the exported API */ 88 | #if defined(_MSC_VER) || defined(__BORLANDC__) 89 | 90 | #ifdef EXPORTING_libcompcur 91 | #define PUBLIC_libcompcur_CPP_API __declspec(dllexport) 92 | #else 93 | #define PUBLIC_libcompcur_CPP_API __declspec(dllimport) 94 | #endif 95 | 96 | #define LIB_libcompcur_CPP_API PUBLIC_libcompcur_CPP_API 97 | 98 | #else 99 | 100 | #if !defined(LIB_libcompcur_CPP_API) 101 | #if defined(LIB_libcompcur_C_API) 102 | #define LIB_libcompcur_CPP_API LIB_libcompcur_C_API 103 | #else 104 | #define LIB_libcompcur_CPP_API /* empty! 
*/ 105 | #endif 106 | #endif 107 | 108 | #endif 109 | 110 | extern LIB_libcompcur_CPP_API void MW_CALL_CONV compute_curvature(int nargout, mwArray& Umax, mwArray& Umin, mwArray& Cmax, mwArray& Cmin, mwArray& Normal, mwArray& hks, mwArray& diameter, mwArray& resp, mwArray& le, mwArray& cf, const mwArray& V, const mwArray& F, const mwArray& off_filename, const mwArray& hks_len, const mwArray& le_len, const mwArray& cf_flag); 111 | 112 | #endif 113 | #endif 114 | -------------------------------------------------------------------------------- /matlab_LPS/libcompcur.lib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/libcompcur.lib -------------------------------------------------------------------------------- /matlab_LPS/perform_mesh_smoothing.m: -------------------------------------------------------------------------------- 1 | function f = perform_mesh_smoothing(face,vertex,f,options) 2 | 3 | % perform_mesh_smoothing - smooth a function defined on a mesh by averaging 4 | % 5 | % f = perform_mesh_smoothing(face,vertex,f,options); 6 | % 7 | % Smooth a function f on a width of options.niter_averaging vertices. 
8 | % 9 | % Copyright (c) 2007 Gabriel Peyre 10 | 11 | options.null = 0; 12 | naver = getoptions(options, 'niter_averaging', 1); 13 | type = getoptions(options, 'averaging_type', 'combinatorial'); 14 | 15 | if nargin<3 16 | f = []; 17 | end 18 | if isempty(f) 19 | f = vertex; 20 | end 21 | if size(f,1)1 27 | for i=1:size(f,2) 28 | f(:,i) = perform_mesh_smoothing(face,vertex,f(:,i),options); 29 | end 30 | return; 31 | end 32 | 33 | n = max(face(:)); 34 | 35 | % compute normalized averaging matrix 36 | if strcmp(type, 'combinatorial') 37 | %add diagonal 38 | W = triangulation2adjacency(face) + speye(n); 39 | D = spdiags(full(sum(W,2).^(-1)),0,n,n); 40 | W = D*W; 41 | else 42 | options.normalize=1; 43 | W = compute_mesh_weight(vertex,face,type,options); 44 | end 45 | 46 | % do averaging to smooth the field 47 | for k=1:naver 48 | f = W*f; 49 | end -------------------------------------------------------------------------------- /matlab_LPS/read_off.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/read_off.m -------------------------------------------------------------------------------- /matlab_LPS/read_shape.m: -------------------------------------------------------------------------------- 1 | function shape = read_shape( shape_path ) 2 | 3 | [vertex, face] = read_off(shape_path); 4 | shape.VERT=vertex'; 5 | shape.TRIV=face'; 6 | shape.n = length(vertex); 7 | shape.m = length(face); 8 | 9 | end 10 | 11 | -------------------------------------------------------------------------------- /matlab_LPS/shape_diameter.m: -------------------------------------------------------------------------------- 1 | function diam = shape_diameter(X, T) 2 | 3 | S.surface.X = X(:,1); 4 | S.surface.Y = X(:,2); 5 | S.surface.Z = X(:,3); 6 | S.surface.TRIV = T; 7 | S.surface.nv = size(X,1); 8 | 9 | % Shape diameter 10 | d = dijkstra_to_all(S.surface, 1); 11 
| [~,i] = max(d); 12 | d = dijkstra_to_all(S.surface, i); 13 | diam = max(d); 14 | -------------------------------------------------------------------------------- /matlab_LPS/symmshlp_matrix.m: -------------------------------------------------------------------------------- 1 | function [W A h] = symmshlp_matrix(filename, opt) 2 | % 3 | % Compute the symmetric Laplace-Beltrami matrix from mesh 4 | % 5 | % INPUTS 6 | % filename: off file of triangle mesh. 7 | % opt.htype: the way to compute the parameter h. h = hs * neighborhoodsize 8 | % if htype = 'ddr' (data driven); h = hs if hytpe = 'psp' (pre-specify) 9 | % Default : 'ddr' 10 | % opt.hs: the scaling factor that scales the neighborhood size to the 11 | % parameter h where h^2 = 4t. 12 | % Default: 2, must > 0 13 | % opt.rho: The cut-off for Gaussion function evaluation. 14 | % Default: 3, must > 0 15 | % opt.dtype: the way to compute the distance 16 | % dtype = 'euclidean' or 'geodesic'; 17 | % Default : 'euclidean' 18 | 19 | % 20 | % OUTPUTS 21 | % W: symmetric weight matrix 22 | % A: area weight per vertex, the Laplace matrix = diag(1./ A) * W 23 | % h: Gaussian width: h^2 = 4t 24 | 25 | 26 | if nargin < 1 27 | error('Too few input arguments'); 28 | elseif nargin < 2 29 | opt.hs = 2; 30 | opt.rho = 3; 31 | opt.htype = 'ddr'; 32 | opt.dtype = 'euclidean'; 33 | end 34 | opt=parse_opt(opt); 35 | 36 | if opt.hs <= 0 || opt.rho <= 0 37 | error('Invalid values in opt'); 38 | end 39 | 40 | 41 | [II JJ SS AA h] = symmshlpmatrix(filename, opt); 42 | % [II JJ SS AA h] = symmshlpmatrix_old(filename, opt); 43 | W=sparse(II, JJ, SS); 44 | A=AA; 45 | 46 | % Parsing Option. 
47 | function option = parse_opt(opt) 48 | option = opt; 49 | option_names = {'hs', 'rho', 'htype', 'dtype'}; 50 | if ~isfield(option,'hs'), 51 | option = setfield(option,'hs',2); 52 | end 53 | if ~isfield(option,'rho'), 54 | option = setfield(option,'rho', 3); 55 | end 56 | 57 | if ~isfield(option,'htype'), 58 | option = setfield(option,'htype', 'ddr'); 59 | end 60 | 61 | if ~isfield(option,'dtype'), 62 | option = setfield(option,'dtype', 'euclidean'); 63 | end 64 | 65 | -------------------------------------------------------------------------------- /matlab_LPS/symmshlpmatrix.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/symmshlpmatrix.mexw64 -------------------------------------------------------------------------------- /matlab_LPS/triangulation2adjacency.m: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yiqun-wang/LPS/a0b75715c32a72a7a6a907cb958103e62b8f88e5/matlab_LPS/triangulation2adjacency.m -------------------------------------------------------------------------------- /python_learning/classify_gi_by_pidx_and_split.py: -------------------------------------------------------------------------------- 1 | # ********************************************************** 2 | # Author: Hanyu Wang(王涵玉) 3 | # ********************************************************** 4 | # This script classifies geometry images according to point indices and apply train-validation-test split. 
5 | 6 | import argparse 7 | import os 8 | from os.path import join 9 | 10 | from tqdm import tqdm 11 | from sklearn.model_selection import train_test_split 12 | 13 | parser = argparse.ArgumentParser(description='This script classifies geometry images according to point indices and apply ' 14 | 'train-validation-test split.') 15 | 16 | parser.add_argument('--n_models', '--nm', default=100, type=int, 17 | help='number of calculated models') 18 | parser.add_argument('--n_points', '--np', default=6890, type=int, 19 | help='number of calculated points') 20 | parser.add_argument('--source_dir', '-s', 21 | default=r'/data/yqwang/Dataset/faust_256p_045_cb/gi', type=str, 22 | help='directory of source geometry images') 23 | parser.add_argument('--destination_dir', '-d', 24 | default=r'/data/yqwang/Dataset/faust_256p_045_cb/gi_classified', type=str, 25 | help='directory to store classified geometry images') 26 | parser.add_argument('--percentage_train', '--ptr', default=0.75, type=float, 27 | help='percentage of training set') 28 | parser.add_argument('--percentage_val', '--pv', default=0.1, type=float, 29 | help='percentage of validation set') 30 | parser.add_argument('--percentage_test', '--pte', default=0.15, type=float, 31 | help='percentage of testing set') 32 | 33 | args = parser.parse_args() 34 | 35 | geoimg_dir = args.source_dir 36 | result_pre_dir = args.destination_dir 37 | 38 | percentage_valtest = args.percentage_val + args.percentage_test 39 | test_of_valtest = args.percentage_test / percentage_valtest 40 | 41 | if not os.path.exists(geoimg_dir): 42 | print('ERROR: Geometry image path not found.') 43 | exit(-1) 44 | 45 | model_ids = list(range(args.n_models)) 46 | train_ids, valtest_ids = train_test_split(model_ids, test_size=percentage_valtest) 47 | val_ids, test_ids = train_test_split(valtest_ids, test_size=test_of_valtest) 48 | # train_ids = [1, 2, 3, 4, 5, 7, 10, 11, 13, 16, 17, 19, 20, 21, 23, 25, 27, 28, 30, 31, 32, 33, 35, 36, 37, 40, 41, 44, 45, 47, 48, 
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 85, 86, 87, 89, 90, 91, 92, 94, 95, 97, 98, 99] 49 | # val_ids = [96, 0, 8, 42, 78, 46, 14, 83, 88, 29] 50 | # test_ids = [34, 69, 6, 39, 38, 9, 43, 12, 15, 18, 84, 22, 24, 26, 93] 51 | 52 | train_target_dir = join(result_pre_dir, 'train') 53 | val_target_dir = join(result_pre_dir, 'val') 54 | test_target_dir = join(result_pre_dir, 'test') 55 | 56 | train_result_dir_list = [join(train_target_dir, 'pidx_%04d' % i) for i in range(args.n_points)] 57 | val_result_dir_list = [join(val_target_dir, 'pidx_%04d' % i) for i in range(args.n_points)] 58 | test_result_dir_list = [join(test_target_dir, 'pidx_%04d' % i) for i in range(args.n_points)] 59 | 60 | 61 | for rd in train_result_dir_list: 62 | os.makedirs(rd, exist_ok=True) 63 | for rd in val_result_dir_list: 64 | os.makedirs(rd, exist_ok=True) 65 | for rd in test_result_dir_list: 66 | os.makedirs(rd, exist_ok=True) 67 | 68 | geoimg_path_list = [] 69 | 70 | sub_dirs = os.listdir(geoimg_dir) 71 | for sub_dir in sub_dirs: 72 | model_abs_dir = join(geoimg_dir, sub_dir) 73 | for gi_name in os.listdir(model_abs_dir): 74 | geoimg_path_list.append(join(model_abs_dir, gi_name)) 75 | 76 | 77 | for gi_path in tqdm(geoimg_path_list): 78 | if not os.path.isfile(gi_path) or len(gi_path.split(os.path.sep)[-1]) < 3 or gi_path.split('.')[-1] != 'gi': 79 | continue 80 | 81 | class_id = int(gi_path.split('_')[-1].split('.')[0]) 82 | model_id = int(gi_path.split('_')[-3]) 83 | 84 | if model_id in train_ids: 85 | os.rename(gi_path, join(train_result_dir_list[class_id], gi_path.split(os.path.sep)[-1])) 86 | elif model_id in val_ids: 87 | os.rename(gi_path, join(val_result_dir_list[class_id], gi_path.split(os.path.sep)[-1])) 88 | elif model_id in test_ids: 89 | os.rename(gi_path, join(test_result_dir_list[class_id], gi_path.split(os.path.sep)[-1])) 90 | else: 91 | raise IOError('Unexpected model id') 92 | 93 | with 
open(join(result_pre_dir, 'train_val_test.txt'), 'w') as txt: 94 | txt.write('training_set: \n') 95 | txt.write(str(set(train_ids))) 96 | txt.write('\n\n') 97 | txt.write('validation_set: \n') 98 | txt.write(str(set(val_ids))) 99 | txt.write('\n\n') 100 | txt.write('testing_set: \n') 101 | txt.write(str(set(test_ids))) 102 | txt.write('\n\n') -------------------------------------------------------------------------------- /python_learning/descGen.py: -------------------------------------------------------------------------------- 1 | # ********************************************************** 2 | # Author: Hanyu Wang(王涵玉) 3 | # ********************************************************** 4 | 5 | import argparse 6 | import os 7 | import sys 8 | import time 9 | from os.path import join 10 | 11 | import numpy as np 12 | import tensorflow as tf 13 | import tensorlayer as tl 14 | from tensorlayer.layers import * 15 | from tqdm import tqdm 16 | 17 | # In[2]: 18 | 19 | parser = argparse.ArgumentParser(description='') 20 | 21 | parser.add_argument('--gpuid', '-g', default='3', type=str, metavar='N', 22 | help='GPU id to run') 23 | 24 | parser.add_argument('--batch_size', '--bs', default=512, type=int, 25 | help='batch size of evaluation') 26 | 27 | parser.add_argument('--restore_path', default='/data/yqwang/Project/3dDescriptor/train_mincv_cb_gi/saved_models_045_ext/training_model-99999', 28 | type=str, 29 | help='path to the saved model') 30 | 31 | parser.add_argument('--gi_size', default=32, type=int, 32 | help='length and width of geometry image, assuming it\'s square') 33 | parser.add_argument('--gi_channel', default=2, type=int, 34 | help='number of geometry image channels') 35 | 36 | 37 | parser.add_argument('--gi_dir', '--gd', default='/data/yqwang/Dataset/faust_test_045_cb/', 38 | type=str, help='root directory of gi files') 39 | parser.add_argument('--desc_dir', '--dd', default='/data/yqwang/Project/3dDescriptor/evaluation/descs_99999_045_cb_gi_ext/', 40 | 
type=str, help='directory of descriptors') 41 | 42 | global args 43 | 44 | 45 | # In[3]: 46 | 47 | # In[4]: 48 | 49 | # In[5]: 50 | 51 | class TripletNet: 52 | def __init__(self, args=None, is_training=True): 53 | self.args = args 54 | self.is_training = is_training 55 | # self.predict_net =None 56 | self.anchor_net = None # anchor_net is also the predict_net 57 | self.positive_net = None 58 | self.negative_net = None 59 | self.descriptors = None # descriptors of anchors 60 | self.cost = None 61 | self.cost_same = None 62 | self.cost_diff = None 63 | self.all_multiuse_params = None 64 | self.predictions = None 65 | self.acc = None 66 | 67 | def inference(self, gi_placeholder, reuse=None): # reuse=None is equal to reuse=False(i.e. don't reuse) 68 | with tf.variable_scope('model', reuse=reuse): 69 | tl.layers.set_name_reuse(reuse) # reuse! 70 | 71 | network = tl.layers.InputLayer(gi_placeholder, name='input') 72 | 73 | """ conv2 """ 74 | network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 75 | padding='SAME', W_init=args.conv_initializer, name='conv2_1') 76 | 77 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 78 | is_train=self.is_training, name='bn2_1') 79 | 80 | network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), 81 | padding='SAME', name='pool2') 82 | 83 | """ conv3 """ 84 | network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 85 | padding='SAME', W_init=args.conv_initializer, name='conv3_1') 86 | 87 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 88 | is_train=self.is_training, name='bn3_1') 89 | 90 | network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), 91 | padding='SAME', name='pool3') 92 | 93 | """ conv4 """ 94 | network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 95 | padding='SAME', W_init=args.conv_initializer, name='conv4_1') 96 | 97 | network = 
BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 98 | is_train=self.is_training, name='bn4_1') 99 | 100 | network = MeanPool2d(network, filter_size=(2, 2), strides=(2, 2), 101 | padding='SAME', name='pool4') 102 | 103 | 104 | network = FlattenLayer(network, name='flatten') 105 | network = DenseLayer(network, n_units=512, act=tf.identity, name='fc1_relu') 106 | 107 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 108 | is_train=self.is_training, name='bn_fc') 109 | network = DenseLayer(network, n_units=256, act=tf.identity, name='128d_embedding') 110 | 111 | return network 112 | 113 | 114 | def build_nets(self, anchor_placeholder, positive_placeholder, negative_placeholder, anchor_label_placeholder, keypoint_num=None): 115 | self.anchor_net = self.inference(anchor_placeholder, reuse=None) 116 | self.descriptors = self.anchor_net.outputs 117 | 118 | 119 | 120 | # In[6]: 121 | args = parser.parse_args() 122 | # args = parser.parse_args(args=['-g 1']) 123 | setattr(args, 'conv_initializer', tf.contrib.layers.xavier_initializer()) 124 | setattr(args, 'activation', tl.activation.leaky_relu) 125 | 126 | 127 | os.makedirs(args.desc_dir, exist_ok=True) 128 | required_gi_shape = (args.gi_size, args.gi_size, args.gi_channel) 129 | 130 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid 131 | config = tf.ConfigProto() 132 | config.gpu_options.allow_growth = True 133 | sess = tf.InteractiveSession(config=config) 134 | 135 | 136 | 137 | 138 | # In[7]: 139 | 140 | def read_gi(path) -> np.ndarray: 141 | # Open a gi file and return the content as numpy.ndarray 142 | 143 | tensor = [[]] 144 | with open(path, 'r') as text: 145 | for line in text: 146 | if line == '\n': 147 | tensor.append([]) 148 | else: 149 | tensor[-1].append([float(i) for i in line.strip(' \t\n').split()]) 150 | 151 | while not tensor[-1]: 152 | del tensor[-1] 153 | 154 | rtensor = np.asarray(tensor, dtype=np.float32).transpose((1, 2, 0)) 155 | 156 | assert 
rtensor.shape == required_gi_shape, 'The dimension of gi does not match with the CNNs. \n' + \ 157 | 'Requires %s but receives %s' % (str(required_gi_shape), str(rtensor.shape)) 158 | 159 | return rtensor 160 | 161 | 162 | def write_descriptors(path, descriptor_list): 163 | """ 164 | Write descriptors in a text file 165 | 166 | :param path: path of the target text file. 167 | :descriptor_list: list of either descriptor vector to be written, or an integer -1 which represent non-existence. 168 | 169 | """ 170 | 171 | with open(path, 'w') as descs: 172 | for descriptor in descriptor_list: 173 | if descriptor.__class__ == int: 174 | descs.write(str(descriptor) + '\n') 175 | else: 176 | desc_len = len(descriptor) 177 | for i, val in enumerate(descriptor): 178 | if i != desc_len - 1: 179 | descs.write(str(val) + ',') 180 | else: 181 | descs.write(str(val) + '\n') 182 | return 183 | 184 | 185 | # In[8]: 186 | 187 | run_time = time.localtime(time.time()) 188 | 189 | # [batch_size, height, width, channels] 190 | anchor_placeholder = tf.placeholder( 191 | dtype=tf.float32, 192 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 193 | 194 | positive_placeholder = tf.placeholder( 195 | dtype=tf.float32, 196 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 197 | 198 | negative_placeholder = tf.placeholder( 199 | dtype=tf.float32, 200 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 201 | 202 | anchor_label_placeholder = tf.placeholder( 203 | dtype=tf.int32, 204 | shape=[None]) # [batch_size, height, width, channels] 205 | 206 | triplet_net = TripletNet(is_training=False) # testing 207 | 208 | triplet_net.build_nets( 209 | anchor_placeholder=anchor_placeholder, 210 | positive_placeholder=positive_placeholder, 211 | negative_placeholder=negative_placeholder, 212 | anchor_label_placeholder=anchor_label_placeholder 213 | ) 214 | 
215 | train_params = triplet_net.anchor_net.all_params 216 | 217 | 218 | # temp = set(tf.global_variables()) 219 | 220 | # if args.restore: 221 | # tl.layers.initialize_global_variables(sess) 222 | # load_saver = tf.train.Saver() 223 | # load_saver.restore(sess, args.restore_path) 224 | # info = 'Restore model parameters from %s' % args.restore_path 225 | # print(info) 226 | 227 | # else: 228 | # tl.layers.initialize_global_variables(sess) 229 | # info = 'Successfully initialized global variables.' 230 | # print(info) 231 | 232 | load_saver = tf.train.Saver() 233 | load_saver.restore(sess, args.restore_path) 234 | info = 'Restore model parameters from %s' % args.restore_path 235 | print(info) 236 | 237 | 238 | # sess.run(tf.initialize_variables(set(tf.global_variables()) - temp)) 239 | 240 | 241 | triplet_net.anchor_net.print_params() 242 | triplet_net.anchor_net.print_layers() 243 | 244 | 245 | # start_time = time.time() 246 | 247 | model_folder_list = os.listdir(args.gi_dir) 248 | 249 | n_models = len(model_folder_list) 250 | 251 | for i, model_folder in enumerate(model_folder_list): 252 | 253 | start_time = time.time() 254 | print('Processing %s, %d of %d' % (model_folder, i + 1, n_models)) 255 | 256 | model_id = int(model_folder.split('_')[-1]) #_->. 257 | current_model_dir = join(args.gi_dir, model_folder) 258 | n_allpoints = len(os.listdir(current_model_dir)) 259 | #current_descriptor_path = join(args.desc_dir, 'tr_reg_%.3d.desc' % model_id) 260 | current_descriptor_path = join(args.desc_dir, '%s.desc' % model_folder) 261 | 262 | gi_list = [] 263 | # nonexist_point_id_list = [] 264 | 265 | print('Loading gi files...') 266 | for point_id in tqdm(range(n_allpoints)): 267 | current_gi_path = join(current_model_dir, 'tr_reg_%.3d_pidx_%.4d.gi' % (model_id, point_id)) 268 | #current_gi_path = join(current_model_dir, '%s_pidx_%.4d.gi' % (model_folder, point_id)) 269 | assert os.path.isfile(current_gi_path), 'One required gi file not found! 
Filepath: %s' % current_gi_path 270 | gi_list.append(read_gi(current_gi_path)) 271 | # if os.path.isfile(current_gi_path): 272 | # gi_list.append(read_gi(current_gi_path)) 273 | # else: 274 | # nonexist_point_id_list.append(point_id) 275 | gi_list_batch_list = (gi_list[i: i + args.batch_size] for i in range(0, len(gi_list), args.batch_size)) 276 | 277 | print('Generating descriptors...') 278 | current_model_desc_list = [] 279 | 280 | for gi_list_batch in gi_list_batch_list: 281 | current_model_desc_list.extend(list(sess.run(triplet_net.descriptors, 282 | feed_dict={anchor_placeholder: np.asarray(gi_list_batch)}))) 283 | # current_model_desc_list = list(current_model_descriptors) 284 | 285 | # for nonexist_point_id in nonexist_point_id_list: 286 | # current_model_desc_list.insert(nonexist_point_id, -1) # Value -1 means the descriptor of this point does not exist. 287 | 288 | write_descriptors(current_descriptor_path, current_model_desc_list) 289 | 290 | print('Descriptors of %s generated, time cost: %fs' % (model_folder, time.time() - start_time)) 291 | 292 | -------------------------------------------------------------------------------- /python_learning/tfr_gen.py: -------------------------------------------------------------------------------- 1 | # ********************************************************** 2 | # Author: Hanyu Wang(王涵玉) 3 | # ********************************************************** 4 | # This script generates TFRecords from geometry images. 
5 | 6 | import tensorflow as tf 7 | import os 8 | import numpy as np 9 | import argparse 10 | from os.path import join 11 | from tqdm import tqdm 12 | from random import shuffle 13 | 14 | 15 | parser = argparse.ArgumentParser() 16 | 17 | parser.add_argument('--tfr_dir', '-d', 18 | default='/data/yqwang/Dataset/faust_256p_045_cb/gi_TFRecords', 19 | type=str, help='directory to store TFRecords') 20 | parser.add_argument('--gi_dir', '-s', 21 | default='/data/yqwang/Dataset/faust_256p_045_cb/gi_classified', 22 | type=str, help='directory to read geometry images') 23 | 24 | 25 | def open_gi(path) -> list: 26 | # Open a gi file and return the content as numpy.ndarray 27 | 28 | gis = [[[]]] 29 | 30 | with open(path, 'r') as text: 31 | 32 | blank_line_count = 0 33 | tensor = gis[-1] 34 | 35 | for line in text: 36 | if line == '\n': 37 | blank_line_count += 1 38 | else: 39 | if blank_line_count == 1: 40 | tensor.append([]) 41 | 42 | elif blank_line_count == 3: 43 | gis.append([[]]) 44 | tensor = gis[-1] 45 | 46 | blank_line_count = 0 47 | tensor[-1].append([float(i) for i in line.strip(' \t\n').split()]) 48 | 49 | return list(np.asarray(gis, dtype=np.float32).transpose((0, 2, 3, 1))) 50 | 51 | 52 | if __name__ == '__main__': 53 | 54 | args = parser.parse_args() 55 | 56 | tfrecords_dir = args.tfr_dir 57 | gi_dir = args.gi_dir 58 | 59 | train_gi_dir = join(gi_dir, 'train') 60 | val_gi_dir = join(gi_dir, 'val') 61 | test_gi_dir = join(gi_dir, 'test') 62 | 63 | if not (os.path.exists(train_gi_dir) and os.path.exists(val_gi_dir) and os.path.exists(test_gi_dir)): 64 | print('ERROR: Classified geometry image path not found.') 65 | exit(-1) 66 | 67 | train_tfrecords_dir = join(tfrecords_dir, 'train') 68 | val_tfrecords_dir = join(tfrecords_dir, 'val') 69 | test_tfrecords_dir = join(tfrecords_dir, 'test') 70 | 71 | os.makedirs(train_tfrecords_dir, exist_ok=True) 72 | os.makedirs(val_tfrecords_dir, exist_ok=True) 73 | os.makedirs(test_tfrecords_dir, exist_ok=True) 74 | 75 | for 
type_name in tqdm(os.listdir(train_gi_dir)): 76 | label = int(type_name.split('_')[-1]) 77 | # print(label) 78 | 79 | type_dir = join(train_gi_dir, type_name) 80 | 81 | train_tfrecord_path = join(train_tfrecords_dir, type_name + '.tfrecords') 82 | train_writer = tf.python_io.TFRecordWriter(train_tfrecord_path) 83 | 84 | gi_names = os.listdir(type_dir) 85 | 86 | gi_list = [] 87 | for gi_name in gi_names: 88 | gi_path = join(type_dir, gi_name) 89 | gi_12rot = open_gi(gi_path) 90 | gi_list.extend(gi_12rot) 91 | 92 | np.random.shuffle(gi_list) 93 | 94 | for gi in gi_list: 95 | gi_raw = gi.tobytes() # Convert geometry images to raw bytes. 96 | example = tf.train.Example( 97 | features=tf.train.Features(feature={ 98 | 99 | 'gi_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[gi_raw])), 100 | 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])) 101 | 102 | })) 103 | 104 | train_writer.write(example.SerializeToString()) # Serialize gi bytes 105 | 106 | train_writer.close() 107 | 108 | # 123213 109 | for type_name in tqdm(os.listdir(val_gi_dir)): 110 | label = int(type_name.split('_')[-1]) 111 | # print(label) 112 | 113 | type_dir = join(val_gi_dir, type_name) 114 | 115 | val_tfrecords_path = join(val_tfrecords_dir, type_name + '.tfrecords') 116 | 117 | val_writer = tf.python_io.TFRecordWriter(val_tfrecords_path) 118 | 119 | gi_names = os.listdir(type_dir) 120 | 121 | gi_list = [] 122 | for gi_name in gi_names: 123 | gi_path = join(type_dir, gi_name) 124 | gi_12rot = open_gi(gi_path) 125 | gi_list.extend(gi_12rot) 126 | 127 | np.random.shuffle(gi_list) 128 | 129 | for gi in gi_list: 130 | gi_raw = gi.tobytes() # Convert geometry images to raw bytes. 
131 | example = tf.train.Example( 132 | features=tf.train.Features(feature={ 133 | 134 | 'gi_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[gi_raw])), 135 | 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])) 136 | 137 | })) 138 | 139 | val_writer.write(example.SerializeToString()) # Serialize gi bytes 140 | 141 | val_writer.close() 142 | 143 | # 12312312 144 | for type_name in tqdm(os.listdir(test_gi_dir)): 145 | label = int(type_name.split('_')[-1]) 146 | # print(label) 147 | 148 | type_dir = join(test_gi_dir, type_name) 149 | 150 | test_tfrecords_path = join(test_tfrecords_dir, type_name + '.tfrecords') 151 | 152 | test_writer = tf.python_io.TFRecordWriter(test_tfrecords_path) 153 | 154 | gi_names = os.listdir(type_dir) 155 | 156 | gi_list = [] 157 | for gi_name in gi_names: 158 | gi_path = join(type_dir, gi_name) 159 | gi_12rot = open_gi(gi_path) 160 | gi_list.extend(gi_12rot) 161 | 162 | np.random.shuffle(gi_list) 163 | 164 | for gi in gi_list: 165 | gi_raw = gi.tobytes() # Convert geometry images to raw bytes. 
166 | example = tf.train.Example( 167 | features=tf.train.Features(feature={ 168 | 169 | 'gi_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[gi_raw])), 170 | 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])) 171 | 172 | })) 173 | 174 | test_writer.write(example.SerializeToString()) # Serialize gi bytes 175 | 176 | test_writer.close() 177 | 178 | 179 | 180 | # c = open_gi('E:/tr_scan_000_keypoint_00_rot_0.gi') 181 | # print(c.shape) 182 | # print(c.dtype) 183 | -------------------------------------------------------------------------------- /python_learning/train_mincv_perloss6890_256.py: -------------------------------------------------------------------------------- 1 | # ********************************************************** 2 | # This file is MODIFIED from a part of the 3D descriptor 3 | # learning framework by Hanyu Wang(王涵玉) 4 | # https://github.com/jianweiguo/local3Ddescriptorlearning 5 | # 6 | # Author: Yiqun Wang(王逸群) 7 | # https://github.com/yiqun-wang/LPS 8 | # ********************************************************** 9 | 10 | import argparse 11 | import os 12 | import sys 13 | import time 14 | import math 15 | from os.path import join 16 | 17 | import numpy as np 18 | import tensorflow as tf 19 | import tensorlayer as tl 20 | from tensorlayer.layers import * 21 | 22 | 23 | # In[2]: 24 | 25 | 26 | parser = argparse.ArgumentParser(description='') 27 | 28 | parser.add_argument('--gpuid', '-g', default='0', type=str, metavar='N', 29 | help='GPU id to run') 30 | 31 | parser.add_argument('--learning_rate', '--lr', default=0.0003, type=float, #0.0003 32 | help='the learning rate') 33 | parser.add_argument('--l2_regularizer_scale', default=0.005, type=float, 34 | help='scale parameter used in the l2 regularization') 35 | parser.add_argument('--n_iteration', '-n', default=100000, type=int,metavar='N', 36 | help='number of training iterations') 37 | 38 | parser.add_argument('--batch_size', '--bs', default=512, type=int, 39 | 
help='size of training batch, it is equal to batch_keypoint_num*batch_gi_num') 40 | parser.add_argument('--batch_keypoint_num', '--bkn', default=64, type=int, 41 | help='number of different keypoints in a training batch') 42 | parser.add_argument('--batch_gi_num', '--bgn', default=8, type=int, 43 | help='number of geometry images of one keypoint in a training batch') 44 | parser.add_argument('--patch_num', '--pn', default=256, type=int, 45 | help='number of patch divide in training process') 46 | parser.add_argument('--desc_dims', '--dd', default=256, type=int, 47 | help='number of patch divide in training process') 48 | 49 | parser.add_argument('--val_freq', '--vf', default=5, type=int, 50 | help='frequency of validation.') 51 | parser.add_argument('--print_freq', '--pf', default=100, type=int, 52 | help=r'print info every {print_freq} iterations') 53 | parser.add_argument('--save_freq', '--sf', default=100, type=int, 54 | help=r'save the current trained model every {save_freq} iterations') 55 | 56 | parser.add_argument('--summary_saving_dir', '--ssd', default='./summary_ext', type=str, 57 | help='directory to save summaries') 58 | parser.add_argument('--model_saving_dir', '--msd', default='./saved_models_ext', type=str, 59 | help='directory to save trained models') 60 | 61 | parser.add_argument('--restore', '-r', dest='restore',default=True, action='store_true', 62 | help='bool value, restore variables from saved model of not') 63 | parser.add_argument('--restore_path',default='/data/yqwang/Project/3dDescriptor/train_mincv_cb_gi/saved_models/training_model-99999', #train_softmax_adam_cc/saved_models_2/training_model-1499', #21999', 64 | type=str, 65 | help='path to the saved model(if restore)') 66 | 67 | parser.add_argument('--use_kpi_set', dest='use_kpi_set', default=True, action='store_true', 68 | help='bool value, use keypoint set from keypoint file or not') 69 | parser.add_argument('--keypoints_path', 
default='/data/yqwang/Dataset/faust_256p/keypoints_faust_all.kpi', 70 | type=str, 71 | help='path to the keypoint file(if use_kpi_set)') 72 | parser.add_argument('--n_all_points', default=6890, type=int, 73 | help='number of all points in the model') 74 | 75 | 76 | parser.add_argument('--shuffle_batch_capacity', default=10000, type=int, 77 | help='capacity of shuffle bacth buffer') 78 | parser.add_argument('--gi_size', default=32, type=int, 79 | help='length and width of geometry image, assuming it\'s square') 80 | parser.add_argument('--gi_channel', default=2, type=int, 81 | help='number of geometry image channels') 82 | parser.add_argument('--triplet_loss_gap', default=1, type=float, 83 | help='the gap value used in the triplet loss') 84 | parser.add_argument('--n_loss_compute_iter', default=17, type=int, 85 | help='number of iterations to compute the training loss') 86 | # parser.add_argument('--n_test_iter', default=100, type=int, 87 | # help='number of iterations to compute the test results') 88 | parser.add_argument('--tfr_dir', default='/data/yqwang/Dataset/faust_256p_025_cb/gi_TFRecords', 89 | type=str, 90 | help='directory of training TFRecords, containing' 91 | '3 subdirectories: \"train\", \"val\", and \"test\"') 92 | parser.add_argument('--tfr_name_template', default=r'pidx_%04d.tfrecords', type=str, 93 | help='name template of TFRecords filenames') 94 | 95 | global args 96 | 97 | 98 | # In[3]: 99 | 100 | 101 | def read_index_file(path, delimiter=' '): 102 | """ 103 | Read indices from a text file and return a list of indices. 104 | :param path: path of the text file. 105 | :return: a list of indices. 
106 | """ 107 | 108 | index_list = [] 109 | with open(path, 'r') as text: 110 | 111 | for line in text: 112 | ls = line.strip(' {}[]\t') 113 | 114 | if not ls or ls[0] == '#': # Comment content 115 | continue 116 | ll = ls.split(delimiter) 117 | 118 | for id_str in ll: 119 | idst = id_str.strip() 120 | if idst == '': 121 | continue 122 | index_list.append(int(idst)) 123 | 124 | return index_list 125 | 126 | 127 | # In[4]: 128 | 129 | 130 | def append_log(path, string_stream): 131 | """ 132 | Write string_stream in a log file. 133 | :param path: path of the log file. 134 | :param string_stream: string that will be write. 135 | """ 136 | 137 | with open(path, 'a') as log: 138 | log.write(string_stream) 139 | return 140 | 141 | 142 | # In[5]: 143 | 144 | 145 | class TripletNet: 146 | def __init__(self, args=None, is_training=True): 147 | self.args = args 148 | self.is_training = is_training 149 | # self.predict_net =None 150 | self.anchor_net = None # anchor_net is also the predict_net 151 | self.positive_net = None 152 | self.negative_net = None 153 | self.descriptors = None # descriptors of anchors 154 | self.cost = None 155 | self.cost_same = None 156 | self.cost_diff = None 157 | self.all_multiuse_params = None 158 | self.predictions = None 159 | self.acc = None 160 | 161 | def inference(self, gi_placeholder, reuse=None): # reuse=None is equal to reuse=False(i.e. don't reuse) 162 | with tf.variable_scope('model', reuse=reuse): 163 | tl.layers.set_name_reuse(reuse) # reuse! 
164 | 165 | network = tl.layers.InputLayer(gi_placeholder, name='input') 166 | 167 | """ conv2 """ 168 | network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 169 | padding='SAME', W_init=args.conv_initializer, name='conv2_1') 170 | 171 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 172 | is_train=self.is_training, name='bn2_1') 173 | 174 | network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), 175 | padding='SAME', name='pool2') 176 | 177 | """ conv3 """ 178 | network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 179 | padding='SAME', W_init=args.conv_initializer, name='conv3_1') 180 | 181 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 182 | is_train=self.is_training, name='bn3_1') 183 | 184 | network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), 185 | padding='SAME', name='pool3') 186 | 187 | """ conv4 """ 188 | network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.identity, 189 | padding='SAME', W_init=args.conv_initializer, name='conv4_1') 190 | 191 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 192 | is_train=self.is_training, name='bn4_1') 193 | 194 | network = MeanPool2d(network, filter_size=(2, 2), strides=(2, 2), 195 | padding='SAME', name='pool4') 196 | 197 | 198 | network = FlattenLayer(network, name='flatten') 199 | network = DenseLayer(network, n_units=512, act=tf.identity, name='fc1_relu') 200 | 201 | network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation, 202 | is_train=self.is_training, name='bn_fc') 203 | network = DenseLayer(network, n_units=args.desc_dims, act=tf.identity, name='128d_embedding') 204 | 205 | return network 206 | 207 | 208 | def build_nets(self, anchor_placeholder, positive_placeholder, negative_placeholder, anchor_label_placeholder, keypoint_num): 209 | self.anchor_net = 
self.inference(anchor_placeholder, reuse=None) 210 | self.positive_net = self.inference(positive_placeholder, reuse=True) 211 | self.negative_net = self.inference(negative_placeholder, reuse=True) 212 | 213 | 214 | 215 | self.all_multiuse_params = self.anchor_net.all_params.copy() 216 | 217 | 218 | 219 | self.cost_same = tf.norm(self.anchor_net.outputs - self.positive_net.outputs, axis=1) 220 | print(self.cost_same.shape) 221 | # assert self.cost_same.shape[0] == args.batch_size 222 | self.cost_diff = tf.norm(self.anchor_net.outputs - self.negative_net.outputs, axis=1) 223 | 224 | delta_mean = tf.reduce_mean(self.cost_same - self.cost_diff) 225 | cost_same_mean, cost_same_variance = tf.nn.moments(self.cost_same, axes=[0]) 226 | cost_diff_mean, _ = tf.nn.moments(self.cost_diff, axes=[0]) 227 | 228 | cv = tf.sqrt(cost_same_variance)/cost_same_mean 229 | 230 | # self.cost = tf.maximum(zero, gap + delta) 231 | # ratio = self.cost_diff / self.cost_same 232 | # self.cost = - (gap + self.cost_diff) / (gap + self.cost_same) + self.cost_same 233 | 234 | ratio = cost_diff_mean / cost_same_mean 235 | 236 | # batch_variance = args.batch_size*tf.sqrt(cost_same_variance) 237 | 238 | # self.cost = self.cost_same + tf.maximum(zero, gap - self.cost_diff) + batch_variance 239 | self.cost = tf.reduce_sum(tf.maximum(np.float32(0), np.float32(args.triplet_loss_gap) + self.cost_same - self.cost_diff)) + 0.1*args.batch_size*cv #+0.1* cost_same_mean 240 | 241 | tf.summary.scalar(name='cost', tensor=self.cost) 242 | tf.summary.scalar(name='delta_mean', tensor=delta_mean) 243 | tf.summary.scalar(name='ratio', tensor=ratio) 244 | tf.summary.scalar(name='cost_same_mean', tensor=cost_same_mean) 245 | tf.summary.scalar(name='cost_diff_mean', tensor=cost_diff_mean) 246 | # tf.summary.scalar(name='batch_variance', tensor=batch_variance) 247 | tf.summary.scalar(name='CV', tensor=cv) 248 | # tf.summary.scalar(name='accuracy', tensor=self.acc) 249 | 250 | # Weight decay 251 | l2 = 0 252 | for p in 
tl.layers.get_variables_with_name('W_conv2d'): 253 | l2 += tf.contrib.layers.l2_regularizer(args.l2_regularizer_scale)(p) 254 | tf.summary.histogram(name=p.name, values=p) 255 | 256 | for p in tl.layers.get_variables_with_name('128d_embedding/W'): 257 | l2 += tf.contrib.layers.l2_regularizer(args.l2_regularizer_scale)(p) 258 | tf.summary.histogram(name=p.name, values=p) 259 | 260 | self.cost += l2 261 | 262 | 263 | 264 | args = parser.parse_args()#args=['-g 2'] 265 | setattr(args, 'conv_initializer', tf.contrib.layers.xavier_initializer()) 266 | setattr(args, 'activation', tl.activation.leaky_relu) 267 | 268 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid 269 | train_tfr_dir = join(args.tfr_dir, 'train') 270 | val_tfr_dir = join(args.tfr_dir, 'val') 271 | config = tf.ConfigProto() 272 | config.gpu_options.allow_growth = True 273 | #config.gpu_options.per_process_gpu_memory_fraction = 0.9 274 | sess = tf.InteractiveSession(config=config) 275 | 276 | if args.use_kpi_set: 277 | keypoint_list = read_index_file(args.keypoints_path) # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
278 | else: 279 | keypoint_list = list(range(args.n_all_points)) 280 | 281 | # debug 282 | 283 | # keypoint_list = list(range(16)) 284 | 285 | keypoint_num = len(keypoint_list) 286 | 287 | # rebuild 0-based index 288 | keypoint_list = list(range(keypoint_num)) 289 | 290 | 291 | # In[7]: 292 | 293 | 294 | def parse_and_decode(serialized_example): 295 | features = tf.parse_single_example(serialized_example, 296 | features={ 297 | 'gi_raw': tf.FixedLenFeature([], tf.string), 298 | 'label': tf.FixedLenFeature([], tf.int64), 299 | }) 300 | 301 | gi = tf.decode_raw(features['gi_raw'], tf.float32) 302 | gi = tf.reshape(gi, [args.gi_size, args.gi_size, args.gi_channel]) 303 | label = tf.cast(features['label'], tf.int32) # throw label tensor 304 | return gi, label 305 | 306 | 307 | # In[8]: 308 | 309 | 310 | run_time = time.localtime(time.time()) 311 | 312 | # [batch_size, height, width, channels] 313 | anchor_placeholder = tf.placeholder( 314 | dtype=tf.float32, 315 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 316 | 317 | positive_placeholder = tf.placeholder( 318 | dtype=tf.float32, 319 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 320 | 321 | negative_placeholder = tf.placeholder( 322 | dtype=tf.float32, 323 | shape=[None, args.gi_size, args.gi_size, args.gi_channel]) # [batch_size, height, width, channels] 324 | 325 | anchor_label_placeholder = tf.placeholder( 326 | dtype=tf.int32, 327 | shape=[None]) # [batch_size, height, width, channels] 328 | 329 | triplet_net = TripletNet(is_training=True) # training 330 | 331 | triplet_net.build_nets( 332 | anchor_placeholder=anchor_placeholder, 333 | positive_placeholder=positive_placeholder, 334 | negative_placeholder=negative_placeholder, 335 | anchor_label_placeholder=anchor_label_placeholder, 336 | keypoint_num=keypoint_num 337 | ) 338 | 339 | train_params = triplet_net.anchor_net.all_params 340 | 341 | 
merged_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(args.summary_saving_dir, 'train'), sess.graph)
validation_writer = tf.summary.FileWriter(join(args.summary_saving_dir, 'validation'), sess.graph)
log_path = join('./', 'log_' + time.strftime('%Y-%m-%d_%H-%M-%S', run_time) + '.log')
log_stream = 'Start running at ' + time.strftime('%Y-%m-%d %H:%M:%S', run_time) + '.\n'
log_stream += '================================================================================\n'

with open(log_path, 'w') as logf:
    logf.write(log_stream)

append_log(log_path, str(args) + '\n')

log_stream = ''

# Remember the variables that exist before the optimizer is created, so that
# after a checkpoint restore only the optimizer slot variables get initialized.
temp = set(tf.global_variables())

if args.restore:
    tl.layers.initialize_global_variables(sess)
    load_saver = tf.train.Saver()
    load_saver.restore(sess, args.restore_path)
    info = 'Restore model parameters from %s' % args.restore_path
    log_stream += info
    log_stream += '\n'
    print(info)

else:
    tl.layers.initialize_global_variables(sess)
    info = 'Successfully initialized global variables.'
    log_stream += info
    log_stream += '\n'
    print(info)

train_op = tf.train.AdamOptimizer(args.learning_rate, beta1=0.9, beta2=0.999,
                                  epsilon=1e-08, use_locking=False).minimize(triplet_net.cost,
                                                                             var_list=train_params)
# Initialize only the variables the optimizer just added (Adam slots).
sess.run(tf.initialize_variables(set(tf.global_variables()) - temp))

saver = tf.train.Saver()

triplet_net.anchor_net.print_params()
triplet_net.anchor_net.print_layers()

info = ' learning_rate: %f' % args.learning_rate
log_stream += info
log_stream += '\n'
print(info)

info = ' batch_size: %d' % args.batch_size
log_stream += info
log_stream += '\n'
print(info)

append_log(log_path, log_stream)
log_stream = ''


patch_num = args.patch_num
# FIX(review): this was math.ceil(6890 // patch_num).  Because // already
# floors, the ceil was a no-op, so with the defaults num_point = 26 and
# patch_num * num_point = 6656 < 6890 -- keypoints 6656..6889 were never
# assigned to any patch bucket below.  True ceiling division over the actual
# keypoint count covers every keypoint (and drops the hard-coded 6890).
num_point = math.ceil(keypoint_num / patch_num)

train_iter_list = []
val_iter_list = []

train_next_element_list = []
val_next_element_list = []

# One bucket of TFRecord paths per patch.  (FIX(review): removed the dead
# `= []` initializations that were immediately overwritten.)
train_filename_list = [[] for _ in range(patch_num)]
val_filename_list = [[] for _ in range(patch_num)]

for keypoint_idx in range(keypoint_num):

    for index in range(patch_num):
        if index * num_point <= keypoint_idx < (index + 1) * num_point:
            train_filename_list[index].append(join(train_tfr_dir, args.tfr_name_template % keypoint_idx))
            val_filename_list[index].append(join(val_tfr_dir, args.tfr_name_template % keypoint_idx))
            break


for i in range(patch_num):
    train_iter = tf.data.TFRecordDataset(train_filename_list[i]).map(parse_and_decode).batch(2).shuffle(
        args.shuffle_batch_capacity).repeat().make_one_shot_iterator()
train_iter_list.append(train_iter) 430 | 431 | train_next_element = train_iter.get_next() 432 | train_next_element_list.append(train_next_element) 433 | 434 | val_iter = tf.data.TFRecordDataset(val_filename_list[i]).map(parse_and_decode).batch(2).shuffle( args.shuffle_batch_capacity).repeat().make_one_shot_iterator() 435 | val_iter_list.append(val_iter) 436 | 437 | val_next_element = val_iter.get_next() 438 | val_next_element_list.append(val_next_element) 439 | 440 | # In[ ]: 441 | 442 | 443 | # training 444 | 445 | start_time = time.time() 446 | 447 | # pidx2position = dict() 448 | # loaded_keypoint_list = None 449 | 450 | for iteration in range(args.n_iteration): 451 | 452 | selected_keypoints = np.random.choice(a=keypoint_list, size=args.batch_keypoint_num, replace=False) 453 | 454 | 455 | ss = time.time() 456 | train_anchor_gi_all = [] 457 | train_label_all = [] 458 | train_anchor_gi_all = None 459 | train_label_all = None 460 | train_element_list = [] 461 | 462 | for keypoint_id in selected_keypoints: 463 | for index in range(patch_num): 464 | if keypoint_id >= index * num_point and keypoint_id < (index + 1) * num_point: 465 | for i in range(args.batch_gi_num//2): 466 | train_gi, train_label = sess.run(train_next_element_list[index]) 467 | 468 | if train_anchor_gi_all is None: 469 | train_anchor_gi_all = train_gi 470 | else: 471 | train_anchor_gi_all = np.append(train_anchor_gi_all, train_gi, axis=0) 472 | 473 | if train_label_all is None: 474 | train_label_all = train_label 475 | else: 476 | train_label_all = np.append(train_label_all, train_label, axis=0) 477 | # train_element_list.append(train_next_element_list[index]) 478 | break 479 | 480 | print('select train tfr time cost: %f' % (time.time() - ss)) 481 | # triplet_net.is_training = False # compute descriptors 482 | 483 | triplet_net.is_training = True # train 484 | 485 | train_positive_gi_all = np.zeros_like(train_anchor_gi_all) 486 | train_negative_gi_all = np.zeros_like(train_anchor_gi_all) 487 | 488 
| for index in range(len(train_anchor_gi_all)): 489 | 490 | gi_same = np.asarray([target_gi for j, target_gi in enumerate(train_anchor_gi_all) 491 | if train_label_all[j] == train_label_all[index] and j != index]) 492 | train_positive_gi_all[index] = gi_same[np.random.choice(a=range(len(gi_same)), size=1)[0]] 493 | 494 | gi_diff = np.asarray([target_gi for j, target_gi in enumerate(train_anchor_gi_all) 495 | if train_label_all[j] != train_label_all[index]]) 496 | train_negative_gi_all[index] = gi_diff[np.random.choice(a=range(len(gi_diff)), size=1)[0]] 497 | 498 | 499 | ts = time.time() 500 | # _, summary = sess.run([train_op, merged_summary], 501 | # feed_dict={anchor_placeholder: train_anchor_gi_all, 502 | # anchor_label_placeholder: train_label_all}) 503 | 504 | 505 | _, summary = sess.run([train_op, merged_summary], 506 | feed_dict={anchor_placeholder: train_anchor_gi_all, 507 | positive_placeholder: train_positive_gi_all, 508 | negative_placeholder: train_negative_gi_all}) 509 | 510 | 511 | print('train time cost: %f' % (time.time() - ts)) 512 | 513 | train_writer.add_summary(summary, global_step=iteration) 514 | 515 | if iteration % args.val_freq == 0: 516 | 517 | ss = time.time() 518 | val_anchor_gi_all = [] 519 | val_label_all = [] 520 | val_anchor_gi_all = None 521 | val_label_all = None 522 | val_element_list = [] 523 | 524 | for keypoint_id in selected_keypoints: 525 | for index in range(patch_num): 526 | if keypoint_id >= index * num_point and keypoint_id < (index + 1) * num_point: 527 | for i in range(args.batch_gi_num//2): 528 | val_gi, val_label = sess.run(val_next_element_list[index]) 529 | 530 | if val_anchor_gi_all is None: 531 | val_anchor_gi_all = val_gi 532 | else: 533 | val_anchor_gi_all = np.append(val_anchor_gi_all, val_gi, axis=0) 534 | 535 | if val_label_all is None: 536 | val_label_all = val_label 537 | else: 538 | val_label_all = np.append(val_label_all, val_label, axis=0) 539 | # val_element_list.append(val_next_element_list[index]) 540 
| break 541 | 542 | 543 | print('select val tfr time cost: %f' % (time.time() - ss)) 544 | 545 | val_positive_gi_all = np.zeros_like(val_anchor_gi_all) 546 | val_negative_gi_all = np.zeros_like(val_anchor_gi_all) 547 | 548 | for index in range(len(val_anchor_gi_all)): 549 | gi_same = np.asarray([target_gi for j, target_gi in enumerate(val_anchor_gi_all) 550 | if val_label_all[j] == val_label_all[index] and j != index]) 551 | val_positive_gi_all[index] = gi_same[np.random.choice(a=range(len(gi_same)), size=1)[0]] 552 | 553 | gi_diff = np.asarray([target_gi for j, target_gi in enumerate(val_anchor_gi_all) 554 | if val_label_all[j] != val_label_all[index]]) 555 | val_negative_gi_all[index] = gi_diff[np.random.choice(a=range(len(gi_diff)), size=1)[0]] 556 | 557 | 558 | vs = time.time() 559 | # summary = sess.run(merged_summary, feed_dict={anchor_placeholder: val_anchor_gi_all, 560 | # anchor_label_placeholder: val_label_all}) 561 | summary = sess.run(merged_summary, feed_dict={anchor_placeholder: val_anchor_gi_all, 562 | positive_placeholder: val_positive_gi_all, 563 | negative_placeholder: val_negative_gi_all}) 564 | print('val time cost: %f' % (time.time() - vs)) 565 | 566 | validation_writer.add_summary(summary, global_step=iteration) 567 | 568 | print('!!!!!!!!----------current iteration: %d'%iteration) 569 | 570 | 571 | 572 | if (iteration + 1) % (args.save_freq) == 0: 573 | saver.save(sess, join(args.model_saving_dir, 'training_model'), global_step=iteration) 574 | 575 | 576 | -------------------------------------------------------------------------------- /python_learning/train_softmax6890.py: -------------------------------------------------------------------------------- 1 | # ********************************************************** 2 | # This file is MODIFIED from a part of the 3D descriptor 3 | # learning framework by Hanyu Wang(王涵玉) 4 | # https://github.com/jianweiguo/local3Ddescriptorlearning 5 | # 6 | # Author: Yiqun Wang(王逸群) 7 | # 
# https://github.com/yiqun-wang/LPS
# **********************************************************

import argparse
import os
import sys
import time
import math
from os.path import join

import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *


# --------------------------------------------------------------------------
# Command-line parameters
# --------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='')

parser.add_argument('--gpuid', '-g', default='7', type=str, metavar='N',
                    help='GPU id to run')

parser.add_argument('--learning_rate', '--lr', default=0.001, type=float,  # 0.001
                    help='the learning rate')
parser.add_argument('--l2_regularizer_scale', default=0.006, type=float,  # 0.005
                    help='scale parameter used in the l2 regularization')
parser.add_argument('--n_iteration', '-n', default=100000, type=int, metavar='N',
                    help='number of training iterations')

parser.add_argument('--batch_size', '--bs', default=2048, type=int,
                    help='size of training batch, it is equal to batch_keypoint_num*batch_gi_num')
parser.add_argument('--batch_keypoint_num', '--bkn', default=1024, type=int,
                    help='number of different keypoints in a training batch')
parser.add_argument('--batch_gi_num', '--bgn', default=2, type=int,
                    help='number of geometry images of one keypoint in a training batch')
parser.add_argument('--patch_num', '--pn', default=256, type=int,
                    help='number of patch divide in training process')
# FIX: help text was a copy-paste of --patch_num's; this flag actually sets
# the dimensionality of the final embedding layer (see TripletNet.inference).
parser.add_argument('--desc_dims', '--dd', default=256, type=int,
                    help='dimensions of the output descriptor')

parser.add_argument('--print_freq', '--pf', default=100, type=int,
                    help=r'print info every {print_freq} iterations')
parser.add_argument('--save_freq', '--sf', default=100, type=int,
                    help=r'save the current trained model every {save_freq} iterations')

parser.add_argument('--summary_saving_dir', '--ssd', default='./summary', type=str,
                    help='directory to save summaries')
parser.add_argument('--model_saving_dir', '--msd', default='./saved_models', type=str,
                    help='directory to save trained models')

# FIX: help typo "of not" -> "or not"
parser.add_argument('--restore', '-r', dest='restore', default=False, action='store_true',
                    help='bool value, restore variables from saved model or not')
parser.add_argument('--restore_path',
                    default='/data/yqwang/Project/3dDescriptor/train_softmax_adam_cb/saved_models_z256/training_model-22799',
                    type=str,
                    help='path to the saved model(if restore)')

# NOTE(review): default=True with action='store_true' means this flag can
# never be turned off from the command line; kept for backward compatibility.
parser.add_argument('--use_kpi_set', dest='use_kpi_set', default=True, action='store_true',
                    help='bool value, use keypoint set from keypoint file or not')
parser.add_argument('--keypoints_path', default='/data/yqwang/Dataset/faust_256p/keypoints_faust_all.kpi',
                    type=str,
                    help='path to the keypoint file(if use_kpi_set)')
parser.add_argument('--n_all_points', default=6890, type=int,
                    help='number of all points in the model')

# FIX: help typo "bacth" -> "batch"
parser.add_argument('--shuffle_batch_capacity', default=13000, type=int,
                    help='capacity of shuffle batch buffer')
parser.add_argument('--gi_size', default=32, type=int,
                    help='length and width of geometry image, assuming it\'s square')
parser.add_argument('--gi_channel', default=2, type=int,
                    help='number of geometry image channels')
parser.add_argument('--triplet_loss_gap', default=1, type=float,
                    help='the gap value used in the triplet loss')
parser.add_argument('--n_loss_compute_iter', default=17, type=int,
                    help='number of iterations to compute the training loss')
parser.add_argument('--tfr_dir', default='/data/yqwang/Dataset/faust_256p_025_cb/gi_TFRecords',
                    type=str,
                    help='directory of training TFRecords, containing'
                         '3 subdirectories: \"train\", \"val\", and \"test\"')
parser.add_argument('--tfr_name_template', default=r'pidx_%04d.tfrecords', type=str,
                    help='name template of TFRecords filenames')


def read_index_file(path, delimiter=' '):
    """
    Read integer indices from a text file and return them as a list.

    Lines whose stripped content starts with '#' are treated as comments.
    Braces, brackets, spaces and tabs around the content are ignored, and
    each line may contain several delimiter-separated indices.

    :param path: path of the text file.
    :param delimiter: separator between indices on one line (default space).
    :return: a list of int indices, in file order.
    """

    index_list = []
    with open(path, 'r') as text:

        for line in text:
            ls = line.strip(' {}[]\t')

            if not ls or ls[0] == '#':  # Comment content
                continue
            ll = ls.split(delimiter)

            for id_str in ll:
                # The inner strip also removes the trailing newline that the
                # outer strip above deliberately leaves in place.
                idst = id_str.strip()
                if idst == '':
                    continue
                index_list.append(int(idst))

    return index_list


def append_log(path, string_stream):
    """
    Append string_stream to the log file at `path`.

    :param path: path of the log file.
    :param string_stream: string that will be written.
    """

    with open(path, 'a') as log:
        log.write(string_stream)
    return


class TripletNet:
    """
    Triplet CNN wrapper: three weight-sharing CNN branches (anchor /
    positive / negative).  In this script only the anchor branch is trained,
    with a softmax classification head over the keypoint ids (pre-training
    for the triplet network in train_mincv_perloss6890_256.py).
    """

    def __init__(self, args=None, is_training=True):
        self.args = args
        self.is_training = is_training
        self.anchor_net = None  # anchor_net is also the predict_net
        self.positive_net = None
        self.negative_net = None
        self.descriptors = None  # descriptors of anchors
        self.cost = None
        self.cost_same = None
        self.cost_diff = None
        self.all_multiuse_params = None  # branch params shared by all three towers
        self.predictions = None  # softmax probabilities over keypoint classes
        self.acc = None  # classification accuracy on the fed batch

    def inference(self, gi_placeholder, reuse=None):
        """
        Build one CNN branch: 3 x (conv -> batchnorm -> pool) with 128/256/512
        filters, then FC(512) + batchnorm and a final linear embedding of
        args.desc_dims units.

        :param gi_placeholder: [batch, gi_size, gi_size, gi_channel] input.
        :param reuse: None/False builds fresh variables, True shares them
                      with a previously built branch.
        :return: the tensorlayer network ending at the embedding layer.
        """
        with tf.variable_scope('model', reuse=reuse):
            tl.layers.set_name_reuse(reuse)  # reuse!

            network = tl.layers.InputLayer(gi_placeholder, name='input')

            """ conv2 """
            network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.identity,
                             padding='SAME', W_init=args.conv_initializer, name='conv2_1')

            network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation,
                                     is_train=self.is_training, name='bn2_1')

            network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                                padding='SAME', name='pool2')

            """ conv3 """
            network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.identity,
                             padding='SAME', W_init=args.conv_initializer, name='conv3_1')

            network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation,
                                     is_train=self.is_training, name='bn3_1')

            network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2),
                                padding='SAME', name='pool3')

            """ conv4 """
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.identity,
                             padding='SAME', W_init=args.conv_initializer, name='conv4_1')

            network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation,
                                     is_train=self.is_training, name='bn4_1')

            network = MeanPool2d(network, filter_size=(2, 2), strides=(2, 2),
                                 padding='SAME', name='pool4')

            network = FlattenLayer(network, name='flatten')
            network = DenseLayer(network, n_units=512, act=tf.identity, name='fc1_relu')

            network = BatchNormLayer(network, decay=0.9, epsilon=1e-4, act=args.activation,
                                     is_train=self.is_training, name='bn_fc')
            # NOTE: the layer name '128d_embedding' is historical; its actual
            # width is args.desc_dims.  Do not rename it or existing
            # checkpoints will fail to restore.
            network = DenseLayer(network, n_units=args.desc_dims, act=tf.identity, name='128d_embedding')

            return network

    def build_nets(self, anchor_placeholder, positive_placeholder, negative_placeholder, anchor_label_placeholder, keypoint_num):
        """
        Build the three branches and attach the softmax classification head
        (cost, predictions, accuracy, summaries, l2 regularization) to the
        anchor branch.

        :param anchor_placeholder: anchor geometry-image batch.
        :param positive_placeholder: positive batch (weights shared, unused by the loss here).
        :param negative_placeholder: negative batch (weights shared, unused by the loss here).
        :param anchor_label_placeholder: int32 [batch] keypoint-class labels.
        :param keypoint_num: number of classes for the softmax head.
        """
        self.anchor_net = self.inference(anchor_placeholder, reuse=None)
        self.positive_net = self.inference(positive_placeholder, reuse=True)
        self.negative_net = self.inference(negative_placeholder, reuse=True)

        # Kept for parity with the triplet-loss variant of this script; the
        # triplet loss itself is not used in the softmax pre-training.
        gap = tf.constant(np.float32(args.triplet_loss_gap))
        zero = tf.constant(np.float32(0))

        # Snapshot the shared-branch parameters before the classification
        # head adds its own (head weights are discarded after pre-training).
        self.all_multiuse_params = self.anchor_net.all_params.copy()

        self.anchor_net.outputs = args.activation(self.anchor_net.outputs)

        self.anchor_net = DenseLayer(self.anchor_net, n_units=keypoint_num, act=tf.identity, name='feature')

        logits = self.anchor_net.outputs
        self.predictions = tf.nn.softmax(logits)
        self.cost = tl.cost.cross_entropy(output=logits, target=anchor_label_placeholder, name='cost')

        correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), anchor_label_placeholder)
        self.acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='acc')

        # Add them to tf.summary to see them in tensorboard
        tf.summary.scalar(name='cost', tensor=self.cost)
        tf.summary.scalar(name='accuracy', tensor=self.acc)

        # Weight decay on all conv kernels and the embedding weights.
        l2 = 0
        for p in tl.layers.get_variables_with_name('W_conv2d'):
            l2 += tf.contrib.layers.l2_regularizer(args.l2_regularizer_scale)(p)
            tf.summary.histogram(name=p.name, values=p)

        for p in tl.layers.get_variables_with_name('128d_embedding/W'):
            l2 += tf.contrib.layers.l2_regularizer(args.l2_regularizer_scale)(p)
            tf.summary.histogram(name=p.name, values=p)

        self.cost += l2


# --------------------------------------------------------------------------
# Parse args and start session
# --------------------------------------------------------------------------
args = parser.parse_args()
setattr(args, 'conv_initializer', tf.contrib.layers.xavier_initializer())
setattr(args, 'activation', tl.activation.leaky_relu)

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpuid
train_tfr_dir = join(args.tfr_dir, 'train')
val_tfr_dir = join(args.tfr_dir, 'val')
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)


# Get used keypoint indices
if args.use_kpi_set:
    keypoint_list = read_index_file(args.keypoints_path)
else:
    keypoint_list = list(range(args.n_all_points))

keypoint_num = len(keypoint_list)

# Rebuild a 0-based contiguous index: only the COUNT of keypoints from the
# file is kept; labels inside the TFRecords are assumed to already be
# 0..keypoint_num-1.
keypoint_list = list(range(keypoint_num))


def parse_and_decode(serialized_example):
    """
    Parse one serialized TFRecord example into (geometry image, label).

    :param serialized_example: scalar string tensor with one tf.Example.
    :return: (float32 [gi_size, gi_size, gi_channel] tensor, int32 label).
    """
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'gi_raw': tf.FixedLenFeature([], tf.string),
                                           'label': tf.FixedLenFeature([], tf.int64),
                                       })

    gi = tf.decode_raw(features['gi_raw'], tf.float32)
    gi = tf.reshape(gi, [args.gi_size, args.gi_size, args.gi_channel])
    label = tf.cast(features['label'], tf.int32)  # throw label tensor
    return gi, label


run_time = time.localtime(time.time())

# Placeholder setup: [batch_size, height, width, channels]
anchor_placeholder = tf.placeholder(
    dtype=tf.float32,
    shape=[None, args.gi_size, args.gi_size, args.gi_channel])

positive_placeholder = tf.placeholder(
    dtype=tf.float32,
    shape=[None, args.gi_size, args.gi_size, args.gi_channel])

negative_placeholder = tf.placeholder(
    dtype=tf.float32,
    shape=[None, args.gi_size, args.gi_size, args.gi_channel])

anchor_label_placeholder = tf.placeholder(
    dtype=tf.int32,
    shape=[None])  # [batch_size]

# Build the net
triplet_net = TripletNet(is_training=True)  # training

triplet_net.build_nets(
    anchor_placeholder=anchor_placeholder,
    positive_placeholder=positive_placeholder,
    negative_placeholder=negative_placeholder,
    anchor_label_placeholder=anchor_label_placeholder,
    keypoint_num=keypoint_num
)

train_params = triplet_net.anchor_net.all_params

train_op = tf.train.AdamOptimizer(args.learning_rate, beta1=0.9, beta2=0.999,
                                  epsilon=1e-08, use_locking=False).minimize(triplet_net.cost,
                                                                             var_list=train_params)

saver = tf.train.Saver()

# Summary for visualization.
# --------------------------------------------------------------------------
# Summary writers and log-file setup (cost/accuracy summaries are registered
# inside TripletNet.build_nets).
# --------------------------------------------------------------------------
merged_summary = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(args.summary_saving_dir, 'train'), sess.graph)
validation_writer = tf.summary.FileWriter(join(args.summary_saving_dir, 'validation'), sess.graph)
log_path = join('./', 'log_' + time.strftime('%Y-%m-%d_%H-%M-%S', run_time) + '.log')
log_stream = 'Start running at ' + time.strftime('%Y-%m-%d %H:%M:%S', run_time) + '.\n'
log_stream += '================================================================================\n'

with open(log_path, 'w') as logf:
    logf.write(log_stream)

log_stream = str(args) + '\n'
append_log(log_path, log_stream)

log_stream = ''

if args.restore:
    # The Saver was created after train_op, so the checkpoint is expected to
    # contain the optimizer slot variables as well; no extra init is done.
    load_saver = tf.train.Saver()
    load_saver.restore(sess, args.restore_path)
    info = 'Restore model parameters from %s' % args.restore_path
    log_stream += info
    log_stream += '\n'
    print(info)
else:
    tl.layers.initialize_global_variables(sess)
    info = 'Successfully initialized global variables.'
    log_stream += info
    log_stream += '\n'
    print(info)

triplet_net.anchor_net.print_params()
triplet_net.anchor_net.print_layers()

info = ' learning_rate: %f' % args.learning_rate
log_stream += info
log_stream += '\n'
print(info)

info = ' batch_size: %d' % args.batch_size
log_stream += info
log_stream += '\n'
print(info)

append_log(log_path, log_stream)
log_stream = ''


# --------------------------------------------------------------------------
# Prepare training data iterators from tfrecords: keypoints are split into
# `patch_num` contiguous groups of `num_point` indices, one dataset each.
# --------------------------------------------------------------------------
patch_num = args.patch_num
# BUG FIX: the original computed math.ceil(6890 // patch_num); `//` already
# floors, so ceil was a no-op and keypoints with index >=
# patch_num * (n_all_points // patch_num) matched no patch and were silently
# skipped.  True division covers all points, and args.n_all_points (default
# 6890) replaces the hard-coded constant.
num_point = math.ceil(args.n_all_points / patch_num)

train_iter_list = []
val_iter_list = []

train_next_element_list = []
val_next_element_list = []

train_filename_list = [[] for j in range(patch_num)]
val_filename_list = [[] for j in range(patch_num)]

for keypoint_idx in range(keypoint_num):
    for index in range(patch_num):
        if keypoint_idx >= index * num_point and keypoint_idx < (index + 1) * num_point:
            train_filename_list[index].append(join(train_tfr_dir, args.tfr_name_template % keypoint_idx))
            val_filename_list[index].append(join(val_tfr_dir, args.tfr_name_template % keypoint_idx))
            break


for i in range(patch_num):
    # Records are shuffled first, then grouped into batches of batch_gi_num.
    train_iter = tf.data.TFRecordDataset(train_filename_list[i]).map(parse_and_decode).shuffle(
        args.shuffle_batch_capacity).batch(args.batch_gi_num).repeat().make_one_shot_iterator()
    train_iter_list.append(train_iter)

    train_next_element = train_iter.get_next()
    train_next_element_list.append(train_next_element)

    val_iter = tf.data.TFRecordDataset(val_filename_list[i]).map(parse_and_decode).shuffle(
        args.shuffle_batch_capacity).batch(args.batch_gi_num).repeat().make_one_shot_iterator()
    val_iter_list.append(val_iter)

    val_next_element = val_iter.get_next()
    val_next_element_list.append(val_next_element)


# --------------------------------------------------------------------------
# Softmax pre-training loop.  NOTE(review): validation data is fetched and
# evaluated on EVERY iteration (no val_freq gate, unlike the triplet
# script); confirm this is intended, as it roughly doubles iteration cost.
# --------------------------------------------------------------------------
start_time = time.time()

for iteration in range(args.n_iteration):

    # Sample a subset of keypoints (without replacement) for this batch.
    selected_keypoints = np.random.choice(a=keypoint_list, size=args.batch_keypoint_num, replace=False)

    ss = time.time()
    train_anchor_gi_all = None
    train_label_all = None

    # Pull one batch (batch_gi_num images) per selected keypoint from the
    # iterator of the patch it belongs to and concatenate along axis 0.
    for keypoint_id in selected_keypoints:
        for index in range(patch_num):
            if keypoint_id >= index * num_point and keypoint_id < (index + 1) * num_point:

                train_gi, train_label = sess.run(train_next_element_list[index])

                if train_anchor_gi_all is None:
                    train_anchor_gi_all = train_gi
                else:
                    train_anchor_gi_all = np.append(train_anchor_gi_all, train_gi, axis=0)

                if train_label_all is None:
                    train_label_all = train_label
                else:
                    train_label_all = np.append(train_label_all, train_label, axis=0)

                break

    print('select train tfr time cost: %f' % (time.time() - ss))

    # NOTE(review): toggling this attribute after the graph was built does
    # not change the BatchNormLayer is_train behavior baked in at build time.
    triplet_net.is_training = True  # train

    ts = time.time()
    _, summary = sess.run([train_op, merged_summary],
                          feed_dict={anchor_placeholder: train_anchor_gi_all,
                                     anchor_label_placeholder: train_label_all})
    print('train time cost: %f' % (time.time() - ts))

    train_writer.add_summary(summary, global_step=iteration)

    ss = time.time()
    val_anchor_gi_all = None
    val_label_all = None

    for keypoint_id in selected_keypoints:
        for index in range(patch_num):
            if keypoint_id >= index * num_point and keypoint_id < (index + 1) * num_point:

                val_gi, val_label = sess.run(val_next_element_list[index])

                if val_anchor_gi_all is None:
                    val_anchor_gi_all = val_gi
                else:
                    val_anchor_gi_all = np.append(val_anchor_gi_all, val_gi, axis=0)

                if val_label_all is None:
                    val_label_all = val_label
                else:
                    val_label_all = np.append(val_label_all, val_label, axis=0)

                break

    print('select val tfr time cost: %f' % (time.time() - ss))

    vs = time.time()
    summary = sess.run(merged_summary, feed_dict={anchor_placeholder: val_anchor_gi_all,
                                                  anchor_label_placeholder: val_label_all})
    print('val time cost: %f' % (time.time() - vs))

    validation_writer.add_summary(summary, global_step=iteration)

    # Periodically report train/validation loss and accuracy.
    if iteration == 0 or (iteration + 1) % args.print_freq == 0:
        info = 'Iteration %d of %d took %fs from last displayed iteration.' % (iteration + 1, args.n_iteration, time.time() - start_time)
        log_stream += info
        log_stream += '\n'
        print(info)
        start_time = time.time()

        train_loss, train_acc = sess.run([triplet_net.cost, triplet_net.acc],
                                         feed_dict={anchor_placeholder: train_anchor_gi_all,
                                                    anchor_label_placeholder: train_label_all})

        # Compute validation loss (hard-mining is disabled in this part).
        val_loss, val_acc = sess.run([triplet_net.cost, triplet_net.acc],
                                     feed_dict={anchor_placeholder: val_anchor_gi_all,
                                                anchor_label_placeholder: val_label_all})

        info = ' train loss: %f' % (train_loss)
        log_stream += info
        log_stream += '\n'
        print(info)

        info = ' train acc: %f' % (train_acc)
        log_stream += info
        log_stream += '\n'
        print(info)

        info = ' validation loss: %f' % (val_loss)
        log_stream += info
        log_stream += '\n'
        print(info)

        info = ' validation acc: %f' % (val_acc)
        log_stream += info
        log_stream += '\n'
        print(info)

        append_log(log_path, log_stream)
        log_stream = ''

    # Periodic checkpointing.
    if (iteration + 1) % (args.save_freq) == 0:
        saver.save(sess, join(args.model_saving_dir, 'training_model'), global_step=iteration)