├── .gitignore ├── LICENSE ├── README.md ├── blender_render ├── Platte.obj ├── __init__.py ├── create_table_pose.py ├── image_utils.py ├── render_grid.py ├── render_random_pose.py ├── render_utils.py ├── table_poses.npz └── textures │ ├── cfabric_01_pbr-l-color.jpg │ ├── cfabric_01_pbr-l-displacement.jpg │ ├── cloth_01-m-color.jpg │ ├── cwool01_pbr-l-color.jpg │ ├── fabric_cloak01-m-color.jpg │ ├── fabric_cloak02-m-color.jpg │ ├── fabric_cloth_dirty-m-color.jpg │ ├── fabric_cloth_grey-m-color.jpg │ ├── fabric_cloth_red-m-color.jpg │ ├── fabric_cloth_roughwool-l-color.jpg │ ├── fabric_leather_bright01-m-color.jpg │ ├── fabric_leather_bright02-m-color.jpg │ ├── fabric_leather_dark-m-color.jpg │ ├── fabric_leather_soft-m-bump.jpg │ ├── fabric_leather_soft-m-color.jpg │ ├── fabric_pattern_simple-l-color.jpg │ ├── fabric_patterned_1-m-color.jpg │ ├── fabric_patterned_10-m-specular.jpg │ ├── fabric_patterned_2-m-color.jpg │ ├── fabric_patterned_3-m-color.jpg │ ├── fabric_patterned_4-m-color.jpg │ ├── fabric_patterned_5-m-color.jpg │ ├── fabric_patterned_6-m-color.jpg │ ├── fabric_patterned_8-l-color.jpg │ ├── fabric_patterned_9-m-color.jpg │ ├── fabric_solid_1-l-color.jpg │ ├── fabric_solid_10-m-color.jpg │ ├── fabric_solid_11-m-color.jpg │ ├── fabric_solid_12-m-color.jpg │ ├── fabric_solid_14-m-color.jpg │ ├── fabric_solid_15-l-color.jpg │ ├── fabric_solid_16-m-color.jpg │ ├── fabric_solid_18-m-color.jpg │ ├── fabric_solid_19-m-color.jpg │ ├── fabric_solid_2-m-color.jpg │ ├── fabric_solid_20-l-color.jpg │ ├── fabric_solid_21-m-color.jpg │ ├── fabric_solid_22-m-color.jpg │ ├── fabric_solid_27-m-color.jpg │ ├── fabric_solid_3-m-color.jpg │ ├── fabric_solid_31-m-roughness.jpg.png │ ├── fabric_solid_4-m-color.jpg │ ├── fabric_solid_5-m-color.jpg │ ├── fabric_solid_6-l-color.jpg │ ├── fabric_solid_7-m-color.jpg │ ├── fabric_solid_8-l-color.jpg │ ├── fabric_solid_9-m-color.jpg │ └── fabric_wool_knitted_rough-m-color.jpg ├── create_annotation.py ├── data ├── downsample.mlx ├── meshes.py ├── multiviews.py └── pointclouds.py ├── img ├── ApolloCar3D.png ├── Doumanoglou.png ├── HomebrewedDB.png ├── KITTI.png ├── LINEMOD-O.jpg ├── LINEMOD.jpg ├── LINEMOD.png ├── ObjectNet3D.png ├── Pascal3D.png ├── Pix3D.png ├── ScanNet.png ├── T-LESS.png ├── Tejani.png └── YCB-Video.png ├── ply2obj.py └── retrieve_files.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Yang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ObjectPoseEstimationDatasets 2 | A repo to summarize **datasets** used for object pose estimation and 3 | **rendering methods** used to generate synthetic training data. 4 | 5 | In the following tables, 3D CAD model is noted as **model** and 6 | 2D pictured object is noted as **object**. 
7 | 8 | Survey papers covering related datasets 9 | can be found [here](http://openaccess.thecvf.com/content_CVPRW_2019/papers/WiCV/Jalal_SIDOD_A_Synthetic_Image_Dataset_for_3D_Object_Pose_Recognition_CVPRW_2019_paper.pdf) 10 | and [here](https://arxiv.org/abs/1903.04229). 11 | 12 | 13 | ## Table of Contents 14 | * [Challenges](#challenges) 15 | * [Objects in the controlled environments](#objects-in-the-controlled-environments) 16 | * [Objects in the wild](#objects-in-the-wild) 17 | * [3D model datasets](#3d-model-datasets) 18 | * [Rendering Methods](#rendering-methods) 19 | 20 | 21 | ## Challenges :space_invader: 22 | * [ICCV 2015: Occluded Object Challenge](https://hci.iwr.uni-heidelberg.de/vislearn/iccv2015-occlusion-challenge/) 23 | * [ICCV 2015: Recovering Object Pose](https://labicvl.github.io/3DPose-2015.html) 24 | * [ECCV 2016: Recovering Object Pose](https://labicvl.github.io/R6D) 25 | * [ICCV 2017: Recovering Object Pose](http://cmp.felk.cvut.cz/sixd/workshop_2017/) 26 | * [ECCV 2018: Recovering Object Pose](http://cmp.felk.cvut.cz/sixd/workshop_2018/) 27 | * [ICCV 2019: Recovering Object Pose](http://cmp.felk.cvut.cz/sixd/workshop_2019/) 28 | * [IROS 2019: Object Pose Estimation for Bin-Picking](http://www.bin-picking.ai/en/competition.html) 29 | 30 | 31 | ## Objects in the controlled environments :movie_camera: 32 | This table lists the datasets commonly known as **BOP: Benchmark for 6D Object Pose Estimation**, 33 | which provide accurate 3D object models and precise 2D-3D alignment. 34 | 35 | You can download all the BOP datasets [here](https://bop.felk.cvut.cz/datasets/) and 36 | use the [toolkit](https://github.com/thodan/bop_toolkit) provided by the organizers. 37 | 38 | After downloading the data, 39 | you can use our code ```ply2obj.py``` to convert the original **.ply** files to **.obj** files, 40 | and run ```create_annotation.py``` to create a single annotation file for all the scenes in a dataset. 41 | 42 | The dataset format is described [here](https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md); 43 | we use an **instance id** in our annotation to distinguish different instances pictured in the same image.
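A rough stand-in for this conversion (not the repo's ```ply2obj.py``` itself) can be written with the third-party ```trimesh``` package; the paths below are illustrative, and per-vertex colors stored in the BOP models may be dropped:

```python
import glob
import os

import trimesh  # assumed third-party dependency, not shipped with this repo


def convert_ply_to_obj(ply_path, obj_path):
    # Load one BOP model and re-export it; trimesh infers the format from the extension.
    mesh = trimesh.load(ply_path)
    mesh.export(obj_path)


# Illustrative path to a downloaded BOP dataset, e.g. LINEMOD.
for ply_file in glob.glob('lm/models/*.ply'):
    convert_ply_to_obj(ply_file, os.path.splitext(ply_file)[0] + '.obj')
```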
44 | 45 | | Dataset | Sample image | Annotation | Statistics | Reference | 46 | | :-----: | :-----: | :-----: | :-----: | :-----: | 47 | | [HomebrewedDB](https://bop.felk.cvut.cz/datasets/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/HomebrewedDB.png) | 6D pose + Depth + BoundingBox| **33** models in **13** videos with **17,420** frames| [Preprint 2019](https://arxiv.org/abs/1904.03167)| 48 | | [YCB-Video](https://bop.felk.cvut.cz/datasets/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/YCB-Video.png) | 6D Pose + Depth + Mask | **21** models in **92** videos with **133,827** frames| [RSS 2018](https://arxiv.org/abs/1711.00199) | 49 | | [T-LESS](https://bop.felk.cvut.cz/datasets/)| ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/T-LESS.png) | 6D Pose + Depth | **30** models in **20** videos with **~49K** frames | [WACV 2017](http://cmp.felk.cvut.cz/t-less/)| 50 | | [Doumanoglou](https://bop.felk.cvut.cz/datasets/)| ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/Doumanoglou.png)| 6D Pose + Depth | **2** models in **3** videos with **183** frames| [CVPR 2016](http://rkouskou.gitlab.io/research/6D_NBV.html)| 51 | | [Tejani](https://bop.felk.cvut.cz/datasets/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/Tejani.png) | 6D Pose + Depth | **6** models in **6** videos with **2,067** frames | [ECCV 2014](http://rkouskou.gitlab.io/research/LCHF.html)| 52 | | [Occluded-LINEMOD](https://bop.felk.cvut.cz/datasets/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/LINEMOD-O.jpg) | 6D Pose + Depth | **8** models in **1,214** frames with **8,992** objects | [ECCV 2014](http://wwwpub.zih.tu-dresden.de/~cvweb/publications/papers/2014/PoseEstimationECCV2014.pdf) | 53 | | [LINEMOD](https://bop.felk.cvut.cz/datasets/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/LINEMOD.jpg) | 6D pose + Depth for one object | **15** models in **15** videos with **18,273** frames | [ACCV 2012](http://www.stefan-hinterstoisser.com/papers/hinterstoisser2012accv.pdf) | 54 | 55 | 56 | ## Objects in the wild :camera: 57 | In this table, **Pix3D** and **ScanNet** provide precise 2D-3D alignment 58 | while others only provide a coarse alignment. 59 | 60 | **PASCAL3D+** is the de facto benchmark used for viewpoint estimation. 61 | 62 | **ScanNet** is usually used to evaluate scene reconstruction and segmentation. 
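Some datasets below (e.g. **ObjectNet3D** and **PASCAL3D+**) annotate the viewpoint only as Euler angles; when a full rotation matrix is needed, it can be rebuilt from azimuth, elevation and in-plane rotation. The sketch below follows the same look-at convention as ```create_table_pose.py``` in this repo (camera placed on a sphere looking at the origin, then rotated in-plane); it is an illustration, not the official PASCAL3D+ toolkit code, and the angle conventions may need adjusting per dataset:

```python
import numpy as np
from math import radians, sin, cos
from scipy.linalg import expm, norm


def viewpoint_to_vertex(azimuth_deg, elevation_deg, distance=1.0):
    # Place the camera on a sphere around the origin (same layout as render_grid.py).
    azi, ele = radians(azimuth_deg), radians(elevation_deg)
    return np.array([-distance * cos(ele) * sin(azi),
                     -distance * cos(ele) * cos(azi),
                      distance * sin(ele)])


def angles_to_rotation(azimuth_deg, elevation_deg, inplane_deg):
    # Euler angles -> rotation matrix, mirroring create_pose() in create_table_pose.py.
    vertex = viewpoint_to_vertex(azimuth_deg, elevation_deg)
    up = [0, 0, 1] if not (vertex[0] == 0 and vertex[1] == 0) else [-1, 0, 0]
    rot = np.zeros((3, 3))
    rot[:, 2] = -vertex / norm(vertex)   # view direction towards the origin
    rot[:, 0] = np.cross(rot[:, 2], up)
    rot[:, 0] /= norm(rot[:, 0])
    rot[:, 1] = np.cross(rot[:, 0], -rot[:, 2])
    rodriguez = np.array([0, 0, 1]) * radians(inplane_deg)
    inplane = expm(np.cross(np.eye(3), rodriguez))  # in-plane rotation about the optical axis
    return inplane @ rot.T
```

For example, ```angles_to_rotation(30, 10, 0)``` gives the rotation of a camera at 30 deg azimuth and 10 deg elevation looking at the object, with no in-plane rotation.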
63 | 64 | | Dataset | Sample image | Annotation | Statistics | Reference | 65 | | :-----: | :-----: | :-----: | :-----: | :-----: | 66 | | [ApolloCar3D](http://apolloscape.auto/car_instance.html) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/ApolloCar3D.png) | 6D Pose + Mask | **34** car models with **60K+** objects in **5,277** images | [CVPR 2019](https://arxiv.org/abs/1811.12222) | 67 | | [Pix3D](http://pix3d.csail.mit.edu/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/Pix3D.png) | 6D Pose + Mask | **9** categories containing **395 models** in **10,069** images | [CVPR 2018](http://pix3d.csail.mit.edu/papers/pix3d_cvpr.pdf) | 68 | | [ScanNet](http://www.scan-net.org/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/ScanNet.png) | 6D Pose + Segmentation + Depth | **2.5M** RGB-D frames in **1,515** scenes | [CVPR 2017](https://arxiv.org/abs/1702.04405) | 69 | | [ObjectNet3D](http://cvgl.stanford.edu/projects/objectnet3d/) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/ObjectNet3D.png) | Euler Angles + BoundingBox | **100** categories with **201,888** objects in **90,127** images | [ECCV 2016](http://cvgl.stanford.edu/papers/xiang_eccv16.pdf) | 70 | | [PASCAL3D+](http://cvgl.stanford.edu/projects/pascal3d.html) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/Pascal3D.png) | Euler Angles + BoundingBox | **12** categories with **36,292** objects in **30,889** images | [WACV 2014](https://www-cs.stanford.edu/~roozbeh/papers/wacv14.pdf) | 71 | | [KITTI](http://www.cvlibs.net/datasets/kitti/eval_object.php) | ![](https://github.com/YoungXIAO13/6DPoseEstimationDatasets/blob/master/img/KITTI.png) | 3D BoundingBox | **80,256** objects in **14,999** images | [CVPR 2012](http://www.cvlibs.net/publications/Geiger2012CVPR.pdf) | 72 | 73 | 74 | ## 3D model datasets :bike: 75 | In order to test the network's **generalization** ability 76 | (evaluated on images containing **unseen** 3D models that do not appear in the training set), 77 | the following datasets can be used to generate synthetic training data. 78 | 79 | Notice that **ABC** contains generic and arbitrary industrial CAD models, 80 | while **ShapeNetCore** and **ModelNet** contain objects from common categories such as cars and chairs. 81 | 82 | | Dataset | Categories | Models in total | Reference | 83 | | :-----: | :-----: | :-----: | :-----: | 84 | | [ABC](https://deep-geometry.github.io/abc-dataset/) | - | 1 million | [CVPR 2019](https://arxiv.org/pdf/1812.06216.pdf) | 85 | | [ShapeNetCore](https://www.shapenet.org/download/shapenetcore) | 55 | ~51,300 | [ArXiv 2015](https://arxiv.org/abs/1512.03012) | 86 | | [ModelNet-40](http://modelnet.cs.princeton.edu/) | 40 | 26,960 | [CVPR 2015](https://3dshapenets.cs.princeton.edu/paper.pdf) | 87 | 88 | 89 | ## Rendering methods :bicyclist: 90 | 91 | ### Differentiable Renderer 92 | * [Neural 3D Mesh Renderer](http://hiroharu-kato.com/projects_en/neural_renderer.html): Kato et al. CVPR 2018 93 | 94 | * [RenderNet](https://github.com/thunguyenphuoc/RenderNet): Nguyen-Phuoc et al.
NIPS 2018 95 | 96 | ### Blender Render :mountain_bicyclist: 97 | In this repo, we provide Python code to render images from 3D models using 98 | Blender as a Python module, which is easy to install and produces photo-realistic images : ) 99 | 100 | In order to generate synthetic data, we first need to simulate a set of poses where the 101 | camera is uniformly distributed on the upper semi-sphere around the table plane. 102 | 103 | ```./blender_render/table_poses.npz``` contains the poses obtained from the LINEMOD-Occlusion dataset, 104 | with the distribution listed below (a short snippet for inspecting these poses is given at the end of this README): 105 | 106 | * Range of object distances: 346 - 1500 mm (only 3 instances below 400 mm) 107 | * Azimuth range: 0 - 360 deg 108 | * Elevation range: -14 - 89 deg (only a few instances below 0 deg) 109 | 110 | You can download CAD models of the [ABC](https://deep-geometry.github.io/abc-dataset/) dataset 111 | and retrieve the .obj files into the target directory using ```retrieve_files.py```. 112 | 113 | Then generate synthetic images of different models with various lighting and textures 114 | under random poses by running ```./blender_render/render_random_pose.py```. 115 | 116 | * Another Blender-based rendering project, which renders one model at a time, can be found 117 | [here](https://github.com/weiaicunzai/blender_shapenet_render). 118 | 119 | ### Physical Simulator 120 | 121 | [PyBullet](https://github.com/bulletphysics/bullet3/tree/master/examples/pybullet): 122 | a physics simulator that is very popular in the robotics community. 123 | 124 | 125 | ### Others 126 | * [Glumpy](https://github.com/glumpy/glumpy): does not support headless rendering (fails in SSH sessions) 127 | 128 | * [UnrealCV](https://github.com/unrealcv/unrealcv): an extension of Unreal Engine 4 that 129 | helps interact with the virtual world and communicate with external programs. 130 | 131 | * [SyntheticComputerVision](https://github.com/unrealcv/synthetic-computer-vision): 132 | summarizes many techniques used to generate synthetic images. 133 | 134 | **Attention**: 3D models should be aligned in the same way (e.g. with **MeshLab**) to 135 | ensure a consistent orientation convention when moving across the different datasets.
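As a quick sanity check on the pose statistics listed above, ```table_poses.npz``` can be inspected directly; it stores the arrays written by ```create_table_pose.py``` (keys ```R```, ```T``` and ```Ele```). A minimal sketch, assuming the file is read from the repo root:

```python
import numpy as np

# Arrays saved by create_table_pose.py for the 1214 LINEMOD-Occlusion table poses:
# R: Nx9 flattened rotation matrices, T: Nx3 translations in mm, Ele: N elevations in deg.
poses = np.load('blender_render/table_poses.npz')
R, T, Ele = poses['R'], poses['T'], poses['Ele']

distances = np.linalg.norm(T, axis=1)
valid = distances > 0  # guard against any unfilled entries
print('distance range: {:.0f} - {:.0f} mm'.format(distances[valid].min(), distances[valid].max()))
print('elevation range: {:.0f} - {:.0f} deg'.format(Ele[valid].min(), Ele[valid].max()))

# Pick one pose at random, as render_random_pose.py does before rendering a scene.
i = np.random.choice(np.flatnonzero(valid))
print('rotation matrix:\n', R[i].reshape(3, 3))
print('translation (mm):', T[i])
```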
-------------------------------------------------------------------------------- /blender_render/Platte.obj: -------------------------------------------------------------------------------- 1 | # Blender v2.79 (sub 0) OBJ File: '' 2 | # www.blender.org 3 | 4 | o Platte 5 | v 70.000000 -110.000000 -2.500015 6 | v 70.000000 -110.000000 2.499985 7 | v -70.000000 -110.000000 2.499985 8 | v -70.000000 -110.000000 -2.500015 9 | v -70.000000 110.000000 -2.499985 10 | v -70.000000 110.000000 2.500015 11 | v 70.000000 110.000000 2.500015 12 | v 70.000000 110.000000 -2.499985 13 | vn 0.0000 -1.0000 0.0000 14 | vn 0.0000 1.0000 0.0000 15 | vn 0.0000 -0.0000 1.0000 16 | vn 0.0000 0.0000 -1.0000 17 | vn 1.0000 0.0000 0.0000 18 | vn -1.0000 0.0000 0.0000 19 | f 1//1 2//1 3//1 4//1 20 | f 5//2 6//2 7//2 8//2 21 | f 2//3 7//3 6//3 3//3 22 | f 8//4 1//4 4//4 5//4 23 | f 8//5 7//5 2//5 1//5 24 | f 4//6 3//6 6//6 5//6 25 | -------------------------------------------------------------------------------- /blender_render/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/__init__.py -------------------------------------------------------------------------------- /blender_render/create_table_pose.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import numpy as np 3 | import os 4 | from scipy.linalg import expm, norm, logm 5 | import math 6 | from tqdm import tqdm 7 | 8 | 9 | def compute_rotation_from_vertex(vertex): 10 | """Compute rotation matrix from viewpoint vertex """ 11 | up = [0, 0, 1] 12 | if vertex[0] == 0 and vertex[1] == 0 and vertex[2] != 0: 13 | up = [-1, 0, 0] 14 | rot = np.zeros((3, 3)) 15 | rot[:, 2] = -vertex / norm(vertex) # View direction towards origin 16 | rot[:, 0] = np.cross(rot[:, 2], up) 17 | rot[:, 0] /= norm(rot[:, 0]) 18 | rot[:, 1] = np.cross(rot[:, 0], -rot[:, 2]) 19 | return rot.T 20 | 21 | 22 | def create_pose(vertex, scale=0, angle_deg=0): 23 | """Compute rotation matrix from viewpoint vertex and inplane rotation """ 24 | rot = compute_rotation_from_vertex(vertex) 25 | transform = np.eye(4) 26 | rodriguez = np.asarray([0, 0, 1]) * (angle_deg * math.pi / 180.0) 27 | angle_axis = expm(np.cross(np.eye(3), rodriguez)) 28 | transform[0:3, 0:3] = np.matmul(angle_axis, rot) 29 | transform[0:3, 3] = [0, 0, scale] 30 | return transform 31 | 32 | 33 | def compute_inplane_from_rotation(R, vertex): 34 | rot = compute_rotation_from_vertex(vertex) 35 | angle_axis = logm(np.matmul(R, rot.T)) 36 | return angle_axis[1, 0] 37 | 38 | 39 | def compute_viewpoint_from_camera(c): 40 | # compute the viewpoint according to the camera position 41 | elevation = np.arctan(c[2] / np.sqrt(c[0] **2 + c[2] ** 2)) 42 | azimuth = np.arctan(c[1] / c[0]) 43 | if c[0] < 0 and c[1] < 0: 44 | azimuth -= np.pi 45 | if c[0] < 0 and c[1] > 0: 46 | azimuth += np.pi 47 | return azimuth[0] * 180./ np.pi, elevation[0] * 180./ np.pi 48 | 49 | 50 | def compute_angles_from_pose(R, t): 51 | #c = - np.dot(np.transpose(R), t) 52 | direction = -np.dot(np.transpose(R), np.array([0., 0., 1.]).reshape(3, 1)) 53 | azimuth = np.arctan2(direction[0], direction[1])[0] * 180./ np.pi 54 | elevation = np.arcsin(direction[-1])[0] * 180./ np.pi 55 | inplane = compute_inplane_from_rotation(R, direction.reshape(-1)) * 180./ np.pi 56 | return azimuth, elevation, inplane 57 | 58 | 59 | LINEMOD_dir = 
'/media/xiao/newhd/XiaoDatasets/LineMod' 60 | with open(os.path.join(LINEMOD_dir, 'test', '02', 'gt.yml'), 'r') as stream: 61 | data = yaml.load(stream) 62 | 63 | outfile = 'table_poses.npz' 64 | Rotations = np.zeros((1214, 9)) 65 | Translations = np.zeros((1214, 3)) 66 | Elevations = np.zeros(1214) 67 | 68 | for i in tqdm(range(0, len(data))): 69 | for j in range(0, len(data[i])): 70 | obj_id = data[i][j]['obj_id'] 71 | if obj_id != 2: 72 | continue 73 | x, y, w, h = data[i][j]['obj_bb'] 74 | R = np.array(data[i][j]['cam_R_m2c']).reshape(-1) 75 | t = np.array(data[i][j]['cam_t_m2c']).reshape(-1) 76 | Rotations[i, :] = R 77 | Translations[i, :] = t 78 | azimuth, elevation, inplane = compute_angles_from_pose(R.reshape(3, 3), t.reshape(3, 1)) 79 | Elevations[i] = elevation 80 | 81 | np.savez(outfile, R=Rotations, T=Translations, Ele=Elevations) 82 | -------------------------------------------------------------------------------- /blender_render/image_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import numpy as np 3 | from PIL import Image 4 | import cv2 5 | import ipdb 6 | 7 | value_list = [25, 39, 48, 56, 63, 69, 75, 80, 85, 89, 8 | 93, 97, 101, 105, 108, 111, 115, 118, 121, 124, 9 | 126, 129, 132, 134, 137, 139, 142, 144, 147, 149] 10 | 11 | 12 | # Obtain the bounding box from the mask 13 | def obtain_obj_region(mask_path, idx): 14 | mask = cv2.imread(mask_path, -1) 15 | obj_mask = np.array(mask == value_list[idx]).astype('uint8') 16 | bbox = cv2.boundingRect(obj_mask) 17 | px_visible = int(np.sum(obj_mask)) 18 | occupy_fract = px_visible / (bbox[2] * bbox[3]) if px_visible != 0 else 0 19 | return bbox, px_visible, occupy_fract 20 | 21 | 22 | # Obtain the object center from the translation vector 23 | def obtain_obj_center(T, fx, fy, px, py, height, width): 24 | cx = int(fx * T[0] / T[2] + px) 25 | cy = int(fy * T[1] / T[2] + py) 26 | outside = True if cx <= 0 or cy <= 0 or cx >= width or cy >= height else False 27 | return cx, cy, outside 28 | 29 | 30 | # Crop and Resize the image without changing the aspect ratio 31 | def resize_padding(im, desired_size): 32 | # compute the new size 33 | old_size = im.size 34 | ratio = float(desired_size) / max(old_size) 35 | new_size = tuple([int(x * ratio) for x in old_size]) 36 | 37 | im = im.resize(new_size, Image.BILINEAR) 38 | 39 | # create a new image and paste the resized on it 40 | new_im = Image.new("RGBA", (desired_size, desired_size)) 41 | new_im.paste(im, ((desired_size - new_size[0]) // 2, (desired_size - new_size[1]) // 2)) 42 | return new_im 43 | 44 | 45 | def resize_padding_v2(im, desired_size_in, desired_size_out): 46 | # compute the new size 47 | old_size = im.size 48 | ratio = float(desired_size_in)/max(old_size) 49 | new_size = tuple([int(x*ratio) for x in old_size]) 50 | 51 | im = im.resize(new_size, Image.ANTIALIAS) 52 | 53 | # create a new image and paste the resized on it 54 | new_im = Image.new("RGBA", (desired_size_out, desired_size_out)) 55 | new_im.paste(im, ((desired_size_out - new_size[0]) // 2, (desired_size_out - new_size[1]) // 2)) 56 | return new_im 57 | 58 | 59 | # Crop and resize the rendering images 60 | def clean_rendering_results(img_path, depth_path, normal_path, target_size=128): 61 | img = Image.open(img_path) 62 | depth = Image.open(depth_path) 63 | normal = Image.open(normal_path) 64 | bbox = img.getbbox() 65 | img, depth, normal = img.crop(bbox), depth.crop(bbox), normal.crop(bbox) 66 | img = resize_padding(img, target_size).convert('RGB') 67 | 
depth = resize_padding(depth, target_size).convert('L') 68 | normal = resize_padding(normal, target_size).convert('RGB') 69 | normal_array = np.array(normal) 70 | mask = np.array(depth) == 0 71 | normal_array[mask, :] = 0 72 | normal = Image.fromarray(normal_array) 73 | img.save(img_path) 74 | depth.save(depth_path) 75 | normal.save(normal_path) 76 | -------------------------------------------------------------------------------- /blender_render/render_grid.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import bpy 3 | from mathutils import Matrix, Vector 4 | from math import radians, sin, cos 5 | import numpy as np 6 | import random 7 | import json 8 | import ipdb 9 | 10 | cur_dir = os.path.dirname(os.path.abspath(__file__)) 11 | sys.path.append(cur_dir) 12 | from render_utils import remove_obj_lamp_and_mesh, render_without_output, setup_env, make_lamp 13 | from image_utils import clean_rendering_results 14 | 15 | 16 | # Define pose grid from the 20 vertices of the regular dodecahedron 17 | phi = (1. + np.sqrt(5)) / 2  # golden ratio 18 | dodecahedron_vertex_coord = np.array( 19 | [[-1, -1, -1], [-1, -1, 1], [-1, 1, -1], [-1, 1, 1], 20 | [1, -1, -1], [1, -1, 1], [1, 1, -1], [1, 1, 1], 21 | [0, -phi, -1/phi], [0, -phi, 1/phi], [0, phi, -1/phi], [0, phi, 1/phi], 22 | [-1/phi, 0, -phi], [-1/phi, 0, phi], [1/phi, 0, -phi], [1/phi, 0, phi], 23 | [-phi, -1/phi, 0], [-phi, 1/phi, 0], [phi, -1/phi, 0], [phi, 1/phi, 0]] 24 | ) 25 | 26 | 27 | # Define pose grid for vertices on the semi-sphere 28 | semi_sphere_coord = [] 29 | step_azi = 5 # one render image every 5 degrees in azimuth 30 | step_ele = 30 # one tour every 30 degrees in elevation 31 | n_azi = int(360 / step_azi) 32 | n_view = n_azi * int(90 / step_ele) 33 | r = np.sqrt(3) 34 | for i in range(0, n_view): 35 | azi = (i * step_azi) % 360 36 | ele = (i // n_azi) * step_ele 37 | loc_x = -r * cos(radians(ele)) * sin(radians(azi)) 38 | loc_y = -r * cos(radians(ele)) * cos(radians(azi)) 39 | loc_z = r * sin(radians(ele)) 40 | semi_sphere_coord.append([loc_x, loc_y, loc_z]) 41 | semi_sphere_coord = np.array(semi_sphere_coord) 42 | 43 | 44 | # Add constraint to the camera 45 | def parent_obj_to_camera(b_camera): 46 | # set the parenting to the origin 47 | origin = (0, 0, 0) 48 | b_empty = bpy.data.objects.new("Empty", None) 49 | b_empty.location = origin 50 | b_camera.parent = b_empty 51 | 52 | scn = bpy.context.scene 53 | scn.objects.link(b_empty) 54 | scn.objects.active = b_empty 55 | return b_empty 56 | 57 | 58 | # Setup the camera 59 | def setup_camera(scene): 60 | cam = scene.objects['Camera'] 61 | cam_constraint = cam.constraints.new(type='TRACK_TO') 62 | cam_constraint.track_axis = 'TRACK_NEGATIVE_Z' 63 | cam_constraint.up_axis = 'UP_Y' 64 | b_empty = parent_obj_to_camera(cam) 65 | cam_constraint.target = b_empty 66 | return cam 67 | 68 | 69 | # Import 3D model from .obj files 70 | def import_model(model_file, axis_forward=None, axis_up=None): 71 | if axis_forward is not None and axis_up is not None: 72 | bpy.ops.import_scene.obj(filepath=model_file, axis_forward=axis_forward, axis_up=axis_up) 73 | else: 74 | bpy.ops.import_scene.obj(filepath=model_file) 75 | model_name = model_file.split('/')[-1].split('.')[0] 76 | return model_name 77 | 78 | 79 | # Normalize the 3D model 80 | def normalize_model(obj): 81 | bpy.context.scene.objects.active = obj 82 | bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS') 83 | obj.location = (0, 0, 0) 84 | max_dim = max(obj.dimensions) 85 | obj.dimensions =
obj.dimensions / max_dim 86 | 87 | 88 | # Create normalized coordinate map as a color map 89 | def create_coord_map(obj): 90 | mesh = obj.data 91 | vert_list = mesh.vertices 92 | vcos = [obj.matrix_world * v.co for v in vert_list] 93 | x, y, z = [[v[i] for v in vcos] for i in range(3)] 94 | min_x, min_y, min_z = min(x), min(y), min(z) 95 | size_x, size_y, size_z = max(x) - min(x), max(y) - min(y), max(z) - min(z) 96 | 97 | # get the color map to create as coordinate map 98 | if mesh.vertex_colors: 99 | color_map = mesh.vertex_colors.active 100 | else: 101 | color_map = mesh.vertex_colors.new() 102 | 103 | # apply the corresponding color to each vertex 104 | i = 0 105 | for poly in mesh.polygons: 106 | for idx in poly.loop_indices: 107 | loop = mesh.loops[idx] 108 | v = vert_list[loop.vertex_index] 109 | r = (v.co.x - min_x) / size_x if size_x != 0 else 1 110 | g = (v.co.y - min_y) / size_y if size_y != 0 else 1 111 | b = (v.co.z - min_z) / size_z if size_z != 0 else 1 112 | color_map.data[i].color = (r, g, b) 113 | i += 1 114 | 115 | mat = bpy.data.materials.new('vertex_material') 116 | mat.use_shadeless = True 117 | mat.use_vertex_color_paint = True 118 | if mesh.materials: 119 | mesh.materials[0] = mat 120 | else: 121 | mesh.materials.append(mat) 122 | 123 | 124 | class RenderMachine: 125 | """Creates a python blender render machine. 126 | 127 | model_files: a list containing all the obj files 128 | out_dir: where to save the render results 129 | rad: lamp radiance to adjust the lightness 130 | clip_end: rendering range in mm 131 | """ 132 | def __init__(self, 133 | model_file, out_dir, rendering='nocs', rad=30, clip_end=100, height=256, width=256): 134 | # Setting up the environment 135 | remove_obj_lamp_and_mesh(bpy.context) 136 | self.scene = bpy.context.scene 137 | self.depthFileOutput, self.normalFileOutput = setup_env(self.scene, True, True, height, width, clip_end) 138 | self.camera = setup_camera(self.scene) 139 | self.lamp = make_lamp(rad) 140 | self.height, self.width = height, width 141 | 142 | # Import 3D models and create the normalized object coordinate space as material 143 | self.model = import_model(model_file, axis_forward='Y', axis_up='Z') 144 | normalize_model(bpy.data.objects[self.model]) 145 | if rendering == 'nocs': 146 | create_coord_map(bpy.data.objects[self.model]) 147 | 148 | # Output setting 149 | self.out_dir = os.path.join(out_dir, rendering) 150 | self.depthFileOutput.base_path = os.path.join(out_dir, 'depth') 151 | self.normalFileOutput.base_path = os.path.join(out_dir, 'normal') 152 | self.scene.render.image_settings.file_format = 'PNG' 153 | 154 | def render_grid_pose(self, pose_grid): 155 | for i in range(pose_grid.shape[0]): 156 | self.camera.location = pose_grid[i] 157 | self.lamp.location = pose_grid[i] 158 | 159 | self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}'.format(i)) 160 | self.depthFileOutput.file_slots[0].path = '{:04d}_'.format(i) 161 | self.normalFileOutput.file_slots[0].path = '{:04d}_'.format(i) 162 | render_without_output(use_antialiasing=True) 163 | 164 | # Crop and resize the rendering results 165 | img_path = '{}.png'.format(self.scene.render.filepath) 166 | depth_path = os.path.join(self.depthFileOutput.base_path, '{:04d}_0001.png'.format(i)) 167 | normal_path = os.path.join(self.normalFileOutput.base_path, '{:04d}_0001.png'.format(i)) 168 | clean_rendering_results(img_path, depth_path, normal_path) 169 | 170 | 171 | if __name__ == '__main__': 172 | from tqdm import tqdm 173 | import argparse 174 | 175 | parser = 
argparse.ArgumentParser() 176 | parser.add_argument('--dataset_dir', type=str, help='dataset directory') 177 | parser.add_argument('--dataset_format', type=str, choices=['BOP', 'Pascal3D', 'ShapeNet'], help='dataset format') 178 | parser.add_argument('--input', type=str, help='subdirectory containing obj files in the dataset directory') 179 | parser.add_argument('--views', type=str, choices=['dodecahedron', 'semi_sphere'], help='poses under which the object will be rendered') 180 | args = parser.parse_args() 181 | 182 | input_dir = os.path.join(args.dataset_dir, args.input) 183 | output_dir = os.path.join(args.dataset_dir, args.views) 184 | 185 | if args.views == 'dodecahedron': 186 | poses = dodecahedron_vertex_coord 187 | elif args.views == 'semi_sphere': 188 | poses = semi_sphere_coord 189 | else: 190 | sys.exit(0) 191 | 192 | if args.dataset_format == 'BOP': 193 | model_files = sorted(os.listdir(input_dir)) 194 | for model_file in tqdm(model_files): 195 | model_path = os.path.join(input_dir, model_file) 196 | render_dir = os.path.join(output_dir, model_file.split(".")[0]) 197 | if os.path.isdir(render_dir): 198 | continue 199 | render_machine = RenderMachine(model_path, render_dir) 200 | render_machine.render_grid_pose(poses) 201 | 202 | elif args.dataset_format in ['Pascal3D', 'ShapeNet']: 203 | categories = sorted(os.listdir(input_dir)) 204 | for cat in tqdm(categories): 205 | cat_in = os.path.join(input_dir, cat) 206 | cat_out = os.path.join(output_dir, cat) 207 | model_files = sorted(os.listdir(cat_in)) 208 | for model_file in tqdm(model_files): 209 | if args.dataset_format == 'Pascal3D': 210 | model_path = os.path.join(cat_in, model_file) 211 | model_name = model_file.split(".")[0] 212 | else: 213 | model_path = os.path.join(cat_in, model_file, 'models', 'model_normalized.obj') 214 | model_name = model_file 215 | render_dir = os.path.join(cat_out, model_name) 216 | if os.path.isdir(render_dir): 217 | continue 218 | render_machine = RenderMachine(model_path, render_dir) 219 | render_machine.render_grid_pose(poses) 220 | else: 221 | sys.exit(0) 222 | 223 | os.system('rm blender_render.log') 224 | -------------------------------------------------------------------------------- /blender_render/render_random_pose.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import bpy 3 | from mathutils import Matrix, Vector 4 | from PIL import Image 5 | from math import radians, sin, cos 6 | import numpy as np 7 | import random 8 | import json 9 | import ipdb 10 | 11 | cur_dir = os.path.dirname(os.path.abspath(__file__)) 12 | sys.path.append(cur_dir) 13 | from image_utils import obtain_obj_region, obtain_obj_center 14 | from render_utils import * 15 | 16 | 17 | # Transform the R and T from numpy array to Matrix 18 | def convert_pose_array_to_matrix(R, T): 19 | mat = Matrix(R.reshape(3, 3)).to_4x4() 20 | mat.col[3][:3] = T 21 | return mat 22 | 23 | 24 | # Setup the camera 25 | def setup_camera(scene, fx=572, fy=574, cx=325, cy=242): 26 | cam = scene.objects['Camera'] 27 | width = scene.render.resolution_x 28 | height = scene.render.resolution_y 29 | cam.data.sensor_height = cam.data.sensor_width * height / width 30 | cam.data.lens = (fx + fy) / 2 * cam.data.sensor_width / width 31 | cam.data.shift_x = (width / 2 - cx) / width 32 | cam.data.shift_y = (cy - height / 2) / width 33 | # change to OpenCV camera coordinate system 34 | cam.matrix_world = Matrix(((1.0, 0.0, 0.0, 0.0), 35 | (0.0, -1.0, 0.0, 0.0), 36 | (0.0, 0.0, -1.0, 0.0), 37 | 
(0.0, 0.0, 0.0, 1.0))) 38 | return cam 39 | 40 | 41 | # Add material to object 42 | def add_color(obj, color=(1., 0., 0.), shadeless=True): 43 | mat = bpy.data.materials.new(name='Material') 44 | mat.use_shadeless = shadeless 45 | mat.diffuse_color = color 46 | if obj.data.materials: 47 | obj.data.materials[0] = mat 48 | else: 49 | obj.data.materials.append(mat) 50 | 51 | 52 | # Add texture map to object 53 | def add_texture_map(obj, texture_img): 54 | mat = bpy.data.materials.new(name='Material') 55 | tex = bpy.data.textures.new('UVMapping', 'IMAGE') 56 | tex.image = bpy.data.images.load(texture_img) 57 | slot = mat.texture_slots.add() 58 | slot.texture = tex 59 | if obj.data.materials: 60 | obj.data.materials[0] = mat 61 | else: 62 | obj.data.materials.append(mat) 63 | 64 | 65 | # Import 3D models from .obj files 66 | def import_models(model_files, use_defalut_texture=False): 67 | models = {} 68 | textures = {} 69 | repeat_count = {} 70 | for i in range(len(model_files)): 71 | models[i] = {} 72 | model_file = model_files[i] 73 | bpy.ops.import_scene.obj(filepath=model_file) 74 | model_name = model_file.split('/')[-1].split('.')[0] 75 | models[i]['model_name'] = model_name 76 | 77 | if model_name not in repeat_count.keys(): 78 | repeat_count[model_name] = 0 79 | else: 80 | repeat_count[model_name] += 1 81 | 82 | models[i]['object_name'] = model_name if repeat_count[model_name] == 0 else '{}.{:03d}'.format(model_name, repeat_count[model_name]) 83 | 84 | if use_defalut_texture: 85 | textures[model_name] = model_file.replace('.obj', '.png') 86 | return models, textures 87 | 88 | 89 | # Create random rotation matrix 90 | def rand_rotation(): 91 | # from http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c 92 | 93 | theta, phi, z = np.random.uniform(size=(3,)) 94 | theta = theta * 2.0 * np.pi # Rotation about the pole (Z). 95 | phi = phi * 2.0 * np.pi # For direction of pole deflection. 96 | z = z * 2.0 # For magnitude of pole deflection. 97 | 98 | # Compute a vector V used for distributing points over the sphere 99 | # via the reflection I - V Transpose(V). This formulation of V 100 | # will guarantee that if x[1] and x[2] are uniformly distributed, 101 | # the reflected points will be uniform on the sphere. Note that V 102 | # has length sqrt(2) to eliminate the 2 in the Householder matrix. 103 | 104 | r = np.sqrt(z) 105 | V = ( 106 | np.sin(phi) * r, 107 | np.cos(phi) * r, 108 | np.sqrt(2.0 - z) 109 | ) 110 | 111 | st = np.sin(theta) 112 | ct = np.cos(theta) 113 | 114 | R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1))) 115 | 116 | # Construct the rotation matrix ( V Transpose(V) - I ) R. 117 | 118 | M = (np.outer(V, V) - np.eye(3)).dot(R) 119 | return M 120 | 121 | 122 | class RenderMachine: 123 | """Creates a python blender render machine. 
124 | 125 | model_files: a list containing all the obj files 126 | out_dir: where to save the render results 127 | table_file: 3D model of the table on which all objects could be placed 128 | hide_table: use the table model only when this arg is False 129 | texture_dir: directory containing the texture map images 130 | bg_dir: directory containing the background images 131 | dim_min: the minimum model dimension in mm 132 | dim_max: the maximum model dimension in mm 133 | grid: the distance between object models on the table 134 | rad: lamp radiance to adjust the lightness 135 | clip_end: rendering range in mm 136 | """ 137 | def __init__(self, 138 | model_files, out_dir, table_file='Platte.obj', hide_table=False, texture_dir=None, bg_dir=None, 139 | dim_min=50, dim_max=150, grid=150, rad=3000, clip_end=2000, 140 | fx=572, fy=574, cx=325, cy=242, height=480, width=640): 141 | # Setting up the environment 142 | remove_obj_lamp_and_mesh(bpy.context) 143 | self.scene = bpy.context.scene 144 | self.objs = bpy.data.objects 145 | self.depthFileOutput = setup_env(self.scene, True, False, height, width, clip_end) 146 | self.camera = setup_camera(self.scene, fx, fy, cx, cy) 147 | self.lamp = make_lamp(rad) 148 | self.rad = rad 149 | self.height, self.width = height, width 150 | self.fx, self.fy, self.cx, self.cy = fx, fy, cx, cy 151 | 152 | # Import table model and align it with camera frame 153 | bpy.ops.import_scene.obj(filepath=table_file) 154 | self.table = bpy.data.objects[table_file.split('.')[0]] 155 | self.offset = [0, -grid, grid, -2 * grid, 2 * grid, -3 * grid, 3 * grid] 156 | self.hide_table = hide_table 157 | 158 | # Import 3D models and register dimension range 159 | model_files = random.choices(model_files, k=30) if len(model_files) > 30 else model_files 160 | self.models, self.textures = import_models(model_files) 161 | self.dim_min, self.dim_max = dim_min, dim_max 162 | 163 | # Read texture maps and the background images 164 | self.texture_dir = texture_dir 165 | self.textures = os.listdir(texture_dir) 166 | self.bg_dir = bg_dir 167 | self.bg_imgs = os.listdir(bg_dir) 168 | 169 | # Output setting 170 | self.out_dir = out_dir 171 | self.scene.render.image_settings.file_format = 'PNG' 172 | self.depthFileOutput.base_path = out_dir 173 | self.depthFileOutput.format.file_format = 'OPEN_EXR' 174 | 175 | # TODO: to modify in order to be complied with T-LESS where multiple objects are present 176 | def render_pose_from_annotation(self, idx, R, T): 177 | self.table.hide_render = True 178 | 179 | # Render object masks 180 | for i in range(len(self.models)): 181 | model = self.models[i]['object_name'] 182 | if model in R: 183 | self.objs[model].hide_render = False 184 | self.objs[model].matrix_world = convert_pose_array_to_matrix(R[model], T[model]) 185 | add_color(self.objs[model], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True) 186 | else: 187 | self.objs[model].hide_render = True 188 | 189 | self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_mask'.format(idx)) 190 | self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx) 191 | render_without_output(use_antialiasing=False) 192 | 193 | # Render textured image and depth map 194 | for i in range(len(self.models)): 195 | model = self.models[i]['object_name'] 196 | if model in R: 197 | add_texture_map(self.objs[model], self.textures[model]) 198 | 199 | self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(idx) 200 | self.scene.render.filepath = os.path.join(self.out_dir, 
'{:04d}_image'.format(idx)) 201 | render_without_output(use_antialiasing=True) 202 | 203 | def render_random_pose(self, annot, start_idx, scene_id, image_id, R, T, ele): 204 | self.table.matrix_world = convert_pose_array_to_matrix( 205 | R, T + np.array([0, 200 * sin(radians(ele)), 200 * cos(radians(ele))]) 206 | ) 207 | self.table.scale = 6, 6, 6 208 | self.table.hide_render = self.hide_table 209 | 210 | # Randomize the lamp energy 211 | self.lamp.data.energy = np.random.uniform(self.rad * 0.5, self.rad * 1.5) / 30 212 | 213 | Rotations, Translations, Scales = {}, {}, {} 214 | # Render object masks 215 | for i in range(len(self.models)): 216 | model = self.models[i]['object_name'] 217 | R_model = rand_rotation() 218 | T_model = T + np.array( 219 | [self.offset[i % 5], sin(radians(ele)) * self.offset[i // 5], -cos(radians(ele)) * self.offset[i // 5]] 220 | ) 221 | self.objs[model].matrix_world = convert_pose_array_to_matrix(R_model, T_model) 222 | add_color(self.objs[model], color=((i + 1) * 0.01, (i + 1) * 0.01, (i + 1) * 0.01), shadeless=True) 223 | scale = np.random.uniform(self.dim_min, self.dim_max) / max(self.objs[model].dimensions) 224 | self.objs[model].scale = scale, scale, scale 225 | Rotations[i], Translations[i], Scales[i] = R_model, T_model, scale 226 | 227 | add_color(self.table, color=(0, 0, 0), shadeless=True) 228 | self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_mask'.format(image_id)) 229 | self.depthFileOutput.file_slots[0].path = '{:04d}_depth_'.format(image_id) 230 | render_without_output(use_antialiasing=False) 231 | 232 | # Save mask as uint8 image 233 | mask = Image.open(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id))).convert('L') 234 | mask.save(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id))) 235 | 236 | # Render textured image and depth map 237 | for i in range(len(self.models)): 238 | model = self.models[i]['object_name'] 239 | add_texture_map(self.objs[model], os.path.join(self.texture_dir, random.choice(self.textures))) 240 | # Generate the sample annotation 241 | sample_frame = {} 242 | sample_frame["scene_id"] = scene_id 243 | sample_frame["image_id"] = image_id 244 | sample_frame["obj_id"] = i 245 | 246 | sample_frame["model_path"] = self.models[i]['model_name'] 247 | sample_frame["scale"] = Scales[i] 248 | sample_frame["cam_R_m2c"] = list(Rotations[i].reshape(-1)) 249 | sample_frame["cam_t_m2c"] = list(Translations[i]) 250 | cx, cy, outside = obtain_obj_center(Translations[i], self.fx, self.fy, self.cx, self.cy, self.height, self.width) 251 | sample_frame["obj_center"] = [cx, cy] 252 | sample_frame["obj_outside"] = outside 253 | bbox, px_visib, occupy_fract = obtain_obj_region(os.path.join(self.out_dir, '{:04d}_mask.png'.format(image_id)), i) 254 | sample_frame["bbox"] = bbox 255 | sample_frame["px_visib"] = px_visib 256 | sample_frame["occupy_fract"] = occupy_fract 257 | annot['{}'.format(start_idx + i)] = sample_frame 258 | 259 | add_texture_map(self.table, os.path.join(self.bg_dir, random.choice(self.bg_imgs))) 260 | self.scene.render.filepath = os.path.join(self.out_dir, '{:04d}_image'.format(image_id)) 261 | render_without_output(use_antialiasing=True) 262 | 263 | 264 | if __name__ == '__main__': 265 | # input and output directory 266 | model_dir = '/media/xiao/newhd/XiaoDatasets/ABC/abc_0000' 267 | out_dir = '/media/xiao/newhd/XiaoDatasets/ABC/synthetic_data_0000' 268 | scene_id = len(os.listdir(out_dir)) 269 | out_dir = os.path.join(out_dir, '{:06d}'.format(scene_id)) 270 | images_per_scene = 100 271 | 
272 | # textures and backgrounds directory 273 | texture_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'textures') 274 | bg_dir = '/media/xiao/newhd/XiaoDatasets/PascalVOC/VOC2012/JPEGImages' 275 | 276 | # TODO: consider mutilple instances of the same shape 277 | model_files = [name for name in os.listdir(model_dir) if os.path.getsize(os.path.join(model_dir, name)) / (2 ** 20) < 10] 278 | model_number = np.random.randint(5, 25) 279 | model_files = random.choices(model_files, k=model_number) 280 | model_files = [os.path.join(model_dir, name) for name in model_files] 281 | 282 | render_machine = RenderMachine(model_files, out_dir, texture_dir=texture_dir, bg_dir=bg_dir, rad=3000) 283 | 284 | # Load table poses from the LINEMOD-OCCLUSION dataset 285 | table_poses = np.load('table_poses.npz') 286 | R = table_poses['R'] 287 | T = table_poses['T'] 288 | Ele = table_poses['Ele'] 289 | idx = np.random.randint(0, R.shape[0], size=(images_per_scene,)) 290 | 291 | # Read in annotation json file 292 | annotation_file = '/media/xiao/newhd/XiaoDatasets/ABC/annotation_0000.json' 293 | if os.path.isfile(annotation_file): 294 | annot = json.load(open(annotation_file)) 295 | start_idx = len(annot) 296 | else: 297 | annot = {} 298 | start_idx = 0 299 | 300 | for i in range(len(idx)): 301 | render_machine.render_random_pose( 302 | annot, start_idx + i * model_number, scene_id, i, R[idx[i], :], T[idx[i], :], Ele[idx[i]]) 303 | 304 | with open(annotation_file, 'w') as f: 305 | json.dump(annot, f, indent=4) 306 | 307 | os.system('rm blender_render.log') 308 | -------------------------------------------------------------------------------- /blender_render/render_utils.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | import bpy 3 | from mathutils import Matrix, Vector 4 | 5 | 6 | # Create directory if not existed 7 | def create_dir(dir): 8 | if not os.path.isdir(dir): 9 | os.makedirs(dir) 10 | 11 | 12 | # Compute the calibration matrix K of camera 13 | def get_calibration_matrix_K_from_blender(camd): 14 | f_in_mm = camd.lens 15 | scene = bpy.context.scene 16 | resolution_x_in_px = scene.render.resolution_x 17 | resolution_y_in_px = scene.render.resolution_y 18 | scale = scene.render.resolution_percentage / 100 19 | sensor_width_in_mm = camd.sensor_width 20 | sensor_height_in_mm = camd.sensor_height 21 | pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y 22 | if (camd.sensor_fit == 'VERTICAL'): 23 | # the sensor height is fixed (sensor fit is horizontal), the sensor width is effectively changed with the pixel aspect ratio 24 | s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio 25 | s_v = resolution_y_in_px * scale / sensor_height_in_mm 26 | else: # 'HORIZONTAL' and 'AUTO' 27 | # the sensor width is fixed (sensor fit is horizontal), 28 | # the sensor height is effectively changed with the pixel aspect ratio 29 | pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y 30 | s_u = resolution_x_in_px * scale / sensor_width_in_mm 31 | s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm 32 | 33 | # Parameters of intrinsic calibration matrix K 34 | alpha_u = f_in_mm * s_u 35 | alpha_v = f_in_mm * s_v 36 | u_0 = resolution_x_in_px*scale / 2 37 | v_0 = resolution_y_in_px*scale / 2 38 | skew = 0 # only use rectangular pixels 39 | 40 | K = Matrix(((alpha_u, skew, u_0), 41 | (0, alpha_v, v_0), 42 | (0, 0, 1))) 43 | return K 44 | 45 | 46 | # Function to 
clean the blender workspace 47 | def remove_obj_lamp_and_mesh(context): 48 | scene = context.scene 49 | objs = bpy.data.objects 50 | meshes = bpy.data.meshes 51 | for obj in objs: 52 | if obj.type == 'MESH' or obj.type == 'LAMP': 53 | scene.objects.unlink(obj) 54 | objs.remove(obj) 55 | for mesh in meshes: 56 | meshes.remove(mesh) 57 | 58 | 59 | # Render the current frame with a redirection of the flow in a log file 60 | def render_without_output(use_antialiasing=True): 61 | # redirect output to log file 62 | logfile = 'blender_render.log' 63 | open(logfile, 'a').close() 64 | old = os.dup(1) 65 | sys.stdout.flush() 66 | os.close(1) 67 | os.open(logfile, os.O_WRONLY) 68 | # Render 69 | bpy.context.scene.render.use_antialiasing = use_antialiasing 70 | bpy.ops.render.render(write_still=True) 71 | # disable output redirection 72 | os.close(1) 73 | os.dup(old) 74 | os.close(old) 75 | 76 | 77 | # Creating a lamp with an appropriate energy 78 | def make_lamp(rad): 79 | # Create new lamp datablock 80 | lamp_data = bpy.data.lamps.new(name="Lamp", type='POINT') 81 | lamp_data.distance = rad * 2.5 82 | lamp_data.energy = rad / 30.0 83 | # Create new object with our lamp datablock 84 | lamp_object = bpy.data.objects.new(name="Lamp", object_data=lamp_data) 85 | # Link lamp object to the scene so it'll appear in this scene 86 | scene = bpy.context.scene 87 | scene.objects.link(lamp_object) 88 | lamp_object.location = (0, 0, 0) 89 | return lamp_object 90 | 91 | 92 | # Setup the environment 93 | def setup_env(scene, depth=False, normal=False, height=480, width=640, clip_end=2000): 94 | scene.render.resolution_x = width 95 | scene.render.resolution_y = height 96 | scene.render.resolution_percentage = 100 97 | scene.render.alpha_mode = 'TRANSPARENT' 98 | bpy.context.scene.camera.data.clip_end = clip_end 99 | 100 | if depth is False: 101 | return True 102 | 103 | elif normal is False: 104 | # Set up rendering of depth map: 105 | scene.use_nodes = True 106 | tree = scene.node_tree 107 | links = tree.links 108 | 109 | # clear default nodes 110 | for n in tree.nodes: 111 | tree.nodes.remove(n) 112 | 113 | # create input render layer node 114 | rl = tree.nodes.new('CompositorNodeRLayers') 115 | 116 | map = tree.nodes.new(type="CompositorNodeMapValue") 117 | links.new(rl.outputs['Z'], map.inputs[0]) 118 | invert = tree.nodes.new(type="CompositorNodeInvert") 119 | links.new(map.outputs[0], invert.inputs[1]) 120 | 121 | # Create a file output node for depth 122 | depthFileOutput = tree.nodes.new(type="CompositorNodeOutputFile") 123 | depthFileOutput.label = 'Depth Output' 124 | links.new(invert.outputs[0], depthFileOutput.inputs[0]) 125 | return depthFileOutput 126 | 127 | else: 128 | # Set up rendering of depth map: 129 | scene.use_nodes = True 130 | tree = scene.node_tree 131 | links = tree.links 132 | 133 | # Add passes for additionally dumping albed and normals. 134 | bpy.context.scene.render.layers["RenderLayer"].use_pass_normal = True 135 | bpy.context.scene.render.layers["RenderLayer"].use_pass_color = True 136 | 137 | # clear default nodes 138 | for n in tree.nodes: 139 | tree.nodes.remove(n) 140 | 141 | # create input render layer node 142 | rl = tree.nodes.new('CompositorNodeRLayers') 143 | map = tree.nodes.new(type="CompositorNodeMapValue") 144 | 145 | # Size is chosen kind of arbitrarily, try out until you're satisfied with 146 | # resulting depth map. 
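# The Map Value node configured below computes (z + offset) * size and clamps the result to
# [min, max], rescaling the raw depth (in Blender units) into a displayable range; the Invert
# node that follows flips the mapped value so nearer surfaces appear brighter in the saved
# depth image.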
147 | map.offset = [0] 148 | map.size = [0.4] 149 | map.use_min = True 150 | map.min = [0] 151 | map.use_max = True 152 | map.max = [255] 153 | links.new(rl.outputs['Z'], map.inputs[0]) 154 | 155 | invert = tree.nodes.new(type="CompositorNodeInvert") 156 | links.new(map.outputs[0], invert.inputs[1]) 157 | 158 | # create a file output node and set the path 159 | depthFileOutput = tree.nodes.new(type="CompositorNodeOutputFile") 160 | depthFileOutput.label = 'Depth Output' 161 | links.new(invert.outputs[0], depthFileOutput.inputs[0]) 162 | 163 | scale_normal = tree.nodes.new(type="CompositorNodeMixRGB") 164 | scale_normal.blend_type = 'MULTIPLY' 165 | # scale_normal.use_alpha = True 166 | scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1) 167 | links.new(rl.outputs['Normal'], scale_normal.inputs[1]) 168 | 169 | bias_normal = tree.nodes.new(type="CompositorNodeMixRGB") 170 | bias_normal.blend_type = 'ADD' 171 | # bias_normal.use_alpha = True 172 | bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0) 173 | links.new(scale_normal.outputs[0], bias_normal.inputs[1]) 174 | 175 | normalFileOutput = tree.nodes.new(type="CompositorNodeOutputFile") 176 | normalFileOutput.label = 'Normal Output' 177 | links.new(bias_normal.outputs[0], normalFileOutput.inputs[0]) 178 | return depthFileOutput, normalFileOutput 179 | 180 | 181 | -------------------------------------------------------------------------------- /blender_render/table_poses.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/table_poses.npz -------------------------------------------------------------------------------- /blender_render/textures/cfabric_01_pbr-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/cfabric_01_pbr-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/cfabric_01_pbr-l-displacement.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/cfabric_01_pbr-l-displacement.jpg -------------------------------------------------------------------------------- /blender_render/textures/cloth_01-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/cloth_01-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/cwool01_pbr-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/cwool01_pbr-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloak01-m-color.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloak01-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloak02-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloak02-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloth_dirty-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloth_dirty-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloth_grey-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloth_grey-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloth_red-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloth_red-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_cloth_roughwool-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_cloth_roughwool-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_leather_bright01-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_leather_bright01-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_leather_bright02-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_leather_bright02-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_leather_dark-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_leather_dark-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_leather_soft-m-bump.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_leather_soft-m-bump.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_leather_soft-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_leather_soft-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_pattern_simple-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_pattern_simple-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_1-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_1-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_10-m-specular.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_10-m-specular.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_2-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_2-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_3-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_3-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_4-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_4-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_5-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_5-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_6-m-color.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_6-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_8-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_8-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_patterned_9-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_patterned_9-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_1-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_1-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_10-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_10-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_11-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_11-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_12-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_12-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_14-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_14-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_15-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_15-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_16-m-color.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_16-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_18-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_18-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_19-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_19-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_2-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_2-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_20-l-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_20-l-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_21-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_21-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_22-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_22-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_27-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_27-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_3-m-color.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_3-m-color.jpg -------------------------------------------------------------------------------- /blender_render/textures/fabric_solid_31-m-roughness.jpg.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_31-m-roughness.jpg.png
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_4-m-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_4-m-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_5-m-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_5-m-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_6-l-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_6-l-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_7-m-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_7-m-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_8-l-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_8-l-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_solid_9-m-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_solid_9-m-color.jpg
--------------------------------------------------------------------------------
/blender_render/textures/fabric_wool_knitted_rough-m-color.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/blender_render/textures/fabric_wool_knitted_rough-m-color.jpg
--------------------------------------------------------------------------------
/create_annotation.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from tqdm import tqdm
4 | 
5 | dataset = 'LINEMOD-Occlusion'
6 | test_dir = '/home/xiao/Datasets/{}/test'.format(dataset)
7 | scenes = os.listdir(test_dir)
8 | scenes.sort()
9 | 
10 | annot = {}
11 | idx = 0
12 | 
13 | for scene_id in tqdm(range(len(scenes))):
14 |     scene_dir = os.path.join(test_dir, scenes[scene_id])
15 |     scene_gt = json.load(open(os.path.join(scene_dir, 'scene_gt.json')))
16 |     scene_gt_info = json.load(open(os.path.join(scene_dir, 'scene_gt_info.json')))
17 |     scene_camera = json.load(open(os.path.join(scene_dir, 'scene_camera.json')))
18 | 
19 |     for image_id in tqdm(range(len(scene_gt))):
20 | 
21 |         # Loop on obj ids of one image
22 |         for n in range(len(scene_gt["{}".format(image_id)])):
23 |             sample_frame = {}
24 |             sample_frame["scene_id"] = int(scenes[scene_id])
25 |             sample_frame["image_id"] = image_id
26 |             sample_frame["instance_id"] = n
27 | 
28 |             # get annotation from scene_gt
29 |             sample_frame["obj_id"] = scene_gt["{}".format(image_id)][n]["obj_id"]
30 |             sample_frame["cam_R_m2c"] = scene_gt["{}".format(image_id)][n]["cam_R_m2c"]
31 |             T = scene_gt["{}".format(image_id)][n]["cam_t_m2c"]  # translation of instance n (not instance 0)
32 |             sample_frame["cam_t_m2c"] = T
33 |             cam_K = scene_camera["{}".format(image_id)]["cam_K"]
34 |             fx, fy, px, py = cam_K[0], cam_K[4], cam_K[2], cam_K[5]
35 |             cx = int(fx * T[0] / T[2] + px)
36 |             cy = int(fy * T[1] / T[2] + py)
37 |             sample_frame["obj_center"] = [cx, cy]
38 | 
39 |             # get annotation from scene_gt_info
40 |             sample_frame["bbox"] = scene_gt_info["{}".format(image_id)][n]["bbox_obj"]
41 |             sample_frame["bbox_visib"] = scene_gt_info["{}".format(image_id)][n]["bbox_visib"]
42 |             sample_frame["px_count_visib"] = scene_gt_info["{}".format(image_id)][n]["px_count_visib"]
43 |             sample_frame["visib_fract"] = scene_gt_info["{}".format(image_id)][n]["visib_fract"]
44 | 
45 |             annot['{}'.format(idx)] = sample_frame
46 |             idx += 1
47 | 
48 | annotation_file = '/home/xiao/Datasets/{}/{}.json'.format(dataset, dataset)
49 | with open(annotation_file, 'w') as f:
50 |     json.dump(annot, f, indent=4)
51 | 
52 | 
53 | 
--------------------------------------------------------------------------------
/data/downsample.mlx:
--------------------------------------------------------------------------------
1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 
--------------------------------------------------------------------------------
/data/meshes.py:
--------------------------------------------------------------------------------
1 | from tqdm import tqdm
2 | import argparse
3 | import os
4 | from os.path import join, isdir
5 | import sys
6 | 
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument('--dataset_dir', type=str, help='dataset directory')
9 | parser.add_argument('--dataset_format', type=str, choices=['BOP', 'Pascal3D', 'ShapeNet'], help='dataset format')
10 | parser.add_argument('--input', type=str, help='subdirectory containing obj files in the dataset directory')
11 | parser.add_argument('--filter', type=str, default='downsample.mlx', help='downsample filter used in meshlab')
12 | args = parser.parse_args()
13 | 
14 | input_dir = join(args.dataset_dir, args.input)
15 | output_dir = join(args.dataset_dir, 'mesh')
16 | 
17 | if args.dataset_format == 'BOP':
18 |     model_files = sorted(os.listdir(input_dir))
19 |     for model_file in tqdm(model_files):
20 |         model_path = join(input_dir, model_file)
21 |         example_dir = join(output_dir, model_file.split(".")[0])
22 |         mesh_path = join(example_dir, 'compressed.obj')
23 |         os.system('meshlabserver -i {} -o {} -s {}'.format(model_path, mesh_path, args.filter))
24 | 
25 | elif args.dataset_format in ['Pascal3D', 'ShapeNet']:
26 |     categories = sorted(os.listdir(input_dir))
27 |     for cat in tqdm(categories):
28 |         cat_in = join(input_dir, cat)
29 |         cat_out = join(output_dir, cat)
30 |         model_files = sorted(os.listdir(cat_in))
31 |         for model_file in tqdm(model_files):
32 |             if args.dataset_format == 'Pascal3D':
33 |                 model_path = join(cat_in, model_file)
34 |                 model_name = model_file.split(".")[0]
35 |             else:
36 |                 model_path = join(cat_in, model_file, 'models', 'model_normalized.obj')
37 |                 model_name = model_file
38 | 
39 |             example_dir = os.path.join(cat_out, model_name)
40 |             if isdir(example_dir) and len(os.listdir(example_dir)) == 1:
41 |                 continue
42 |             if not isdir(example_dir):
43 |                 os.makedirs(example_dir)
44 | 
45 |             mesh_path = join(example_dir, 'compressed.obj')
46 |             os.system('meshlabserver -i {} -o {} -s {}'.format(model_path, mesh_path, args.filter))
47 | else:
48 |     sys.exit(0)
49 | 
--------------------------------------------------------------------------------
/data/multiviews.py:
--------------------------------------------------------------------------------
1 | from tqdm import tqdm
2 | import argparse
3 | import os
4 | import sys
5 | 
6 | sys.path.append('..')
7 | from blender_render.render_grid import dodecahedron_vertex_coord, semi_sphere_coord, RenderMachine
8 | 
9 | parser = argparse.ArgumentParser()
10 | parser.add_argument('--dataset_dir', type=str, help='dataset directory')
11 | parser.add_argument('--dataset_format', type=str, choices=['BOP', 'Pascal3D', 'ShapeNet'], help='dataset format')
12 | parser.add_argument('--input', type=str, help='subdirectory containing obj files in the dataset directory')
13 | parser.add_argument('--rendering', type=str, choices=['nocs', 'nontextured'], default='nocs',
14 |                     help='rendering format: NOCS (normalized object coordinate) maps or non-textured images')
15 | parser.add_argument('--views', type=str, choices=['dodecahedron', 'semisphere'], default='dodecahedron',
16 |                     help='views under which the object will be rendered')
17 | args = parser.parse_args()
18 | 
19 | input_dir = os.path.join(args.dataset_dir, args.input)
20 | output_dir = os.path.join(args.dataset_dir, args.views)
21 | 
22 | if args.views == 'dodecahedron':
23 |     views = dodecahedron_vertex_coord
24 | elif args.views == 'semisphere':
25 |     views = semi_sphere_coord
26 | else:
27 |     sys.exit(0)
28 | 
29 | if args.dataset_format == 'BOP':
30 |     model_files = sorted(os.listdir(input_dir))
31 |     for model_file in tqdm(model_files):
32 |         model_path = os.path.join(input_dir, model_file)
33 |         render_dir = os.path.join(output_dir, model_file.split(".")[0])
34 |         if os.path.isdir(render_dir):
35 |             continue
36 |         render_machine = RenderMachine(model_path, render_dir, rendering=args.rendering)
37 |         render_machine.render_grid_pose(views)
38 | 
39 | elif args.dataset_format in ['Pascal3D', 'ShapeNet']:
40 |     categories = sorted(os.listdir(input_dir))
41 |     for cat in tqdm(categories):
42 |         cat_in = os.path.join(input_dir, cat)
43 |         cat_out = os.path.join(output_dir, cat)
44 |         model_files = sorted(os.listdir(cat_in))
45 |         for model_file in tqdm(model_files):
46 |             if args.dataset_format == 'Pascal3D':
47 |                 model_path = os.path.join(cat_in, model_file)
48 |                 model_name = model_file.split(".")[0]
49 |             else:
50 |                 model_path = os.path.join(cat_in, model_file, 'models', 'model_normalized.obj')
51 |                 model_name = model_file
52 |             render_dir = os.path.join(cat_out, model_name)
53 |             if os.path.isdir(render_dir):
54 |                 continue
55 |             render_machine = RenderMachine(model_path, render_dir, rendering=args.rendering)
56 |             render_machine.render_grid_pose(views)
57 | else:
58 |     sys.exit(0)
59 | 
60 | os.system('rm blender_render.log')
61 | 
--------------------------------------------------------------------------------
/data/pointclouds.py:
--------------------------------------------------------------------------------
1 | from tqdm import tqdm
2 | import argparse
3 | import os
4 | from os.path import join, isdir, basename, dirname
5 | import shutil
6 | import open3d as o3d
7 | import numpy as np
8 | import sys
9 | 
10 | def sample_point_cloud_from_obj(virtualscanner, obj, out):
11 |     """
12 |     :param virtualscanner: executable path of virtual scanner
13 |     :param obj: obj file path
14 |     :param out: output directory
15 |     :return: destination path to save the generated point cloud
16 |     """
17 |     command = '{} {} 10'.format(virtualscanner, obj)
18 |     os.system(command)
19 |     ply = obj.replace('.obj', '.ply')
20 |     ply_dest = join(out, 'sampled.ply')
21 |     shutil.move(ply, ply_dest)
22 |     return ply_dest
23 | 
24 | 
25 | def downsample_pointcloud(pc_path, ratio):
26 |     pcd = o3d.io.read_point_cloud(pc_path)
27 |     points = np.asarray(pcd.points)
28 |     size = np.max(points) - np.min(points)  # rough object extent
29 |     downpcd = o3d.geometry.voxel_down_sample(pcd, voxel_size=size*ratio)  # legacy Open3D API; newer releases use pcd.voxel_down_sample()
30 |     o3d.io.write_point_cloud(join(dirname(pc_path), 'compressed.ply'), downpcd, True)
31 | 
32 | 
33 | parser = argparse.ArgumentParser()
34 | parser.add_argument('--dataset_dir', type=str, help='dataset directory')
35 | parser.add_argument('--dataset_format', type=str, choices=['BOP', 'Pascal3D', 'ShapeNet'], help='dataset format')
36 | parser.add_argument('--input', type=str, help='subdirectory containing obj files in the dataset directory')
37 | parser.add_argument('--virtualscanner', type=str, help='executable path of virtual scanner')
38 | parser.add_argument('--downsample', type=float, default=0.01,
39 |                     help='voxel size is ratio * object size to downsample the pointcloud')
40 | args = parser.parse_args()
41 | 
42 | input_dir = join(args.dataset_dir, args.input)
43 | output_dir = join(args.dataset_dir, 'pointcloud')
44 | 
45 | if args.dataset_format == 'BOP':
46 |     model_files = sorted(os.listdir(input_dir))
47 |     for model_file in tqdm(model_files):
48 |         model_path = join(input_dir, model_file)
49 |         example_dir = join(output_dir, model_file.split(".")[0])
50 |         ply_path = sample_point_cloud_from_obj(args.virtualscanner, model_path, example_dir)
51 |         downsample_pointcloud(ply_path, args.downsample)
52 | 
53 | elif args.dataset_format in ['Pascal3D', 'ShapeNet']:
54 |     categories = sorted(os.listdir(input_dir))
55 |     for cat in tqdm(categories):
56 |         cat_in = join(input_dir, cat)
57 |         cat_out = join(output_dir, cat)
58 |         model_files = sorted(os.listdir(cat_in))
59 |         for model_file in tqdm(model_files):
60 |             if args.dataset_format == 'Pascal3D':
61 |                 model_path = join(cat_in, model_file)
62 |                 model_name = model_file.split(".")[0]
63 |             else:
64 |                 model_path = join(cat_in, model_file, 'models', 'model_normalized.obj')
65 |                 model_name = model_file
66 | 
67 |             example_dir = join(cat_out, model_name)
68 |             if os.path.isdir(example_dir) and len(os.listdir(example_dir)) == 2:
69 |                 continue
70 |             if not os.path.isdir(example_dir):
71 |                 os.makedirs(example_dir)
72 | 
73 |             ply_path = sample_point_cloud_from_obj(args.virtualscanner, model_path, example_dir)
74 |             downsample_pointcloud(ply_path, args.downsample)
75 | else:
76 |     sys.exit(0)
77 | 
--------------------------------------------------------------------------------
/img/ApolloCar3D.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/ApolloCar3D.png
--------------------------------------------------------------------------------
/img/Doumanoglou.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/Doumanoglou.png -------------------------------------------------------------------------------- /img/HomebrewedDB.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/HomebrewedDB.png -------------------------------------------------------------------------------- /img/KITTI.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/KITTI.png -------------------------------------------------------------------------------- /img/LINEMOD-O.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/LINEMOD-O.jpg -------------------------------------------------------------------------------- /img/LINEMOD.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/LINEMOD.jpg -------------------------------------------------------------------------------- /img/LINEMOD.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/LINEMOD.png -------------------------------------------------------------------------------- /img/ObjectNet3D.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/ObjectNet3D.png -------------------------------------------------------------------------------- /img/Pascal3D.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/Pascal3D.png -------------------------------------------------------------------------------- /img/Pix3D.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/Pix3D.png -------------------------------------------------------------------------------- /img/ScanNet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/ScanNet.png -------------------------------------------------------------------------------- /img/T-LESS.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/T-LESS.png -------------------------------------------------------------------------------- /img/Tejani.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/Tejani.png 
--------------------------------------------------------------------------------
/img/YCB-Video.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hz-ants/ObjectPoseEstimationDatasets/2217b4f8587634fc845f68049928eebd1876ee02/img/YCB-Video.png
--------------------------------------------------------------------------------
/ply2obj.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tqdm import tqdm
3 | 
4 | dataset_dir = '/home/xiao/Datasets/T-LESS'
5 | 
6 | # directory containing the original ply files
7 | ply_dir = os.path.join(dataset_dir, "models_cad")
8 | 
9 | # where to save the converted obj files
10 | obj_dir = os.path.join(dataset_dir, "models_obj")
11 | if not os.path.isdir(obj_dir):
12 |     os.mkdir(obj_dir)
13 | 
14 | plys = [name for name in os.listdir(ply_dir) if name.endswith(".ply")]
15 | # '[-6:]' keeps only the trailing 'NN.obj' of each converted name (e.g. 'obj_01.ply' -> '01.obj')
16 | for ply in tqdm(plys):
17 |     os.system("meshlabserver -i %s -o %s" % (os.path.join(ply_dir, ply), os.path.join(obj_dir, ply.replace(".ply", ".obj")[-6:])))
--------------------------------------------------------------------------------
/retrieve_files.py:
--------------------------------------------------------------------------------
1 | """
2 | A simple script used to retrieve .obj files from the ABC dataset
3 | """
4 | 
5 | import os
6 | import shutil
7 | from tqdm import tqdm
8 | 
9 | cur_dir = os.path.dirname(os.path.abspath(__file__))
10 | input_dir = os.path.join(cur_dir, 'retrieve_FILES')
11 | output_dir = os.path.join(cur_dir, 'abc_0000')  # assumed to already exist
12 | 
13 | subdirs = os.listdir(input_dir)
14 | 
15 | for subdir in tqdm(subdirs):
16 |     if len(os.listdir(os.path.join(input_dir, subdir))) == 0:
17 |         continue
18 |     file_name = os.listdir(os.path.join(input_dir, subdir))[0]
19 |     ori_file_path = os.path.join(input_dir, subdir, file_name)
20 |     new_file_path = os.path.join(output_dir, file_name)
21 |     shutil.move(ori_file_path, new_file_path)
22 | 
--------------------------------------------------------------------------------
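
Example invocations of the data-preparation scripts above (an illustrative sketch only: the dataset location /path/to/T-LESS and the virtual_scanner binary path are placeholders to adapt, and meshlabserver plus Blender must be installed separately). ply2obj.py is run from the repository root after editing its hard-coded dataset_dir; the scripts under data/ appear to assume they are launched from inside data/, since meshes.py defaults to the relative downsample.mlx filter and multiviews.py imports the blender_render package via sys.path.append('..'):

    python ply2obj.py
    cd data
    python meshes.py --dataset_dir /path/to/T-LESS --dataset_format BOP --input models_obj
    python pointclouds.py --dataset_dir /path/to/T-LESS --dataset_format BOP --input models_obj --virtualscanner /path/to/virtual_scanner
    python multiviews.py --dataset_dir /path/to/T-LESS --dataset_format BOP --input models_obj --rendering nocs --views dodecahedron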