├── .gitignore
├── HorizonNet
│   ├── LICENSE
│   ├── README.md
│   ├── __init__.py
│   ├── convert_dataset.py
│   ├── dataset.py
│   ├── eval_cuboid.py
│   ├── eval_general.py
│   ├── inference.py
│   ├── layout_viewer.py
│   ├── misc
│   │   ├── __init__.py
│   │   ├── gen_txt_structured3d.py
│   │   ├── pano_lsd_align.py
│   │   ├── panostretch.py
│   │   ├── post_proc.py
│   │   ├── structured3d_extract_zip.py
│   │   ├── structured3d_prepare_dataset.py
│   │   └── utils.py
│   ├── model.py
│   ├── panotools
│   │   ├── __init__.py
│   │   ├── bbox.py
│   │   ├── house.py
│   │   ├── panorama.py
│   │   ├── tools.py
│   │   ├── tree.py
│   │   └── visualize.py
│   ├── preprocess.py
│   └── train.py
├── JigsawAnnotator
│   ├── .gitignore
│   ├── MainWindow.py
│   ├── PanoAnnotator
│   │   ├── .gitignore
│   │   ├── .idea
│   │   │   ├── PanoAnnotator.iml
│   │   │   ├── misc.xml
│   │   │   ├── modules.xml
│   │   │   └── workspace.xml
│   │   ├── PanoAnnotator.py
│   │   ├── README.md
│   │   ├── configs
│   │   │   ├── Params.py
│   │   │   ├── User.py
│   │   │   └── __init__.py
│   │   ├── data
│   │   │   ├── Annotation.py
│   │   │   ├── FloorPlane.py
│   │   │   ├── GeoEdge.py
│   │   │   ├── GeoPoint.py
│   │   │   ├── Object2D.py
│   │   │   ├── Resource.py
│   │   │   ├── Scene.py
│   │   │   ├── WallPlane.py
│   │   │   └── __init__.py
│   │   ├── estimator
│   │   │   ├── __init__.py
│   │   │   ├── depth
│   │   │   │   ├── DepthPred.py
│   │   │   │   ├── models
│   │   │   │   │   ├── __init__.py
│   │   │   │   │   ├── fcrn.py
│   │   │   │   │   └── network.py
│   │   │   │   └── utils
│   │   │   │       ├── __init__.py
│   │   │   │       ├── evaluation.py
│   │   │   │       └── visualization.py
│   │   │   └── push
│   │   │       ├── PushPred.py
│   │   │       └── PushPredLite.py
│   │   ├── figs
│   │   │   ├── outputmaps.jpg
│   │   │   └── teasor.jpg
│   │   ├── qdarkstyle
│   │   │   ├── .gitignore
│   │   │   ├── __init__.py
│   │   │   ├── pyqt5_style_rc.py
│   │   │   ├── pyqt_style_rc.py
│   │   │   ├── pyqtgraph_style_rc.py
│   │   │   ├── pyside_style_rc.py
│   │   │   ├── qtpy_style_rc.py
│   │   │   ├── rc
│   │   │   │   ├── Hmovetoolbar.png
│   │   │   │   ├── Hsepartoolbar.png
│   │   │   │   ├── Vmovetoolbar.png
│   │   │   │   ├── Vsepartoolbar.png
│   │   │   │   ├── branch_closed-on.png
│   │   │   │   ├── branch_closed.png
│   │   │   │   ├── branch_open-on.png
│   │   │   │   ├── branch_open.png
│   │   │   │   ├── checkbox_checked.png
│   │   │   │   ├── checkbox_checked_disabled.png
│   │   │   │   ├── checkbox_checked_focus.png
│   │   │   │   ├── checkbox_indeterminate.png
│   │   │   │   ├── checkbox_indeterminate_disabled.png
│   │   │   │   ├── checkbox_indeterminate_focus.png
│   │   │   │   ├── checkbox_unchecked.png
│   │   │   │   ├── checkbox_unchecked_disabled.png
│   │   │   │   ├── checkbox_unchecked_focus.png
│   │   │   │   ├── close-hover.png
│   │   │   │   ├── close-pressed.png
│   │   │   │   ├── close.png
│   │   │   │   ├── down_arrow.png
│   │   │   │   ├── down_arrow_disabled.png
│   │   │   │   ├── left_arrow.png
│   │   │   │   ├── left_arrow_disabled.png
│   │   │   │   ├── radio_checked.png
│   │   │   │   ├── radio_checked_disabled.png
│   │   │   │   ├── radio_checked_focus.png
│   │   │   │   ├── radio_unchecked.png
│   │   │   │   ├── radio_unchecked_disabled.png
│   │   │   │   ├── radio_unchecked_focus.png
│   │   │   │   ├── right_arrow.png
│   │   │   │   ├── right_arrow_disabled.png
│   │   │   │   ├── sizegrip.png
│   │   │   │   ├── stylesheet-branch-end.png
│   │   │   │   ├── stylesheet-branch-more.png
│   │   │   │   ├── stylesheet-vline.png
│   │   │   │   ├── transparent.png
│   │   │   │   ├── undock.png
│   │   │   │   ├── up_arrow.png
│   │   │   │   └── up_arrow_disabled.png
│   │   │   ├── style.qrc
│   │   │   └── style.qss
│   │   ├── scripts
│   │   │   ├── annotator_env.yml
│   │   │   └── json2maps.py
│   │   ├── utils
│   │   │   ├── GeometryTool.py
│   │   │   ├── IOTool.py
│   │   │   ├── ImageTool.py
│   │   │   ├── LayoutTool.py
│   │   │   ├── PanoTool.py
│   │   │   ├── ProgressTool.py
│   │   │   ├── TimeTool.py
│   │   │   └── __init__.py
│   │   └── views
│   │       ├── FPPanoView.py
│   │       ├── FPResultView.py
│   │       ├── LabelListView.py
│   │       ├── MainWindowUi.py
│   │       ├── MonoView.py
│   │       ├── PanoView.py
│   │       ├── ResultView.py
│   │       └── __init__.py
│   ├── panotools
│   │   ├── __init__.py
│   │   ├── bbox.py
│   │   ├── house.py
│   │   ├── panorama.py
│   │   ├── tools.py
│   │   ├── tree.py
│   │   └── visualize.py
│   ├── qdarkstyle
│   │   ├── .gitignore
│   │   ├── __init__.py
│   │   ├── pyqt5_style_rc.py
│   │   ├── pyqt_style_rc.py
│   │   ├── pyqtgraph_style_rc.py
│   │   ├── pyside_style_rc.py
│   │   ├── qtpy_style_rc.py
│   │   ├── rc
│   │   │   ├── Hmovetoolbar.png
│   │   │   ├── Hsepartoolbar.png
│   │   │   ├── Vmovetoolbar.png
│   │   │   ├── Vsepartoolbar.png
│   │   │   ├── branch_closed-on.png
│   │   │   ├── branch_closed.png
│   │   │   ├── branch_open-on.png
│   │   │   ├── branch_open.png
│   │   │   ├── checkbox_checked.png
│   │   │   ├── checkbox_checked_disabled.png
│   │   │   ├── checkbox_checked_focus.png
│   │   │   ├── checkbox_indeterminate.png
│   │   │   ├── checkbox_indeterminate_disabled.png
│   │   │   ├── checkbox_indeterminate_focus.png
│   │   │   ├── checkbox_unchecked.png
│   │   │   ├── checkbox_unchecked_disabled.png
│   │   │   ├── checkbox_unchecked_focus.png
│   │   │   ├── close-hover.png
│   │   │   ├── close-pressed.png
│   │   │   ├── close.png
│   │   │   ├── down_arrow.png
│   │   │   ├── down_arrow_disabled.png
│   │   │   ├── left_arrow.png
│   │   │   ├── left_arrow_disabled.png
│   │   │   ├── radio_checked.png
│   │   │   ├── radio_checked_disabled.png
│   │   │   ├── radio_checked_focus.png
│   │   │   ├── radio_unchecked.png
│   │   │   ├── radio_unchecked_disabled.png
│   │   │   ├── radio_unchecked_focus.png
│   │   │   ├── right_arrow.png
│   │   │   ├── right_arrow_disabled.png
│   │   │   ├── sizegrip.png
│   │   │   ├── stylesheet-branch-end.png
│   │   │   ├── stylesheet-branch-more.png
│   │   │   ├── stylesheet-vline.png
│   │   │   ├── transparent.png
│   │   │   ├── undock.png
│   │   │   ├── up_arrow.png
│   │   │   └── up_arrow_disabled.png
│   │   ├── style.qrc
│   │   └── style.qss
│   ├── room_type_annotator.py
│   └── utils
│       ├── fileListWidget.py
│       ├── flagListWidget.py
│       ├── floorPlanWidget.py
│       ├── house.py
│       ├── imageListWidget.py
│       ├── layout_loader.py
│       ├── roomViewWidget.py
│       ├── room_type_annotator.py
│       └── typeListWidget.py
├── README.md
├── detection
│   ├── __init__.py
│   ├── load_data.py
│   ├── panotools
│   │   ├── __init__.py
│   │   ├── bbox.py
│   │   ├── panorama.py
│   │   └── tools.py
│   ├── readme.md
│   └── test.py
├── figs
│   └── teaser.jpg
├── parser.py
├── requirements.txt
├── run.sh
├── src
│   ├── __init__.py
│   ├── loaders
│   │   ├── __init__.py
│   │   ├── main_loader.py
│   │   └── room_type_classification.py
│   ├── models
│   │   ├── __init__.py
│   │   ├── convmpn.py
│   │   ├── model.py
│   │   └── unet.py
│   ├── panotools
│   │   ├── __init__.py
│   │   ├── bbox.py
│   │   ├── house.py
│   │   ├── panorama.py
│   │   ├── tools.py
│   │   ├── tree.py
│   │   └── visualize.py
│   ├── requirements.txt
│   ├── tests
│   │   ├── predict_arrangements.py
│   │   └── room_type_classification.py
│   └── utils
│       ├── __init__.py
│       └── summary_writer.py
├── test.txt
└── train.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/HorizonNet/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Cheng Sun
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/HorizonNet/README.md:
--------------------------------------------------------------------------------
1 | # HorizonNet
2 |
3 | This is a modified version of "[
4 | HorizonNet: Learning Room Layout with 1D Representation and Pano Stretch Data Augmentation](https://arxiv.org/abs/1901.03861)" ([project page](https://sunset1995.github.io/HorizonNet/)), which is used for our work. For more information, original code, and the paper, please visit the project webpage.
5 |
6 |
7 |
--------------------------------------------------------------------------------
/HorizonNet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/HorizonNet/__init__.py
--------------------------------------------------------------------------------
/HorizonNet/convert_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | from shutil import copy2
4 | from glob import glob
5 | from panotools.panorama import Panorama
6 |
7 | if __name__ == '__main__':
8 | folder_list = glob('clean_data/*')
9 | folder_list.sort()
10 | imgpath = []
11 | for folder in folder_list:
12 | file_list = glob('{}/aligned_*.json'.format(folder))
13 | for f in file_list:
14 | imgpath.append(f)
15 |
16 | for f in imgpath[:-20]:
17 | house_name = f.split('/')[1]
18 | pano_name = f.split('/')[2][8:-5]
19 | copy2(f.replace('json','png'),'img/{}_{}.png'.format(house_name,pano_name))
20 | pano = Panorama('{}/{}'.format('clean_data',house_name), 'aligned_{}'.format(pano_name))
21 | with open('label_cor/{}_{}.txt'.format(house_name,pano_name), 'w') as fc:
22 | points = pano.get_layout_points()
23 | for p in points:
24 | p = [int(x) for x in p]
25 | fc.write('{} {}\n'.format(p[0], p[1]))
26 | for f in imgpath[-20:]:
27 | house_name = f.split('/')[1]
28 | pano_name = f.split('/')[2][8:-5]
29 | copy2(f.replace('json','png'),'val/img/{}_{}.png'.format(house_name,pano_name))
30 | pano = Panorama('{}/{}'.format('clean_data',house_name), 'aligned_{}'.format(pano_name))
31 | with open('val/label_cor/{}_{}.txt'.format(house_name,pano_name), 'w') as fc:
32 | points = pano.get_layout_points()
33 | for p in points:
34 | p = [int(x) for x in p]
35 | fc.write('{} {}\n'.format(p[0], p[1]))
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/HorizonNet/layout_viewer.py:
--------------------------------------------------------------------------------
1 | import json
2 | import open3d
3 | import numpy as np
4 | from PIL import Image
5 | from tqdm import tqdm, trange
6 | from scipy.ndimage import map_coordinates
7 |
8 | from misc.post_proc import np_coor2xy, np_coorx2u, np_coory2v
9 | from misc.panostretch import pano_connect_points
10 | from eval_general import layout_2_depth
11 |
12 |
13 | if __name__ == '__main__':
14 |
15 | import argparse
16 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
17 | parser.add_argument('--img', required=True,
18 | help='Image texture in equirectangular format')
19 | parser.add_argument('--layout', required=True,
20 | help='Txt file containing layout corners (cor_id)')
21 | parser.add_argument('--scale', default=1, type=float,
22 | help='Scale texture for visualization')
23 | parser.add_argument('--ignore_floor', action='store_true',
24 | help='Skip rendering floor')
25 | parser.add_argument('--ignore_ceiling', action='store_true',
26 | help='Skip rendering ceiling')
27 | parser.add_argument('--ignore_wall', action='store_true',
28 | help='Skip rendering wall')
29 | parser.add_argument('--ignore_wireframe', action='store_true',
30 | help='Skip rendering wireframe')
31 | args = parser.parse_args()
32 |
33 | # Reading source (texture img, cor_id txt)
34 | equirect_texture = Image.open(args.img)
35 | if args.scale != 1:
36 | W, H = equirect_texture.size
37 | W = int(W * args.scale)
38 | H = int(H * args.scale)
39 | equirect_texture = equirect_texture.resize((W, H))
40 | equirect_texture = np.array(equirect_texture) / 255.0
41 | H, W = equirect_texture.shape[:2]
42 | with open(args.layout) as f:
43 | inferenced_result = json.load(f)
44 | cor_id = np.array(inferenced_result['uv'], np.float32)
45 | cor_id[:, 0] *= W
46 | cor_id[:, 1] *= H
47 |
48 | # Show wireframe
49 | if not args.ignore_wireframe:
50 | # Convert cor_id to 3d xyz
51 | N = len(cor_id) // 2
52 | floor_z = -1.6
53 | floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
54 | c = np.sqrt((floor_xy**2).sum(1))
55 | v = np_coory2v(cor_id[0::2, 1], H)
56 | ceil_z = (c * np.tan(v)).mean()
57 |
58 | # Prepare wireframe in open3d
59 | assert N == len(floor_xy)
60 | wf_points = [[x, y, floor_z] for x, y in floor_xy] +\
61 | [[x, y, ceil_z] for x, y in floor_xy]
62 | wf_lines = [[i, (i+1)%N] for i in range(N)] +\
63 | [[i+N, (i+1)%N+N] for i in range(N)] +\
64 | [[i, i+N] for i in range(N)]
65 | wf_colors = [[1, 0, 0] for i in range(len(wf_lines))]
66 | wf_line_set = open3d.geometry.LineSet()
67 | wf_line_set.points = open3d.utility.Vector3dVector(wf_points)
68 | wf_line_set.lines = open3d.utility.Vector2iVector(wf_lines)
69 | wf_line_set.colors = open3d.utility.Vector3dVector(wf_colors)
70 |
71 | # Convert corners to layout
72 | depth, floor_mask, ceil_mask, wall_mask = layout_2_depth(cor_id, H, W, return_mask=True)
73 | coorx, coory = np.meshgrid(np.arange(W), np.arange(H))
74 | us = np_coorx2u(coorx, W)
75 | vs = np_coory2v(coory, H)
76 | zs = depth * np.sin(vs)
77 | cs = depth * np.cos(vs)
78 | xs = cs * np.sin(us)
79 | ys = -cs * np.cos(us)
80 |
81 | # Prepare points cloud
82 | all_xyz = np.stack([xs, ys, zs], -1).reshape(-1, 3)
83 | all_rgb = equirect_texture.reshape(-1, 3)
84 | if args.ignore_ceiling:
85 | mask = (~ceil_mask).reshape(-1)
86 | all_xyz = all_xyz[mask]
87 | all_rgb = all_rgb[mask]
88 | if args.ignore_floor:
89 | mask = (~floor_mask).reshape(-1)
90 | all_xyz = all_xyz[mask]
91 | all_rgb = all_rgb[mask]
92 | if args.ignore_wall:
93 | mask = (~wall_mask).reshape(-1)
94 | all_xyz = all_xyz[mask]
95 | all_rgb = all_rgb[mask]
96 |
97 | # Launch point cloud viewer
98 | pcd = open3d.geometry.PointCloud()
99 | pcd.points = open3d.utility.Vector3dVector(all_xyz)
100 | pcd.colors = open3d.utility.Vector3dVector(all_rgb)
101 |
102 | # Visualize result
103 | tobe_visualize = [pcd]
104 | if not args.ignore_wireframe:
105 | tobe_visualize.append(wf_line_set)
106 | # open3d.visualization.draw_geometries(tobe_visualize)
107 | open3d.io.write_point_cloud('testpcd.ply', pcd)
108 |
--------------------------------------------------------------------------------
/HorizonNet/misc/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/HorizonNet/misc/__init__.py
--------------------------------------------------------------------------------
/HorizonNet/misc/gen_txt_structured3d.py:
--------------------------------------------------------------------------------
1 | '''
2 | Help generate txt for train.py
3 | Please contact https://github.com/bertjiazheng/Structured3D for dataset.
4 | '''
5 |
6 | import os
7 | import glob
8 | import argparse
9 |
10 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
11 | parser.add_argument('--root', required=True,
12 | help='path to the dataset directory')
13 | parser.add_argument('--train_txt', required=True,
14 | help='path to save txt for train')
15 | parser.add_argument('--valid_txt', required=True,
16 | help='path to save txt for valid')
17 | parser.add_argument('--test_txt', required=True,
18 | help='path to save txt for test')
19 | args = parser.parse_args()
20 |
21 | train_scene = ['scene_%05d' % i for i in range(0, 3000)]
22 | valid_scene = ['scene_%05d' % i for i in range(3000, 3250)]
23 | test_scene = ['scene_%05d' % i for i in range(3250, 3500)]
24 |
25 | # Simple check: all directories exist
26 | for path in train_scene + valid_scene + test_scene:
27 | assert os.path.isdir(os.path.join(args.root, path)), '%s not found' % path
28 |
29 | def gen_pairs(scene_id_lst):
30 | pairs = []
31 | for scene_id in scene_id_lst:
32 | for fname in os.listdir(os.path.join(args.root, scene_id, 'rgb')):
33 | room_id = os.path.split(fname)[1].split('_')[0]
34 |
35 | img_k = os.path.join(os.path.join(scene_id, 'rgb', fname))
36 | layout_k = os.path.join(os.path.join(scene_id, 'layout', room_id + '_layout.txt'))
37 | assert os.path.isfile(os.path.join(args.root, img_k))
38 | assert os.path.isfile(os.path.join(args.root, layout_k))
39 | pairs.append((img_k, layout_k))
40 | return pairs
41 |
42 | with open(args.train_txt, 'w') as f:
43 | pairs = gen_pairs(train_scene)
44 | f.write('\n'.join([' '.join(p) for p in pairs]))
45 |
46 | with open(args.valid_txt, 'w') as f:
47 | pairs = gen_pairs(valid_scene)
48 | f.write('\n'.join([' '.join(p) for p in pairs]))
49 |
50 | with open(args.test_txt, 'w') as f:
51 | pairs = gen_pairs(test_scene)
52 | f.write('\n'.join([' '.join(p) for p in pairs]))
53 |
--------------------------------------------------------------------------------
/HorizonNet/misc/structured3d_extract_zip.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from zipfile import ZipFile
4 | from tqdm import tqdm
5 | import imageio
6 |
7 | '''
8 | Zipfile format assumption:
9 | Structured3D
10 | -- [scene_xxxxx]
11 | -- other something
12 | -- 2D_rendering
13 | -- [image_id]
14 | -- panorama
15 | -- camera_xyz.txt
16 | -- layout.txt
17 | -- [empty|simple|full]
18 | -- depth.png
19 | -- rgb_rawlight.png
20 | -- rgb_coldlight.png
21 | -- rgb_warmlight.png
22 | -- other something
23 |
24 | Output format
25 | outdir
26 | -- [scene_xxxxx]
27 | -- img
28 | -- layout
29 | '''
30 |
31 | parser = argparse.ArgumentParser()
32 | parser.add_argument('--zippath', required=True)
33 | parser.add_argument('--style', default='full')
34 | parser.add_argument('--outdir', default='structured3d')
35 | args = parser.parse_args()
36 |
37 | path_format = 'Structured3D/%s/2D_rendering/%s/panorama/%s'
38 |
39 | with ZipFile(args.zippath) as zipf:
40 | id_set = set()
41 | for path in zipf.namelist():
42 | assert path.startswith('Structured3D')
43 | if path.endswith('camera_xyz.txt'):
44 | path_lst = path.split('/')
45 | scene_id = path_lst[1]
46 | image_id = path_lst[3]
47 | id_set.add((scene_id, image_id))
48 |
49 | for scene_id, image_id in tqdm(id_set):
50 | path_img = path_format % (scene_id, image_id, '%s/rgb_rawlight.png' % args.style)
51 | path_layout = path_format % (scene_id, image_id, 'layout.txt')
52 |
53 | os.makedirs(os.path.join(args.outdir, scene_id, 'rgb'), exist_ok=True)
54 | os.makedirs(os.path.join(args.outdir, scene_id, 'layout'), exist_ok=True)
55 |
56 | with zipf.open(path_img) as f:
57 | rgb = imageio.imread(f)[..., :3]
58 | imageio.imwrite(os.path.join(args.outdir, scene_id, 'rgb', image_id + '_rgb_rawlight.png'), rgb)
59 | with zipf.open(path_layout) as f:
60 | with open(os.path.join(args.outdir, scene_id, 'layout', image_id + '_layout.txt'), 'w') as fo:
61 | fo.write(f.read().decode())
62 |
--------------------------------------------------------------------------------
/HorizonNet/misc/structured3d_prepare_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 | from zipfile import ZipFile
4 | from tqdm import tqdm
5 | import imageio
6 |
7 | '''
8 | Assume the data were extracted by `misc/structured3d_extract_zip.py`.
9 | That is to say, assume the following structure:
10 | - {in_root}/scene_xxxxx
11 | - rgb/
12 | - *png
13 | - layout/
14 | - *txt
15 |
16 | The reorganized structure is as follows:
17 | - {out_train_root}
18 | - img/
19 | - scene_xxxxx_*png (softlink)
20 | - label_cor/
21 | - scene_xxxxx_*txt (softlink)
22 | - {out_valid_root} ...
23 | - {out_test_root} ...
24 | '''
25 | TRAIN_SCENE = ['scene_%05d' % i for i in range(0, 3000)]
26 | VALID_SCENE = ['scene_%05d' % i for i in range(3000, 3250)]
27 | TEST_SCENE = ['scene_%05d' % i for i in range(3250, 3500)]
28 |
29 | parser = argparse.ArgumentParser()
30 | parser.add_argument('--in_root', required=True)
31 | parser.add_argument('--out_train_root', default='data/st3d_train_full_raw_light')
32 | parser.add_argument('--out_valid_root', default='data/st3d_valid_full_raw_light')
33 | parser.add_argument('--out_test_root', default='data/st3d_test_full_raw_light')
34 | args = parser.parse_args()
35 |
36 | def prepare_dataset(scene_ids, out_dir):
37 | root_img = os.path.join(out_dir, 'img')
38 | root_cor = os.path.join(out_dir, 'label_cor')
39 | os.makedirs(root_img, exist_ok=True)
40 | os.makedirs(root_cor, exist_ok=True)
41 | for scene_id in tqdm(scene_ids):
42 | source_img_root = os.path.join(args.in_root, scene_id, 'rgb')
43 | source_cor_root = os.path.join(args.in_root, scene_id, 'layout')
44 | for fname in os.listdir(source_cor_root):
45 | room_id = fname.split('_')[0]
46 | source_img_path = os.path.join(args.in_root, scene_id, 'rgb', room_id + '_rgb_rawlight.png')
47 | source_cor_path = os.path.join(args.in_root, scene_id, 'layout', room_id + '_layout.txt')
48 | target_img_path = os.path.join(root_img, '%s_%s.png' % (scene_id, room_id))
49 | target_cor_path = os.path.join(root_cor, '%s_%s.txt' % (scene_id, room_id))
50 | assert os.path.isfile(source_img_path)
51 | assert os.path.isfile(source_cor_path)
52 | os.symlink(source_img_path, target_img_path)
53 | os.symlink(source_cor_path, target_cor_path)
54 |
55 | prepare_dataset(TRAIN_SCENE, args.out_train_root)
56 | prepare_dataset(VALID_SCENE, args.out_valid_root)
57 | prepare_dataset(TEST_SCENE, args.out_test_root)
58 |
--------------------------------------------------------------------------------
/HorizonNet/misc/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | from collections import OrderedDict
4 |
5 |
6 | def group_weight(module):
7 | # Group module parameters into two groups:
8 | # one needs weight_decay and the other doesn't
9 | group_decay = []
10 | group_no_decay = []
11 | for m in module.modules():
12 | if isinstance(m, nn.Linear):
13 | group_decay.append(m.weight)
14 | if m.bias is not None:
15 | group_no_decay.append(m.bias)
16 | elif isinstance(m, nn.modules.conv._ConvNd):
17 | group_decay.append(m.weight)
18 | if m.bias is not None:
19 | group_no_decay.append(m.bias)
20 | elif isinstance(m, nn.modules.batchnorm._BatchNorm):
21 | if m.weight is not None:
22 | group_no_decay.append(m.weight)
23 | if m.bias is not None:
24 | group_no_decay.append(m.bias)
25 | elif isinstance(m, nn.GroupNorm):
26 | if m.weight is not None:
27 | group_no_decay.append(m.weight)
28 | if m.bias is not None:
29 | group_no_decay.append(m.bias)
30 |
31 | assert len(list(module.parameters())) == len(group_decay) + len(group_no_decay)
32 | return [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
33 |
34 |
35 | def adjust_learning_rate(optimizer, args):
36 | if args.cur_iter < args.warmup_iters:
37 | frac = args.cur_iter / args.warmup_iters
38 | step = args.lr - args.warmup_lr
39 | args.running_lr = args.warmup_lr + step * frac
40 | else:
41 | frac = (float(args.cur_iter) - args.warmup_iters) / (args.max_iters - args.warmup_iters)
42 | scale_running_lr = max((1. - frac), 0.) ** args.lr_pow
43 | args.running_lr = args.lr * scale_running_lr
44 |
45 | for param_group in optimizer.param_groups:
46 | param_group['lr'] = args.running_lr
47 |
48 |
49 | def save_model(net, path, args):
50 | state_dict = OrderedDict({
51 | 'args': args.__dict__,
52 | 'kwargs': {
53 | 'backbone': net.backbone,
54 | 'use_rnn': net.use_rnn,
55 | },
56 | 'state_dict': net.state_dict(),
57 | })
58 | torch.save(state_dict, path)
59 |
60 |
61 | def load_trained_model(Net, path):
62 | state_dict = torch.load(path, map_location='cpu')
63 | net = Net(**state_dict['kwargs'])
64 | net.load_state_dict(state_dict['state_dict'])
65 | return net
66 |
--------------------------------------------------------------------------------
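
For orientation, here is a minimal usage sketch (not part of the repository) showing how the helpers above fit together. It assumes the HorizonNet class defined in HorizonNet/model.py takes the same `backbone` and `use_rnn` keyword arguments that save_model() records in its checkpoints, and the checkpoint path is hypothetical.

import torch
from model import HorizonNet          # assumed class from HorizonNet/model.py
from misc import utils

net = HorizonNet(backbone='resnet50', use_rnn=True)

# group_weight() splits parameters so that biases and normalization weights
# land in the second group, whose weight_decay is overridden to 0.
optimizer = torch.optim.Adam(utils.group_weight(net), lr=1e-4, weight_decay=1e-5)

# A checkpoint written by save_model() can later be restored with:
# net = utils.load_trained_model(HorizonNet, 'ckpt/resnet50_rnn.pth')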
/HorizonNet/panotools/__init__.py:
--------------------------------------------------------------------------------
1 | from .house import House
2 | from .bbox import BBox
3 | from .panorama import Panorama
4 | from . import tools
5 | from . import visualize as vis
6 |
--------------------------------------------------------------------------------
/HorizonNet/panotools/bbox.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class BBox:
4 | def __init__(self, bbox=None, obj_type=None):
5 | self.bbox = bbox #BoundingBox (2,3)
6 | self.type = obj_type
7 | if abs(self.bbox[0][0]-self.bbox[1][0])<1e-4:
8 | if self.bbox[0][0]>0:
9 | self.direction = 0
10 | else:
11 | self.direction = 2
12 | else:
13 | if self.bbox[0][2]>0:
14 | self.direction = 1
15 | else:
16 | self.direction = 3
17 |
18 |
--------------------------------------------------------------------------------
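
A small illustrative check (not part of the repository) of the `direction` attribute computed above: it appears to index which axis-aligned wall the box lies on, based on which coordinate is constant across the two endpoints and on its sign.

import numpy as np
from panotools.bbox import BBox   # assumes HorizonNet/ is on the Python path

# Both endpoints share x = 2.0 > 0, so the first branch fires and direction == 0.
door = BBox(bbox=np.array([[2.0, 0.0, -0.5],
                           [2.0, 1.5,  0.5]]), obj_type='door')
print(door.direction)  # -> 0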
/HorizonNet/panotools/house.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from scipy.spatial.distance import cdist
3 | import numpy as np
4 | from PIL import Image, ImageDraw
5 | import shapely.geometry as sg
6 | import shapely.ops as so
7 | from shapely.ops import transform, nearest_points
8 | from shapely import affinity
9 | import os
10 | from .tree import Tree
11 | from .panorama import Panorama
12 | from . import tools
13 | import json
14 | import matplotlib.pyplot as plt
15 |
16 | class House:
17 | def __init__(self, path, name):
18 | self.path = path
19 | self.name = name
20 | self.labeld = False
21 | if not os.path.exists("{}/{}/labels.json".format(path, name)):
22 | return
23 | data = json.load(open("{}/{}/labels.json".format(path, name)))
24 | if len(data['flags'])!=0 and int(data['flags'][0])<4:
25 | return
26 | self.labeld = True
27 | self.pano_names = data['pano_names']
28 | self.rotations = data['rotations']
29 | self.room_types = data['room_types']
30 | self.positions = data['positions']
31 | self.scale = data['scales'][0]
32 | self.pano_scale = data['scales'][1]
33 |
34 | self.fp = "{}/{}/floorplan.jpg".format(path, name)
35 | self.panos = []
36 | for name in self.pano_names:
37 | self.panos.append(Panorama("{}/{}".format(self.path, self.name), 'aligned_'+name))
38 |
39 | self.positive_pairs = []
40 | self.check_connections()
41 |
42 | def get_fp_img(self, type="RGB"):
43 | img = Image.open(self.fp).convert(type)
44 | img = img.resize((int(img.size[0] * self.scale), int(img.size[1] * self.scale)))
45 | return img
46 |
47 | def dindex_to_panoindex(self, index):
48 | for i, pano in enumerate(self.panos):
49 | if (index < len(pano.doors)):
50 | return i, int(index)
51 | index -= len(pano.doors)
52 |
53 | def visualize_alignment(self):
54 | fp = self.get_fp_img()
55 | for i in self.positions:
56 | pos = self.positions[i]
57 | i = int(i)
58 | pano = self.panos[i].get_top_down_view()
59 | pano = pano.rotate(-90 * self.rotations[i])
60 | pano = pano.resize((int(pano.size[0] * self.pano_scale), int(pano.size[1] * self.pano_scale)))
61 | pano = pano.crop((-pos[0], -pos[1], fp.size[0] - pos[0], fp.size[1] - pos[1]))
62 | alpha = pano.split()[-1]
63 | fp = Image.composite(pano, fp, alpha)
64 | fp.show()
65 |
66 | def check_connections(self):
67 | objs = []
68 | for name in self.positions:
69 | pano = self.panos[int(name)]
70 | for j, obj in enumerate(pano.obj_list):
71 | dtype = obj.type
72 | bbox = obj.bbox * 25.6 + 256
73 | obj = sg.LineString([(bbox[0][0], bbox[0][2]), (bbox[1][0], bbox[1][2])])
74 | obj = affinity.rotate(obj, 90 * self.rotations[int(name)], (256,256))
75 | obj = affinity.translate(obj, self.positions[name][0], self.positions[name][1])
76 | objs.append([obj, int(name), j])
77 | dists = np.zeros([len(objs), len(objs)]) + 1e10
78 | for i in range(len(objs)):
79 | for j in range(len(objs)):
80 | if i==j:
81 | continue
82 | tmp = nearest_points(objs[i][0].centroid, objs[j][0])
83 | d = tmp[1].distance(objs[i][0].centroid)
84 | dists[i,j] = d
85 | dists = np.round(dists,3)
86 | args = np.argmin(dists, 1)
87 | dists = np.min(dists,1)
88 | for i in range(len(objs)):
89 | for j in range(i+1, len(objs)):
90 | if(args[i]==j and args[j]==i and dists[i]<10):
91 | self.positive_pairs.append([objs[i][1:], objs[j][1:]])
92 | # print(self.panos[objs[i][1]].obj_list[objs[i][2]].direction+self.rotations[objs[i][1]],
93 | # self.panos[objs[j][1]].obj_list[objs[j][2]].direction+self.rotations[objs[j][1]])
94 |
95 | #####################################################################################
96 | # import glob
97 | # names = os.listdir("clean_data/")
98 | # cnt = 0
99 | # for name in names[3:]:
100 | # house = House("clean_data", name)
101 | # if house.labeld:
102 | # cnt += len(house.positive_pairs)
103 | # house.visualize_alignment()
104 | # break
105 | # print(cnt)
106 |
--------------------------------------------------------------------------------
/HorizonNet/panotools/tools.py:
--------------------------------------------------------------------------------
1 | import shapely.geometry as sg
2 | import shapely.ops as so
3 | import numpy as np
4 | from shapely.ops import transform
5 | import math
6 | import seaborn as sns
7 |
8 |
9 | colors = sns.color_palette("bright", 8)
10 | colors = [[x[0] * 255, x[1] * 255, x[2] * 255, 255] for x in colors]
11 | colors = np.array(colors, dtype=int)
12 |
13 |
14 | rcolors = sns.color_palette("dark", 10)
15 | rcolors = [[x[0] * 255, x[1] * 255, x[2] * 255, 200] for x in rcolors]
16 | rcolors = np.array(rcolors, dtype=int)
17 |
18 | def flip(x, y):
19 | return x, -y
20 |
21 |
22 | def non_max_suppression_fast(boxes, probs=None, overlapThresh=0.3):
23 | # if there are no boxes, return an empty list
24 | if len(boxes) == 0:
25 | return []
26 |
27 | # if the bounding boxes are integers, convert them to floats -- this
28 | # is important since we'll be doing a bunch of divisions
29 | if boxes.dtype.kind == "i":
30 | boxes = boxes.astype("float")
31 |
32 | # initialize the list of picked indexes
33 | pick = []
34 |
35 | # grab the coordinates of the bounding boxes
36 | x1 = boxes[:, 0]
37 | y1 = boxes[:, 1]
38 | x2 = boxes[:, 2]
39 | y2 = boxes[:, 3]
40 |
41 | # compute the area of the bounding boxes and grab the indexes to sort
42 | # (in the case that no probabilities are provided, simply sort on the
43 | # bottom-left y-coordinate)
44 | area = (x2 - x1 + 1) * (y2 - y1 + 1)
45 | idxs = y2
46 |
47 | # if probabilities are provided, sort on them instead
48 | if probs is not None:
49 | idxs = probs
50 |
51 | # sort the indexes
52 | idxs = np.argsort(idxs)
53 |
54 | # keep looping while some indexes still remain in the indexes list
55 | while len(idxs) > 0:
56 | # grab the last index in the indexes list and add the index value
57 | # to the list of picked indexes
58 | last = len(idxs) - 1
59 | i = idxs[last]
60 | pick.append(i)
61 |
62 | # find the largest (x, y) coordinates for the start of the bounding
63 | # box and the smallest (x, y) coordinates for the end of the bounding
64 | # box
65 | xx1 = np.maximum(x1[i], x1[idxs[:last]])
66 | yy1 = np.maximum(y1[i], y1[idxs[:last]])
67 | xx2 = np.minimum(x2[i], x2[idxs[:last]])
68 | yy2 = np.minimum(y2[i], y2[idxs[:last]])
69 |
70 | # compute the width and height of the bounding box
71 | w = np.maximum(0, xx2 - xx1 + 1)
72 | h = np.maximum(0, yy2 - yy1 + 1)
73 |
74 | # compute the ratio of overlap
75 | overlap = (w * h) / area[idxs[:last]]
76 |
77 | # delete all indexes from the index list that have overlap greater
78 | # than the provided overlap threshold
79 | idxs = np.delete(
80 | idxs, np.concatenate(
81 | ([last], np.where(overlap > overlapThresh)[0])))
82 |
83 | # return only the bounding boxes that were picked
84 | return boxes[pick].astype("int")
85 |
86 |
87 | def pano_to_fp(point, polygon, pano_size, rot90=0):
88 | x = point[0]
89 | degree = (pano_size[1] - x) / pano_size[1] * (2 * np.pi)
90 | degree = (degree + (np.pi / 2 * rot90))
91 | ray = [(0, 0), (512 * np.cos(degree), 512 * np.sin(degree))]
92 | ray = sg.LineString(ray)
93 | intersect = polygon.exterior.intersection(ray)
94 | if (intersect.type == "MultiPoint"):
95 | intersect = intersect[0]
96 | if (intersect.type == "LineString"):
97 | return intersect, 0
98 | x, y = polygon.exterior.coords.xy
99 | for i in range(1, len(x)):
100 | line = sg.LineString([(x[i - 1], y[i - 1]), (x[i], y[i])])
101 | check = line.intersects(ray)
102 | if (check):
103 | break
104 | x, y = line.xy
105 | if (abs(x[0] - x[1]) < abs(y[0] - y[1])):
106 | is_vertical = 1 if (x[0] < 0) else 3
107 | else:
108 | is_vertical = 0 if (y[0] < 0) else 2
109 | return intersect, is_vertical
110 |
111 |
112 | def map_pano_to_tdv(pano):
113 | mapping = np.zeros([2, pano.size[0], pano.size[1]])
114 | for i in range(pano.size[1]):
115 | p, _ = pano_to_fp([i, 0], pano.poly, pano.size, rot90=1)
116 | mapping[0, 100:, i] = np.linspace(
117 | p.y, 0, num=pano.size[0] - 100, endpoint=False) + 512
118 | mapping[1, 100:, i] = np.linspace(
119 | p.x, 0, num=pano.size[0] - 100, endpoint=False) + 512
120 | mapping = mapping.astype(int)
121 | return mapping
122 |
123 | def uv2coords(uv):
124 |
125 | coordsX = uv[0] / (2 * math.pi) + 0.5
126 | coordsY = -uv[1] / math.pi + 0.5
127 |
128 | coords = (coordsX, coordsY)
129 |
130 | return coords
131 |
132 |
133 | def xyz2uv(xyz):
134 |
135 | normXZ = math.sqrt(math.pow(xyz[0], 2) + math.pow(xyz[2], 2))
136 | if normXZ < 0.000001:
137 | normXZ = 0.000001
138 |
139 | normXYZ = math.sqrt(
140 | math.pow(xyz[0], 2) + math.pow(xyz[1], 2) + math.pow(xyz[2], 2))
141 |
142 | v = math.asin(xyz[1] / normXYZ)
143 | u = math.asin(xyz[0] / normXZ)
144 |
145 | if xyz[2] > 0 and u > 0:
146 | u = math.pi - u
147 | elif xyz[2] > 0 and u < 0:
148 | u = -math.pi - u
149 |
150 | uv = (u, v)
151 |
152 | return uv
153 |
154 |
155 | def xyz2coords(xyz):
156 |
157 | uv = xyz2uv(xyz)
158 | coords = uv2coords(uv)
159 |
160 | return coords
161 |
--------------------------------------------------------------------------------
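
A quick sanity check (illustrative only, assuming HorizonNet/ is on the Python path) for the coordinate helpers above: xyz2coords() maps a 3D point in the camera frame to normalized equirectangular image coordinates in [0, 1].

from panotools import tools

print(tools.xyz2coords((0.0, 0.0, -1.0)))  # (0.5, 0.5): a point along -z lands at the image center
print(tools.xyz2coords((1.0, 0.0, 0.0)))   # (0.75, 0.5): +x maps three quarters across the width
print(tools.xyz2coords((0.0, 1.0, 0.0)))   # (0.5, 0.0): +y maps to the top row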
/HorizonNet/panotools/tree.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class Tree:
5 | def __init__(self):
6 | self.rooms = []
7 | self.pairs_list = []
8 | self.num_pos = 0
9 | self.num_neg = 0
10 |
11 | def __len__(self):
12 | return len(self.rooms)
13 |
14 | def add_pair(self, pair, is_positive):
15 | self.pairs_list.append(pair)
16 | if (is_positive):
17 | self.num_pos += 1
18 | else:
19 | self.num_neg += 1
20 |
21 | def drop_last(self, is_positive):
22 | self.pairs_list = self.pairs_list[:-1]
23 | if (is_positive):
24 | self.num_pos -= 1
25 | else:
26 | self.num_neg -= 1
27 |
--------------------------------------------------------------------------------
/HorizonNet/preprocess.py:
--------------------------------------------------------------------------------
1 | '''
2 | This script preprocesses the given 360 panorama images under equirectangular projection
3 | and dumps them to the given directory for further layout prediction and visualization.
4 | The script will:
5 | - extract and dump the vanishing points
6 | - rotate the equirect image to align with the detected VP
7 | - extract the VP aligned line segments (for further layout prediction model)
8 | The dump files:
9 | - `*_VP.txt` is the vanishing points
10 | - `*_aligned_rgb.png` is the VP aligned RGB image
11 | - `*_aligned_line.png` is the VP aligned line segments images
12 |
13 | Author: Cheng Sun
14 | Email : chengsun@gapp.nthu.edu.tw
15 | '''
16 |
17 | import os
18 | import glob
19 | import argparse
20 | import numpy as np
21 | from PIL import Image
22 |
23 | from misc.pano_lsd_align import panoEdgeDetection, rotatePanorama
24 |
25 |
26 | parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
27 | # I/O related arguments
28 | parser.add_argument('--img_glob', required=True,
29 | help='NOTE: Remember to quote your glob path.')
30 | parser.add_argument('--output_dir', required=True)
31 | parser.add_argument('--rgbonly', action='store_true',
32 | help='Add this if you are preparing a custom dataset')
33 | # Preprocessing related arguments
34 | parser.add_argument('--q_error', default=0.7, type=float)
35 | parser.add_argument('--refine_iter', default=3, type=int)
36 | args = parser.parse_args()
37 |
38 | paths = sorted(glob.glob(args.img_glob))
39 | if len(paths) == 0:
40 | print('no images found')
41 |
42 | # Check given path exist
43 | for path in paths:
44 | assert os.path.isfile(path), '%s not found' % path
45 |
46 | # Check target directory
47 | if not os.path.isdir(args.output_dir):
48 | print('Output directory %s does not exist. Creating it.' % args.output_dir)
49 | os.makedirs(args.output_dir)
50 |
51 | # Process each input
52 | for i_path in paths:
53 | print('Processing', i_path, flush=True)
54 |
55 | # Load and cat input images
56 | img_ori = np.array(Image.open(i_path).resize((1024, 512), Image.BICUBIC))[..., :3]
57 |
58 | # VP detection and line segment extraction
59 | _, vp, _, _, panoEdge, _, _ = panoEdgeDetection(img_ori,
60 | qError=args.q_error,
61 | refineIter=args.refine_iter)
62 | panoEdge = (panoEdge > 0)
63 |
64 | # Align images with VP
65 | i_img = rotatePanorama(img_ori / 255.0, vp[2::-1])
66 | l_img = rotatePanorama(panoEdge.astype(np.float32), vp[2::-1])
67 |
68 | # Dump results
69 | basename = os.path.splitext(os.path.basename(i_path))[0]
70 | if args.rgbonly:
71 | path = os.path.join(args.output_dir, '%s.png' % basename)
72 | Image.fromarray((i_img * 255).astype(np.uint8)).save(path)
73 | else:
74 | path_VP = os.path.join(args.output_dir, '%s_VP.txt' % basename)
75 | path_i_img = os.path.join(args.output_dir, '%s_aligned_rgb.png' % basename)
76 | path_l_img = os.path.join(args.output_dir, '%s_aligned_line.png' % basename)
77 |
78 | with open(path_VP, 'w') as f:
79 | for i in range(3):
80 | f.write('%.6f %.6f %.6f\n' % (vp[i, 0], vp[i, 1], vp[i, 2]))
81 | Image.fromarray((i_img * 255).astype(np.uint8)).save(path_i_img)
82 | Image.fromarray((l_img * 255).astype(np.uint8)).save(path_l_img)
83 |
--------------------------------------------------------------------------------
/JigsawAnnotator/.gitignore:
--------------------------------------------------------------------------------
1 | # For this project
2 | dataset/*
3 | __pycache__/
4 | labels
5 | clean_data/*
6 | clean_data.zip
7 | preds_data/
8 | HorizonNet/data
9 | HorizonNet/logs
10 | HorizonNet/ckpt
11 | annotations/
12 | room_types
13 |
14 | # Byte-compiled / optimized / DLL files
15 | __pycache__/
16 | *.py[cod]
17 | *$py.class
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | build/
25 | develop-eggs/
26 | dist/
27 | downloads/
28 | eggs/
29 | .eggs/
30 | lib/
31 | lib64/
32 | parts/
33 | sdist/
34 | var/
35 | wheels/
36 | *.egg-info/
37 | .installed.cfg
38 | *.egg
39 | MANIFEST
40 |
41 | # PyInstaller
42 | # Usually these files are written by a python script from a template
43 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
44 | *.manifest
45 | *.spec
46 |
47 | # Installer logs
48 | pip-log.txt
49 | pip-delete-this-directory.txt
50 |
51 | # Unit test / coverage reports
52 | htmlcov/
53 | .tox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | *.pyc
3 | ./vscode
4 |
5 | estimator/depth/models/trained/*
6 |
7 | preprocess/matlab/
8 |
9 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/.idea/PanoAnnotator.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/PanoAnnotator.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import os
3 | import argparse
4 |
5 | import PanoAnnotator.data as data
6 | import PanoAnnotator.configs.Params as pm
7 | import PanoAnnotator.utils as utils
8 | import PanoAnnotator.views as views
9 | import qdarkstyle
10 | import HorizonNet.layout_viewer as layout_viewer
11 | #import estimator
12 |
13 | from PyQt5 import QtCore, QtGui, QtWidgets
14 |
15 | from PanoAnnotator.views.PanoView import PanoView
16 | from PanoAnnotator.views.MonoView import MonoView
17 | from PanoAnnotator.views.ResultView import ResultView
18 | from PanoAnnotator.views.LabelListView import LabelListView
19 |
20 | from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QProgressDialog
21 | from PyQt5.QtCore import QCoreApplication
22 |
23 |
24 | class PanoAnnotator(QMainWindow, views.MainWindowUi):
25 | def __init__(self, app, parent):
26 | super(PanoAnnotator, self).__init__()
27 | self.app = app
28 | self.pr = parent
29 | self.setupUi(self)
30 | self.actionOpenImage.triggered.connect(self.openImageFile)
31 | self.actionOpenJson.triggered.connect(self.openJsonFile)
32 |
33 | self.actionSaveFile.triggered.connect(self.saveSceneFile)
34 |
35 | self.mainScene = data.Scene(self)
36 |
37 | if pm.isDepthPred:
38 | # import PanoAnnotator.estimator as estimator
39 | # self.depthPred = estimator.DepthPred()
40 | self.depthPred = layout_viewer.get_depth
41 | else:
42 | self.depthPred = None
43 |
44 | self.panoView.setMainWindow(self)
45 | self.monoView.setMainWindow(self)
46 | self.resultView.setMainWindow(self)
47 | self.labelListView.setMainWindow(self)
48 |
49 | def openImageFile(self):
50 | filePath, ok = QFileDialog.getOpenFileName(self, "open",
51 | pm.fileDefaultOpenPath,
52 | "Images (*.png *.jpg)")
53 | if ok:
54 | self.openImage(filePath)
55 | # self.mainScene = self.createNewScene(filePath)
56 | # self.mainScene.initLabel()
57 | # self.initViewsByScene(self.mainScene)
58 | else:
59 | print('open file error')
60 | return ok
61 |
62 | def openImage(self, filepath):
63 | self.filepath = filepath
64 | self.mainScene = self.createNewScene(self.filepath)
65 | if (os.path.exists("{}.json".format(self.filepath[:-4]))):
66 | self.mainScene.loadLabel("{}.json".format(self.filepath[:-4]))
67 | else:
68 | self.mainScene.loadOldLabel("{}.json".format(self.filepath[:-4]))
69 | # self.mainScene.initLabel()
70 | self.initViewsByScene(self.mainScene)
71 |
72 | def openJsonFile(self):
73 | filePath, ok = QFileDialog.getOpenFileName(self, "open",
74 | pm.fileDefaultOpenPath,
75 | "Json (*.json)")
76 | if ok:
77 | imagePath = os.path.join(os.path.dirname(filePath),
78 | pm.colorFileDefaultName)
79 | self.mainScene = self.createNewScene(imagePath)
80 | self.mainScene.loadLabel(filePath)
81 | self.initViewsByScene(self.mainScene)
82 | else:
83 | print('open file error')
84 | return ok
85 |
86 | def saveSceneFile(self):
87 |
88 | curPath = self.mainScene.getCurrentPath()
89 | savePath = "{}.json".format(self.filepath[:-4])
90 | #utils.saveSceneAsMaps(savePath, self.mainScene)
91 | utils.saveSceneAsJson(savePath, self.mainScene)
92 | self.close()
93 |
94 | def createNewScene(self, filePath):
95 | scene = data.Scene(self)
96 | scene.initScene(filePath, self.depthPred)
97 | return scene
98 |
99 | def initViewsByScene(self, scene):
100 | self.panoView.initByScene(scene)
101 | self.monoView.initByScene(scene)
102 | self.resultView.initByScene(scene)
103 | self.labelListView.initByScene(scene)
104 |
105 | def moveMonoCamera(self, coords):
106 | self.monoView.moveCamera(coords)
107 |
108 | def updateViews(self):
109 | self.panoView.update()
110 | self.monoView.update()
111 | self.resultView.update()
112 |
113 | def updateListView(self):
114 | self.labelListView.refreshList()
115 |
116 | def updataProgressView(self, val):
117 | self.progressView.setValue(val)
118 | QCoreApplication.processEvents()
119 |
120 | def refleshProcessEvent(self):
121 | QCoreApplication.processEvents()
122 |
123 | def closeEvent(self, event):
124 | # if self.depthPred:
125 | # self.depthPred.sess.close()
126 | event.accept()
127 | return
128 |
129 | def keyPressEvent(self, event):
130 | print("main")
131 | key = event.key()
132 |
133 | def run(self, path):
134 | print(path)
135 | self.showMaximized()
136 | self.openImage(path)
137 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/README.md:
--------------------------------------------------------------------------------
1 | # PanoAnnotator
2 | This is a modified version of "[
3 | PanoAnnotator](https://docs.google.com/document/d/1V88R7Uzds8TemWHFnyrvGDoXIhyt7W1g8M9Fll204S0/edit)" ([project page](https://github.com/SunDaDenny/PanoAnnotator)), which is used for our work. For more information and original code, please visit the project webpage.
4 |
5 |
6 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/configs/Params.py:
--------------------------------------------------------------------------------
1 | class Params(object):
2 |
3 | fileDefaultOpenPath = ""
4 | labelFileDefaultName = "label.json"
5 |
6 | colorFileDefaultName = "color.png"
7 | depthFileDefaultName = "depth.png"
8 | linesFileDefaultName = "None" #"lines.png"
9 | omapFileDefaultName = "None" #"omap.png"
10 |
11 | #GPU
12 | isDepthPred = True
13 | isGUI = True
14 |
15 | #Annotation
16 | layoutMapSize = [512, 1024, 3]
17 |
18 | defaultCameraHeight = 1.8
19 | defaultLayoutHeight = 3.2
20 |
21 | #Input
22 | keyDict = {
23 | 'none': 0,
24 | 'ctrl': 1,
25 | 'shift': 2,
26 | 'alt': 3,
27 | 'door': 4,
28 | 'glass_door': 5,
29 | 'frame': 6,
30 | 'window': 7,
31 | 'kitchen_counter': 8,
32 | 'closet': 9
33 | }
34 |
35 | layoutHeightSampleRange = 0.3
36 | layoutHeightSampleStep = 0.01
37 |
38 | #MonoView
39 | monoViewFov = (-1, 90)
40 |
41 | #PanoTool
42 | pcSampleStride = 30
43 | meshProjSampleStep = 30
44 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/configs/User.py:
--------------------------------------------------------------------------------
1 | class User(object):
2 |
3 | name = "Amin Shabani"
4 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/configs/__init__.py:
--------------------------------------------------------------------------------
1 | from .Params import *
2 | from .User import *
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/FloorPlane.py:
--------------------------------------------------------------------------------
1 | import PanoAnnotator.data as data
2 | import PanoAnnotator.configs.Params as pm
3 | import PanoAnnotator.utils as utils
4 |
5 | fpInstanceCount = 0
6 |
7 |
8 | class FloorPlane(object):
9 | def __init__(self, scene, isCeiling=False):
10 |
11 | self.__scene = scene
12 |
13 | self.__isCeiling = isCeiling
14 |
15 | self.gPoints = scene.label.getLayoutPoints()
16 | self.walls = scene.label.getLayoutWalls()
17 | self.color = (0, 0, 0)
18 |
19 | self.normal = (0, -1, 0) if isCeiling else (0, 1, 0)
20 | self.height = 0
21 | self.planeEquation = (0, 0, 0, 0)
22 |
23 | self.corners = []
24 | self.edges = []
25 | self.bbox2d = ((0, 0), (1, 1))
26 |
27 | self.id = 0
28 |
29 | self.init()
30 |
31 | global fpInstanceCount
32 | fpInstanceCount += 1
33 | self.id = fpInstanceCount
34 |
35 | def init(self):
36 |
37 | self.updateGeometry()
38 |
39 | def updateGeometry(self):
40 |
41 | cameraH = self.__scene.label.getCameraHeight()
42 | cam2ceilH = self.__scene.label.getCam2CeilHeight()
43 | self.height = cam2ceilH if self.__isCeiling else cameraH
44 | self.planeEquation = self.normal + (self.height, )
45 | self.color = utils.normal2color(self.normal)
46 |
47 | self.updateCorners()
48 | self.updateEdges()
49 | self.updateBbox2d()
50 |
51 | def updateCorners(self):
52 | self.corners = []
53 | for gp in self.gPoints:
54 | if self.__isCeiling:
55 | xyz = (gp.xyz[0], self.height, gp.xyz[2])
56 | else:
57 | xyz = (gp.xyz[0], -self.height, gp.xyz[2])
58 | corner = data.GeoPoint(self.__scene, None, xyz)
59 | self.corners.append(corner)
60 |
61 | def updateEdges(self):
62 |
63 | self.edges = []
64 | cnum = len(self.corners)
65 | for i in range(cnum):
66 | edge = data.GeoEdge(
67 | self.__scene, (self.corners[i], self.corners[(i + 1) % cnum]))
68 | self.edges.append(edge)
69 |
70 | def updateBbox2d(self):
71 | coords = []
72 | for c in [e.coords for e in self.edges]:
73 | coords += c
74 | self.bbox2d = utils.imagePointsBox(coords)
75 |
76 | def isCeiling(self):
77 | return self.__isCeiling
78 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/GeoEdge.py:
--------------------------------------------------------------------------------
1 | import PanoAnnotator.configs.Params as pm
2 | import PanoAnnotator.utils as utils
3 |
4 | geInstanceCount = 0
5 |
6 |
7 | class GeoEdge(object):
8 | def __init__(self, scene, gPoints):
9 |
10 | self.__scene = scene
11 |
12 | if (len(gPoints) < 2):
13 | print("Need at least two points")
14 |
15 | self.gPoints = gPoints
16 | self.vector = (0, 0, 0)
17 |
18 | self.sample = []
19 | self.coords = []
20 |
21 | self.id = 0
22 |
23 | self.init()
24 |
25 | global geInstanceCount
26 | geInstanceCount += 1
27 | self.id = geInstanceCount
28 |
29 | def init(self):
30 |
31 | p1 = self.gPoints[0].xyz
32 | p2 = self.gPoints[1].xyz
33 | self.vector = utils.pointsDirection(p1, p2)
34 |
35 | self.sample = utils.pointsSample(p1, p2, 30)
36 | self.coords = utils.points2coords(self.sample)
37 |
38 | def checkCross(self):
39 | for i in range(len(self.coords) - 1):
40 | isCross, l, r = utils.pointsCrossPano(self.sample[i],
41 | self.sample[i + 1])
42 | if isCross:
43 | return True, l, r
44 | return False, None, None
45 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/GeoPoint.py:
--------------------------------------------------------------------------------
1 | import PanoAnnotator.utils as utils
2 |
3 | gpInstanceCount = 0
4 |
5 |
6 | class GeoPoint(object):
7 | def __init__(self, scene, coords=None, xyz=None):
8 |
9 | self.__scene = scene
10 |
11 | self.coords = coords
12 | self.color = (0, 0, 0)
13 | self.depth = 0
14 | self.xyz = xyz
15 |
16 | self.type = 0 # [convex, concave, occul]
17 | self.id = 0
18 |
19 | self.initByScene()
20 |
21 | global gpInstanceCount
22 | gpInstanceCount += 1
23 | self.id = gpInstanceCount
24 |
25 | def initByScene(self):
26 |
27 | if self.coords == None:
28 | self.coords = utils.xyz2coords(self.xyz)
29 |
30 | coordsT = (self.coords[1], self.coords[0])
31 |
32 | colorData = self.__scene.getPanoColorData()
33 |
34 | colorPos = utils.coords2pos(coordsT, colorData.shape)
35 | rgb = colorData[colorPos[0]][colorPos[1]]
36 | self.color = (rgb[0], rgb[1], rgb[2])
37 |
38 | depthData = self.__scene.getPanoDepthData()
39 |
40 | depthPos = utils.coords2pos(coordsT, depthData.shape)
41 | depthMean = utils.imageRegionMean(depthData, depthPos, (5, 5))
42 | self.depth = depthMean
43 | #self.depth = depthData[depthPos[0]][depthPos[1]]
44 |
45 | if self.xyz == None:
46 | self.xyz = utils.coords2xyz(self.coords, self.depth)
47 |
48 | #self.calcGeometryType()
49 |
50 | def moveByVector(self, vec):
51 |
52 | self.xyz = utils.vectorAdd(self.xyz, vec)
53 | self.coords = utils.xyz2coords(self.xyz)
54 |
55 | '''
56 | def calcGeometryType(self):
57 |
58 | coordsT = (self.coords[1], self.coords[0])
59 | depthData = self.__scene.getPanoDepthData()
60 |
61 | depthPos = utils.coords2pos(coordsT, depthData.shape)
62 | depth = depthData[depthPos[0]][depthPos[1]]
63 | if depth <= 0:
64 | return
65 |
66 | #print(depthPos)
67 | lt, rb = utils.calcCenterRegionPos(depthPos,
68 | ( int(50/depth), int(50/depth))
69 | ,depthData.shape)
70 | #print("{0} {1}".format(lt, rb))
71 |
72 | cb = (rb[0], depthPos[1])
73 | #print("cb {0}".format(cb))
74 | regionL = utils.getRegionData(depthData, lt, cb)
75 | #print(regionL.shape)
76 | ct = (lt[0], depthPos[1])
77 | #print("ct {0}".format(ct))
78 | regionR = utils.getRegionData(depthData, ct, rb)
79 | #print(regionR.shape)
80 |
81 | avgL = np.nanmean(regionL)
82 | avgR = np.nanmean(regionR)
83 |
84 | #print("L : {0} R : {1}".format(avgL, avgR))
85 | if abs(avgL - avgR) > 0.75:
86 | self.type = 2
87 | '''
88 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/Object2D.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | import PanoAnnotator.data as data
4 | import PanoAnnotator.configs.Params as pm
5 | import PanoAnnotator.utils as utils
6 |
7 | obj2dInstanceCount = 0
8 |
9 |
10 | class Object2D(object):
11 | def __init__(self, scene, gPoints, wall, indx=None, obj_type=None):
12 |
13 | self.__scene = scene
14 | self.obj_type = obj_type
15 |
16 | self.gPoints = gPoints
17 | self.attach = wall
18 | self.color = (1, 1, 1)
19 |
20 | self.normal = (0, 0, 0)
21 | self.planeEquation = (0, 0, 0, 0)
22 | self.width = 0
23 |
24 | self.corners = []
25 | self.edges = []
26 |
27 | self.bbox2d = ((0, 0), (1, 1))
28 | self.localBbox2d = ((0, 0), (1, 1))
29 |
30 | self.init()
31 | if (indx is None):
32 | global obj2dInstanceCount
33 | obj2dInstanceCount += 1
34 | self.id = obj2dInstanceCount
35 | else:
36 | self.id = indx
37 |
38 | def init(self):
39 |
40 | if not self in self.attach.attached:
41 | self.attach.attached.append(self)
42 |
43 | self.updateGeometry()
44 |
45 | def moveByNormal(self, val):
46 |
47 | vec = utils.vectorMultiplyC(self.normal, val)
48 |
49 | for gp in self.gPoints:
50 | gp.moveByVector(vec)
51 |
52 | self.updateGeometry()
53 |
54 | def updateGeometry(self):
55 |
56 | self.updateGeoPoints()
57 | self.updateCorners()
58 | self.updateEdges()
59 | self.updateBbox2d()
60 |
61 | self.normal = utils.pointsNormal(self.corners[0].xyz,
62 | self.corners[1].xyz,
63 | self.corners[3].xyz)
64 | #self.color = utils.normal2color(self.normal)
65 | self.planeEquation = utils.planeEquation(self.normal,
66 | self.corners[0].xyz)
67 | self.width = utils.pointsDistance(self.corners[0].xyz,
68 | self.corners[1].xyz)
69 |
70 | def updateGeoPoints(self):
71 |
72 | gps = self.gPoints
73 | acs = self.attach.corners
74 |
75 | #make sure the gpoints are left-up and right-down
76 | dis = [[], []]
77 | xyzs = [
78 | gps[0].xyz, (gps[1].xyz[0], gps[0].xyz[1], gps[1].xyz[2]),
79 | gps[1].xyz, (gps[0].xyz[0], gps[1].xyz[1], gps[0].xyz[2])
80 | ]
81 | for i in range(2):
82 | for xyz in xyzs:
83 | dis[i].append(utils.pointsDistance(xyz, acs[i * 2].xyz))
84 | xyz = xyzs[dis[i].index(min(dis[i]))]
85 | gps[i] = data.GeoPoint(self.__scene, None, xyz)
86 |
87 | # stick to wall boundary
88 | localBbox2d = []
89 | for i in range(2):
90 | xyz = list(gps[i].xyz)
91 | dis = utils.pointsDirectionPow(acs[i * 2].xyz, gps[i].xyz, 2)
92 | cxz = math.sqrt(dis[0] + dis[2]) / self.attach.width
93 | cy = math.sqrt(dis[1]) / self.__scene.label.getLayoutHeight()
94 | if cxz <= 0.03:
95 | xyz[0] = acs[i * 2].xyz[0]
96 | xyz[2] = acs[i * 2].xyz[2]
97 | cxz = 0
98 | if cy <= 0.03:
99 | xyz[1] = acs[i * 2].xyz[1]
100 | cy = 0
101 | gps[i] = data.GeoPoint(self.__scene, None, tuple(xyz))
102 | coord = (cxz, cy) if i == 0 else (1 - cxz, 1 - cy)
103 | localBbox2d.append(coord)
104 | self.localBbox2d = tuple(localBbox2d)
105 | #print(self.localBbox2d)
106 |
107 | def updateCorners(self):
108 |
109 | gps = self.gPoints
110 | scene = self.__scene
111 |
112 | self.corners = [
113 | data.GeoPoint(scene, None, gps[0].xyz),
114 | data.GeoPoint(scene, None,
115 | (gps[1].xyz[0], gps[0].xyz[1], gps[1].xyz[2])),
116 | data.GeoPoint(scene, None, gps[1].xyz),
117 | data.GeoPoint(scene, None,
118 | (gps[0].xyz[0], gps[1].xyz[1], gps[0].xyz[2]))
119 | ]
120 |
121 | def updateEdges(self):
122 |
123 | scene = self.__scene
124 | self.edges = [
125 | data.GeoEdge(scene, (self.corners[0], self.corners[1])),
126 | data.GeoEdge(scene, (self.corners[1], self.corners[2])),
127 | data.GeoEdge(scene, (self.corners[2], self.corners[3])),
128 | data.GeoEdge(scene, (self.corners[3], self.corners[0]))
129 | ]
130 |
131 | def updateBbox2d(self):
132 |
133 | coords = []
134 | for c in [e.coords for e in self.edges]:
135 | coords += c
136 | self.bbox2d = utils.imagePointsBox(coords)
137 |
138 | def checkRayHit(self, vec, orig=(0, 0, 0)):
139 |
140 | point = utils.vectorPlaneHit(vec, self.planeEquation)
141 | if point is None:
142 | return False, None
143 |
144 | cs = self.corners
145 | if cs[2].xyz[1] <= point[1] <= cs[0].xyz[1]:
146 |
147 | p1 = (point[0], cs[0].xyz[1], point[2])
148 | dis1 = utils.pointsDistance(p1, cs[0].xyz)
149 | dis2 = utils.pointsDistance(p1, cs[1].xyz)
150 | dis3 = utils.pointsDistance(cs[0].xyz, cs[1].xyz)
151 |
152 | if dis1 + dis2 <= dis3 * 1.0005:
153 | return True, point
154 |
155 | return False, None
156 |
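
checkRayHit accepts a hit point when it lies between the ceiling-side and floor-side corners and, via a triangle-inequality test, between the two upper corners. A minimal self-contained sketch of that horizontal test (the corner coordinates below are made up for illustration):

    import math

    def dist(p, q):
        # plain 3D Euclidean distance, mirroring utils.pointsDistance
        return math.sqrt(sum((a - b) ** 2 for a, b in zip(p, q)))

    c0, c1 = (0.0, 1.0, 0.0), (4.0, 1.0, 0.0)   # hypothetical upper corners
    p1 = (1.0, 1.0, 0.0)                        # hit point projected to corner height

    # inside the segment: the two partial distances sum to the corner distance
    print(dist(p1, c0) + dist(p1, c1) <= dist(c0, c1) * 1.0005)   # True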
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/Resource.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | import PanoAnnotator.configs.Params as pm
5 | import PanoAnnotator.utils as utils
6 |
7 | from PIL import Image as Image
8 | from PIL import ImageEnhance
9 | from PyQt5.QtGui import QPixmap
10 |
11 |
12 | class Resource(object):
13 | def __init__(self, name):
14 |
15 | self.name = name
16 |
17 | self.path = ''
18 | self.image = None #(w,h)
19 | self.data = None #(h,w)
20 | self.pixmap = None
21 |
22 | def initByImageFile(self, filePath):
23 |
24 | if os.path.exists(filePath):
25 | self.path = filePath
26 | self.image = Image.open(filePath).convert('RGB')
27 | enhancer = ImageEnhance.Contrast(self.image)
28 | img2 = enhancer.enhance(2)
29 | enhancer = ImageEnhance.Sharpness(img2)
30 | img2 = enhancer.enhance(2)
31 | self.data = np.asarray(self.image).astype(np.float)
32 | self.image = img2
33 | if pm.isGUI:
34 | self.pixmap = QPixmap(filePath)
35 | return True
36 | else:
37 | print("No default {0} image found".format(self.name))
38 | return False
39 |
40 | def initByImageFileDepth(self, filePath):
41 |
42 | if os.path.exists(filePath):
43 | self.path = filePath
44 | self.image = Image.open(filePath)
45 | self.data = np.asarray(self.image).astype(np.float)
46 | return True
47 | else:
48 | print("No default {0} image found".format(self.name))
49 | return False
50 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/Scene.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | import PanoAnnotator.data as data
5 | import PanoAnnotator.utils as utils
6 | import PanoAnnotator.configs.Params as pm
7 | #import estimator
8 |
9 |
10 | class Scene(object):
11 | def __init__(self, mainWindows):
12 |
13 | self.__mainWindows = mainWindows
14 | self.__isAvailable = False
15 | self.__mainDirPath = ""
16 |
17 | ### Pano color
18 | self.__panoColor = data.Resource('Color')
19 |
20 | ### Pano depth
21 | self.depthPred = None
22 | self.__panoDepth = data.Resource('Depth')
23 | self.__panoPointCloud = None
24 |
25 | ### Pano Lines and Omap
26 | self.__panoLines = data.Resource('Lines')
27 | self.__panoOmap = data.Resource('Omap')
28 |
29 | ### Annotation
30 | self.label = data.Annotation(self)
31 | self.selectObjs = []
32 |
33 | def initScene(self, filePath, depthPred=None):
34 |
35 | self.__mainDirPath = os.path.dirname(os.path.realpath(filePath))
36 | self.__panoColor.path = filePath
37 |
38 | self.__initColor()
39 |
40 | self.depthPred = depthPred
41 | self.__initDepth()
42 |
43 | self.__initLines()
44 | self.__initOmap()
45 |
46 | self.__checkIsAvailable()
47 |
48 | return self.isAvailable()
49 |
50 | def initEmptyScene(self):
51 |
52 | self.__panoColor.data = np.zeros([100, 100, 3])
53 | self.__panoDepth.data = np.zeros([100, 100])
54 |
55 | def initLabel(self):
56 | self.label.calcInitLayout()
57 |
58 | def loadLabel(self, path):
59 | utils.loadLabelByJson(path, self)
60 |
61 | def loadOldLabel(self, path):
62 | path = path.replace('clean_data', 'preds_data/layout_preds')
63 | path = path.replace('.png', '.json')
64 | utils.loadOldLabelByJson(path, self)
65 |
66 | def __initColor(self):
67 | self.__panoColor.initByImageFile(self.__panoColor.path)
68 |
69 | def __initDepth(self):
70 |
71 | panoDepthPath = os.path.join(self.__mainDirPath,
72 | pm.depthFileDefaultName)
73 | isExist = self.__panoDepth.initByImageFileDepth(panoDepthPath)
74 | if isExist:
75 | depthData = self.__panoDepth.data.astype(
76 | float) / 4000 #For Matterport3d GT
77 | self.__panoDepth.data = depthData
78 | else:
79 | if self.depthPred:
80 | # pred = self.depthPred.predict(self.__panoColor.image)
81 | pred = self.depthPred(self.__panoColor.path)
82 | self.__panoDepth.data = pred
83 | else:
84 | self.__panoDepth.data = np.ones((256, 512))
85 |
86 | def __initLines(self):
87 |
88 | panoLinesPath = os.path.join(self.__mainDirPath,
89 | pm.linesFileDefaultName)
90 | isExist = self.__panoLines.initByImageFile(panoLinesPath)
91 |
92 | #dilation & BLur data
93 | if isExist:
94 | self.__panoLines.data /= 255
95 | dataDilate = utils.imageDilation(self.__panoLines.data, 8)
96 | dataBlur = utils.imageGaussianBlur(dataDilate, 10)
97 | self.__panoLines.pixmap = utils.data2Pixmap(dataBlur)
98 |
99 | def __initOmap(self):
100 |
101 | panoOmapPath = os.path.join(self.__mainDirPath, pm.omapFileDefaultName)
102 | isExist = self.__panoOmap.initByImageFile(panoOmapPath)
103 | if isExist:
104 | self.__panoOmap.data /= 255
105 | #self.__panoOmap.data[(self.__panoOmap.data[:,:,0]>0)] = [1,0,0]
106 | #self.__panoOmap.data[(self.__panoOmap.data[:,:,1]>0)] = [0,1,0]
107 | #self.__panoOmap.data[(self.__panoOmap.data[:,:,2]>0)] = [0,0,1]
108 | self.__panoOmap.pixmap = utils.data2Pixmap(self.__panoOmap.data)
109 |
110 | #####
111 | #Getter & Setter
112 | #####
113 |
114 | #Available
115 | def __checkIsAvailable(self):
116 |
117 | if self.__panoColor.image and (self.__panoDepth.data is not None):
118 | self.__isAvailable = True
119 | else:
120 | self.__isAvailable = False
121 |
122 | def isAvailable(self):
123 | return self.__isAvailable
124 |
125 | #Mainwindows
126 | def getMainWindows(self):
127 | return self.__mainWindows
128 |
129 | def getCurrentPath(self):
130 | filePath = self.__panoColor.path
131 | curPath = os.path.dirname(filePath) + '/'
132 | return curPath
133 |
134 | #Pano Color
135 | def getPanoColorPath(self):
136 | return self.__panoColor.path
137 |
138 | def getPanoColorImage(self):
139 | return self.__panoColor.image
140 |
141 | def getPanoColorPixmap(self):
142 | return self.__panoColor.pixmap
143 |
144 | def getPanoColorData(self):
145 | return self.__panoColor.data
146 |
147 | #Pano Depth
148 | def getPanoDepthData(self):
149 | return self.__panoDepth.data
150 |
151 | #Pano lines and Omap
152 | def getPanoLinesData(self):
153 | return self.__panoLines.data
154 |
155 | def getPanoLinesPixmap(self):
156 | return self.__panoLines.pixmap
157 |
158 | def getPanoOmapData(self):
159 | return self.__panoOmap.data
160 |
161 | def getPanoOmapPixmap(self):
162 | return self.__panoOmap.pixmap
163 |
164 | #Pano Point Cloud
165 | def setPanoPointCloud(self, pc):
166 | self.__panoPointCloud = pc
167 | return self.__panoPointCloud
168 |
169 | def getPanoPointCloud(self):
170 | return self.__panoPointCloud
171 |
172 | def getSelectObjs(self, objType=None):
173 | objs = []
174 | typeDict = {
175 | 'GeoPoint': data.GeoPoint,
176 | 'WallPlane': data.WallPlane,
177 | 'FloorPlane': data.FloorPlane,
178 | 'Object2D': data.Object2D
179 | }
180 | if objType:
181 | for obj in self.selectObjs:
182 | if type(obj) == typeDict[objType]:
183 | objs.append(obj)
184 | return objs
185 |         elif objType is None:
186 | return self.selectObjs
187 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/WallPlane.py:
--------------------------------------------------------------------------------
1 | import random
2 |
3 | import PanoAnnotator.data as data
4 | import PanoAnnotator.configs.Params as pm
5 | import PanoAnnotator.utils as utils
6 |
7 | wpInstanceCount = 0
8 |
9 |
10 | class WallPlane(object):
11 | def __init__(self, scene, gPoints):
12 |
13 | self.__scene = scene
14 |
15 |         if len(gPoints) < 2:
16 |             print("Need at least two points")
17 |
18 | self.gPoints = gPoints
19 | self.attached = []
20 | self.color = (random.random(), random.random(), random.random())
21 |
22 | self.normal = (0, 0, 0)
23 | self.planeEquation = (0, 0, 0, 0)
24 | self.width = 0
25 |
26 | self.corners = []
27 | self.edges = []
28 | self.bbox2d = ((0, 0), (1, 1))
29 |
30 | self.id = 0
31 |
32 | self.init()
33 |
34 | global wpInstanceCount
35 | wpInstanceCount += 1
36 | self.id = wpInstanceCount
37 |
38 | def init(self):
39 |
40 | self.updateGeometry()
41 |
42 | def moveByNormal(self, val):
43 |
44 | vec = utils.vectorMultiplyC(self.normal, val)
45 | for gp in self.gPoints:
46 | gp.moveByVector(vec)
47 |
48 | for obj2d in self.attached:
49 | obj2d.moveByNormal(val)
50 |
51 | self.updateGeometry()
52 |
53 | def updateGeometry(self):
54 |
55 | self.updateCorners()
56 | self.updateEdges()
57 | self.updateBbox2d()
58 |
59 | self.normal = utils.pointsNormal(self.corners[0].xyz,
60 | self.corners[1].xyz,
61 | self.corners[3].xyz)
62 | self.color = utils.normal2color(self.normal)
63 | self.planeEquation = utils.planeEquation(self.normal,
64 | self.corners[0].xyz)
65 | self.width = utils.pointsDistance(self.corners[0].xyz,
66 | self.corners[1].xyz)
67 |
68 | for obj2d in self.attached:
69 | obj2d.updateGeometry()
70 |
71 | def updateCorners(self):
72 |
73 | gps = self.gPoints
74 | scene = self.__scene
75 | cameraH = scene.label.getCameraHeight()
76 | cam2ceilH = scene.label.getCam2CeilHeight()
77 |
78 | self.corners = [
79 | data.GeoPoint(scene, None,
80 | (gps[0].xyz[0], cam2ceilH, gps[0].xyz[2])),
81 | data.GeoPoint(scene, None,
82 | (gps[1].xyz[0], cam2ceilH, gps[1].xyz[2])),
83 | data.GeoPoint(scene, None,
84 | (gps[1].xyz[0], -cameraH, gps[1].xyz[2])),
85 | data.GeoPoint(scene, None,
86 | (gps[0].xyz[0], -cameraH, gps[0].xyz[2]))
87 | ]
88 |
89 | def updateEdges(self):
90 |
91 | scene = self.__scene
92 | self.edges = [
93 | data.GeoEdge(scene, (self.corners[0], self.corners[1])),
94 | data.GeoEdge(scene, (self.corners[1], self.corners[2])),
95 | data.GeoEdge(scene, (self.corners[2], self.corners[3])),
96 | data.GeoEdge(scene, (self.corners[3], self.corners[0]))
97 | ]
98 |
99 | def updateBbox2d(self):
100 |
101 | coords = []
102 | for c in [e.coords for e in self.edges]:
103 | coords += c
104 | self.bbox2d = utils.imagePointsBox(coords)
105 |
106 | #manh only
107 | def checkRayHit(self, vec, orig=(0, 0, 0)):
108 |
109 | point = utils.vectorPlaneHit(vec, self.planeEquation)
110 | if point is None:
111 | return False, None
112 |
113 | cs = self.corners
114 | if cs[2].xyz[1] <= point[1] <= cs[0].xyz[1]:
115 |
116 | p1 = (point[0], cs[0].xyz[1], point[2])
117 | dis1 = utils.pointsDistance(p1, cs[0].xyz)
118 | dis2 = utils.pointsDistance(p1, cs[1].xyz)
119 | dis3 = utils.pointsDistance(cs[0].xyz, cs[1].xyz)
120 |
121 | if dis1 + dis2 <= dis3 * 1.0005:
122 | return True, point
123 |
124 | return False, None
125 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .Scene import *
2 | from .Annotation import *
3 | from .GeoPoint import *
4 | from .GeoEdge import *
5 | from .Resource import *
6 | from .WallPlane import *
7 | from .FloorPlane import *
8 | from .Object2D import *
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/__init__.py:
--------------------------------------------------------------------------------
1 | # from .depth.DepthPred import *
2 | from .push.PushPred import *
3 | from .push.PushPredLite import *
4 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/depth/DepthPred.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow.compat.v1 as tf
3 | tf.disable_v2_behavior()
4 | from PIL import Image
5 |
6 | from .models import *
7 |
8 | MODEL_FILE = 'PanoAnnotator/estimator/depth/models/trained/NYU_ResNet-UpProj_pano_512_20_epoch.ckpt'
9 |
10 | IMAGE_SIZE = [512, 1024]
11 | DEPTH_SIZE = [256, 512]
12 |
13 | INPUT_SHAPE = [None, IMAGE_SIZE[0], IMAGE_SIZE[1], 3]
14 | OUTPUT_SHAPE = [None, DEPTH_SIZE[0], DEPTH_SIZE[1], 1]
15 |
16 | BATCH_SIZE = 4
17 |
18 | DEPTH_RANGE = [0, 10]
19 |
20 |
21 | class DepthPred(object):
22 | def __init__(self, parent=None):
23 |
24 | self.__isAvailable = False
25 |
26 | self.input_node = tf.placeholder(tf.float32, shape=INPUT_SHAPE)
27 | self.net = ResNet50UpProj({'data': self.input_node}, BATCH_SIZE, 1,
28 | False)
29 |
30 | self.y_predict = self.net.get_output()
31 | self.y_label = tf.placeholder(tf.float32,
32 | shape=OUTPUT_SHAPE,
33 | name="y_label")
34 |
35 | self.sess = tf.Session()
36 | self.saver = tf.train.Saver()
37 |
38 | self.initEstimator()
39 |
40 | def initEstimator(self):
41 |
42 | init_op = tf.variables_initializer(tf.global_variables())
43 | print("sess init")
44 | self.sess.run(init_op)
45 | print("model loading")
46 | self.saver.restore(self.sess, MODEL_FILE)
47 | print("done")
48 | self.__isAvailable = True
49 |
50 | def predict(self, image):
51 | #image : PIL.Image
52 |
53 | image = image.resize((IMAGE_SIZE[1], IMAGE_SIZE[0]), Image.ANTIALIAS)
54 | image_data = np.asarray(image, dtype="float32")
55 | image_data = image_data[:, :, 0:3]
56 | image_data = np.expand_dims(image_data, axis=0)
57 |
58 | input_data = np.zeros(
59 | (BATCH_SIZE, INPUT_SHAPE[1], INPUT_SHAPE[2], INPUT_SHAPE[3]))
60 | input_data[0] = image_data
61 | #print(input_data.shape)
62 |
63 | pred = self.sess.run(self.net.get_output(),
64 | feed_dict={self.input_node: input_data})
65 | #print(pred.shape)
66 |
67 | return np.squeeze(pred[0])
68 |
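
A minimal sketch of how this predictor could be invoked, assuming the TensorFlow 1.x checkpoint referenced by MODEL_FILE is present and the PanoAnnotator package is on the import path; the image filename is only illustrative:

    from PIL import Image
    from PanoAnnotator.estimator.depth.DepthPred import DepthPred

    predictor = DepthPred()              # builds the graph and restores the checkpoint
    pano = Image.open('pano_color.png')  # hypothetical equirectangular color panorama
    depth = predictor.predict(pano)      # numpy array of shape (256, 512)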
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/depth/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .fcrn import ResNet50UpProj
2 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/depth/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .visualization import show_rgb
2 | from .visualization import show_depth
3 | from .visualization import show_list_curve
4 |
5 | from .evaluation import show_test_gt
6 | from .evaluation import show_test_pred
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/depth/utils/evaluation.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 |
4 | from ..models import *
5 | import data_loader
6 | import utils
7 | import configs
8 | params = configs.get_param()
9 |
10 |
11 | def show_test_gt(sess, data_test_iterator, num):
12 |
13 | next_element = data_test_iterator.get_next()
14 | sess.run(data_test_iterator.initializer)
15 |
16 | for i in range(0, num):
17 | batch = sess.run(next_element)
18 | utils.show_rgb(batch[0], True, "test/gt_rgb_{0}_".format(i))
19 | utils.show_depth(batch[1], True, "test/gt_{0}_".format(i))
20 |
21 |
22 | def show_test_pred(sess, net, input_node, data_test_iterator, epoch, num):
23 |
24 | next_element = data_test_iterator.get_next()
25 | sess.run(data_test_iterator.initializer)
26 |
27 | for i in range(0, num):
28 | batch = sess.run(next_element)
29 | pred = sess.run(net.get_output(), feed_dict={input_node: batch[0]})
30 |
31 |         utils.show_depth(pred, True, "test/epoch{0}_{1}_".format(epoch, i))
32 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/depth/utils/visualization.py:
--------------------------------------------------------------------------------
1 | from matplotlib import pyplot as plt
2 | from PIL import Image
3 |
4 | plt.ioff()
5 |
6 |
7 | def show_rgb(img, save, output=''):
8 | for i in range(0, img.shape[0]):
9 | fig = plt.figure()
10 | plt.imshow(img[i] / 255.0)
11 | if (save):
12 | plt.savefig(output + str(i) + ".png")
13 | else:
14 | plt.show()
15 | plt.close(fig)
16 |
17 |
18 | def show_depth(depth, save, output=''):
19 | for i in range(0, depth.shape[0]):
20 | fig = plt.figure()
21 | ii = plt.imshow(depth[i, :, :, 0], interpolation='nearest', cmap='jet')
22 | plt.clim(0, 6)
23 | fig.colorbar(ii)
24 | if (save):
25 | plt.savefig(output + str(i) + ".png")
26 | else:
27 | plt.show()
28 | plt.close(fig)
29 |
30 |
31 | def show_list_curve(epoch_list, data_list, ylabel, save, output=''):
32 |
33 | plt.plot(epoch_list, data_list)
34 | plt.xlabel("Epochs")
35 | plt.ylabel(ylabel)
36 | plt.savefig(output)
37 |
38 | plt.close()
39 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/estimator/push/PushPredLite.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import matplotlib.pyplot as pyplot
4 | from skimage import transform
5 |
6 | import PanoAnnotator.data as data
7 | import PanoAnnotator.utils as utils
8 |
9 |
10 | class PushPredLite(object):
11 | def __init__(self, scene):
12 |
13 | self.__scene = scene
14 | self.size = [128, 256, 3] #[256, 512, 3]
15 |
16 | def optimizeWall(self, wall, val):
17 |
18 | dist = abs(wall.planeEquation[3]) / 10
19 | step = dist if val >= 0 else -dist
20 |
21 | sampleList = [step * (i + 1) for i in range(5)]
22 |
23 | errList = []
24 | tmpPlane = self.genTmpWall(wall)
25 | for step in sampleList:
26 | tmpPlane.moveByNormal(step)
27 | err = self.calcMapError(tmpPlane)
28 | errList.append(err)
29 | minVal = min(errList)
30 | minIdx = errList.index(minVal)
31 | moveVal = sampleList[minIdx]
32 |
33 | self.__scene.label.moveWallByNormal(wall, moveVal)
34 |
35 | def genTmpWall(self, wall):
36 |
37 | gp1 = data.GeoPoint(self.__scene, None, wall.gPoints[0].xyz)
38 | gp2 = data.GeoPoint(self.__scene, None, wall.gPoints[1].xyz)
39 | wall = data.WallPlane(self.__scene, [gp1, gp2])
40 |
41 | return wall
42 |
43 | def genEdgeMap(self, wall):
44 |
45 | edgeMap = np.zeros(self.size)
46 | utils.imageDrawWallEdge(edgeMap, wall)
47 | edgeMapDilation = utils.imageDilation(edgeMap, 1)
48 | edgeMapBlur = utils.imageGaussianBlur(edgeMapDilation, 2)
49 |
50 | return edgeMapBlur
51 |
52 | def genNormalMap(self, wall):
53 |
54 | normalMap = np.zeros(self.size)
55 | utils.imageDrawWallFace(normalMap, wall)
56 |
57 | return normalMap
58 |
59 | def genBbox2d(self, wall):
60 |
61 | size = (self.size[0], self.size[1])
62 | sizeT = utils.posTranspose(size)
63 | extend = 10
64 |
65 | bbox = wall.bbox2d
66 | poslt = utils.posTranspose(utils.coords2pos(bbox[0], sizeT))
67 | posrb = utils.posTranspose(utils.coords2pos(bbox[1], sizeT))
68 |
69 | poslt = (poslt[0] - extend, poslt[1] - extend)
70 | poslt = utils.checkImageBoundary(poslt, size)
71 | posrb = (posrb[0] + extend, posrb[1] + extend)
72 | posrb = utils.checkImageBoundary(posrb, size)
73 |
74 | return poslt, posrb
75 |
76 | def calcMapError(self, wall):
77 |
78 | size = (self.size[0], self.size[1])
79 | poslt, posrb = self.genBbox2d(wall)
80 |
81 | normalMap = self.genNormalMap(wall)
82 | normalMapRoi = utils.imageROI(normalMap, poslt, posrb)
83 |
84 | oMap = self.__scene.getPanoOmapData()
85 | oMapR = utils.imageResize(oMap, size)
86 | oMapRoi = utils.imageROI(oMapR, poslt, posrb)
87 |
88 | omapMSE = utils.imagesMSE(normalMapRoi, oMapRoi, size)
89 |
90 | edgeMap = self.genEdgeMap(wall)
91 | edgeMapRoi = utils.imageROI(edgeMap, poslt, posrb)
92 |
93 | linesMap = self.__scene.getPanoLinesData()
94 | linesMapR = utils.imageResize(linesMap, size)
95 | linesMapRoi = utils.imageROI(linesMapR, poslt, posrb)
96 |
97 | lineMSE = utils.imagesMSE(edgeMapRoi, linesMapRoi, size)
98 |
99 | #utils.showImage(edgeMapRoi)
100 | #utils.showImage(linesMapRoi)
101 | #print('MSE lines:{0:.3f} normal:{1:.3f}'.format(lineMSE,omapMSE))
102 |
103 | mix = omapMSE + lineMSE
104 | return mix
105 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/figs/outputmaps.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/figs/outputmaps.jpg
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/figs/teasor.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/figs/teasor.jpg
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/.gitignore:
--------------------------------------------------------------------------------
1 | /*.pyc
2 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Hmovetoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Hmovetoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Hsepartoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Hsepartoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Vmovetoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Vmovetoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Vsepartoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/Vsepartoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_closed-on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_closed-on.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_closed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_closed.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_open-on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_open-on.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_open.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/branch_open.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_checked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_indeterminate_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/checkbox_unchecked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close-hover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close-hover.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close-pressed.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/close.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/down_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/down_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/down_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/down_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/left_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/left_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/left_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/left_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_checked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/radio_unchecked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/right_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/right_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/right_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/right_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/sizegrip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/sizegrip.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-branch-end.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-branch-end.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-branch-more.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-branch-more.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-vline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/stylesheet-vline.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/transparent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/transparent.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/undock.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/undock.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/up_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/up_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/up_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/PanoAnnotator/qdarkstyle/rc/up_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/qdarkstyle/style.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |   <qresource prefix="qss_icons">
3 |     <file>rc/up_arrow_disabled.png</file>
4 |     <file>rc/Hmovetoolbar.png</file>
5 |     <file>rc/stylesheet-branch-end.png</file>
6 |     <file>rc/branch_closed-on.png</file>
7 |     <file>rc/stylesheet-vline.png</file>
8 |     <file>rc/branch_closed.png</file>
9 |     <file>rc/branch_open-on.png</file>
10 |     <file>rc/transparent.png</file>
11 |     <file>rc/right_arrow_disabled.png</file>
12 |     <file>rc/sizegrip.png</file>
13 |     <file>rc/close.png</file>
14 |     <file>rc/close-hover.png</file>
15 |     <file>rc/close-pressed.png</file>
16 |     <file>rc/down_arrow.png</file>
17 |     <file>rc/Vmovetoolbar.png</file>
18 |     <file>rc/left_arrow.png</file>
19 |     <file>rc/stylesheet-branch-more.png</file>
20 |     <file>rc/up_arrow.png</file>
21 |     <file>rc/right_arrow.png</file>
22 |     <file>rc/left_arrow_disabled.png</file>
23 |     <file>rc/Hsepartoolbar.png</file>
24 |     <file>rc/branch_open.png</file>
25 |     <file>rc/Vsepartoolbar.png</file>
26 |     <file>rc/down_arrow_disabled.png</file>
27 |     <file>rc/undock.png</file>
28 |     <file>rc/checkbox_checked_disabled.png</file>
29 |     <file>rc/checkbox_checked_focus.png</file>
30 |     <file>rc/checkbox_checked.png</file>
31 |     <file>rc/checkbox_indeterminate.png</file>
32 |     <file>rc/checkbox_indeterminate_focus.png</file>
33 |     <file>rc/checkbox_unchecked_disabled.png</file>
34 |     <file>rc/checkbox_unchecked_focus.png</file>
35 |     <file>rc/checkbox_unchecked.png</file>
36 |     <file>rc/radio_checked_disabled.png</file>
37 |     <file>rc/radio_checked_focus.png</file>
38 |     <file>rc/radio_checked.png</file>
39 |     <file>rc/radio_unchecked_disabled.png</file>
40 |     <file>rc/radio_unchecked_focus.png</file>
41 |     <file>rc/radio_unchecked.png</file>
42 |   </qresource>
43 |   <qresource prefix="qss">
44 |     <file>style.qss</file>
45 |   </qresource>
46 | </RCC>
47 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/scripts/annotator_env.yml:
--------------------------------------------------------------------------------
1 | name: Annotator
2 | channels:
3 | - defaults
4 | dependencies:
5 | - _tflow_1100_select=0.0.1=gpu
6 | - absl-py=0.4.1=py35_0
7 | - astor=0.7.1=py35_0
8 | - blas=1.0=mkl
9 | - ca-certificates=2018.03.07=0
10 | - certifi=2018.8.24=py35_1
11 | - cloudpickle=0.5.5=py35_0
12 | - cudatoolkit=9.0=1
13 | - cudnn=7.1.4=cuda9.0_0
14 | - cycler=0.10.0=py35hcc71164_0
15 | - dask-core=0.19.0=py35_0
16 | - decorator=4.3.0=py35_0
17 | - freetype=2.9.1=ha9979f8_1
18 | - gast=0.2.0=py35_0
19 | - grpcio=1.12.1=py35h1a1b453_0
20 | - icc_rt=2017.0.4=h97af966_0
21 | - icu=58.2=ha66f8fd_1
22 | - imageio=2.3.0=py35_0
23 | - intel-openmp=2018.0.3=0
24 | - jpeg=9b=hb83a4c4_2
25 | - kiwisolver=1.0.1=py35h6538335_0
26 | - libpng=1.6.34=h79bbb47_0
27 | - libprotobuf=3.6.0=h1a1b453_0
28 | - libtiff=4.0.9=h36446d0_2
29 | - markdown=2.6.11=py35_0
30 | - matplotlib=2.2.3=py35hd159220_0
31 | - mkl=2018.0.3=1
32 | - mkl_fft=1.0.4=py35h1e22a9b_1
33 | - mkl_random=1.0.1=py35h77b88f5_1
34 | - networkx=2.1=py35_0
35 | - numpy=1.15.1=py35ha559c80_0
36 | - numpy-base=1.15.1=py35h8128ebf_0
37 | - olefile=0.45.1=py35_0
38 | - openssl=1.0.2p=hfa6e2cd_0
39 | - pillow=5.2.0=py35h08bbbbd_0
40 | - pip=10.0.1=py35_0
41 | - protobuf=3.6.0=py35he025d50_0
42 | - pyopengl=3.1.1a1=py35_0
43 | - pyparsing=2.2.0=py35_1
44 | - pyqt=5.9.2=py35ha878b3d_0
45 | - python=3.5.6=he025d50_0
46 | - python-dateutil=2.7.3=py35_0
47 | - pytz=2018.5=py35_0
48 | - pywavelets=1.0.0=py35h452e1ab_0
49 | - qt=5.9.6=vc14h62aca36_0
50 | - scikit-image=0.14.0=py35h6538335_1
51 | - scipy=1.1.0=py35h4f6bf74_1
52 | - setuptools=40.2.0=py35_0
53 | - sip=4.19.12=py35h6538335_0
54 | - six=1.11.0=py35_1
55 | - sqlite=3.24.0=h7602738_0
56 | - tensorboard=1.10.0=py35he025d50_0
57 | - tensorflow=1.10.0=gpu_py35ha5d5ef7_0
58 | - tensorflow-base=1.10.0=gpu_py35h6e53903_0
59 | - tensorflow-gpu=1.10.0=hf154084_0
60 | - termcolor=1.1.0=py35_1
61 | - tk=8.6.8=hfa6e2cd_0
62 | - toolz=0.9.0=py35_0
63 | - tornado=5.1=py35hfa6e2cd_0
64 | - vc=14=h0510ff6_3
65 | - vs2015_runtime=14.0.25123=3
66 | - werkzeug=0.14.1=py35_0
67 | - wheel=0.31.1=py35_0
68 | - wincertstore=0.2=py35hfebbdb8_0
69 | - zlib=1.2.11=h8395fce_2
70 | - pip:
71 | - dask==0.19.0
72 | - mkl-fft==1.0.4
73 | - mkl-random==1.0.1
74 | prefix: C:\Users\SunDa\AppData\Local\conda\conda\envs\Annotator
75 |
76 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/scripts/json2maps.py:
--------------------------------------------------------------------------------
1 | import sys
2 | sys.path.insert(0, "../")
3 | import os
4 | import argparse
5 |
6 | import configs.Params as pm
7 | import data
8 | import utils
9 |
10 | if __name__ == '__main__':
11 |
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument('-i', required=True)
14 | args = parser.parse_args()
15 |
16 | labelPath = args.i
17 | outputPath = os.path.dirname(args.i)
18 |
19 | scene = data.Scene(None)
20 | scene.initEmptyScene()
21 |
22 | utils.loadLabelByJson(labelPath, scene)
23 | utils.saveSceneAsMaps(outputPath, scene)
24 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/GeometryTool.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 |
4 |
5 | def vectorAdd(v1, v2):
6 |
7 | ans = (v1[0] + v2[0], v1[1] + v2[1], v1[2] + v2[2])
8 | return ans
9 |
10 |
11 | def vectorSum(vList):
12 |
13 | ans = (0, 0, 0)
14 | for v in vList:
15 | ans = vectorAdd(ans, v)
16 | return ans
17 |
18 |
19 | def vectorCross(v1, v2):
20 |
21 | v1 = list(v1)
22 | v2 = list(v2)
23 | ans = tuple(np.cross(v1, v2))
24 | return ans
25 |
26 |
27 | def vectorDot(v1, v2):
28 |
29 | ans = v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]
30 | return ans
31 |
32 |
33 | def vectorMultiplyC(v1, C):
34 |
35 | ans = (v1[0] * C, v1[1] * C, v1[2] * C)
36 | return ans
37 |
38 |
39 | def vectorDividedC(v1, C):
40 |
41 | ans = (float(v1[0]) / C, float(v1[1]) / C, float(v1[2]) / C)
42 | return ans
43 |
44 |
45 | def pointsMean(pList):
46 |
47 | sum_ = vectorSum(pList)
48 | ans = vectorDividedC(sum_, len(pList))
49 | return ans
50 |
51 |
52 | def pointsDistance(p1, p2):
53 |
54 | vec = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]
55 | dis = math.sqrt(
56 | math.pow(vec[0], 2) + math.pow(vec[1], 2) + math.pow(vec[2], 2))
57 | return dis
58 |
59 |
60 | def pointsDirection(p1, p2):
61 |
62 | vec = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]
63 | scalar = float(np.linalg.norm(vec))
64 | if not scalar == 0:
65 | ans = (vec[0] / scalar, vec[1] / scalar, vec[2] / scalar)
66 | else:
67 | ans = (vec[0], vec[1], vec[2])
68 | return ans
69 |
70 |
71 | def pointsDirectionPow(p1, p2, pow_):
72 | vec = [p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]
73 | ans = (math.pow(vec[0], pow_), math.pow(vec[1],
74 | pow_), math.pow(vec[2], pow_))
75 | return ans
76 |
77 |
78 | def pointsNormal(c, p1, p2):
79 |
80 | vec1 = pointsDirection(c, p1)
81 | vec2 = pointsDirection(c, p2)
82 | normal = vectorCross(vec1, vec2)
83 | return normal
84 |
85 |
86 | def pointsSample(p1, p2, rate):
87 |
88 | ans = [p1]
89 | vec = pointsDirectionPow(p1, p2, 1)
90 | step = vectorDividedC(vec, rate)
91 |
92 | for i in range(1, rate):
93 | xyz = vectorAdd(p1, vectorMultiplyC(step, i))
94 | ans.append(xyz)
95 | ans.append(p2)
96 | return ans
97 |
98 |
99 | def planeEquation(normal, p):
100 |
101 | d = -vectorDot(normal, p)
102 | equation = normal + (d, )
103 | return equation
104 |
105 |
106 | def vectorPlaneHit(vec, plane):
107 |
108 | normal = (plane[0], plane[1], plane[2])
109 | nv = vectorDot(normal, vec)
110 | d = plane[3]
111 |
112 | if nv == 0:
113 | return None
114 | t = -d / nv
115 | if t < 0:
116 | return None
117 | point = vectorMultiplyC(vec, t)
118 | return point
119 |
120 |
121 | def normal2color(normal):
122 |
123 | vec = vectorMultiplyC(normal, -0.5)
124 | color = vectorAdd(vec, (0.5, 0.5, 0.5))
125 |
126 | return color
127 |
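
planeEquation and vectorPlaneHit compose into a simple ray cast from the origin (the annotator's camera position). A small worked example, assuming the module's dependencies (numpy) are installed:

    from PanoAnnotator.utils.GeometryTool import planeEquation, vectorPlaneHit

    normal = (0.0, 0.0, 1.0)
    plane = planeEquation(normal, (0.0, 0.0, -2.0))   # d = -dot(n, p) = 2 -> (0, 0, 1, 2)
    hit = vectorPlaneHit((0.0, 0.0, -1.0), plane)     # t = -d / dot(n, vec) = 2
    print(hit)                                        # (0.0, 0.0, -2.0)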
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/ImageTool.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import os
4 |
5 | import PanoAnnotator.utils as utils
6 | import PanoAnnotator.configs.Params as pm
7 |
8 | from PyQt5.QtGui import QImage, QPixmap
9 | from skimage import morphology, filters, draw, transform
10 | from PIL import Image
11 |
12 |
13 | def imageROI(data, lt, rb):
14 |
15 | regionDate = data[lt[0]:rb[0], lt[1]:rb[1]]
16 | return regionDate
17 |
18 |
19 | def imageRegionMean(data, center, steps):
20 |
21 | lt, rb = imageRegionBox(center, steps, data.shape)
22 | roi = imageROI(data, lt, rb)
23 | mean = np.nanmean(roi)
24 | return mean
25 |
26 |
27 | def imageRegionBox(center, steps, size):
28 |
29 | lt = (center[0] - steps[0], center[1] - steps[1])
30 | rb = (center[0] + steps[0], center[1] + steps[1])
31 |
32 | lt = checkImageBoundary(lt, size)
33 | rb = checkImageBoundary(rb, size)
34 | return lt, rb
35 |
36 |
37 | def imagePointsBox(posList):
38 |
39 | X = [pos[0] for pos in posList]
40 | Y = [pos[1] for pos in posList]
41 |
42 | lt = (min(X), min(Y))
43 | rb = (max(X), max(Y))
44 | return lt, rb
45 |
46 |
47 | def checkImageBoundary(pos, size):
48 |
49 | x = sorted([0, pos[0], size[0]])[1]
50 | y = sorted([0, pos[1], size[1]])[1]
51 | return (x, y)
52 |
53 |
54 | def data2Pixmap(data):
55 |
56 | imgData = data * 255
57 | imgData = imgData.astype(dtype=np.uint8)
58 | image = QImage(imgData, data.shape[1], data.shape[0], QImage.Format_RGB888)
59 | pixmap = QPixmap.fromImage(image)
60 | return pixmap
61 |
62 |
63 | def imageResize(data, size):
64 |
65 | dataR = transform.resize(data, size, mode='constant')
66 | return dataR
67 |
68 |
69 | def imageDilation(data, rad):
70 |
71 | ans = np.zeros(data.shape, dtype=np.float)
72 | for i in range(data.shape[2]):
73 | channel = data[:, :, i]
74 | ans[:, :, i] = morphology.dilation(channel, morphology.diamond(rad))
75 | return ans
76 |
77 |
78 | def imageGaussianBlur(data, sigma):
79 |
80 | ans = np.zeros(data.shape, dtype=np.float)
81 | for i in range(data.shape[2]):
82 | channel = data[:, :, i]
83 | ans[:, :, i] = filters.gaussian(channel, sigma)
84 | return ans
85 |
86 |
87 | def imagesMSE(data1, data2, size=None):
88 |
89 | if not data1.shape == data2.shape:
90 | print('size error')
91 | #data1r = transform.resize(data1, size, mode='constant')
92 | #data2r = transform.resize(data2, size, mode='constant')
93 |
94 | #data1r[data1r==0] = np.nan
95 | #data2r[data2r==0] = np.nan
96 | #mse = np.nanmean((data1r - data2r)**2)
97 | mse = np.mean((data1 - data2)**2)
98 |
99 | return mse
100 |
101 |
102 | def imageDrawLine(data, p1, p2, color):
103 |
104 | rr, cc = draw.line(p1[1], p1[0], p2[1], p2[0])
105 | draw.set_color(data, [rr, cc], list(color))
106 |
107 |
108 | def imageDrawPolygon(data, points, color):
109 |
110 | X = np.array([p[0] for p in points])
111 | Y = np.array([p[1] for p in points])
112 | rr, cc = draw.polygon(Y, X)
113 | draw.set_color(data, [rr, cc], list(color))
114 |
115 |
116 | def imageDrawWallDepth(data, polygon, wall):
117 |
118 | size = (data.shape[1], data.shape[0])
119 | polyx = np.array([p[0] for p in polygon])
120 | polyy = np.array([p[1] for p in polygon])
121 |
122 | posy, posx = draw.polygon(polyy, polyx)
123 |
124 | for i in range(len(posy)):
125 | coords = utils.pos2coords((posx[i], posy[i]), size)
126 | vec = utils.coords2xyz(coords, 1)
127 |
128 | point = utils.vectorPlaneHit(vec, wall.planeEquation)
129 | depth = 0 if point is None else utils.pointsDistance((0, 0, 0), point)
130 | color = (depth, depth, depth)
131 | draw.set_color(data, [posy[i], posx[i]], list(color))
132 |
133 |
134 | def showImage(image):
135 |
136 | plt.figure()
137 | plt.imshow(image)
138 | plt.show()
139 |
140 |
141 | def saveImage(image, path):
142 |
143 | im = Image.fromarray(np.uint8(image * 255))
144 | im.save(path)
145 |
146 |
147 | def saveDepth(depth, path):
148 |
149 | depth = depth[:, :, 0]
150 | data = np.uint16(depth * 4000)
151 |
152 | array_buffer = data.tobytes()
153 | img = Image.new("I", data.T.shape)
154 | img.frombytes(array_buffer, 'raw', "I;16")
155 | img.save(path)
156 |
157 |
158 | def saveMask(mask, path):
159 |
160 | mask = mask[:, :, 0]
161 | im = Image.fromarray(np.uint8(mask * 255))
162 | im.save(path)
163 |
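
imageRegionMean (used by GeoPoint to smooth its depth sample) averages a clamped window around a pixel position. A self-contained sketch of the same computation on a synthetic array, without the boundary clamping:

    import numpy as np

    data = np.arange(36, dtype=float).reshape(6, 6)
    center, steps = (3, 3), (1, 1)
    lt = (center[0] - steps[0], center[1] - steps[1])   # (2, 2)
    rb = (center[0] + steps[0], center[1] + steps[1])   # (4, 4)
    print(np.nanmean(data[lt[0]:rb[0], lt[1]:rb[1]]))   # 17.5, what imageRegionMean returns here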
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/PanoTool.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 |
4 | import PanoAnnotator.utils as utils
5 | import PanoAnnotator.configs.Params as pm
6 |
7 |
8 | def coords2uv(coords):
9 | #coords: 0.0 - 1.0
10 | coords = (coords[0] - 0.5, coords[1] - 0.5)
11 |
12 | uv = (coords[0] * 2 * math.pi, -coords[1] * math.pi)
13 |
14 | return uv
15 |
16 |
17 | def uv2coords(uv):
18 |
19 | coordsX = uv[0] / (2 * math.pi) + 0.5
20 | coordsY = -uv[1] / math.pi + 0.5
21 |
22 | coords = (coordsX, coordsY)
23 |
24 | return coords
25 |
26 |
27 | def uv2xyz(uv, N):
28 |
29 | x = math.cos(uv[1]) * math.sin(uv[0])
30 | y = math.sin(uv[1])
31 | z = math.cos(uv[1]) * math.cos(uv[0])
32 | ##Flip Z axis
33 | xyz = (N * x, N * y, -N * z)
34 |
35 | return xyz
36 |
37 |
38 | def xyz2uv(xyz):
39 |
40 | normXZ = math.sqrt(math.pow(xyz[0], 2) + math.pow(xyz[2], 2))
41 | if normXZ < 0.000001:
42 | normXZ = 0.000001
43 |
44 | normXYZ = math.sqrt(
45 | math.pow(xyz[0], 2) + math.pow(xyz[1], 2) + math.pow(xyz[2], 2))
46 |
47 | v = math.asin(xyz[1] / normXYZ)
48 | u = math.asin(xyz[0] / normXZ)
49 |
50 | if xyz[2] > 0 and u > 0:
51 | u = math.pi - u
52 | elif xyz[2] > 0 and u < 0:
53 | u = -math.pi - u
54 |
55 | uv = (u, v)
56 |
57 | return uv
58 |
59 |
60 | def coords2xyz(coords, N):
61 |
62 | uv = coords2uv(coords)
63 | xyz = uv2xyz(uv, N)
64 |
65 | return xyz
66 |
67 |
68 | def xyz2coords(xyz):
69 |
70 | uv = xyz2uv(xyz)
71 | coords = uv2coords(uv)
72 |
73 | return coords
74 |
75 |
76 | def pos2coords(pos, size):
77 |
78 | coords = (float(pos[0]) / size[0], float(pos[1]) / size[1])
79 | return coords
80 |
81 |
82 | def coords2pos(coords, size):
83 |
84 | pos = (int(coords[0] * (size[0] - 1)), int(coords[1] * (size[1] - 1)))
85 | return pos
86 |
87 |
88 | def xyz2pos(xyz, size):
89 |
90 | coords = xyz2coords(xyz)
91 | pos = coords2pos(coords, size)
92 | return pos
93 |
94 |
95 | def pos2xyz(pos, size, N):
96 |
97 | coords = pos2coords(pos, size)
98 | xyz = coords2xyz(coords, N)
99 | return xyz
100 |
101 |
102 | def posTranspose(pos):
103 |
104 | ans = (pos[1], pos[0])
105 | return ans
106 |
107 |
108 | def points2coords(points):
109 |
110 | ans = []
111 | for p in points:
112 | ans.append(xyz2coords(p))
113 | return ans
114 |
115 |
116 | def pointsCrossPano(p1, p2):
117 |
118 | if p1[2] > 0 and p2[2] > 0:
119 | if p1[0] < 0 and p2[0] > 0:
120 | return True, p1, p2
121 | elif p1[0] > 0 and p2[0] < 0:
122 | return True, p2, p1
123 | else:
124 | return False, None, None
125 | else:
126 | return False, None, None
127 |
128 |
129 | def cameraCoords2Vector(camPose, coords, fov):
130 |
131 | x_offset = -(coords[0] - 0.5) * fov[0]
132 | y_offset = (coords[1] - 0.5) * fov[1]
133 |
134 | hcam_rad = (camPose[0] + x_offset) / 180.0 * math.pi
135 | vcam_rad = -(camPose[1] + y_offset) / 180.0 * math.pi
136 |
137 | x = math.sin(hcam_rad)
138 | z = math.cos(hcam_rad)
139 | y = math.sin(vcam_rad)
140 |
141 | return (x, y, z)
142 |
143 |
144 | def createPointCloud(color, depth):
145 | ### color:np.array (h, w)
146 | ### depth: np.array (h, w)
147 |
148 | heightScale = float(color.shape[0]) / depth.shape[0]
149 | widthScale = float(color.shape[1]) / depth.shape[1]
150 |
151 | pointCloud = []
152 | for i in range(color.shape[0]):
153 | if not i % pm.pcSampleStride == 0:
154 | continue
155 | for j in range(color.shape[1]):
156 | if not j % pm.pcSampleStride == 0:
157 | continue
158 |
159 | rgb = (color[i][j][0], color[i][j][1], color[i][j][2])
160 | d = depth[int(i / heightScale)][int(j / widthScale)]
161 | if d <= 0:
162 | continue
163 |
164 | coordsX = float(j) / color.shape[1]
165 | coordsY = float(i) / color.shape[0]
166 | xyz = coords2xyz((coordsX, coordsY), d)
167 |
168 | point = (xyz, rgb)
169 | pointCloud.append(point)
170 |
171 | #if i % int(color.shape[0]/10) == 0:
172 | # print("PC generating {0}%".format(i/color.shape[0]*100))
173 |
174 | return pointCloud
175 |
176 |
177 | '''
178 | def cameraPoint2pano(camPose, screenPos, screenSize, fov):
179 |
180 | p_theta = (screenPos[0] - screenSize[0] / 2.0) / screenSize[0] * fov[0] / 180.0 * math.pi
181 | p_phi = -(screenPos[1] - screenSize[1] / 2.0) / screenSize[1] * fov[1] / 180.0 * math.pi
182 |
183 | p_x = math.cos(p_phi) * math.cos(p_theta)
184 | p_y = math.cos(p_phi) * math.sin(p_theta)
185 | p_z = math.sin(p_phi)
186 |
187 | p0 = np.array([p_x, p_y, p_z], np.float)
188 |
189 | hcam_rad = camPose[0] / 180.0 * math.pi
190 | vcam_rad = camPose[1] / 180.0 * math.pi
191 |
192 | rot_y = np.array([[math.cos(vcam_rad), 0, math.sin(vcam_rad)],
193 | [0, 1, 0],
194 | [-math.sin(vcam_rad), 0, math.cos(vcam_rad)]], np.float)
195 | rot_z = np.array([[math.cos(hcam_rad), -math.sin(hcam_rad), 0],
196 | [math.sin(hcam_rad), math.cos(hcam_rad), 0],
197 | [0, 0, 1]], np.float)
198 |
199 | p1 = rot_y.dot(p0)
200 | p2 = rot_z.dot(p1)
201 |
202 | theta = math.atan2(p2[1], p2[0])
203 | phi = math.asin(p2[2])
204 |
205 | lon = theta / math.pi * 180.0
206 | lat = phi / math.pi * 180.0
207 | #print("lon : {0} , lat : {1}".format(lon, lat))
208 |
209 | panoCoords = ((lon + 180) / 360, 1.0 - (lat + 90) / 180 )
210 |
211 | return panoCoords
212 | '''
213 |
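
coords2xyz and xyz2coords are inverse conversions between normalized panorama coordinates and 3D directions; a quick round-trip check, assuming the annotator's dependencies are installed:

    from PanoAnnotator.utils.PanoTool import coords2xyz, xyz2coords

    xyz = coords2xyz((0.25, 0.5), 1.0)   # a point on the horizon, a quarter across the panorama
    print(xyz)                           # roughly (-1.0, 0.0, 0.0)
    print(xyz2coords(xyz))               # back to (0.25, 0.5) up to floating point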
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/ProgressTool.py:
--------------------------------------------------------------------------------
1 | progMax = 0
2 | progCount = 0
3 |
4 |
5 | def resetProgress(scene, maxVal=1):
6 |
7 | global progMax
8 | progMax = maxVal
9 | global progCount
10 | progCount = 0
11 | setProgressVal(scene)
12 |
13 |
14 | def updateProgress(scene):
15 |
16 | global progMax
17 | global progCount
18 | progCount += 1
19 | if progCount >= progMax:
20 | resetProgress(scene)
21 | else:
22 | setProgressVal(scene)
23 |
24 |
25 | def setProgressVal(scene):
26 |
27 | global progMax
28 | global progCount
29 |
30 | mainWindows = scene.getMainWindows()
31 | val = float(progCount) / progMax * 100
32 | mainWindows.updataProgressView(val)
33 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/TimeTool.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | timeStartRun = 0
4 | timeStartFPS = 0
5 |
6 |
7 | def getFPS():
8 |
9 | global timeStartFPS
10 |     duration = time.perf_counter() - timeStartFPS
11 |     if duration != 0:
12 |         fps = 1.0 / duration
13 | else:
14 | fps = 0.0
15 | timeStartFPS = time.perf_counter()
16 | return fps
17 |
18 |
19 | def resetTimer():
20 | global timeStartRun
21 | timeStartRun = time.perf_counter()
22 |
23 |
24 | def getRunTime():
25 | global timeStartRun
26 | print(time.perf_counter() - timeStartRun)
27 | timeStartRun = time.perf_counter()
28 |
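A tiny usage sketch for the timing helpers above (not part of the repository; assumes the PanoAnnotator package is importable):

```
import time
import PanoAnnotator.utils as utils

utils.resetTimer()
for _ in range(5):
    time.sleep(0.05)               # stand-in for per-frame work
    print("FPS:", utils.getFPS())  # inverse of the time since the previous getFPS() call
utils.getRunTime()                 # prints seconds since resetTimer() and restarts the timer
```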
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .GeometryTool import *
2 | from .PanoTool import *
3 | from .LayoutTool import *
4 | from .ImageTool import *
5 | from .TimeTool import *
6 | from .ProgressTool import *
7 | from .IOTool import *
8 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/views/LabelListView.py:
--------------------------------------------------------------------------------
1 | import PanoAnnotator.data as data
2 | import PanoAnnotator.configs.Params as pm
3 | import PanoAnnotator.utils as utils
4 | import PanoAnnotator.views as views
5 |
6 | from PyQt5.QtCore import Qt
7 | from PyQt5.QtWidgets import QTreeWidget, QTreeWidgetItem, QAbstractItemView
8 |
9 |
10 | class LabelListView(QTreeWidget):
11 | def __init__(self, parent=None):
12 | super(LabelListView, self).__init__(parent)
13 |
14 | self.__isAvailable = False
15 | self.__mainWindow = None
16 | self.__scene = None
17 |
18 | self.setColumnCount(2)
19 | self.setHeaderLabels(['Name', 'ID'])
20 |
21 | self.itemLinks = {}
22 |
23 | self.clicked.connect(self.onTreeClicked)
24 |
25 | self.setSelectionMode(QAbstractItemView.ExtendedSelection)
26 |
27 | def initByScene(self, scene):
28 |
29 | self.__scene = scene
30 |
31 | self.refreshList()
32 |
33 | self.__isAvailable = True
34 | self.update()
35 |
36 | def refreshList(self):
37 |
38 | self.clear()
39 | self.itemLinks = {}
40 |
41 | def genItem(obj, name):
42 | item = QTreeWidgetItem(self)
43 | item.setText(0, name)
44 | item.setText(1, str(obj.id).zfill(5))
45 | self.itemLinks[obj] = item
46 |
47 | floor = self.__scene.label.getLayoutFloor()
48 | genItem(floor, 'Floor')
49 | ceiling = self.__scene.label.getLayoutCeiling()
50 | genItem(ceiling, 'Ceiling')
51 |
52 | walls = self.__scene.label.getLayoutWalls()
53 | for wall in walls:
54 | genItem(wall, 'Wall')
55 |
56 | obj2ds = self.__scene.label.getLayoutObject2d()
57 | for obj2d in obj2ds:
58 | dicts = [
59 | 'door', 'glass_door', 'frame', 'window', 'kitchen_counter',
60 | 'closet'
61 | ]
62 | genItem(obj2d, dicts[obj2d.obj_type])
63 |
64 | def getSelectObjects(self, objType):
65 |
66 | objs = []
67 | for obj, item in self.itemLinks.items():
68 | if item in self.selectedItems():
69 | if obj in self.__scene.selectObjs:
70 | if type(obj) == objType:
71 | objs.append(obj)
72 | return objs
73 |
74 | def onTreeClicked(self, QModelIndex):
75 |
76 | for obj, item in self.itemLinks.items():
77 | if item in self.selectedItems():
78 | if obj not in self.__scene.selectObjs:
79 | self.__scene.selectObjs.append(obj)
80 | else:
81 | if obj in self.__scene.selectObjs:
82 | self.__scene.selectObjs.remove(obj)
83 |
84 | def keyPressEvent(self, event):
85 |
86 | walls = self.getSelectObjects(data.WallPlane)
87 | obj2ds = self.getSelectObjects(data.Object2D)
88 |
89 | if (event.key() == Qt.Key_D):
90 | if walls or obj2ds:
91 | self.__scene.label.delLayoutObject2ds(obj2ds)
92 | self.__scene.label.delLayoutWalls(walls)
93 | self.refreshList()
94 |
95 | if (event.key() == Qt.Key_M):
96 | self.__scene.label.mergeLayoutWalls(walls)
97 | self.refreshList()
98 |
99 | def enterEvent(self, event):
100 | self.setFocus(True)
101 |
102 | def leaveEvent(self, event):
103 | pass
104 |
105 | def setMainWindow(self, mainWindow):
106 | self.__mainWindow = mainWindow
107 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/views/MainWindowUi.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 |
3 | from .PanoView import PanoView
4 | from .MonoView import MonoView
5 | from .ResultView import ResultView
6 | from .LabelListView import LabelListView
7 |
8 |
9 | class MainWindowUi(object):
10 | def setupUi(self, mainWindow):
11 | mainWindow.setObjectName("mainWindow")
12 | mainWindow.resize(1470, 900)
13 |
14 | self.centralWidget = QtWidgets.QWidget(mainWindow)
15 | self.centralWidget.setObjectName("centralWidget")
16 |
17 | #####
18 |         #Menu bar setting
19 | #####
20 | self.menubar = QtWidgets.QMenuBar(mainWindow)
21 | self.menubar.setGeometry(QtCore.QRect(0, 0, 1600, 30))
22 | self.menubar.setObjectName("menubar")
23 | mainWindow.setMenuBar(self.menubar)
24 |
25 | self.menuOpen = QtWidgets.QMenu(self.menubar)
26 | self.menuOpen.setObjectName("menuOpen")
27 |
28 | self.actionOpenImage = QtWidgets.QAction(mainWindow)
29 | self.actionOpenImage.setObjectName("actionOpenImage")
30 | self.menuOpen.addAction(self.actionOpenImage)
31 |
32 | self.actionOpenJson = QtWidgets.QAction(mainWindow)
33 | self.actionOpenJson.setObjectName("actionOpenJson")
34 | self.menuOpen.addAction(self.actionOpenJson)
35 |
36 | self.menubar.addAction(self.menuOpen.menuAction())
37 |
38 | self.menuSave = QtWidgets.QMenu(self.menubar)
39 | self.menuSave.setObjectName("menuSave")
40 | self.actionSaveFile = QtWidgets.QAction(mainWindow)
41 | self.actionSaveFile.setObjectName("actionSaveFile")
42 | self.menuSave.addAction(self.actionSaveFile)
43 | self.menubar.addAction(self.menuSave.menuAction())
44 |
45 | #####
46 |         #Pano equirectangular image view
47 | #####
48 | self.panoView = PanoView(self.centralWidget)
49 | self.panoView.setObjectName("panoView")
50 | self.panoView.setGeometry(QtCore.QRect(25, 25, 800, 400))
51 | self.panoView.setScaledContents(True)
52 | #self.panoView.setMinimumSize(QtCore.QSize(1024, 512))
53 | self.panoView.setText("PanoView Widget")
54 | self.panoView.setStyleSheet("#panoView { background-color: black }")
55 |
56 | #####
57 | #Pano monocular image view
58 | #####
59 | self.monoView = MonoView(self.centralWidget)
60 | self.monoView.setObjectName("monoView")
61 | self.monoView.setGeometry(QtCore.QRect(25, 450, 800, 400))
62 |
63 | #####
64 | #Result preview view
65 | #####
66 | self.resultView = ResultView(self.centralWidget)
67 | self.resultView.setObjectName("resultView")
68 | self.resultView.setGeometry(QtCore.QRect(850, 25, 600, 400))
69 |
70 | #####
71 | #Data operation list
72 | ####
73 | self.labelListView = LabelListView(self.centralWidget)
74 |         self.labelListView.setObjectName("LabelListView")
75 | self.labelListView.setGeometry(QtCore.QRect(850, 450, 600, 350))
76 |
77 | self.progressView = QtWidgets.QProgressBar(self.centralWidget)
78 |         self.progressView.setObjectName("ProgressView")
79 | self.progressView.setGeometry(QtCore.QRect(850, 810, 600, 40))
80 |
81 | mainWindow.setCentralWidget(self.centralWidget)
82 |
83 | self.retranslateUi(mainWindow)
84 | QtCore.QMetaObject.connectSlotsByName(mainWindow)
85 |
86 | def retranslateUi(self, mainWindow):
87 | _translate = QtCore.QCoreApplication.translate
88 | mainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
89 | self.menuOpen.setTitle(_translate("MainWindow", "Open"))
90 | self.actionOpenImage.setText(_translate("MainWindow", "Open Image"))
91 | self.actionOpenJson.setText(_translate("MainWindow", "Open Json"))
92 | self.menuSave.setTitle(_translate("MainWindow", "Save"))
93 | self.actionSaveFile.setText(_translate("MainWindow", "Save as Json"))
94 | self.actionSaveFile.setShortcut(_translate("MainWindow", "Ctrl+s"))
95 |
--------------------------------------------------------------------------------
/JigsawAnnotator/PanoAnnotator/views/__init__.py:
--------------------------------------------------------------------------------
1 | from .MainWindowUi import MainWindowUi
2 | from .PanoView import PanoView
3 | from .ResultView import ResultView
--------------------------------------------------------------------------------
/JigsawAnnotator/panotools/__init__.py:
--------------------------------------------------------------------------------
1 | # from .house import House
2 | # from .bbox import BBox
3 | # from .panorama import Panorama
4 | # from . import tools
5 | # from . import visualize as vis
6 |
--------------------------------------------------------------------------------
/JigsawAnnotator/panotools/bbox.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class BBox:
4 | def __init__(self, bbox=None, obj_type=None):
5 | self.bbox = bbox #BoundingBox (2,3)
6 | self.type = obj_type
7 | if abs(self.bbox[0][0]-self.bbox[1][0])<1e-4:
8 | if self.bbox[0][0]>0:
9 | self.direction = 0
10 | else:
11 | self.direction = 2
12 | else:
13 | if self.bbox[0][2]>0:
14 | self.direction = 1
15 | else:
16 | self.direction = 3
17 |
18 |
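A small, hypothetical example of the direction code: when the two bbox corners share (almost) the same x, the segment lies in a plane of constant x, so the direction is 0 (x > 0) or 2 (x < 0); otherwise the sign of z selects 1 or 3. The sketch below assumes the JigsawAnnotator directory is on PYTHONPATH.

```
import numpy as np
from panotools.bbox import BBox

# A door segment at x = +1.0 spanning z in [2.0, 3.0]: constant x with x > 0,
# so the constructor assigns direction 0.
box = BBox(bbox=np.array([[1.0, 0.0, 2.0],
                          [1.0, 0.5, 3.0]]), obj_type='door')
print(box.direction)  # -> 0
```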
--------------------------------------------------------------------------------
/JigsawAnnotator/panotools/house.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from scipy.spatial.distance import cdist
3 | import numpy as np
4 | from PIL import Image, ImageDraw
5 | import shapely.geometry as sg
6 | import shapely.ops as so
7 | from shapely.ops import transform, nearest_points
8 | from shapely import affinity
9 | import os
10 | from .tree import Tree
11 | from .panorama import Panorama
12 | from . import tools
13 | import json
14 | import matplotlib.pyplot as plt
15 |
16 | class House:
17 | def __init__(self, path, name):
18 | self.path = path
19 | self.name = name
20 | self.labeld = False
21 | if not os.path.exists("{}/{}/labels.json".format(path, name)):
22 | return
23 | data = json.load(open("{}/{}/labels.json".format(path, name)))
24 | if len(data['flags'])!=0 and int(data['flags'][0])<4:
25 | return
26 | self.labeld = True
27 | self.pano_names = data['pano_names']
28 | self.rotations = data['rotations']
29 | self.room_types = data['room_types']
30 | self.positions = data['positions']
31 | self.scale = data['scales'][0]
32 | self.pano_scale = data['scales'][1]
33 |
34 | self.fp = "{}/{}/floorplan.jpg".format(path, name)
35 | self.panos = []
36 |         for pano_name in self.pano_names:
37 |             self.panos.append(Panorama("{}/{}".format(self.path, self.name), 'aligned_'+pano_name))
38 |
39 | self.positive_pairs = []
40 | self.negative_pairs = []
41 | self.check_connections()
42 |
43 | def get_fp_img(self, type="RGB"):
44 | img = Image.open(self.fp).convert(type)
45 | img = img.resize((int(img.size[0] * self.scale), int(img.size[1] * self.scale)))
46 | return img
47 |
48 | def dindex_to_panoindex(self, index):
49 | for i, pano in enumerate(self.panos):
50 | if (index < len(pano.doors)):
51 | return i, int(index)
52 | index -= len(pano.doors)
53 |
54 | def visualize_alignment(self):
55 | fp = self.get_fp_img()
56 | for i in self.positions:
57 | pos = self.positions[i]
58 | i = int(i)
59 | pano = self.panos[i].get_top_down_view()
60 | pano = pano.rotate(-90 * self.rotations[i])
61 | pano = pano.resize((int(pano.size[0] * self.pano_scale), int(pano.size[1] * self.pano_scale)))
62 | pano = pano.crop((-pos[0], -pos[1], fp.size[0] - pos[0], fp.size[1] - pos[1]))
63 | alpha = pano.split()[-1]
64 | fp = Image.composite(pano, fp, alpha)
65 | fp.show()
66 |
67 | def check_connections(self):
68 | objs = []
69 | for name in self.positions:
70 | pano = self.panos[int(name)]
71 | for j, obj in enumerate(pano.obj_list):
72 | dtype = obj.type
73 | bbox = obj.bbox * 25.6 + 256
74 | obj = sg.LineString([(bbox[0][0], bbox[0][2]), (bbox[1][0], bbox[1][2])])
75 | obj = affinity.rotate(obj, 90 * self.rotations[int(name)], (256,256))
76 | obj = affinity.translate(obj, self.positions[name][0], self.positions[name][1])
77 | objs.append([obj, int(name), j])
78 | dists = np.zeros([len(objs), len(objs)]) + 1e10
79 | for i in range(len(objs)):
80 | for j in range(len(objs)):
81 | if i==j:
82 | continue
83 | tmp = nearest_points(objs[i][0].centroid, objs[j][0])
84 | d = tmp[1].distance(objs[i][0].centroid)
85 | dists[i,j] = d
86 | dists = np.round(dists,3)
87 | args = np.argmin(dists, 1)
88 | dists = np.min(dists,1)
89 | for i in range(len(objs)):
90 | for j in range(i+1, len(objs)):
91 | if args[i]==j and args[j]==i and dists[i]<10:
92 | self.positive_pairs.append([objs[i][1:], objs[j][1:]])
93 | # print(self.panos[objs[i][1]].obj_list[objs[i][2]].direction+self.rotations[objs[i][1]],
94 | # self.panos[objs[j][1]].obj_list[objs[j][2]].direction+self.rotations[objs[j][1]])
95 | else:
96 | self.negative_pairs.append([objs[i][1:], objs[j][1:]])
97 |
98 | #####################################################################################
99 | import glob
100 | names = os.listdir("clean_data/")
101 | count = 0
102 | cnt = 0
103 | lblds = 0
104 | panos = np.zeros(13)
105 | valid_panos = np.zeros(13)
106 | for name in names[:]:
107 | house = House("clean_data", name)
108 | if house.labeld:
109 | panos[len(house.panos)]+=1
110 | valid_panos[len(house.positions)]+=1
111 | lblds+=1
112 | cnt += len(house.positive_pairs)
113 | count += len(house.negative_pairs)
114 | # house.visualize_alignment()
115 | print(lblds, cnt, count)
116 | print(panos)
117 | print(valid_panos)
118 |
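The module ends with a statistics script over `clean_data/` that runs at import time. For reference, a hedged sketch of using `House` programmatically (the house name "0001" is hypothetical; run from the JigsawAnnotator directory):

```
from panotools.house import House  # note: importing also executes the stats script above

house = House("clean_data", "0001")   # expects clean_data/0001/labels.json from the annotator
if house.labeld:                      # set only when the house is fully annotated
    print(len(house.panos), "panoramas")
    print(len(house.positive_pairs), "positive door/window pairs")
    house.visualize_alignment()       # overlays each pano's top-down view on the floorplan
```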
--------------------------------------------------------------------------------
/JigsawAnnotator/panotools/tools.py:
--------------------------------------------------------------------------------
1 | import shapely.geometry as sg
2 | import shapely.ops as so
3 | import numpy as np
4 | from shapely.ops import transform
5 | import math
6 | import seaborn as sns
7 |
8 |
9 | colors = sns.color_palette("bright", 8)
10 | colors = [[x[0] * 255, x[1] * 255, x[2] * 255, 255] for x in colors]
11 | colors = np.array(colors, dtype=int)
12 |
13 |
14 | rcolors = sns.color_palette("dark", 10)
15 | rcolors = [[x[0] * 255, x[1] * 255, x[2] * 255, 200] for x in rcolors]
16 | rcolors = np.array(rcolors, dtype=int)
17 |
18 | def flip(x, y):
19 | return x, -y
20 |
21 |
22 | def non_max_suppression_fast(boxes, probs=None, overlapThresh=0.3):
23 | # if there are no boxes, return an empty list
24 | if len(boxes) == 0:
25 | return []
26 |
27 | # if the bounding boxes are integers, convert them to floats -- this
28 | # is important since we'll be doing a bunch of divisions
29 | if boxes.dtype.kind == "i":
30 | boxes = boxes.astype("float")
31 |
32 | # initialize the list of picked indexes
33 | pick = []
34 |
35 | # grab the coordinates of the bounding boxes
36 | x1 = boxes[:, 0]
37 | y1 = boxes[:, 1]
38 | x2 = boxes[:, 2]
39 | y2 = boxes[:, 3]
40 |
41 | # compute the area of the bounding boxes and grab the indexes to sort
42 | # (in the case that no probabilities are provided, simply sort on the
43 | # bottom-left y-coordinate)
44 | area = (x2 - x1 + 1) * (y2 - y1 + 1)
45 | idxs = y2
46 |
47 | # if probabilities are provided, sort on them instead
48 | if probs is not None:
49 | idxs = probs
50 |
51 | # sort the indexes
52 | idxs = np.argsort(idxs)
53 |
54 | # keep looping while some indexes still remain in the indexes list
55 | while len(idxs) > 0:
56 | # grab the last index in the indexes list and add the index value
57 | # to the list of picked indexes
58 | last = len(idxs) - 1
59 | i = idxs[last]
60 | pick.append(i)
61 |
62 | # find the largest (x, y) coordinates for the start of the bounding
63 | # box and the smallest (x, y) coordinates for the end of the bounding
64 | # box
65 | xx1 = np.maximum(x1[i], x1[idxs[:last]])
66 | yy1 = np.maximum(y1[i], y1[idxs[:last]])
67 | xx2 = np.minimum(x2[i], x2[idxs[:last]])
68 | yy2 = np.minimum(y2[i], y2[idxs[:last]])
69 |
70 | # compute the width and height of the bounding box
71 | w = np.maximum(0, xx2 - xx1 + 1)
72 | h = np.maximum(0, yy2 - yy1 + 1)
73 |
74 | # compute the ratio of overlap
75 | overlap = (w * h) / area[idxs[:last]]
76 |
77 | # delete all indexes from the index list that have overlap greater
78 | # than the provided overlap threshold
79 | idxs = np.delete(
80 | idxs, np.concatenate(
81 | ([last], np.where(overlap > overlapThresh)[0])))
82 |
83 | # return only the bounding boxes that were picked
84 | return boxes[pick].astype("int")
85 |
86 |
87 | def pano_to_fp(point, polygon, pano_size, rot90=0):
88 | x = point[0]
89 | degree = (pano_size[1] - x) / pano_size[1] * (2 * np.pi)
90 | degree = (degree + (np.pi / 2 * rot90))
91 | ray = [(0, 0), (512 * np.cos(degree), 512 * np.sin(degree))]
92 | ray = sg.LineString(ray)
93 | intersect = polygon.exterior.intersection(ray)
94 | if (intersect.type == "MultiPoint"):
95 | intersect = intersect[0]
96 | if (intersect.type == "LineString"):
97 | return intersect, 0
98 | x, y = polygon.exterior.coords.xy
99 | for i in range(1, len(x)):
100 | line = sg.LineString([(x[i - 1], y[i - 1]), (x[i], y[i])])
101 | check = line.intersects(ray)
102 | if (check):
103 | break
104 | x, y = line.xy
105 | if (abs(x[0] - x[1]) < abs(y[0] - y[1])):
106 | is_vertical = 1 if (x[0] < 0) else 3
107 | else:
108 | is_vertical = 0 if (y[0] < 0) else 2
109 | return intersect, is_vertical
110 |
111 |
112 | def map_pano_to_tdv(pano):
113 | mapping = np.zeros([2, pano.size[0], pano.size[1]])
114 | for i in range(pano.size[1]):
115 | p, _ = pano_to_fp([i, 0], pano.poly, pano.size, rot90=1)
116 | mapping[0, 100:, i] = np.linspace(
117 | p.y, 0, num=pano.size[0] - 100, endpoint=False) + 512
118 | mapping[1, 100:, i] = np.linspace(
119 | p.x, 0, num=pano.size[0] - 100, endpoint=False) + 512
120 | mapping = mapping.astype(int)
121 | return mapping
122 |
123 | def uv2coords(uv):
124 |
125 | coordsX = uv[0] / (2 * math.pi) + 0.5
126 | coordsY = -uv[1] / math.pi + 0.5
127 |
128 | coords = (coordsX, coordsY)
129 |
130 | return coords
131 |
132 |
133 | def xyz2uv(xyz):
134 |
135 | normXZ = math.sqrt(math.pow(xyz[0], 2) + math.pow(xyz[2], 2))
136 | if normXZ < 0.000001:
137 | normXZ = 0.000001
138 |
139 | normXYZ = math.sqrt(
140 | math.pow(xyz[0], 2) + math.pow(xyz[1], 2) + math.pow(xyz[2], 2))
141 |
142 | v = math.asin(xyz[1] / normXYZ)
143 | u = math.asin(xyz[0] / normXZ)
144 |
145 | if xyz[2] > 0 and u > 0:
146 | u = math.pi - u
147 | elif xyz[2] > 0 and u < 0:
148 | u = -math.pi - u
149 |
150 | uv = (u, v)
151 |
152 | return uv
153 |
154 |
155 | def xyz2coords(xyz):
156 |
157 | uv = xyz2uv(xyz)
158 | coords = uv2coords(uv)
159 |
160 | return coords
161 |
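For a quick sanity check of the projection helpers: `xyz2coords` maps a 3D point in the panorama's camera frame to normalized equirectangular image coordinates, e.g. a point one meter to the right of the camera lands three quarters of the way across the image on the horizon row. A minimal sketch (not part of the repository; run from the JigsawAnnotator directory with its dependencies installed):

```
from panotools import tools

print(tools.xyz2coords((1.0, 0.0, 0.0)))  # right of the camera -> (0.75, 0.5)
print(tools.xyz2coords((0.0, 1.0, 0.0)))  # straight up         -> (0.5, 0.0), top row of the pano
```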
--------------------------------------------------------------------------------
/JigsawAnnotator/panotools/tree.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 | class Tree:
5 | def __init__(self):
6 | self.rooms = []
7 | self.pairs_list = []
8 | self.num_pos = 0
9 | self.num_neg = 0
10 |
11 | def __len__(self):
12 | return len(self.rooms)
13 |
14 | def add_pair(self, pair, is_positive):
15 | self.pairs_list.append(pair)
16 | if (is_positive):
17 | self.num_pos += 1
18 | else:
19 | self.num_neg += 1
20 |
21 | def drop_last(self, is_positive):
22 | self.pairs_list = self.pairs_list[:-1]
23 | if (is_positive):
24 | self.num_pos -= 1
25 | else:
26 | self.num_neg -= 1
27 |
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/.gitignore:
--------------------------------------------------------------------------------
1 | /*.pyc
2 |
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/Hmovetoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/Hmovetoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/Hsepartoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/Hsepartoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/Vmovetoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/Vmovetoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/Vsepartoolbar.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/Vsepartoolbar.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/branch_closed-on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/branch_closed-on.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/branch_closed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/branch_closed.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/branch_open-on.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/branch_open-on.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/branch_open.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/branch_open.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_checked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_checked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_checked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_checked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_checked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_checked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_indeterminate_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/checkbox_unchecked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/close-hover.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/close-hover.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/close-pressed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/close-pressed.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/close.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/close.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/down_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/down_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/down_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/down_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/left_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/left_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/left_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/left_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_checked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_checked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_checked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_checked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_checked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_checked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_unchecked.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_unchecked.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_unchecked_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_unchecked_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/radio_unchecked_focus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/radio_unchecked_focus.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/right_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/right_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/right_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/right_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/sizegrip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/sizegrip.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/stylesheet-branch-end.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/stylesheet-branch-end.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/stylesheet-branch-more.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/stylesheet-branch-more.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/stylesheet-vline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/stylesheet-vline.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/transparent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/transparent.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/undock.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/undock.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/up_arrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/up_arrow.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/rc/up_arrow_disabled.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/JigsawAnnotator/qdarkstyle/rc/up_arrow_disabled.png
--------------------------------------------------------------------------------
/JigsawAnnotator/qdarkstyle/style.qrc:
--------------------------------------------------------------------------------
1 | <RCC>
2 |   <qresource prefix="qss_icons">
3 |     <file>rc/up_arrow_disabled.png</file>
4 |     <file>rc/Hmovetoolbar.png</file>
5 |     <file>rc/stylesheet-branch-end.png</file>
6 |     <file>rc/branch_closed-on.png</file>
7 |     <file>rc/stylesheet-vline.png</file>
8 |     <file>rc/branch_closed.png</file>
9 |     <file>rc/branch_open-on.png</file>
10 |     <file>rc/transparent.png</file>
11 |     <file>rc/right_arrow_disabled.png</file>
12 |     <file>rc/sizegrip.png</file>
13 |     <file>rc/close.png</file>
14 |     <file>rc/close-hover.png</file>
15 |     <file>rc/close-pressed.png</file>
16 |     <file>rc/down_arrow.png</file>
17 |     <file>rc/Vmovetoolbar.png</file>
18 |     <file>rc/left_arrow.png</file>
19 |     <file>rc/stylesheet-branch-more.png</file>
20 |     <file>rc/up_arrow.png</file>
21 |     <file>rc/right_arrow.png</file>
22 |     <file>rc/left_arrow_disabled.png</file>
23 |     <file>rc/Hsepartoolbar.png</file>
24 |     <file>rc/branch_open.png</file>
25 |     <file>rc/Vsepartoolbar.png</file>
26 |     <file>rc/down_arrow_disabled.png</file>
27 |     <file>rc/undock.png</file>
28 |     <file>rc/checkbox_checked_disabled.png</file>
29 |     <file>rc/checkbox_checked_focus.png</file>
30 |     <file>rc/checkbox_checked.png</file>
31 |     <file>rc/checkbox_indeterminate.png</file>
32 |     <file>rc/checkbox_indeterminate_focus.png</file>
33 |     <file>rc/checkbox_unchecked_disabled.png</file>
34 |     <file>rc/checkbox_unchecked_focus.png</file>
35 |     <file>rc/checkbox_unchecked.png</file>
36 |     <file>rc/radio_checked_disabled.png</file>
37 |     <file>rc/radio_checked_focus.png</file>
38 |     <file>rc/radio_checked.png</file>
39 |     <file>rc/radio_unchecked_disabled.png</file>
40 |     <file>rc/radio_unchecked_focus.png</file>
41 |     <file>rc/radio_unchecked.png</file>
42 |   </qresource>
43 |   <qresource prefix="qdarkstyle">
44 |     <file>style.qss</file>
45 |   </qresource>
46 | </RCC>
--------------------------------------------------------------------------------
/JigsawAnnotator/room_type_annotator.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import sys
4 | import json
5 | import matplotlib.pyplot as plt
6 | from PIL import Image
7 | import numpy as np
8 |
9 |
10 |
11 | houses = glob.glob('clean_data/*')
12 | houses.sort()
13 | for house in houses:
14 | if(os.path.isfile('{}/room_types.txt'.format(house))):
15 | continue
16 | print(house)
17 | house_name = house.split('/')[1]
18 | ricoh_data = json.load(open('annotations/{}.json'.format(house_name)))
19 | ricoh_data = ricoh_data['images']
20 | ricoh_data = [[x['file_name'][:-4], x['room_type']] for x in ricoh_data]
21 | mapping = {'Washing_room': 7, 'Bathroom': 8, 'Kitchen': 5, 'Balcony': 0, 'Toilet': 9,
22 | 'Japanese-style_room': 3, 'Verandah': 0, 'Western-style_room': 2, 'Entrance': 6}
23 | ricoh_data = [[x[0], mapping[x[1]]] for x in ricoh_data]
24 | for x in ricoh_data:
25 | def press(event):
26 | print('press', event.key)
27 | if event.key=='1':
28 | print('got LDK...')
29 | x[1] = 4
30 | plt.close()
31 | elif event.key=='2':
32 | print('got western...')
33 | x[1] = 2
34 | plt.close()
35 | if x[1] == 2:
36 | img = Image.open('{}/aligned_{}.png'.format(house, x[0]))
37 | fig, ax = plt.subplots()
38 | fig.canvas.mpl_connect('key_press_event', press)
39 | plt.imshow(img)
40 | mng = plt.get_current_fig_manager()
41 | mng.window.showMaximized()
42 | plt.show()
43 | with open('{}/room_types.txt'.format(house),'w') as f:
44 | for x in ricoh_data:
45 | f.write("pano: {} \t type: {} \n".format(x[0], x[1]))
46 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/fileListWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt
4 | from PyQt5.QtGui import QIcon, QPixmap
5 | import qdarkstyle
6 | import os, sys
7 |
8 |
9 | class ExtendedQLabel(qtw.QLabel):
10 | clicked = QtCore.pyqtSignal(str)
11 |
12 | def __init__(self, parent):
13 | super().__init__(parent)
14 | self.setStyleSheet("QLabel::hover" "{" "background-color : gray;" "}")
15 |
16 | def mousePressEvent(self, ev):
17 | if ev.button() == Qt.RightButton:
18 | self.clicked.emit(self.objectName())
19 | else:
20 | self.clicked.emit(self.objectName())
21 |
22 |
23 | class FileListWidget():
24 | def __init__(self, MW, app):
25 | #super().__init__(MW)
26 | self.app = app
27 | self.list = qtw.QListView(MW)
28 | self.list.setWindowTitle('Houses List')
29 | self.list.clicked.connect(self.app.update_folder)
30 | self.model = QtGui.QStandardItemModel(self.list)
31 | self.list.setModel(self.model)
32 |
33 | self.list.setObjectName('sca_folders')
34 | self.initUI()
35 | self.updateUI()
36 |
37 | def initUI(self):
38 | self.labels = []
39 | self.dir = None
40 |
41 | def get_row(self, indx):
42 | self.list.setCurrentIndex(
43 | self.model.indexFromItem(self.model.item(indx)))
44 | return self.model.indexFromItem(self.model.item(indx))
45 |
46 | def updateUI(self):
47 | self.size = [
48 | self.app.MW.width() * 0.002, 25,
49 | self.app.MW.width() * 0.09,
50 | self.app.MW.height() * 0.5
51 | ]
52 | self.list.setGeometry(self.size[0], self.size[1], self.size[2],
53 | self.size[3])
54 |
55 | def update(self, dir):
56 | self.updateUI()
57 | self.model.removeRows(0, self.model.rowCount())
58 | self.dir = dir
59 | fileList = os.listdir(dir)
60 | fileList.sort()
61 | self.labels = []
62 | for i, x in enumerate(fileList):
63 | item = QtGui.QStandardItem()
64 | item.setText(x)
65 | if (os.path.exists("clean_data/{}/labels.json".format(x))):
66 | item.setForeground(QtGui.QColor("green"))
67 | self.model.appendRow(item)
68 | self.labels.append(item)
69 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/flagListWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt
4 | from PyQt5.QtGui import QIcon, QPixmap
5 | import qdarkstyle
6 | import os, sys
7 |
8 |
9 | class FlagListWidget():
10 | def __init__(self, MW, app, house):
11 | self.house = house
12 | self.app = app
13 | self.list = qtw.QListView(MW)
14 | self.list.setWindowTitle('Flags List')
15 | self.list.setSelectionMode(
16 | QtWidgets.QAbstractItemView.ExtendedSelection)
17 | #self.list.clicked.connect(self.app.update_folder)
18 | self.model = QtGui.QStandardItemModel(self.list)
19 | self.list.setModel(self.model)
20 |
21 | self.list.setObjectName('sca_flags')
22 |
23 |         # Duplex; partially covered or additional impossible panos but some connected panos;
24 |         # ignored because completely outside or noise; lack of panos but still possible; without overlap
25 | flags = ['duplex', 'impossible', 'ignored', 'hard', 'non overlap']
26 | self.model.removeRows(0, self.model.rowCount())
27 | for i, x in enumerate(flags):
28 | item = QtGui.QStandardItem()
29 | item.setCheckable(True)
30 | item.setText(x)
31 | self.model.appendRow(item)
32 |
33 | self.updateUI()
34 |
35 | def updateUI(self):
36 | self.size = [
37 | self.app.MW.width() * 0.002, 25 + self.app.MW.height() * 0.51,
38 | self.app.MW.width() * 0.09,
39 | self.app.MW.height() * 0.15
40 | ]
41 | self.list.setGeometry(self.size[0], self.size[1], self.size[2],
42 | self.size[3])
43 |
44 | def update(self):
45 | for i in range(self.model.rowCount()):
46 |             self.model.item(i).setCheckState(Qt.Unchecked)
47 | flags = self.house.get_flags()
48 | for index in flags:
49 | item = self.model.item(index)
50 |             item.setCheckState(Qt.Checked)
51 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/floorPlanWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt, QMimeData
4 | from PyQt5.QtGui import QIcon, QPixmap, QTransform, QDrag, QCursor, QPainter
5 | from PyQt5.QtWidgets import QApplication, QLabel, QWidget
6 |
7 | import qdarkstyle
8 | import os, sys
9 | import numpy as np
10 |
11 |
12 | class FPExtendedQImage(qtw.QLabel):
13 | clicked = QtCore.pyqtSignal(str)
14 |
15 | def __init__(self, pixmap=None):
16 | super().__init__()
17 | if(pixmap is not None):
18 | self.pixmap = pixmap
19 | self.setPixmap(self.pixmap)
20 | self.setStyleSheet("QLabel { background-color: rgba(0,0,0,0%)}")
21 |
22 |
23 | class FloorPlanWidget(qtw.QScrollArea):
24 | def __init__(self, MW, app, house):
25 | super().__init__(MW)
26 | self.house = house
27 | self.app = app
28 | self.setObjectName('fp_layout')
29 |
30 | self.widget = qtw.QWidget()
31 | self.vbox = qtw.QVBoxLayout()
32 | self.objs = []
33 | self.dir = None
34 | self.widget.setLayout(self.vbox)
35 |
36 | #Scroll Area Properties
37 | self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
38 | self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
39 | self.setWidgetResizable(True)
40 | self.setWidget(self.widget)
41 | self.updateUI()
42 |
43 | def updateUI(self):
44 | self.size = [
45 | self.app.MW.width() * 0.4, 25,
46 | self.app.MW.width() * 0.59,
47 | self.app.MW.height() - 60
48 | ]
49 | self.setGeometry(self.size[0], self.size[1], self.size[2],
50 | self.size[3])
51 |
52 | def update(self):
53 | for x in self.objs:
54 | x.setParent(None)
55 | self.vbox.removeWidget(x)
56 | self.objs = []
57 | if self.app.view_fp:
58 | pixmap = self.house.getFPPixmap()
59 | fp = FPExtendedQImage(pixmap)
60 | else:
61 | fp = FPExtendedQImage()
62 | fp.setAlignment(Qt.AlignTop)
63 | fp.setObjectName('floorplan')
64 | self.objs.append(fp)
65 | self.vbox.addWidget(fp)
66 |
67 | tmp_obj, tmp_pos = self.house.get_added_panos(self.app.view_room_colors, self.app.view_door_colors)
68 |
69 | for i in range(len(tmp_pos)):
70 | pixmap = tmp_obj[i]
71 | obj = FPExtendedQImage(pixmap)
72 | obj.setParent(fp)
73 | obj.setAlignment(Qt.AlignTop)
74 | obj.setGeometry(tmp_pos[i][0], tmp_pos[i][1], 768, 768)
75 | obj.setObjectName('floorplan')
76 | self.objs.append(obj)
77 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/imageListWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt
4 | from PyQt5.QtGui import QIcon, QPixmap, QTransform
5 | import qdarkstyle
6 | import os, sys
7 |
8 |
9 | class ExtendedQImage(qtw.QLabel):
10 | clicked = QtCore.pyqtSignal(str)
11 |
12 | def __init__(self, pixmap, w):
13 | super().__init__()
14 | self.pixmap = pixmap
15 | self.pixWidth = self.pixmap.width()
16 | self.setPixmap(self.pixmap.scaledToWidth(int(w)))
17 | self.setStyleSheet(
18 | "QLabel { background-color: white} QLabel::hover {background-color : lightgray;}"
19 | )
20 |
21 | def mousePressEvent(self, ev):
22 | if ev.button() == Qt.RightButton:
23 | self.clicked.emit(self.objectName())
24 | else:
25 | self.clicked.emit(self.objectName())
26 |
27 |
28 | class ImageListWidget(qtw.QScrollArea):
29 | def __init__(self, MW, app, house):
30 | super().__init__(MW)
31 | self.house = house
32 | self.app = app
33 | self.setObjectName('image_layout')
34 | self.initUI()
35 | self.updateUI()
36 |
37 | def updateUI(self):
38 | self.size = [
39 | self.app.MW.width() * 0.094, 25,
40 | self.app.MW.width() * 0.15,
41 | self.app.MW.height() - 60
42 | ]
43 | self.setGeometry(self.size[0], self.size[1], self.size[2],
44 | self.size[3])
45 |
46 | def initUI(self):
47 | self.widget = qtw.QWidget()
48 | self.vbox = qtw.QVBoxLayout()
49 | self.objs = []
50 | self.widget.setLayout(self.vbox)
51 |
52 | #Scroll Area Properties
53 | self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
54 | self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
55 | self.setWidgetResizable(True)
56 | self.setWidget(self.widget)
57 | return
58 |
59 | def update(self):
60 | pixMaps = self.house.getPanoPixmaps()
61 |
62 | for x in self.objs:
63 | x.setParent(None)
64 | self.vbox.removeWidget(x)
65 |
66 | self.objs = []
67 | for i, x in enumerate(pixMaps):
68 | obj = ExtendedQImage(x, self.app.MW.width() * 0.15)
69 | obj.setAlignment(Qt.AlignCenter)
70 | obj.setObjectName('pano:{}'.format(i))
71 | obj.clicked.connect(self.app.update_image)
72 | self.objs.append(obj)
73 | self.vbox.addWidget(obj)
74 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/roomViewWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt
4 | from PyQt5.QtGui import QIcon, QPixmap, QTransform
5 | import qdarkstyle
6 | import os, sys
7 | import numpy as np
8 | from PanoAnnotator import PanoAnnotator
9 |
10 |
11 | class RoomExtendedQImage(qtw.QLabel):
12 | clicked = QtCore.pyqtSignal()
13 |
14 | def __init__(self, pixmap, w):
15 | super().__init__()
16 | self.pixmap = pixmap
17 | self.pixWidth = self.pixmap.width()
18 | self.setPixmap(self.pixmap.scaledToWidth(int(w)))
19 | self.setStyleSheet(
20 | "QLabel { background-color: white} QLabel::hover {background-color : lightgray;}"
21 | )
22 |
23 | def mousePressEvent(self, ev):
24 | if ev.button() == Qt.RightButton:
25 | self.clicked.emit()
26 | else:
27 | self.clicked.emit()
28 |
29 |
30 | class RoomViewWidget(qtw.QScrollArea):
31 | def __init__(self, MW, app, house):
32 | super().__init__(MW)
33 | self.house = house
34 | self.app = app
35 | self.setObjectName('image_layout')
36 | self.initUI()
37 | self.updateUI()
38 |
39 | def updateUI(self):
40 | self.size = [
41 | self.app.MW.width() * 0.245, 25,
42 | self.app.MW.width() * 0.15,
43 | self.app.MW.height() - 60
44 | ]
45 | self.setGeometry(self.size[0], self.size[1], self.size[2],
46 | self.size[3])
47 |
48 | def initUI(self):
49 | self.widget = qtw.QWidget()
50 | self.vbox = qtw.QVBoxLayout()
51 | self.objs = []
52 | self.dir = None
53 | self.widget.setLayout(self.vbox)
54 |
55 | #Scroll Area Properties
56 | self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
57 | self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
58 | self.setWidgetResizable(True)
59 | self.setWidget(self.widget)
60 | return
61 |
62 | def update(self):
63 | for x in self.objs:
64 | x.setParent(None)
65 | self.vbox.removeWidget(x)
66 | self.objs = []
67 | if (self.house.current_pano == -1):
68 | return
69 | if (not os.path.exists("{}/{}/aligned_{}.json".format(
70 | self.house.dir, self.house.house_name,
71 | self.house.current_pano))):
72 | if (self.house.types[self.house.current_pano] != -1):
73 | self.app.panoAnnotator.run("{}/{}/aligned_{}.png".format(
74 | self.house.dir, self.house.house_name,
75 | self.house.current_pano))
76 |
77 | vis, tdw, tdf = self.house.get_current_pano(self.app.view_room_colors, self.app.view_door_colors)
78 | obj = RoomExtendedQImage(vis, self.app.MW.width() * 0.15)
79 | obj.clicked.connect(self.app.openPano)
80 | obj.setAlignment(Qt.AlignCenter)
81 | obj.setObjectName('vis_img')
82 | self.objs.append(obj)
83 | self.vbox.addWidget(obj)
84 |
85 | obj = RoomExtendedQImage(tdw, self.app.MW.width() * 0.15)
86 | obj.setAlignment(Qt.AlignCenter)
87 | obj.setObjectName('tdw_img')
88 | self.objs.append(obj)
89 | self.vbox.addWidget(obj)
90 |
91 | obj = RoomExtendedQImage(tdf, self.app.MW.width() * 0.15)
92 | obj.setAlignment(Qt.AlignCenter)
93 | obj.setObjectName('tdf_img')
94 | self.objs.append(obj)
95 | self.vbox.addWidget(obj)
96 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/room_type_annotator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import matplotlib.pyplot as plt
3 | import sys, glob, os
4 | from PIL import Image
5 | from PyQt5 import QtCore, QtGui, QtWidgets
6 | from PyQt5.QtCore import QCoreApplication, Qt
7 | from PyQt5.QtGui import QIcon, QPixmap, QTransform
8 |
9 | # room_types:
10 | # 0) balcony 1) outdoor 2) western-style-room 3) japanese-style-room
11 | # 4) Dining room 5) Kitchen 6) corridor 7) washroom 8) bathroom 9)toilet
12 |
13 |
14 | # ricoh_data = json.load(open('{}/{}.json'.format(PR_RICOH_DIR, self.house_name)))
15 | # ricoh_data = ricoh_data['images']
16 | # ricoh_data = [x for x in ricoh_data if x['file_name'][:-4] == self.name]
17 | # ricoh_data = ricoh_data[0]['room_type']
18 | # mapping = {'Washing_room': 7, 'Bathroom': 8, 'Kitchen': 5, 'Balcony': 0, 'Toilet': 9,
19 | # 'Japanese-style_room': 3, 'Verandah': 0, 'Western-style_room': 2, 'Entrance': 6}
20 | # self.type = mapping[ricoh_data]
21 |
22 | class Room_type_annotator():
23 | def __init__(self, app, house_dir):
24 | self.app = app
25 | self.house_dir = house_dir
26 | self.dialog = None
27 | img_files = glob.glob("{}/aligned_*.png".format(house_dir))
28 | img_files.sort()
29 | self.type_list = []
30 | self.is_closed = False
31 | if (os.path.exists("{}/room_types.txt".format(house_dir))):
32 | return
33 | for img in img_files:
34 | self.name = img.split('/')[-1][8:-4]
35 | self.openPano(img)
36 | if (self.is_closed):
37 | return
38 | with open("{}/room_types.txt".format(house_dir), 'w') as tmpfile:
39 | for t in self.type_list:
40 | tmpfile.write("pano: {} \t type: {} \n".format(t[0], t[1]))
41 |
42 | def keyPressEvent(self, e):
43 | # print(e.key())
44 | if (e.key() == 81): # = q for quit
45 | if (e.modifiers() & QtCore.Qt.ControlModifier):
46 | self.is_closed = True
47 | self.dialog.close()
48 | QCoreApplication.quit()
49 | else:
50 | self.dialog.close()
51 | elif (e.key() > 47 and e.key() < 58): # = numbers
52 | print('room assigned of type {}'.format(e.text()))
53 | self.type_list.append([self.name, int(e.text())])
54 | self.dialog.close()
55 |
56 | def openPano(self, image_dir):
57 | pano = QPixmap(image_dir)
58 | dialog = QtWidgets.QDialog()
59 | dialog.resize(self.app.MWsize[0], self.app.MWsize[0] / 1024 * 512)
60 | imagelabel = QtWidgets.QLabel(dialog)
61 | imagelabel.setPixmap(pano.scaledToWidth(self.app.MWsize[0]))
62 | dialog.setWindowTitle('Pano room type annotator')
63 | dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
64 | dialog.keyPressEvent = self.keyPressEvent
65 | self.dialog = dialog
66 |
67 | label = QtWidgets.QLabel(dialog)
68 | label.setText(
69 | "0) balcony 1) closet 2) western-style-room 3) japanese-style-room 4) Dining room 5) Kitchen 6) corridor 7) washroom 8) bathroom 9)toilet"
70 | )
71 | size = [self.app.MW.width() * 0.002, 25, self.app.MWsize[0], 25]
72 | label.setGeometry(size[0], size[1], size[2], size[3])
73 |
74 | dialog.exec_()
75 |
--------------------------------------------------------------------------------
/JigsawAnnotator/utils/typeListWidget.py:
--------------------------------------------------------------------------------
1 | from PyQt5 import QtCore, QtGui, QtWidgets
2 | import PyQt5.QtWidgets as qtw
3 | from PyQt5.QtCore import QCoreApplication, Qt
4 | from PyQt5.QtGui import QIcon, QPixmap
5 | import qdarkstyle
6 | import os, sys
7 |
8 | # room_types:
9 | # 0) balcony 1) closet 2) western-style-room 3) japanese-style-room
10 | # 4) Dining room 5) Kitchen 6) corridor 7) washroom 8) bathroom 9)toilet
11 |
12 |
13 | class TypeListWidget():
14 | def __init__(self, MW, app, house):
15 | self.house = house
16 | self.app = app
17 | self.list = qtw.QListView(MW)
18 | self.list.setWindowTitle('Types List')
19 | self.list.setSelectionMode(
20 | QtWidgets.QAbstractItemView.ExtendedSelection)
21 | #self.list.clicked.connect(self.app.update_folder)
22 | self.model = QtGui.QStandardItemModel(self.list)
23 | self.list.setModel(self.model)
24 |
25 | self.list.setGeometry(5, 510, 150, 300)
26 | self.list.setObjectName('sca_flags')
27 | self.initUI()
28 | self.updateUI()
29 |
30 | def updateUI(self):
31 | self.size = [
32 | self.app.MW.width() * 0.002, 25 + self.app.MW.height() * 0.67,
33 | self.app.MW.width() * 0.09,
34 | self.app.MW.height() * 0.25
35 | ]
36 | self.list.setGeometry(self.size[0], self.size[1], self.size[2],
37 | self.size[3])
38 |
39 | def initUI(self):
40 | types = [
41 | "Balcony", "Closet", "Western-style-room", "Japanese-style-room",
42 | "Dining room", "Kitchen", "Corridor", "Washroom", "Bathroom",
43 | "Toilet"
44 | ]
45 | self.model.removeRows(0, self.model.rowCount())
46 | for i, x in enumerate(types):
47 | item = QtGui.QStandardItem()
48 | item.setCheckable(True)
49 | item.setText(x)
50 | self.model.appendRow(item)
51 | return
52 |
53 | def update(self):
54 | for i in range(self.model.rowCount()):
55 |             self.model.item(i).setCheckState(Qt.Unchecked)
56 |         room_type = self.house.get_type()
57 |         if (room_type == -1):
58 |             return
59 |         item = self.model.item(room_type)
60 |         item.setCheckState(Qt.Checked)
61 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Extreme Structure from Motion for Indoor Panoramas without Visual Overlaps
2 | Code and instructions for our paper: Extreme Structure from Motion for Indoor Panoramas without Visual Overlaps, ICCV 2021.
3 |
4 | ## Installation
5 | First, clone our repo and install the requirements:
6 | ```
7 | git clone https://github.com/aminshabani/extreme-indoor-sfm.git
8 | cd extreme-indoor-sfm
9 | pip install -r requirements.txt
10 | ```
11 | The code is based on PyTorch and uses [Detectron2](https://github.com/facebookresearch/detectron2) for door/window detection and [HorizonNet](https://github.com/sunset1995/HorizonNet) for layout estimation.
12 |
13 | ## Dataset
14 | First, preprocess the panorama images of each house so that they are aligned with the Manhattan world. You can use the same [script](https://github.com/sunset1995/HorizonNet#1-pre-processing-align-camera-rotation-pose) as previous layout-estimation methods.
15 | Create a new `dataset` directory containing a folder for each house and move the corresponding panorama images into that folder.
16 | The directory structure should be as follows:
17 | ```
18 | extreme-indoor-sfm
19 | ├── dataset
20 | │   ├── house1
21 | │   │   ├── images
22 | │   │   │   ├── aligned_0.png
23 | │   │   │   ├── aligned_1.png
24 | │   │   │   └── ...
25 | │   │   └── floorplan.jpg
26 | │   ├── house2
27 | │   │   ├── images
28 | │   │   └── floorplan.jpg
29 | │   └── ...
30 | └── detection
31 |     └── ...
32 | ```
33 | You can download some of the sample houses from this [link](https://drive.google.com/file/d/1pGvsbgo3OOdyUBmQNwF2Fxly9lQRuBXm/view?usp=sharing).
34 | Please see [panorama.py](src/panotools/panorama.py) and [house.py](src/panotools/house.py) for more details.
35 |
36 | Finally, add the names of the houses to `test.txt`. For the provided sample houses, for example, it would be:
37 | ```
38 | 0001
39 | 0002
40 | ...
41 | ```
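
As a quick sanity check, the following minimal sketch (assuming the `dataset` layout and the `test.txt` file described above) lists the aligned panoramas found for each house; the `dataset/<house>/images/aligned_*.png` pattern is the same one used by the detection and room-classification modules.
```
import os
from glob import glob

# Minimal layout check; "dataset" and "test.txt" follow the structure described above.
data_dir = "dataset"
with open("test.txt") as f:
    houses = [line.strip() for line in f if line.strip()]

for house in houses:
    panos = sorted(glob(os.path.join(data_dir, house, "images", "aligned_*.png")))
    floorplan = os.path.join(data_dir, house, "floorplan.jpg")
    print(f"{house}: {len(panos)} aligned panoramas, "
          f"floorplan {'found' if os.path.exists(floorplan) else 'missing'}")
```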
42 | ## Pre-trained Models
43 | Please download the checkpoints from [Google Drive](https://drive.google.com/file/d/172E2vJ4x_wqH6OYNLYI_l6MH-bYbOnX6/view?usp=sharing) and put them in the same directories as provided. You can also update the corresponding args in [parser.py](parser.py).
44 |
45 | ## Floorplan estimation
46 | Finally, you can simply run the code with:
47 | ```
48 | bash run.sh
49 | ```
50 | The above command runs each module step by step and creates a new `output` directory in which you can find the predicted floorplans sorted by their score.
51 |
52 | Meanwhile, you can also find the outputs of each module (detection, layout estimation, and room type predictions) in the dataset folder of each house.
53 | ```
54 | extreme-indoor-sfm
55 | ├── dataset
56 | │   ├── house1
57 | │   │   ├── images
58 | │   │   ├── detection_preds
59 | │   │   ├── ...
60 | │   │   └── floorplan.jpg
61 | │   └── ...
62 | └── output
63 | ```
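
For downstream use, here is a minimal sketch of reading these per-house outputs back; paths follow the structure above, and file names such as `aligned_0.json` are only examples. The detection JSONs store boxes, class indices, per-candidate scores, and COCO-style RLE masks (see `save_json` in [detection/test.py](detection/test.py)), while `room_type_preds.npy` stores a dictionary of 10-way room-type probabilities keyed by panorama id.
```
import numpy as np
import simplejson as json
import pycocotools.mask as mask_util

# Example paths; adjust the house/panorama names to your own data.
det_path = "dataset/house1/detection_preds/aligned_0.json"
with open(det_path) as f:
    det = json.load(f)

boxes = det["pred_boxes"]      # [N, 4] boxes in image coordinates
classes = det["pred_classes"]  # indices into the 6 detection classes
scores = det["scores"]         # per-candidate class scores
rles = det["pred_masks"]
for rle in rles:               # pycocotools expects bytes for RLE counts
    if isinstance(rle["counts"], str):
        rle["counts"] = rle["counts"].encode()
masks = [mask_util.decode(rle) for rle in rles]  # one HxW binary mask per detection

# Room-type probabilities written by src/tests/room_type_classification.py
room_types = np.load("dataset/house1/room_type_preds.npy", allow_pickle=True).item()
```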
64 | ## Citation
65 | ```
66 | @InProceedings{Shabani_2021_ICCV,
67 | author = {Shabani, Mohammad Amin and Song, Weilian and
68 | Odamaki, Makoto and Fujiki, Hirochika and Furukawa, Yasutaka},
69 | title = {Extreme Structure from Motion for Indoor Panoramas without Visual Overlaps},
70 | booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
71 | month = {October},
72 | year = {2021},
73 | url = {https://aminshabani.github.io/publications/extreme_sfm/pdfs/iccv2021_2088.pdf}
74 | }
75 | ```
76 |
--------------------------------------------------------------------------------
/detection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/detection/__init__.py
--------------------------------------------------------------------------------
/detection/load_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | from glob import glob
3 |
4 | from . import panotools
5 |
6 | def load_data(set_name, args):
7 | if set_name=='all':
8 | names = [line.rstrip() for line in open(args.train_set)] + [line.rstrip() for line in open(args.test_set)]
9 | elif set_name=='train':
10 | names = [line.rstrip() for line in open(args.train_set)]
11 | elif set_name=='test':
12 | names = [line.rstrip() for line in open(args.test_set)]
13 | else:
14 | print("set name is not defined properly...")
15 | return
16 | folder_list = [os.path.join(args.data_dir, x) for x in names]
17 | folder_list = sorted(folder_list)
18 | panos = []
19 | for folder in folder_list:
20 | img_list = glob(f"{folder}/images/aligned_*.png")
21 | for f in img_list:
22 | name = f.split("/")[-1][:-4]
23 | path = os.path.join(folder, 'images')
24 | pano = panotools.panorama.Panorama(path, name)
25 | annotation = pano.get_detectron_annotation(len(panos))
26 | panos.append(annotation)
27 | if args.det_is_eval:
28 | panos = [x for x in panos if len(x['annotations'])>0]
29 | return panos
30 |
--------------------------------------------------------------------------------
/detection/panotools/__init__.py:
--------------------------------------------------------------------------------
1 | from .bbox import BBox
2 | from .panorama import Panorama
3 | from . import tools
4 |
--------------------------------------------------------------------------------
/detection/panotools/bbox.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | class BBox:
4 | def __init__(self, bbox=None, obj_type=None):
5 | self.bbox = bbox #BoundingBox (2,3)
6 | self.type = obj_type
7 | if abs(self.bbox[0][0]-self.bbox[1][0])<1e-4:
8 | if self.bbox[0][0]>0:
9 | self.direction = 0
10 | else:
11 | self.direction = 2
12 | else:
13 | if self.bbox[0][2]>0:
14 | self.direction = 1
15 | else:
16 | self.direction = 3
17 |
18 |
--------------------------------------------------------------------------------
/detection/panotools/tools.py:
--------------------------------------------------------------------------------
1 | import shapely.geometry as sg
2 | import shapely.ops as so
3 | import numpy as np
4 | from shapely.ops import transform
5 | import math
6 | import seaborn as sns
7 |
8 | def uv2coords(uv):
9 | coordsX = uv[0] / (2 * math.pi) + 0.5
10 | coordsY = -uv[1] / math.pi + 0.5
11 | coords = (coordsX, coordsY)
12 | return coords
13 |
14 |
15 | def xyz2uv(xyz):
16 | normXZ = math.sqrt(math.pow(xyz[0], 2) + math.pow(xyz[2], 2))
17 | if normXZ < 0.000001:
18 | normXZ = 0.000001
19 | normXYZ = math.sqrt(
20 | math.pow(xyz[0], 2) + math.pow(xyz[1], 2) + math.pow(xyz[2], 2))
21 | v = math.asin(xyz[1] / normXYZ)
22 | u = math.asin(xyz[0] / normXZ)
23 | if xyz[2] > 0 and u > 0:
24 | u = math.pi - u
25 | elif xyz[2] > 0 and u < 0:
26 | u = -math.pi - u
27 | uv = (u, v)
28 | return uv
29 |
30 |
31 | def xyz2coords(xyz):
32 | uv = xyz2uv(xyz)
33 | coords = uv2coords(uv)
34 | return coords
35 |
--------------------------------------------------------------------------------
/detection/readme.md:
--------------------------------------------------------------------------------
1 | # Detection Module
2 |
3 | You can download the checkpoints from [Google Drive](https://drive.google.com/file/d/172E2vJ4x_wqH6OYNLYI_l6MH-bYbOnX6/view?usp=sharing).
4 | ### Generating the detection results
5 | To generate the predictions:
6 | ```
7 | python -m detection.test
8 | ```
9 | The above command creates a `detection_preds` folder for each house in the dataset. You can also pass `--det_save_images` to save the predictions as images in the same directory.
10 | ```
11 | python -m detection.test --det_save_images
12 | ```
13 |
14 | ### Evaluation
15 | To evaluate the performance:
16 | ```
17 | python -m detection.test --det_is_eval
18 | ```
19 |
20 | ### Training on new dataset
21 | For training on your own data, please see [panorama.py](panotools/panorama.py). As a starting point, a minimal training sketch that reuses the same dataset registration as [test.py](test.py) is shown below.
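
The sketch registers the splits exactly as [test.py](test.py) does and then runs Detectron2's `DefaultTrainer`; the solver settings are placeholder assumptions, not the values used for the released checkpoint.
```
import os

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultTrainer

from detection.load_data import load_data
from parser import config_parser

args = config_parser().parse_args()

# Register the train/test splits exactly as detection/test.py does.
for d in ["train", "test"]:
    DatasetCatalog.register("mydata_" + d, lambda d=d: load_data(d, args))
    MetadataCatalog.get("mydata_" + d).set(
        thing_classes=["Door", "Glass_door", "Frame", "Window", "Kitchen_counter", "closet"])

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(args.det_config))
cfg.DATASETS.TRAIN = ("mydata_train",)
cfg.DATASETS.TEST = ("mydata_test",)
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(args.det_config)  # start from COCO weights
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
cfg.SOLVER.IMS_PER_BATCH = 2        # placeholder batch size
cfg.SOLVER.BASE_LR = 0.00025        # placeholder learning rate
cfg.SOLVER.MAX_ITER = 10000         # placeholder iteration count
cfg.OUTPUT_DIR = "logs/detection"
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
```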
22 |
23 |
24 |
--------------------------------------------------------------------------------
/detection/test.py:
--------------------------------------------------------------------------------
1 | from glob import glob
2 |
3 | import torch
4 |
5 | from detectron2.utils.logger import setup_logger
6 | setup_logger()
7 | import pycocotools.mask as mask_util
8 |
9 | # import some common libraries
10 | import numpy as np
11 | import os, cv2
12 | import simplejson as json
13 |
14 | # import some common detectron2 utilities
15 | import detectron2.data.transforms as T
16 | from detectron2.modeling import build_model
17 | from detectron2.checkpoint import DetectionCheckpointer
18 | from detectron2 import model_zoo
19 | from detectron2.engine import DefaultPredictor
20 | from detectron2.config import get_cfg
21 | from detectron2.utils.visualizer import Visualizer
22 | from detectron2.data import MetadataCatalog, DatasetCatalog, DatasetMapper
23 | from detectron2.utils.visualizer import ColorMode
24 | from detectron2.evaluation import COCOEvaluator, inference_on_dataset
25 | from detectron2.data import build_detection_test_loader
26 |
27 | from .load_data import load_data
28 | from parser import config_parser
29 |
30 |
31 | def save_json(outputs, scores, path):
32 | outputs = outputs['instances'].to('cpu')
33 | output = outputs.get_fields()
34 | json_output = dict()
35 | json_output['scores'] = scores.tolist()#output['scores'].data.numpy().tolist()
36 | json_output['pred_boxes'] = output['pred_boxes'].tensor.data.numpy().tolist()
37 | json_output['pred_classes'] = output['pred_classes'].data.numpy().tolist()
38 | json_output['pred_masks'] = output['pred_masks'].int().data.numpy().astype(np.uint8)
39 | json_output['pred_masks'] = [mask_util.encode(
40 | np.asfortranarray(mask)) for mask in json_output['pred_masks']]
41 | with open(path, 'w') as f:
42 | json.dump(json_output, f, ensure_ascii=False)
43 |
44 | def pred(aug, model, path):
45 | org_im = cv2.imread(path)
46 | height, width = org_im.shape[:2]
47 | with torch.no_grad():
48 | im = aug.get_transform(org_im).apply_image(org_im)
49 | im = torch.as_tensor(im.astype("float32").transpose(2, 0, 1))
50 | inputs = [{"image": im, "height": height, "width": width}]
51 | images = model.preprocess_image(inputs)
52 | features = model.backbone(images.tensor) # set of cnn features
53 | proposals, _ = model.proposal_generator(images, features, None) # RPN
54 | features_ = [features[f] for f in model.roi_heads.box_in_features]
55 | box_features = model.roi_heads.box_pooler(features_, [x.proposal_boxes for x in proposals])
56 | box_features = model.roi_heads.box_head(box_features) # features of all 1k candidates
57 | predictions = model.roi_heads.box_predictor(box_features)
58 | pred_instances, pred_inds = model.roi_heads.box_predictor.inference(predictions, proposals)
59 | pred_instances = model.roi_heads.forward_with_given_boxes(features, pred_instances)
60 |
61 | # output boxes, masks, scores, etc
62 | pred_instances = model._postprocess(pred_instances, inputs, images.image_sizes) # scale box to orig size
63 | # features of the proposed boxes
64 | scores = torch.softmax(predictions[0][pred_inds], 1).data.cpu().numpy()
65 | outputs = pred_instances[0]
66 |
67 | ######
68 | path = path.replace('images','detection_preds')
69 | name = path.split('/')[-1]
70 | path = path.replace('/'+name,'')
71 | os.makedirs(path, exist_ok=True)
72 | save_json(outputs, scores, f"{path}/{name.replace('png','json')}")
73 | if args.det_save_images:
74 | v = Visualizer(org_im[:, :, ::-1], scale=1.0)
75 | out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
76 | cv2.imwrite(f'{path}/{name}', out.get_image()[:, :, ::-1])
77 |
78 | if __name__ == "__main__":
79 | parser = config_parser()
80 | args = parser.parse_args()
81 | for d in ["train", "test"]:
82 | DatasetCatalog.register("mydata_" + d, lambda d=d: load_data(d, args))
83 | MetadataCatalog.get("mydata_" + d).set(thing_classes=["Door","Glass_door","Frame","Window","Kitchen_counter","closet"])
84 | mydata_metadata = MetadataCatalog.get("mydata_train")
85 |
86 | cfg = get_cfg()
87 | cfg.merge_from_file(model_zoo.get_config_file(args.det_config))
88 | cfg.DATASETS.TRAIN = ("mydata_train",)
89 | cfg.DATASETS.TEST = ("mydata_test",)
90 | cfg.DATALOADER.NUM_WORKERS = 2
91 | cfg.MODEL.WEIGHTS = f"detection/{args.det_model_weight}"
92 | cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.3
93 | cfg.SOLVER.IMS_PER_BATCH = 2
94 | cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
95 | cfg.MODEL.ROI_HEADS.NUM_CLASSES = 6
96 |
97 |
98 | if args.det_is_eval:
99 | predictor = DefaultPredictor(cfg)
100 | evaluator = COCOEvaluator("mydata_test", ("bbox", "segm"), False, output_dir="logs/detection")
101 | mapper = DatasetMapper(cfg, is_train=False)
102 | loader = build_detection_test_loader(cfg, "mydata_test", mapper=mapper)
103 | print(inference_on_dataset(predictor.model, loader, evaluator))
104 | else:
105 | aug = T.ResizeShortestEdge(
106 | [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
107 | )
108 | mycfg = cfg.clone()
109 | predictor = build_model(mycfg)
110 | DetectionCheckpointer(predictor).load(cfg.MODEL.WEIGHTS)
111 | predictor.eval()
112 | dataset_dicts = load_data('all', args)
113 | for d in dataset_dicts:
114 | pred(aug, predictor, d['file_name'])
115 |
--------------------------------------------------------------------------------
/figs/teaser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/figs/teaser.jpg
--------------------------------------------------------------------------------
/parser.py:
--------------------------------------------------------------------------------
1 | import configargparse
2 |
3 |
4 | def config_parser():
5 | parser = configargparse.ArgumentParser()
6 |
7 | parser.add_argument("--device", type=str, default='cuda', help='target device, cuda or cpu')
8 | parser.add_argument("--log", type=str, help="log level", default='DEBUG')
9 |
10 |     # dataset configs
11 | parser.add_argument("--data_dir", type=str, default='dataset', help='dataset dir')
12 | parser.add_argument("--train_set", type=str, default='train.txt', help='txt file including the name of train houses')
13 | parser.add_argument("--test_set", type=str, default='test.txt', help='txt file including the name of test houses')
14 |
15 | # detection configs
16 | parser.add_argument("--det_config", type=str, default="COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
17 | , help='default configs of the detection module (see Detectron2 Model Zoo for more details)')
18 | parser.add_argument("--det_model_weight", type=str, default='ckpt/model_final.pth', help='detection model')
19 | parser.add_argument("--det_save_images", action='store_true', help='save the detection outputs')
20 | parser.add_argument("--det_is_eval", action='store_true', help='only consider labeled images (for evaluation)')
21 | parser.add_argument("--det_check_labels", action='store_true', help='visualize random samples before training to check annotations')
22 |
23 | # layout estimation configs
24 | parser.add_argument("--lt_model_weight", type=str, default='ckpt/model_final.pth', help='layout estimation model')
25 | parser.add_argument('--lt_visualize', action='store_true')
26 |     parser.add_argument('--lt_flip', action='store_true', help='whether to perform left-right flip. ' '# of input x2.')
27 |     parser.add_argument('--lt_rotate', nargs='*', default=[], type=float,
28 |                         help='whether to perform horizontal rotation. '
29 |                         'each element indicates a fraction of the image width. '
30 |                         '# of input x len(rotate).')
31 | parser.add_argument('--lt_r', default=0.05, type=float)
32 | parser.add_argument('--lt_min_v', default=None, type=float)
33 | parser.add_argument('--lt_force_cuboid', action='store_true')
34 |
35 | # room classification configs
36 | parser.add_argument("--rc_model", type=str, help="model architecture", default='unet')
37 | parser.add_argument("--rc_model_weight", type=str, help="weights file", default="src/exps/room_type_classification/room_type_final_model.pth")
38 | parser.add_argument("--rc_batch_size", type=int, help="batch_size", default=8)
39 | parser.add_argument("--rc_is_eval", action='store_true', help='only consider labeled images (for evaluation)')
40 |
41 |
42 |
43 | # arrangement configs
44 | parser.add_argument("--ar_model", type=str, help="model architecture", default='convmpn')
45 | parser.add_argument("--ar_exp", type=str, help="experiment name", default='test')
46 | parser.add_argument("--ar_batch_size", type=int, help="batch_size", default=1)
47 | parser.add_argument("--ar_model_weight", type=str, help="weights file", default="src/exps/main/model.pth")
48 |
49 | # visualization configs
50 | parser.add_argument('--vis_ignore_centers', action='store_true')
51 | parser.add_argument('--vis_ignore_door_colors', action='store_true')
52 | parser.add_argument('--vis_ignore_room_colors', action='store_true')
53 |
54 | # prediction configs
55 |     parser.add_argument("--prediction_level", type=str, help="which candidate set to use (full, lv2, lv1); lv1 applies two filters before prediction. For values other than lv1, also pass the keep_sets_overlapped option", default='lv1')
56 |
57 |
58 | # main configs
59 | parser.add_argument('--use_gt', action='store_true')
60 | parser.add_argument('--use_rotations_input', action='store_true')
61 | parser.add_argument('--keep_sets_overlapped', action='store_true')
62 |
63 | args = parser.parse_args()
64 |
65 | return parser
66 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==1.0.0
2 | albumentations==1.1.0
3 | antlr4-python3-runtime==4.8
4 | appdirs==1.4.4
5 | black==21.4b2
6 | cachetools==4.2.4
7 | certifi==2021.10.8
8 | charset-normalizer==2.0.8
9 | click==8.0.3
10 | cloudpickle==2.0.0
11 | coloredlogs==15.0.1
12 | ConfigArgParse==1.5.3
13 | cycler==0.11.0
14 | Cython==0.29.24
15 | detectron2==0.6+cu102
16 | fonttools==4.28.2
17 | future==0.18.2
18 | fvcore==0.1.5.post20211023
19 | google-auth==2.3.3
20 | google-auth-oauthlib==0.4.6
21 | grpcio==1.42.0
22 | humanfriendly==10.0
23 | hydra-core==1.1.1
24 | idna==3.3
25 | imageio==2.13.3
26 | importlib-metadata==4.8.2
27 | iopath==0.1.9
28 | joblib==1.1.0
29 | kiwisolver==1.3.2
30 | Markdown==3.3.6
31 | matplotlib==3.5.0
32 | mypy-extensions==0.4.3
33 | networkx==2.6.3
34 | numpy==1.21.4
35 | oauthlib==3.1.1
36 | omegaconf==2.1.1
37 | opencv-python==4.5.4.60
38 | opencv-python-headless==4.5.4.60
39 | packaging==21.3
40 | pandas==1.3.4
41 | pathspec==0.9.0
42 | Pillow==8.4.0
43 | portalocker==2.3.2
44 | protobuf==3.19.1
45 | pyasn1==0.4.8
46 | pyasn1-modules==0.2.8
47 | pycocotools==2.0.3
48 | pydot==1.4.2
49 | pyparsing==3.0.6
50 | python-dateutil==2.8.2
51 | pytz==2021.3
52 | PyWavelets==1.2.0
53 | PyYAML==6.0
54 | qudida==0.0.4
55 | regex==2021.11.10
56 | requests==2.26.0
57 | requests-oauthlib==1.3.0
58 | rsa==4.8
59 | scikit-image==0.19.0
60 | scikit-learn==1.0.1
61 | scipy==1.7.3
62 | seaborn==0.11.2
63 | setuptools-scm==6.3.2
64 | Shapely==1.8.0
65 | simplejson==3.17.6
66 | six==1.16.0
67 | tabulate==0.8.9
68 | tensorboard==2.7.0
69 | tensorboard-data-server==0.6.1
70 | tensorboard-plugin-wit==1.8.0
71 | termcolor==1.1.0
72 | threadpoolctl==3.0.0
73 | tifffile==2021.11.2
74 | toml==0.10.2
75 | tomli==1.2.2
76 | torch==1.10.0
77 | torchaudio==0.10.0
78 | torchvision==0.11.1
79 | tqdm==4.62.3
80 | typing-extensions==4.0.1
81 | urllib3==1.26.7
82 | Werkzeug==2.0.2
83 | yacs==0.1.8
84 | zipp==3.6.0
85 |
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
1 | python -m detection.test --det_save_images
2 | python -m src.tests.room_type_classification
3 | python -m HorizonNet.inference --lt_visualize
4 | python -m src.tests.predict_arrangements
5 |
--------------------------------------------------------------------------------
/src/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/src/__init__.py
--------------------------------------------------------------------------------
/src/loaders/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/src/loaders/__init__.py
--------------------------------------------------------------------------------
/src/loaders/main_loader.py:
--------------------------------------------------------------------------------
1 | from src.panotools.house import House
2 | from multiprocessing import Pool
3 |
4 |
5 | def load_house(name, args):
6 | house = House(name, args)
7 | return house
8 |
9 | def load_dataset(args, set_name=None):
10 | names = [line.rstrip() for line in open(args.train_set)]
11 | test_names = [line.rstrip() for line in open(args.test_set)]
12 | if set_name is None:
13 | house_list = names+test_names
14 | else:
15 | if set_name == 'train':
16 | house_list = names
17 | else:
18 | house_list = test_names
19 |
20 | # pool = Pool(10)
21 | # houses = pool.map(load_house, house_list)
22 | # pool.close()
23 | houses = [load_house(x, args) for x in house_list]
24 | return houses
25 |
--------------------------------------------------------------------------------
/src/loaders/room_type_classification.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import albumentations as A
4 | from torch.utils.data import Dataset, DataLoader
5 | import torchvision.transforms as transforms
6 | from .main_loader import load_dataset
7 | import logging
8 | import json
9 | from PIL import Image
10 | from glob import glob
11 | import src.panotools as panotools
12 |
13 | # return img1, img2, mask1, mask2, label, rtype1, rtype2
14 | ROOM_TYPES = ['Western-style_room', 'Entrance', 'Kitchen', 'Verandah',
15 | 'Balcony', 'Toilet', 'Washing_room', 'Bathroom', 'Japanese-style_room']
16 |
17 | logger = logging.getLogger('log')
18 | BASE_DIR = ''
19 |
20 |
21 | class FloorDataset(Dataset):
22 | def __init__(self, set_name, transform, retorg=False):
23 | self.augmentation = A.Compose([
24 | A.HorizontalFlip(p=0.5),
25 | A.RandomBrightnessContrast(p=0.2),
26 | A.CLAHE(),
27 | A.ISONoise(),
28 | ])
29 | logger.info("Loading data from {} ...".format(BASE_DIR))
30 | self.ret_org = retorg
31 | self.transforms = transform
32 | self.set_name = set_name
33 | self.samples = []
34 | # self.houses = load_dataset()
35 | # TYPES: [Balcony, Closet, Western style room, Japanese style room, Dining Room
36 | # Kitchen, Corridor, Washroom, Bathroom, Toilet]
37 | names = [line.rstrip() for line in open('data_names.txt')]
38 | test_names = [line.rstrip() for line in open('test_names.txt')]
39 |
40 | train_houses = []
41 | test_houses = []
42 | for name in names:
43 | if name not in test_names:
44 | train_houses.append(panotools.House(BASE_DIR, name))
45 | else:
46 | test_houses.append(panotools.House(BASE_DIR, name))
47 | if set_name == 'train':
48 | for house in train_houses:
49 | for pano in house.panos:
50 | if(pano.get_type()==-1):
51 | continue
52 | self.samples.append([pano, pano.get_type()])
53 | else:
54 | for house in test_houses:
55 | for pano in house.panos:
56 | if(pano.get_type()==-1):
57 | continue
58 | self.samples.append([pano, pano.get_type()])
59 |
60 |
61 | per_class_samples = np.zeros(10)
62 | for sample in self.samples:
63 | assert sample[1] < 10, sample
64 | per_class_samples[sample[1]] += 1
65 | print(per_class_samples, len(self.samples))
66 |
67 | logger.info(per_class_samples)
68 | logger.info("finish loading with {} houses and {} samples...".format(
69 | 1000, len(self.samples)))
70 |
71 | def __len__(self):
72 | return len(self.samples)
73 |
74 | def __getitem__(self, idx):
75 | if torch.is_tensor(idx):
76 | idx = idx.tolist()
77 |
78 | sample = self.samples[idx]
79 | pano = sample[0]
80 | label = sample[1]
81 | img = pano.get_panorama()
82 | img = np.array(img)
83 |
84 | if(self.set_name == 'train'):
85 | transformed = self.augmentation(image=img)
86 | img = transformed['image']
87 | img = img.astype(float)/255
88 |
89 | if self.transforms is not None:
90 | img = self.transforms(img)
91 |
92 | return img, label
93 |
94 |
95 | def dataset(set_name='train', batch_size=2, house_id=0):
96 | transform = transforms.Compose([
97 | transforms.ToTensor(),
98 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
99 | ])
100 | if (set_name == 'train'):
101 | dataset = FloorDataset(set_name, transform=transform)
102 | else:
103 | dataset = FloorDataset(set_name, transform=transform)
104 |
105 | if (set_name == 'train'):
106 | loader = DataLoader(dataset,
107 | batch_size=batch_size,
108 | num_workers=10,
109 | pin_memory=True,
110 | shuffle=True, prefetch_factor=10)
111 | else:
112 | loader = DataLoader(dataset,
113 | batch_size=batch_size,
114 | num_workers=10,
115 | pin_memory=True,
116 | shuffle=False)
117 |
118 | return loader
119 |
120 |
121 | if __name__ == '__main__':
122 | transform = transforms.Compose([
123 | transforms.ToTensor(),
124 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
125 | ])
126 | dataset = FloorDataset('train', transform=transform)
127 | for i in range(10):
128 | dataset[i]
129 |
--------------------------------------------------------------------------------
/src/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/src/models/__init__.py
--------------------------------------------------------------------------------
/src/models/convmpn.py:
--------------------------------------------------------------------------------
1 | """ Full assembly of the parts to form the complete network """
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import logging
7 | import numpy as np
8 | logger = logging.getLogger('log')
9 |
10 |
11 | class DoubleConv(nn.Module):
12 | def __init__(self, in_channels, out_channels, mid_channels=None):
13 | super().__init__()
14 | if not mid_channels:
15 | mid_channels = out_channels
16 | self.double_conv = nn.Sequential(
17 | nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, bias=True),
18 | # nn.BatchNorm2d(mid_channels),
19 | nn.GroupNorm(np.minimum(mid_channels, 4), mid_channels),
20 | nn.ReLU(),
21 | nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1, bias=True),
22 | # nn.BatchNorm2d(out_channels),
23 | nn.GroupNorm(np.minimum(out_channels, 4), out_channels),
24 | nn.ReLU()
25 | )
26 |
27 | def forward(self, x):
28 | return self.double_conv(x)
29 |
30 |
31 | class MSP(nn.Module):
32 | def __init__(self, in_channels):
33 | super().__init__()
34 | self.conv = nn.Sequential(DoubleConv(in_channels*2, in_channels))
35 |
36 | def forward(self, x):
37 | for i in range(len(x)):
38 | condition = list(range(len(x)))
39 | condition.remove(i)
40 | if i==0:
41 | common_features = torch.sum(x[1:], 0).unsqueeze(0)
42 | else:
43 | common_features = torch.cat([common_features, torch.sum(x[condition], 0).unsqueeze(0)], 0)
44 | x = torch.cat([x, common_features], 1)
45 | return self.conv(x)
46 |
47 |
48 | class Down(nn.Module):
49 | def __init__(self, in_channels, out_channels):
50 | super().__init__()
51 | self.maxpool_conv = nn.Sequential(
52 | nn.MaxPool2d(2), DoubleConv(in_channels, out_channels))
53 |
54 | def forward(self, x):
55 | return self.maxpool_conv(x)
56 |
57 |
58 | class OutConv(nn.Module):
59 | def __init__(self, in_channels, out_channels):
60 | super(OutConv, self).__init__()
61 | self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
62 |
63 | def forward(self, x):
64 | return self.conv(x)
65 |
66 |
67 | class MyModel(nn.Module):
68 | def __init__(self, n_channels, n_classes):
69 | super(MyModel, self).__init__()
70 | # self.l1 = nn.Sequential(nn.Linear(16, 1), nn.ReLU(True))
71 | # self.msp0 = MSP(16)
72 | self.inc = DoubleConv(16, 32)
73 | self.msp1 = MSP(32)
74 | self.down1 = Down(32, 64) # 16
75 | self.msp2 = MSP(64)
76 | self.down2 = Down(64, 128) # 32
77 | self.msp3 = MSP(128)
78 | self.down3 = Down(128, 256) # 64
79 | self.msp4 = MSP(256)
80 | self.down4 = Down(256, 128) # 64
81 | self.msp5 = MSP(128)
82 | self.down5 = Down(128, 1) # 8
83 | # self.msp6 = MSP(128)
84 | # self.down6 = Down(128, 32) # 128
85 | # self.msp7 = MSP(64)
86 | # self.down7 = Down(64, 32) # 128
87 | # self.msp8 = MSP(32)
88 | # self.down8 = Down(32, 16) # 64
89 | self.linear_out = nn.Linear(64, 1)
90 |
91 | def forward(self, x):
92 | x = x.view(-1, 16, 256, 256)
93 | x = x[1:, :, :, :]
94 | # x = self.msp0(x)
95 | x = self.inc(x)
96 | x = self.msp1(x)
97 | x = self.down1(x)
98 | x = self.msp2(x)
99 | x = self.down2(x)
100 | x = self.msp3(x)
101 | x = self.down3(x)
102 | x = self.msp4(x)
103 | x = self.down4(x)
104 | x = self.msp5(x)
105 | x = self.down5(x)
106 | # x = self.msp6(x)
107 | # x = self.down6(x)
108 | # x = self.msp7(x)
109 | # x = self.down7(x)
110 | x = torch.mean(x, 0)
111 | x = torch.flatten(x).unsqueeze(0)
112 | x = self.linear_out(x)
113 |
114 | return x
115 |
116 |
117 | def get_convmpn_model(name, inp_dim=1, out_dim=1):
118 | if (name == 'convmpn'):
119 | model = MyModel(inp_dim, out_dim)
120 |     else:
121 |         logger.error("model type {} was not found".format(name))
122 |         raise ValueError("model type {} was not found".format(name))
123 |
124 | model = model.cuda()
125 | return model
126 |
--------------------------------------------------------------------------------
/src/models/model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 | import sys
4 | from .unet import UNet
5 | from .convmpn import get_convmpn_model
6 | import logging
7 | import torchvision as tv
8 | logger = logging.getLogger('log')
9 |
10 | class Classifier(nn.Module):
11 | def __init__(self, my_pretrained_model, input_dim, out_dim):
12 | super(Classifier, self).__init__()
13 | self.pretrained = my_pretrained_model
14 | # self.my_new_layers1 = nn.Sequential(nn.ReLU(), nn.Linear(input_dim*1, 4))
15 | self.my_new_layers2 = nn.Sequential(nn.Linear(16, 1))
16 |
17 | def forward(self, img1, img2, mask1, mask2, is_train=True):
18 | x1 = self.pretrained(torch.cat([img1, mask1], 1))
19 | # x1 = self.pretrained(img1)
20 | x2 = self.pretrained(torch.cat([img2, mask2], 1))
21 | # x2 = self.pretrained(img2)
22 | # if is_train:
23 | # x1 = torch.dropout(x1, 0.5, is_train)
24 | # x2 = torch.dropout(x2, 0.5, is_train)
25 | # x1 = self.my_new_layers1(x1)
26 | # x2 = self.my_new_layers1(x2)
27 | # x = self.my_new_layers2(torch.cat([x1, x2], 1))
28 | x = self.my_new_layers2(x1*x2)
29 | return x
30 |
31 |
32 | def get_model(name, inp_dim=3, out_dim=1):
33 | if (name == 'unet'):
34 | model = UNet(inp_dim, out_dim, decoder=False)
35 | elif (name == 'unet-encoder'):
36 | model = UNet(inp_dim, None, decoder=False)
37 | model = Classifier(model, 16, out_dim)
38 | elif (name == 'resnet-encoder'):
39 | model = tv.models.resnet18(pretrained=True)
40 | model.conv1 = nn.Conv2d(inp_dim,
41 | 64,
42 | kernel_size=(7, 7),
43 | stride=(2, 2),
44 | padding=(3, 3),
45 | bias=False)
46 | model = Classifier(model, 1000, out_dim)
47 | elif ('convmpn' in name):
48 | model = get_convmpn_model(name, inp_dim)
49 | else:
50 |         logging.error("model type {} was not found".format(name))
51 | sys.exit(1)
52 |
53 | model = model.cuda()
54 | return model
55 |
--------------------------------------------------------------------------------
/src/models/unet.py:
--------------------------------------------------------------------------------
1 | """ Full assembly of the parts to form the complete network """
2 |
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 | import logging
7 | logger = logging.getLogger('log')
8 |
9 |
10 | class DoubleConv(nn.Module):
11 | """(convolution => [BN] => ReLU) * 2"""
12 |
13 | def __init__(self, in_channels, out_channels, mid_channels=None):
14 | super().__init__()
15 | if not mid_channels:
16 | mid_channels = out_channels
17 | if mid_channels != 1:
18 | self.double_conv = nn.Sequential(
19 | nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
20 | nn.GroupNorm(4,mid_channels), nn.ReLU(),)
21 | # nn.BatchNorm2d(mid_channels), nn.ReLU(),)
22 | # nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
23 | # nn.GroupNorm(4,out_channels), nn.ReLU())
24 | else:
25 | self.double_conv = nn.Sequential(
26 | nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
27 | nn.GroupNorm(1,mid_channels), nn.ReLU(),)
28 |
29 | def forward(self, x):
30 | return self.double_conv(x)
31 |
32 |
33 | class Down(nn.Module):
34 | """Downscaling with maxpool then double conv"""
35 |
36 | def __init__(self, in_channels, out_channels):
37 | super().__init__()
38 | self.maxpool_conv = nn.Sequential(
39 | nn.MaxPool2d(2), DoubleConv(in_channels, out_channels))
40 |
41 | def forward(self, x):
42 | return self.maxpool_conv(x)
43 |
44 |
45 | class Up(nn.Module):
46 | """Upscaling then double conv"""
47 |
48 | def __init__(self, in_channels, out_channels, bilinear=True):
49 | super().__init__()
50 |
51 | # if bilinear, use the normal convolutions to reduce the number of channels
52 | if bilinear:
53 | self.up = nn.Upsample(scale_factor=2,
54 | mode='bilinear',
55 | align_corners=True)
56 | self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
57 | else:
58 | self.up = nn.ConvTranspose2d(in_channels,
59 | in_channels // 2,
60 | kernel_size=2,
61 | stride=2)
62 | self.conv = DoubleConv(in_channels, out_channels)
63 |
64 | def forward(self, x1, x2=None):
65 | x1 = self.up(x1)
66 | '''
67 | # input is CHW
68 | diffY = x2.size()[2] - x1.size()[2]
69 | diffX = x2.size()[3] - x1.size()[3]
70 |
71 | x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
72 | diffY // 2, diffY - diffY // 2])
73 | # if you have padding issues, see
74 | # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
75 | # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
76 | x = torch.cat([x2, x1], dim=1)
77 | '''
78 | x = x1
79 | return self.conv(x)
80 |
81 |
82 | class OutConv(nn.Module):
83 | def __init__(self, in_channels, out_channels):
84 | super(OutConv, self).__init__()
85 | self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
86 |
87 | def forward(self, x):
88 | return self.conv(x)
89 |
90 |
91 | class UNet(nn.Module):
92 | def __init__(self, n_channels, n_classes, bilinear=True, decoder=True):
93 | super(UNet, self).__init__()
94 | self.n_channels = n_channels
95 | self.n_classes = n_classes
96 | self.bilinear = bilinear
97 | self.decoder = decoder
98 |
99 | base_ch = 4
100 | self.inc = DoubleConv(n_channels, 4)
101 | self.down1 = DoubleConv(4, 8) # 32
102 | self.down2 = Down(8, 8) # 128
103 | self.down3 = Down(8, 8) # 128
104 | self.down4 = Down(8, 8) # 256
105 | self.down5 = Down(8, 8) # 256
106 | self.down6 = Down(8, 8) # 128
107 | self.down7 = Down(8, 8) # 128
108 | # self.down8 = Down(8, 8) # 64
109 | self.linear = nn.Linear(128, n_classes)
110 | # self.do = nn.Dropout()
111 | if (decoder):
112 | self.up1 = Up(1024, 512, bilinear)
113 | self.up2 = Up(512, 256, bilinear)
114 | self.up3 = Up(256, 128, bilinear)
115 | self.up4 = Up(128, 64, bilinear)
116 | self.up5 = Up(64, 32, bilinear)
117 | self.up6 = Up(32, 16, bilinear)
118 | self.up7 = Up(16, 8, bilinear)
119 | self.up8 = Up(8, 8, bilinear)
120 | self.outc = OutConv(32, n_classes)
121 |
122 | def forward(self, x):
123 | x1 = self.inc(x)
124 | x2 = self.down1(x1)
125 | x3 = self.down2(x2)
126 | x4 = self.down3(x3)
127 | x5 = self.down4(x4)
128 | x6 = self.down5(x5)
129 | x7 = self.down6(x6)
130 | x8 = self.down7(x7)
131 | # x9 = self.down8(x8)
132 | if (self.decoder):
133 | x = self.up1(x9)
134 | x = self.up2(x)
135 | x = self.up3(x)
136 | x = self.up4(x)
137 | x = self.up5(x)
138 | x = self.up6(x)
139 | x = self.up7(x)
140 | x = self.up8(x)
141 | x = self.outc(x)
142 | output = torch.tanh(x)
143 | else:
144 | output = torch.flatten(x8, 1)
145 | # output = self.do(output)
146 | # print(output.shape)
147 | output = self.linear(output)
148 | # logger.debug("output size is {}".format(output.size()))
149 | return output
150 |
--------------------------------------------------------------------------------
/src/panotools/__init__.py:
--------------------------------------------------------------------------------
1 | # from .house import House
2 | # from .bbox import BBox
3 | # from .panorama import Panorama
4 |
5 | import logging
6 | import coloredlogs
7 | logger = logging.getLogger('panotools_configs')
8 | coloredlogs.install(level="INFO",
9 | logger=logger,
10 | fmt='%(name)s, %(message)s')
11 | logging.root.setLevel(logging.INFO)
12 |
--------------------------------------------------------------------------------
/src/panotools/bbox.py:
--------------------------------------------------------------------------------
1 | import shapely
2 | import numpy as np
3 |
4 | # thing_classes=["Door","Glass_door","Frame","Window","Kitchen_counter","closet"]
5 |
6 |
7 | class BBox:
8 | def __init__(self, bbox=None, obj_type=None):
9 | # if type(obj_type).__module__ == np.__name__:
10 | # obj_type = int(np.argmax(obj_type))
11 | self.bbox = np.round(bbox, 3) # BoundingBox (2,3) xyz
12 | self.type = obj_type
13 | if abs(self.bbox[0][0]-self.bbox[1][0]) < abs(self.bbox[0][2]-self.bbox[1][2]):
14 | if self.bbox[0][0] > 0:
15 | self.direction = 0
16 | else:
17 | self.direction = 2
18 | else:
19 | if self.bbox[0][2] > 0:
20 | self.direction = 1
21 | else:
22 | self.direction = 3
23 |
24 | def get_type(self):
25 | if isinstance(self.type, np.ndarray):
26 | return np.argmax(self.type)
27 | return self.type
28 |
29 | def get_center(self):
30 | center = (self.bbox[0]+self.bbox[1])/2
31 | center = shapely.geometry.Point([center[0], center[2]])
32 | return center
33 |
34 | def get_line(self):
35 | line = shapely.geometry.LineString(
36 | [(self.bbox[0, 0], self.bbox[0, 2]), (self.bbox[1, 0], self.bbox[1, 2])])
37 | return line
38 |
39 | def length(self):
40 | return self.get_line().length
41 |
--------------------------------------------------------------------------------
/src/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.11.0
2 | albumentations==0.5.2
3 | astroid==2.4.2
4 | autopep8==1.5.4
5 | cachetools==4.2.0
6 | certifi==2020.12.5
7 | chardet==4.0.0
8 | cloudpickle==1.6.0
9 | coloredlogs==15.0
10 | cycler==0.10.0
11 | Cython==0.29.21
12 | decorator==4.4.2
13 | detectron2 @ git+https://github.com/facebookresearch/detectron2.git@05573d7480a43e7edc61ba6061082c81dd3cde76
14 | flake8==3.8.4
15 | future==0.18.2
16 | fvcore==0.1.2.post20210115
17 | google-auth==1.24.0
18 | google-auth-oauthlib==0.4.2
19 | grpcio==1.35.0
20 | humanfriendly==9.1
21 | idna==2.10
22 | imageio==2.9.0
23 | imgaug==0.4.0
24 | iopath==0.1.2
25 | isort==5.7.0
26 | jedi==0.17.2
27 | joblib==1.0.0
28 | kiwisolver==1.3.1
29 | lazy-object-proxy==1.4.3
30 | Markdown==3.3.3
31 | matplotlib==3.3.3
32 | mccabe==0.6.1
33 | networkx==2.5
34 | numpy==1.19.5
35 | oauthlib==3.1.0
36 | opencv-python==4.5.1.48
37 | opencv-python-headless==4.5.1.48
38 | pandas==1.2.0
39 | parso==0.7.1
40 | Pillow==8.1.0
41 | pluggy==0.13.1
42 | portalocker==2.0.0
43 | protobuf==3.14.0
44 | pyasn1==0.4.8
45 | pyasn1-modules==0.2.8
46 | pycocotools==2.0.2
47 | pycodestyle==2.6.0
48 | pydot==1.4.1
49 | pyflakes==2.2.0
50 | pylint==2.6.0
51 | pyparsing==2.4.7
52 | python-dateutil==2.8.1
53 | python-jsonrpc-server==0.4.0
54 | python-language-server==0.36.2
55 | pytz==2020.5
56 | PyWavelets==1.1.1
57 | PyYAML==5.3.1
58 | ranger-fm==1.9.3
59 | requests==2.25.1
60 | requests-oauthlib==1.3.0
61 | rope==0.18.0
62 | rsa==4.7
63 | scikit-image==0.18.1
64 | scikit-learn==0.24.1
65 | scipy==1.6.0
66 | seaborn==0.11.1
67 | Shapely==1.7.1
68 | simplejson==3.17.2
69 | six==1.15.0
70 | sklearn==0.0
71 | tabulate==0.8.7
72 | tensorboard==2.4.1
73 | tensorboard-plugin-wit==1.8.0
74 | threadpoolctl==2.1.0
75 | tifffile==2021.1.14
76 | toml==0.10.2
77 | torch==1.7.1
78 | torchvision==0.8.2
79 | tqdm==4.56.0
80 | typing-extensions==3.7.4.3
81 | ujson==4.0.2
82 | urllib3==1.26.2
83 | Werkzeug==1.0.1
84 | wrapt==1.12.1
85 | yacs==0.1.8
86 |
--------------------------------------------------------------------------------
/src/tests/predict_arrangements.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import coloredlogs
3 | import logging
4 | import os
5 | from tqdm import tqdm
6 | import torch
7 | import torchvision.transforms as transforms
8 | import sys
9 | import functools
10 | from PIL import Image
11 |
12 | from parser import config_parser
13 | from src.panotools.house import House
14 | from src.models.model import get_model
15 | from src.panotools import visualize
16 |
17 | logger = logging.getLogger('log')
18 | coloredlogs.install(level="DEBUG",
19 | logger=logger,
20 | fmt='%(asctime)s, %(name)s, %(levelname)s %(message)s')
21 | logging.root.setLevel(logging.INFO)
22 |
23 | def main(args):
24 | print("____________args____________")
25 | for key in args.__dict__:
26 | print("{}: {}".format(key, args.__dict__[key]))
27 | print("____________________________")
28 |
29 | logger.setLevel(args.log)
30 |
31 | transform = transforms.Compose([
32 | transforms.ToTensor(),
33 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
34 | ])
35 | model = get_model(args.ar_model, inp_dim=3)
36 | pretrained_dict = torch.load(args.ar_model_weight)
37 | model.load_state_dict(pretrained_dict, strict=True)
38 | iter = 0
39 | model.eval()
40 | os.makedirs(f'outputs/{args.ar_exp}', exist_ok=True)
41 |
42 | train_names = [line.rstrip() for line in open(args.train_set)]
43 | test_names = [line.rstrip() for line in open(args.test_set)]
44 |
45 | train_houses = []
46 | test_houses = []
47 |
48 | for name in train_names:
49 | train_houses.append(House(name, args))
50 | for name in test_names:
51 | test_houses.append(House(name, args))
52 |
53 | for house in tqdm(test_houses, position=0, desc='Houses'):
54 | with torch.no_grad():
55 | if(args.prediction_level=='lv1'):
56 | samples = house.strong_positive_trees
57 | elif(args.prediction_level=='lv2'):
58 | samples = house.weak_positive_trees
59 | else:
60 | samples = house.negative_trees
61 | for tree in tqdm(samples, desc='processing alignments'):
62 | masks = tree.get_masks(house, False)
63 | output_masks = [transform(np.ones((256, 256, 16), dtype=float)*0.5)]
64 | for i, mask in enumerate(masks):
65 | mask = mask.astype(float)/255
66 | output_masks.append(transform(mask))
67 | imgs = torch.stack(output_masks).unsqueeze(0)
68 |
69 | iter += 1
70 | imgs = torch.as_tensor(imgs,
71 | dtype=torch.float,
72 | device=torch.device('cuda'))
73 | imgs = imgs.transpose(1, 0)
74 |
75 | pred = model(imgs)
76 | pred = pred.squeeze(1)
77 | nppred = torch.tanh(pred).data.cpu().numpy()
78 |
79 | os.makedirs(f'outputs/{args.ar_exp}/{house.name}', exist_ok=True)
80 | align_img = [np.array(visualize.show_tree(house, tree))]
81 | align_img = Image.fromarray(align_img[0])
82 | align_img.convert('RGB').save(f'outputs/{args.ar_exp}/{house.name}/{round(max(nppred)*100000)}_{iter}.png')
83 | # if house.labeled:
84 | # house.visualize_alignment(args)
85 |
86 | if __name__ == '__main__':
87 | parser = config_parser()
88 | args = parser.parse_args()
89 | main(args)
90 |
--------------------------------------------------------------------------------
/src/tests/room_type_classification.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import numpy as np
3 | import coloredlogs
4 | import logging
5 | import sys
6 | from src.loaders.room_type_classification import dataset
7 | from src.models.model import get_model
8 | from tqdm import tqdm
9 | import torch
10 | import torch.nn as nn
11 | import cv2
12 | from glob import glob
13 | from sklearn.metrics import confusion_matrix
14 | import sklearn.metrics as metrics
15 | import torchvision.transforms as transforms
16 |
17 | from parser import config_parser
18 |
19 |
20 | logger = logging.getLogger('log')
21 | coloredlogs.install(level="DEBUG",
22 | logger=logger,
23 | fmt='%(asctime)s, %(name)s, %(levelname)s %(message)s')
24 | logging.root.setLevel(logging.INFO)
25 |
26 |
27 | def main(args):
28 | parser = config_parser()
29 | args = parser.parse_args()
30 | logger.setLevel(args.log)
31 | model = get_model(args.rc_model, 3, 10)
32 |
33 | pretrained_dict = torch.load(args.rc_model_weight)
34 | pretrained_dict = {
35 | k: v
36 | for k, v in pretrained_dict.items() if k in model.state_dict()
37 | }
38 | model.load_state_dict(pretrained_dict, strict=True)
39 | model.eval()
40 |
41 | if(args.rc_is_eval):
42 |         test_loader = dataset('test', args.rc_batch_size)
43 | labels = []
44 | preds = []
45 | for (img, label) in tqdm(test_loader, position=0):
46 | with torch.no_grad():
47 | img = torch.as_tensor(img,
48 | dtype=torch.float,
49 | device=torch.device('cuda'))
50 | img = nn.functional.interpolate(img, size=(256, 256))
51 | label = torch.as_tensor(label,
52 | dtype=torch.long,
53 | device=torch.device('cuda'))
54 | pred = model(img)
55 | labels.extend(label.cpu().data.numpy().tolist())
56 |                 pred = torch.argmax(pred, 1)
57 | preds.extend(pred.cpu().data.numpy().tolist())
58 | conf_mat = confusion_matrix(labels, preds)
59 | print(conf_mat)
60 | print(metrics.accuracy_score(labels, preds))
61 | print(metrics.average_precision_score(labels, preds))
62 | else:
63 | transform = transforms.Compose([
64 | transforms.ToTensor(),
65 | # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
66 | ])
67 | folder = glob(f"{args.data_dir}/*")
68 | for house in tqdm(folder):
69 | files = glob(f'{house}/images/aligned_*.png')
70 | preds = dict()
71 | for path in files:
72 | name = path.split('/')[-1][8:-4]
73 | img = cv2.imread(path)
74 | img = img.astype(float)/255
75 | img = transform(img)
76 | with torch.no_grad():
77 | img = torch.as_tensor(img,
78 | dtype=torch.float,
79 | device=torch.device('cuda'))
80 | img = img.unsqueeze(0)
81 | img = nn.functional.interpolate(img, size=(256, 256))
82 |
83 | pred = model(img)
84 | pred = torch.softmax(pred, -1)
85 | preds[name] = pred[0].cpu().data.numpy()
86 | np.save(f'{house}/room_type_preds.npy', preds)
87 |
88 |
89 | if __name__ == '__main__':
90 | main(sys.argv[1:])
91 |
--------------------------------------------------------------------------------
/src/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/src/utils/__init__.py
--------------------------------------------------------------------------------
/src/utils/summary_writer.py:
--------------------------------------------------------------------------------
1 | from torchvision.utils import make_grid
2 | from torch.utils.tensorboard import SummaryWriter
3 | import logging
4 |
5 | logger = logging.getLogger('log')
6 |
7 |
8 | class MyWriter():
9 | def __init__(self, args, subdir):
10 | self.writer = SummaryWriter('exps/{}/{}/'.format(subdir, args.exp))
11 | self.values = dict()
12 | self.cnt = dict()
13 |
14 | def add_scalar(self, name, val):
15 | if (name in self.values):
16 | self.values[name] += val
17 | self.cnt[name] += 1.0
18 | else:
19 | self.values[name] = val
20 | self.cnt[name] = 1.0
21 |
22 | def add_imgs(self, pred, target, epoch):
23 | pred = make_grid(pred, 4)
24 | target = make_grid(target, 4)
25 | self.writer.add_image('pred', pred, epoch)
26 | self.writer.add_image('target', target, epoch)
27 |
28 | def push(self, step):
29 | for key in self.values:
30 | self.values[key] = self.values[key] / float(self.cnt[key])
31 | for key in self.values:
32 | self.writer.add_scalar(key, self.values[key], step)
33 | self.values = dict()
34 |
35 | def get(self, name):
36 | if (name not in self.values):
37 | logger.error("name {} was not found in the writer".format(name))
38 | return None
39 | return self.values[name] / float(self.cnt[name])
40 |
--------------------------------------------------------------------------------
/test.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/test.txt
--------------------------------------------------------------------------------
/train.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aminshabani/extreme-indoor-sfm/b326c7c078555e8f724cd07bb19c5933780be79a/train.txt
--------------------------------------------------------------------------------