├── .gitignore
├── .gitmodules
├── CMakeLists.txt
├── LICENSE
├── LICENSE_Matterport3DSimulator
├── README.md
├── cmake
└── FindNumPy.cmake
├── connectivity
├── 17DRP5sb8fy_connectivity.json
├── 1LXtFkjw3qL_connectivity.json
├── 1pXnuDYAj8r_connectivity.json
├── 29hnd4uzFmX_connectivity.json
├── 2azQ1b91cZZ_connectivity.json
├── 2n8kARJN3HM_connectivity.json
├── 2t7WUuJeko7_connectivity.json
├── 5LpN3gDmAk7_connectivity.json
├── 5ZKStnWn8Zo_connectivity.json
├── 5q7pvUzZiYa_connectivity.json
├── 759xd9YjKW5_connectivity.json
├── 7y3sRwLe3Va_connectivity.json
├── 8194nk5LbLH_connectivity.json
├── 82sE5b5pLXE_connectivity.json
├── 8WUmhLawc2A_connectivity.json
├── ARNzJeq3xxb_connectivity.json
├── B6ByNegPMKs_connectivity.json
├── D7G3Y4RVNrH_connectivity.json
├── D7N2EKCX4Sj_connectivity.json
├── E9uDoFAP3SH_connectivity.json
├── EDJbREhghzL_connectivity.json
├── EU6Fwq7SyZv_connectivity.json
├── GdvgFV5R1Z5_connectivity.json
├── HxpKQynjfin_connectivity.json
├── JF19kD82Mey_connectivity.json
├── JeFG25nYj2p_connectivity.json
├── JmbYfDe2QKZ_connectivity.json
├── PX4nDJXEHrG_connectivity.json
├── Pm6F8kyY3z2_connectivity.json
├── PuKPg4mmafe_connectivity.json
├── QUCTc6BB5sX_connectivity.json
├── README.md
├── RPmz2sHmrrY_connectivity.json
├── S9hNv5qa7GM_connectivity.json
├── SN83YJsR3w2_connectivity.json
├── TbHJrupSAjP_connectivity.json
├── ULsKaCPVFJR_connectivity.json
├── UwV83HsGsw3_connectivity.json
├── Uxmj2M2itWa_connectivity.json
├── V2XKFyX4ASd_connectivity.json
├── VFuaQ6m2Qom_connectivity.json
├── VLzqgDo317F_connectivity.json
├── VVfe2KiqLaN_connectivity.json
├── Vt2qJdWjCF2_connectivity.json
├── Vvot9Ly1tCj_connectivity.json
├── VzqfbhrpDEA_connectivity.json
├── WYY7iVyf5p8_connectivity.json
├── X7HyMhZNoso_connectivity.json
├── XcA2TqTSSAj_connectivity.json
├── YFuZgdQ5vWj_connectivity.json
├── YVUC4YcDtcY_connectivity.json
├── YmJkqBEsHnH_connectivity.json
├── Z6MFQCViBuw_connectivity.json
├── ZMojNkEp431_connectivity.json
├── aayBHfsNo7d_connectivity.json
├── ac26ZMwG7aT_connectivity.json
├── b8cTxDM8gDG_connectivity.json
├── cV4RVeZvu5T_connectivity.json
├── dhjEzFoUFzH_connectivity.json
├── e9zR4mvMWw7_connectivity.json
├── fzynW3qQPVF_connectivity.json
├── gTV8FGcVJC9_connectivity.json
├── gYvKGZ5eRqb_connectivity.json
├── gZ6f7yhEvPG_connectivity.json
├── gxdoqLR6rwA_connectivity.json
├── i5noydFURQK_connectivity.json
├── jh4fc5c5qoQ_connectivity.json
├── jtcxE69GiFV_connectivity.json
├── kEZ7cmS4wCh_connectivity.json
├── mJXqzFtmKg4_connectivity.json
├── oLBMNvg9in8_connectivity.json
├── p5wJjkQkbXX_connectivity.json
├── pLe4wQe7qrG_connectivity.json
├── pRbA3pwrgk9_connectivity.json
├── pa4otMbVnkk_connectivity.json
├── q9vSo1VnCiC_connectivity.json
├── qoiz87JEwZ2_connectivity.json
├── r1Q1Z4BcV1o_connectivity.json
├── r47D5H71a5s_connectivity.json
├── rPc6DW4iMge_connectivity.json
├── rqfALeAoiTq_connectivity.json
├── s8pcmisQ38h_connectivity.json
├── sKLMLpTHeUy_connectivity.json
├── sT4fr6TAbpF_connectivity.json
├── scans.txt
├── uNb9QFRL6hY_connectivity.json
├── ur6pFq6Qu1A_connectivity.json
├── vyrNrziPKCB_connectivity.json
├── wc2JMjhGNzB_connectivity.json
├── x8F5xyUWy9e_connectivity.json
├── yqstnuAEVhm_connectivity.json
└── zsNo4HB9uLZ_connectivity.json
├── include
├── Benchmark.hpp
├── Catch.hpp
└── MatterSim.hpp
├── python_requirements.txt
├── r2r_src
├── agent.py
├── bleu.py
├── env.py
├── eval.py
├── model.py
├── param.py
├── speaker.py
├── train.py
└── utils.py
├── run
├── adv_train.bash
├── attack.bash
├── finetune.bash
├── pretrain.bash
├── quick_start.bash
└── test_agent.bash
├── semantic_views
├── 17DRP5sb8fy
│ ├── 10c252c90fa24ef3b698c6f54d984c5c
│ │ ├── 0.png
│ │ ├── 1.png
│ │ ├── 10.png
│ │ ├── 11.png
│ │ ├── 12.png
│ │ ├── 13.png
│ │ ├── 14.png
│ │ ├── 15.png
│ │ ├── 16.png
│ │ ├── 17.png
│ │ ├── 18.png
│ │ ├── 19.png
│ │ ├── 2.png
│ │ ├── 20.png
│ │ ├── 21.png
│ │ ├── 22.png
│ │ ├── 23.png
│ │ ├── 24.png
│ │ ├── 25.png
│ │ ├── 26.png
│ │ ├── 27.png
│ │ ├── 28.png
│ │ ├── 29.png
│ │ ├── 3.png
│ │ ├── 30.png
│ │ ├── 31.png
│ │ ├── 32.png
│ │ ├── 33.png
│ │ ├── 34.png
│ │ ├── 35.png
│ │ ├── 4.png
│ │ ├── 5.png
│ │ ├── 6.png
│ │ ├── 7.png
│ │ ├── 8.png
│ │ └── 9.png
│ └── 10c252c90fa24ef3b698c6f54d984c5c_rgb
│ │ ├── 0.jpg
│ │ ├── 1.jpg
│ │ ├── 10.jpg
│ │ ├── 11.jpg
│ │ ├── 12.jpg
│ │ ├── 13.jpg
│ │ ├── 14.jpg
│ │ ├── 15.jpg
│ │ ├── 16.jpg
│ │ ├── 17.jpg
│ │ ├── 18.jpg
│ │ ├── 19.jpg
│ │ ├── 2.jpg
│ │ ├── 20.jpg
│ │ ├── 21.jpg
│ │ ├── 22.jpg
│ │ ├── 23.jpg
│ │ ├── 24.jpg
│ │ ├── 25.jpg
│ │ ├── 26.jpg
│ │ ├── 27.jpg
│ │ ├── 28.jpg
│ │ ├── 29.jpg
│ │ ├── 3.jpg
│ │ ├── 30.jpg
│ │ ├── 31.jpg
│ │ ├── 32.jpg
│ │ ├── 33.jpg
│ │ ├── 34.jpg
│ │ ├── 35.jpg
│ │ ├── 4.jpg
│ │ ├── 5.jpg
│ │ ├── 6.jpg
│ │ ├── 7.jpg
│ │ ├── 8.jpg
│ │ └── 9.jpg
├── Matterport3D_agreement.pdf
├── Matterport3D_license.txt
└── label2color.json
├── src
├── driver
│ ├── mattersim_main.cpp
│ └── random_agent.cpp
├── lib
│ ├── Benchmark.cpp
│ ├── MatterSim.cpp
│ ├── fragment.sh
│ └── vertex.sh
├── lib_python
│ └── MatterSimPython.cpp
└── test
│ ├── main.cpp
│ └── rendertest_spec.json
└── tasks
└── R2R
└── data
├── download.sh
├── train_vocab.txt
└── trainval_vocab.txt
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.so
3 | *.zip
4 | /r2r_src/.idea
5 | /img_features
6 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "pybind11"]
2 | path = pybind11
3 | url = https://github.com/pybind/pybind11.git
4 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # cmake_minimum_required must come before project(): it establishes the
2 | # policy defaults that project() relies on.
3 | cmake_minimum_required(VERSION 3.1)
4 | project(Matterport_Simulator CXX)
5 | 
6 | option(OSMESA_RENDERING "Render offscreen with OSMesa" OFF)
7 | 
8 | # Use the standard-version properties rather than appending -std=c++11 to
9 | # CMAKE_CXX_FLAGS (portable across compilers, does not clobber user flags).
10 | set(CMAKE_CXX_STANDARD 11)
11 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
12 | 
13 | # Default to an optimized build when the user did not choose a build type.
14 | if(NOT CMAKE_BUILD_TYPE)
15 |     set(CMAKE_BUILD_TYPE Release)
16 | endif()
17 | 
18 | find_package(OpenCV REQUIRED)
19 | find_package(PkgConfig REQUIRED)
20 | 
21 | pkg_check_modules(JSONCPP REQUIRED jsoncpp)
22 | 
23 | # Select the GL stack: OSMesa for headless offscreen rendering, otherwise
24 | # regular OpenGL plus GLEW.
25 | if(OSMESA_RENDERING)
26 |     pkg_check_modules(OSMESA REQUIRED osmesa)
27 |     set(GL_LIBS ${OSMESA_LIBRARIES})
28 | else()
29 |     find_package(OpenGL REQUIRED)
30 |     find_package(GLEW REQUIRED)
31 |     set(GL_LIBS ${OPENGL_LIBRARIES} ${GLEW_LIBRARIES})
32 | endif()
33 | 
34 | # Core simulator library. Headers in include/ are its public interface, so
35 | # the directory propagates (PUBLIC) to every target that links MatterSim.
36 | add_library(MatterSim SHARED src/lib/MatterSim.cpp src/lib/Benchmark.cpp)
37 | target_include_directories(MatterSim PUBLIC "${PROJECT_SOURCE_DIR}/include")
38 | if(OSMESA_RENDERING)
39 |     # PUBLIC so consumers of MatterSim.hpp compile against the same backend.
40 |     target_compile_definitions(MatterSim PUBLIC OSMESA_RENDERING)
41 | endif()
42 | target_include_directories(MatterSim PRIVATE ${JSONCPP_INCLUDE_DIRS})
43 | # PUBLIC preserves the propagation behaviour of the legacy plain signature.
44 | target_link_libraries(MatterSim PUBLIC ${JSONCPP_LIBRARIES} ${OpenCV_LIBS} ${GL_LIBS})
45 | 
46 | add_executable(tests src/test/main.cpp)
47 | target_include_directories(tests PRIVATE ${JSONCPP_INCLUDE_DIRS})
48 | target_link_libraries(tests PRIVATE MatterSim ${JSONCPP_LIBRARIES} ${OpenCV_LIBS})
49 | 
50 | add_executable(mattersim_main src/driver/mattersim_main.cpp)
51 | target_link_libraries(mattersim_main PRIVATE MatterSim)
52 | 
53 | add_executable(random_agent src/driver/random_agent.cpp)
54 | target_link_libraries(random_agent PRIVATE MatterSim)
55 | 
56 | add_subdirectory(pybind11)
57 | 
58 | find_package(PythonInterp 3.6 REQUIRED)
59 | message(STATUS "Python interpreter: ${PYTHON_EXECUTABLE}")
60 | 
61 | # Need to search for the python executable again to pick up an activated
62 | # virtualenv python, if any (it appears earlier on PATH).
63 | unset(PYTHON_EXECUTABLE CACHE)
64 | find_program(PYTHON_EXECUTABLE python
65 |     PATHS ENV PATH # look in the PATH environment variable
66 |     NO_DEFAULT_PATH # do not look anywhere else...
67 | )
68 | message(STATUS "Python interpreter (after virtualenv re-check): ${PYTHON_EXECUTABLE}")
69 | # Make the bundled FindNumPy.cmake module discoverable.
70 | list(APPEND CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/cmake")
71 | find_package(NumPy REQUIRED)
72 | 
73 | pybind11_add_module(MatterSimPython src/lib_python/MatterSimPython.cpp)
74 | target_include_directories(MatterSimPython PRIVATE ${NUMPY_INCLUDES})
75 | target_link_libraries(MatterSimPython PRIVATE MatterSim)
76 | # The extension is imported as `import MatterSim`, hence the output name.
77 | set_target_properties(MatterSimPython
78 |     PROPERTIES
79 |     OUTPUT_NAME MatterSim)
80 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Hao Tan, Licheng Yu, Mohit Bansal
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/LICENSE_Matterport3DSimulator:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Peter Anderson, Philip Roberts, Qi Wu, Damien Teney, Jake Bruce
4 | Mark Johnson, Niko Sunderhauf, Ian Reid, Stephen Gould, Anton van den Hengel
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Adversarial Reinforced Instruction Attacker for Robust Vision-Language Navigation
2 |
3 | PyTorch implementation of the paper ["Adversarial Reinforced Instruction Attacker for Robust Vision-Language Navigation"](https://arxiv.org/abs/2107.11252) (TPAMI 2021).
4 |
5 | ## Environment Installation
6 | The environment installation follows that in [EnvDrop](https://github.com/airsplay/R2R-EnvDrop).
7 |
8 | Python requirements: Need python 3.6 (the CMake build requires python >= 3.6)
9 | ```
10 | pip install -r python_requirements.txt
11 | ```
12 | Install Matterport3D simulators:
13 | ```
14 | git submodule update --init --recursive
15 | sudo apt-get install libjsoncpp-dev libepoxy-dev libglm-dev libosmesa6 libosmesa6-dev libglew-dev
16 | mkdir build && cd build
17 | cmake -DOSMESA_RENDERING=ON ..
18 | make -j8
19 | ```
20 |
21 | ## Data Preparation
22 | Download Room-to-Room navigation data:
23 | ```
24 | bash ./tasks/R2R/data/download.sh
25 | ```
26 | Download image features for environments:
27 | ```
28 | mkdir img_features
29 | wget https://www.dropbox.com/s/o57kxh2mn5rkx4o/ResNet-152-imagenet.zip -P img_features/
30 | cd img_features
31 | unzip ResNet-152-imagenet.zip
32 | ```
33 | Download [R2R augmentation data](http://people.eecs.berkeley.edu/~ronghang/projects/speaker_follower/data_augmentation/R2R_literal_speaker_data_augmentation_paths.json) from [speaker-follower](https://github.com/ronghanghu/speaker_follower).
34 |
35 | Download R2R navigation data added target words and candidate substitution words [here](https://drive.google.com/file/d/16cvB2HyOU1zycsndads6FCl3DERNkBVi/view?usp=sharing).
36 |
37 | Download object word vocabulary [here](https://drive.google.com/file/d/1GIlqvRfmnoO6617BcMtClzSj8t6hl7nR/view?usp=sharing).
38 |
39 | ### Trained Network weights
40 | Download adversarial training checkpoint [here](https://drive.google.com/file/d/1ye9_ytFSoUz0rk0PuWMnaSdmwq9koGzT/view?usp=sharing).
41 |
42 | Download finetuning checkpoint [here](https://drive.google.com/file/d/13qHTndeqrI_ULyhPpw2r90FgHMh50Dny/view?usp=sharing).
43 |
44 | ## Code
45 | ### Reproduce Testing Results
46 | Run the following scripts with the finetuning checkpoint to replicate the navigation performance reported in the paper:
47 | ```
48 | bash run/test_agent.bash 0
49 | ```
50 | ### Quickly Start
51 | Load the adversarial training checkpoint to perform finetuning:
52 | ```
53 | bash run/quick_start.bash 0
54 | ```
55 | ### Four-stage Training
56 | ```
57 | bash run/pretrain.bash 0
58 | bash run/attack.bash 0
59 | bash run/adv_train.bash 0
60 | bash run/finetune.bash 0
61 | ```
62 |
63 | ## Acknowledgement
64 | The implementation relies on resources from [EnvDrop](https://github.com/airsplay/R2R-EnvDrop) and [speaker-follower](https://github.com/ronghanghu/speaker_follower). We thank the original authors for open-sourcing their code.
65 |
--------------------------------------------------------------------------------
/cmake/FindNumPy.cmake:
--------------------------------------------------------------------------------
1 | #-------------------------------------------------------------------------------
2 | # Copyright (c) 2013, Lars Baehren
3 | # All rights reserved.
4 | #
5 | # Redistribution and use in source and binary forms, with or without modification,
6 | # are permitted provided that the following conditions are met:
7 | #
8 | #  * Redistributions of source code must retain the above copyright notice, this
9 | #    list of conditions and the following disclaimer.
10 | #  * Redistributions in binary form must reproduce the above copyright notice,
11 | #    this list of conditions and the following disclaimer in the documentation
12 | #    and/or other materials provided with the distribution.
13 | #
14 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 | #-------------------------------------------------------------------------------
25 | 
26 | # - Check for the presence of NumPy
27 | #
28 | # The following variables are set when NumPy is found:
29 | #  NUMPY_FOUND      = Set to true, if all components of NUMPY have been found.
30 | #  NUMPY_INCLUDES   = Include path for the header files of NUMPY
31 | #  NUMPY_LIBRARIES  = Link these to use NUMPY
32 | #  NUMPY_LFLAGS     = Linker flags (optional)
33 | 
34 | if (NOT NUMPY_FOUND)
35 | 
36 |   if (NOT NUMPY_ROOT_DIR)
37 |     set (NUMPY_ROOT_DIR ${CMAKE_INSTALL_PREFIX})
38 |   endif (NOT NUMPY_ROOT_DIR)
39 | 
40 |   if (NOT PYTHONINTERP_FOUND)
41 |     find_package (PythonInterp)
42 |   endif (NOT PYTHONINTERP_FOUND)
43 | 
44 |   ##__________________________________________________________________________
45 |   ## Check for the header files
46 | 
47 |   ## Use Python to determine the include directory
48 |   execute_process (
49 |     COMMAND ${PYTHON_EXECUTABLE} -c import\ numpy\;\ print\(numpy.get_include\(\)\)\;
50 |     ERROR_VARIABLE NUMPY_FIND_ERROR
51 |     RESULT_VARIABLE NUMPY_FIND_RESULT
52 |     OUTPUT_VARIABLE NUMPY_FIND_OUTPUT
53 |     OUTPUT_STRIP_TRAILING_WHITESPACE
54 |     )
55 |   ## process the output from the execution of the command
56 |   if (NOT NUMPY_FIND_RESULT)
57 |     set (NUMPY_INCLUDES ${NUMPY_FIND_OUTPUT})
58 |   endif (NOT NUMPY_FIND_RESULT)
59 | 
60 |   ##__________________________________________________________________________
61 |   ## Check for the library
62 | 
63 |   unset (NUMPY_LIBRARIES)
64 | 
65 |   if (PYTHON_SITE_PACKAGES)
66 |     find_library (NUMPY_NPYMATH_LIBRARY npymath
67 |       HINTS ${PYTHON_SITE_PACKAGES}/numpy/core
68 |       PATH_SUFFIXES lib
69 |       )
70 |     if (NUMPY_NPYMATH_LIBRARY)
71 |       list (APPEND NUMPY_LIBRARIES ${NUMPY_NPYMATH_LIBRARY})
72 |     endif (NUMPY_NPYMATH_LIBRARY)
73 |   endif (PYTHON_SITE_PACKAGES)
74 | 
75 |   ##__________________________________________________________________________
76 |   ## Determine the NumPy version; primary path queries the interpreter for
77 |   ## numpy.__version__, fallback locates 'numpy/numpyconfig.h'
78 | 
79 |   if (PYTHON_EXECUTABLE)
80 |     execute_process (
81 |       COMMAND ${PYTHON_EXECUTABLE} -c import\ numpy\;\ print\(numpy.__version__\)\;
82 |       ERROR_VARIABLE NUMPY_API_VERSION_ERROR
83 |       RESULT_VARIABLE NUMPY_API_VERSION_RESULT
84 |       OUTPUT_VARIABLE NUMPY_API_VERSION
85 |       OUTPUT_STRIP_TRAILING_WHITESPACE
86 |       )
87 |   else ()
88 |     ## Backup procedure: extract version number directly from the header file
89 |     if (NUMPY_INCLUDES)
90 |       find_file (HAVE_NUMPYCONFIG_H numpy/numpyconfig.h
91 |         HINTS ${NUMPY_INCLUDES}
92 |         )
93 |     endif (NUMPY_INCLUDES)
94 |   endif ()
95 | 
96 |   ## Dissect full version number into major, minor and patch version
97 |   if (NUMPY_API_VERSION)
98 |     string (REGEX REPLACE "\\." ";" _tmp ${NUMPY_API_VERSION})
99 |     list (GET _tmp 0 NUMPY_API_VERSION_MAJOR)
100 |     list (GET _tmp 1 NUMPY_API_VERSION_MINOR)
101 |     list (GET _tmp 2 NUMPY_API_VERSION_PATCH)
102 |   endif (NUMPY_API_VERSION)
103 | 
104 |   ##__________________________________________________________________________
105 |   ## Actions taken when all components have been found
106 | 
107 |   ## FPHSA is not loaded automatically inside a find module; include it
108 |   ## explicitly so the NUMPY_FOUND handling works on a clean configure.
109 |   include (FindPackageHandleStandardArgs)
110 |   find_package_handle_standard_args (NUMPY DEFAULT_MSG NUMPY_INCLUDES)
111 | 
112 |   if (NUMPY_FOUND)
113 |     if (NOT NUMPY_FIND_QUIETLY)
114 |       message (STATUS "Found components for NumPy")
115 |       message (STATUS "NUMPY_ROOT_DIR    = ${NUMPY_ROOT_DIR}")
116 |       message (STATUS "NUMPY_INCLUDES    = ${NUMPY_INCLUDES}")
117 |       message (STATUS "NUMPY_LIBRARIES   = ${NUMPY_LIBRARIES}")
118 |       message (STATUS "NUMPY_API_VERSION = ${NUMPY_API_VERSION}")
119 |     endif (NOT NUMPY_FIND_QUIETLY)
120 |   else (NUMPY_FOUND)
121 |     if (NUMPY_FIND_REQUIRED)
122 |       message (FATAL_ERROR "Could not find NUMPY!")
123 |     endif (NUMPY_FIND_REQUIRED)
124 |   endif (NUMPY_FOUND)
125 | 
126 |   ##__________________________________________________________________________
127 |   ## Mark advanced variables
128 | 
129 |   mark_as_advanced (
130 |     NUMPY_ROOT_DIR
131 |     NUMPY_INCLUDES
132 |     NUMPY_LIBRARIES
133 |     )
134 | 
135 | endif (NOT NUMPY_FOUND)
136 | 
--------------------------------------------------------------------------------
/connectivity/8194nk5LbLH_connectivity.json:
--------------------------------------------------------------------------------
1 | [{"image_id":"c9e8dc09263e4d0da77d16de0ecddd39","pose":[-0.611043,-0.00396746,-0.791588,-0.213904,0.791585,-0.00882497,-0.610996,2.305,-0.00456166,-0.999953,0.00853306,1.56916,0,0,0,1],"included":true,"visible":[false,false,false,false,true,true,false,true,true,true,false,false,false,false,false,false,false,false,false,false],"unobstructed":[false,false,false,false,true,false,false,false,true,true,false,false,false,false,false,false,false,false,false,false],"height":1.5826326295962942},{"image_id":"286b0c2d9a46408ba80b6ccebb21e582","pose":[0.951596,0.00201098,0.307346,6.58012,-0.307351,0.00915895,0.951552,-2.96479,-0.000901435,-0.999956,0.00933374,4.36353,0,0,0,1],"included":true,"visible":[false,false,true,true,false,false,false,false,false,false,false,false,false,true,false,true,false,true,false,true],"unobstructed":[false,false,false,true,false,false,false,false,false,false,false,false,false,true,false,false,false,true,true,false],"height":1.5712253956498747},{"image_id":"6776097c17ed4b93aee61704eb32f06c","pose":[-0.711582,-0.00419131,-0.702591,-1.68941,0.702575,0.00464776,-0.711594,-5.37908,0.00624796,-0.99998,-0.000362505,1.58622,0,0,0,1],"included":true,"visible":[false,false,false,false,false,true,true,true,false,true,false,true,false,false,true,false,true,false,false,false],"unobstructed":[false,false,false,false,false,true,true,false,false,false,false,true,false,false,false,false,false,false,false,true],"height":1.5804941871490743},{"image_id":"8c7e8da7d4a44ab695e6b3195eac0cf1","pose":[0.709879,0.011247,0.704234,8.62929,-0.70424,-0.00407304,0.70995,-1.77115,0.0108531,-0.999928,0.00502926,4.38556,0,0,0,1],"included":true,"visible":[false,true,false,false,false,false,false,false,false,false,true,false,true,true,false,false,false,true,true,false],"unobstructed":[false,true,false,false,false,false,false,false,false,false,true,false,false,true,false,false,false,true,true,false],"height":1.585645804390483},{"image_id":"f33c718aaf2c41469389a87944442c62","pose"
:[0.619478,0.0166688,0.784837,-3.88437,-0.784902,-0.00375152,0.619609,-0.528748,0.0132725,-0.999854,0.0107595,1.58368,0,0,0,1],"included":true,"visible":[true,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,true],"unobstructed":[true,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,true],"height":1.5829827809014503},{"image_id":"fcd90a404061413385286bef9662630e","pose":[-0.111393,0.00837906,0.993741,2.80245,-0.993773,-0.00348217,-0.111367,-3.78204,0.0025272,-0.999959,0.00871482,1.58057,0,0,0,1],"included":true,"visible":[true,false,true,false,false,false,false,true,false,false,false,false,false,false,false,true,false,false,false,false],"unobstructed":[false,false,true,false,false,false,true,true,true,false,false,false,false,false,false,false,false,false,false,false],"height":1.5763528408163245},{"image_id":"c07d4ae8330542a09cf8f8dddb9728ce","pose":[-0.985207,-0.0101267,0.171069,0.656519,-0.171094,0.00168538,-0.985253,-5.08928,0.00968898,-0.999947,-0.00339301,1.57611,0,0,0,1],"included":true,"visible":[true,false,true,false,false,true,false,true,false,false,false,false,false,false,true,false,true,false,false,true],"unobstructed":[false,false,true,false,false,true,false,true,false,false,false,true,false,false,false,false,false,false,false,false],"height":1.575276915205382},{"image_id":"2393bffb53fe4205bcc67796c6fb76e3","pose":[-0.241654,0.00228344,-0.97036,3.33582,0.970294,0.0124463,-0.241608,-5.90025,0.0115256,-0.99992,-0.00522325,1.57791,0,0,0,1],"included":true,"visible":[false,false,true,false,false,true,true,false,false,false,false,false,false,false,true,false,true,false,false,false],"unobstructed":[false,false,false,false,false,true,true,false,false,false,false,false,false,false,true,false,false,false,false,false],"height":1.5730354249357412},{"image_id":"71bf74df73cd4e24a191ef4f2338ca22","pose":[0.906931,-0.00688335,-0.421222,0.122562,0.421182,-0
.00662188,0.906952,-0.00319673,-0.00903217,-0.999954,-0.00310641,1.57207,0,0,0,1],"included":true,"visible":[true,false,false,false,true,true,true,true,false,true,false,false,false,false,false,false,false,false,false,false],"unobstructed":[true,false,false,false,false,true,false,false,false,true,false,false,false,false,false,false,false,false,false,false],"height":1.570272020216938},{"image_id":"be8a2edacab34ec8887ba6a7b1e4945f","pose":[0.791463,0.0101015,0.611133,-3.50132,-0.611154,-0.00121731,0.791511,1.58103,0.00873934,-0.999948,0.00521015,1.56992,0,0,0,1],"included":true,"visible":[true,false,true,false,true,false,false,true,false,false,false,true,false,false,false,false,false,false,false,true],"unobstructed":[true,false,false,false,true,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false],"height":1.577126892771864},{"image_id":"9bdde31adaa1443bb206b09bfa3c474c","pose":[0.799844,0.0047414,0.60019,8.67581,-0.600208,0.0075118,0.799809,-4.8108,-0.000716311,-0.99996,0.00885413,2.82261,0,0,0,1],"included":true,"visible":[false,false,false,true,false,false,false,false,false,false,false,false,false,true,false,false,true,true,false,false],"unobstructed":[false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false],"height":1.58264400638767},{"image_id":"66d4adb61b57494aa2c1ad141a0fad9b","pose":[-0.34536,-0.0108675,-0.938407,-2.27885,0.938436,0.00459882,-0.345423,-3.2282,0.00806945,-0.99993,0.00861029,1.58739,0,0,0,1],"included":true,"visible":[false,false,true,false,false,true,true,true,false,true,false,false,false,false,true,true,false,false,false,true],"unobstructed":[false,false,true,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,true],"height":1.5705441219971223},{"image_id":"83ff709c0e3e46079836153ea5c7feac","pose":[0.68423,0.0137303,0.729137,3.42529,-0.729235,0.00364543,0.684254,1.65175,0.00673696,-0.999899,0.012507,4.3706
9,0,0,0,1],"included":true,"visible":[false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false],"height":1.578378655072358},{"image_id":"d9e325df2f3948679c78b93d8025e2da","pose":[0.826698,0.0192407,0.562317,8.49764,-0.562455,0.00220125,0.826825,-0.816805,0.0146709,-0.999812,0.0126418,4.38875,0,0,0,1],"included":true,"visible":[false,true,false,true,false,false,false,false,false,false,true,false,true,false,false,false,false,true,true,false],"unobstructed":[false,true,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,false],"height":1.5865892751674604},{"image_id":"423efb97f77f4e7995f19c66fe82afbc","pose":[0.958879,0.00141119,0.283813,5.51819,-0.283808,0.0124035,0.958801,-5.67527,-0.00216725,-0.999922,0.012294,1.58856,0,0,0,1],"included":true,"visible":[false,false,true,false,false,false,true,true,false,false,false,false,false,false,false,false,true,false,false,false],"unobstructed":[false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,true,false,false,false],"height":1.5784339701720043},{"image_id":"6c49579a5cd34df8acb7f790b74e9eae","pose":[-0.95716,-0.00676032,-0.289482,-6.48379,0.289538,-0.00977451,-0.957117,-2.57899,0.00364085,-0.999929,0.0113132,1.59886,0,0,0,1],"included":true,"visible":[false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,true],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true],"height":1.5798282335589897},{"image_id":"aeed67040d744240b188f66f17d87d43","pose":[0.132175,0.0257204,0.990893,7.67989,-0.991226,0.00381825,0.132121,-5.81072,-0.000385302,-0.999662,0.0259995,2.29866,0,0,0,1],"included":true,"visible":[false,fals
e,true,false,false,false,true,true,false,false,true,false,false,false,true,false,false,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,true,false,false,false,true,false,false,false,false,false],"height":1.6026680667792301},{"image_id":"aae01016bb354f78bd6db86e9d71af2b","pose":[0.0788252,0.00384462,0.996881,6.79041,-0.996887,0.00184069,0.0788186,-0.995862,-0.00153193,-0.999991,0.0039778,4.37219,0,0,0,1],"included":true,"visible":[false,true,false,true,false,false,false,false,false,false,false,false,false,true,false,false,false,false,true,false],"unobstructed":[false,true,false,true,false,false,false,false,false,false,false,false,false,true,false,false,false,false,true,false],"height":1.5770919536040346},{"image_id":"346b680ac5904359a1859c929ad312b6","pose":[-0.589008,0.00463239,0.808114,5.58585,-0.808123,0.00000695791,-0.589015,0.644327,-0.00273419,-0.999989,0.00373948,4.38174,0,0,0,1],"included":true,"visible":[false,true,false,true,false,false,false,false,false,false,false,false,true,true,false,false,false,true,false,false],"unobstructed":[false,true,false,true,false,false,false,false,false,false,false,false,true,true,false,false,false,true,false,false],"height":1.5707587596461066},{"image_id":"ae91518ed77047b3bdeeca864cd04029","pose":[0.310985,0.0070688,0.950389,-4.60607,-0.950392,-0.00460962,0.31102,-2.5949,0.00657945,-0.999964,0.00528466,1.58581,0,0,0,1],"included":true,"visible":[false,false,true,false,true,true,false,true,false,true,false,true,false,false,false,true,false,false,false,false],"unobstructed":[false,false,true,false,true,false,false,false,false,false,false,true,false,false,false,true,false,false,false,false],"height":1.5747548700639524}]
--------------------------------------------------------------------------------
/connectivity/GdvgFV5R1Z5_connectivity.json:
--------------------------------------------------------------------------------
1 | [{"image_id":"0b02e18654324edd8d74c078b66bfb20","pose":[-0.057695,-0.000357129,0.998334,-2.46692,-0.998304,-0.00769199,-0.0576965,-3.15814,0.00770012,-0.99997,0.0000884733,1.5171,0,0,0,1],"included":true,"visible":[false,true,false,false,false,true,true,true,true,false,true,false],"unobstructed":[false,false,false,false,false,true,false,true,true,false,true,false],"height":1.51470410293751},{"image_id":"1db1c0a09ecf40d188197efc05ced3bb","pose":[-0.442443,0.0138817,0.896688,-4.03893,-0.89679,-0.0101225,-0.442338,-3.05434,0.00293664,-0.999852,0.0169288,0.974424,0,0,0,1],"included":true,"visible":[true,false,false,false,true,true,false,false,true,false,false,true],"unobstructed":[false,false,false,false,false,true,false,false,true,false,false,true],"height":0.9701803380402906},{"image_id":"6178647ca8d14dc09370f6c1b7ed2fd6","pose":[-0.870025,0.0056275,0.492973,-3.69279,-0.493005,-0.0105975,-0.869962,1.95433,0.000328893,-0.999927,0.0119957,1.51516,0,0,0,1],"included":true,"visible":[false,false,false,true,false,false,true,true,false,false,true,false],"unobstructed":[false,false,false,true,false,false,true,true,false,true,true,false],"height":1.517582101716661},{"image_id":"565cc21cd28b4ee6bb5ba83c5270c032","pose":[0.0242634,0.000986587,-0.999704,-3.91782,0.999699,0.00333371,0.024267,0.178675,0.00335701,-0.999993,-0.0009042,1.50868,0,0,0,1],"included":true,"visible":[false,false,true,false,false,false,true,false,false,true,true,false],"unobstructed":[false,false,true,false,false,false,false,false,false,true,true,false],"height":1.5114421933143356},{"image_id":"ef638e508e054c4aabd49b38d1b88fc7","pose":[0.0820523,0.0151057,0.996513,-4.61631,-0.995947,-0.0356725,0.0825462,-2.18899,0.0367954,-0.999249,0.0121187,1.52757,0,0,0,1],"included":true,"visible":[false,true,false,false,false,true,false,false,true,false,false,true],"unobstructed":[false,false,false,false,false,true,false,false,true,false,false,true],"height":1.5162868543024455},{"image_id":"97ed68de989e44fdaf2d9b94
9898fab6","pose":[0.0900997,0.0149714,0.99582,-3.64126,-0.995713,-0.0195971,0.0903844,-3.16818,0.0208687,-0.999695,0.0131427,1.52081,0,0,0,1],"included":true,"visible":[true,true,false,false,true,false,false,false,true,false,false,true],"unobstructed":[true,true,false,false,true,false,false,false,true,false,false,true],"height":1.5211418713547455},{"image_id":"5fd70cff4992429a99a84fd3c117ccb5","pose":[-0.0539877,-0.000800861,-0.998541,0.0108044,0.998337,0.0201438,-0.0539926,0.00604319,0.020158,-0.999796,-0.000286778,1.51223,0,0,0,1],"included":true,"visible":[true,false,true,true,false,false,false,true,false,true,true,false],"unobstructed":[false,false,true,false,false,false,false,true,false,false,true,false],"height":1.5113248528175798},{"image_id":"86d342c576ff46a9828d2ba377cc8cd5","pose":[0.998173,0.0151118,-0.0584746,-1.78347,0.0584707,0.000718574,0.998288,-1.89835,0.0151283,-0.999885,-0.000165129,1.52238,0,0,0,1],"included":true,"visible":[true,false,true,false,false,false,true,false,false,false,true,false],"unobstructed":[true,false,true,false,false,false,true,false,false,false,true,false],"height":1.5103397372923053},{"image_id":"8dba9ff900b14f9b84ead660f5f7f701","pose":[-0.999855,-0.0144511,0.00887107,-4.11579,-0.00895392,0.00564829,-0.999943,-2.90606,0.0144005,-0.999879,-0.00577567,1.51617,0,0,0,1],"included":true,"visible":[true,true,false,false,true,true,false,false,false,false,false,true],"unobstructed":[true,true,false,false,true,true,false,false,false,false,false,true],"height":1.5112098807574073},{"image_id":"0d8c5fbfd73f44e28d6da370520611e4","pose":[0.0769887,0.00664334,0.997009,-6.15424,-0.997016,-0.00490415,0.0770216,-0.0398163,0.00540151,-0.999965,0.00624716,1.50965,0,0,0,1],"included":true,"visible":[false,false,true,true,false,false,true,false,false,false,true,false],"unobstructed":[false,false,true,true,false,false,false,false,false,false,false,false],"height":1.5058928427471967},{"image_id":"aebb1de49d21485e8bef7633dfb58761","pose":[-0.0229751
,-0.0058052,-0.999718,-1.94579,0.999719,0.00553997,-0.0230069,-0.026534,0.00567231,-0.999967,0.0056775,1.50582,0,0,0,1],"included":true,"visible":[true,false,true,true,false,false,true,true,false,true,false,false],"unobstructed":[true,false,true,true,false,false,true,true,false,false,false,false],"height":1.5101720791580233},{"image_id":"e34e51f3d6584ad09c510de5db84752f","pose":[-0.0418368,-0.0124855,0.999046,-3.99281,-0.993607,-0.104406,-0.0429142,-2.13265,0.104842,-0.994456,-0.00803644,0.980264,0,0,0,1],"included":true,"visible":[false,true,false,false,true,true,false,false,true,false,false,false],"unobstructed":[false,true,false,false,true,true,false,false,true,false,false,false],"height":0.969584316081611}]
--------------------------------------------------------------------------------
/connectivity/README.md:
--------------------------------------------------------------------------------
1 | ## connectivity
2 | Connectivity graphs indicating the navigable paths between viewpoints in each scan.
3 |
4 | Each json file contains an array of annotations, one for each viewpoint in the scan. All annotations share the same basic structure as follows:
5 |
6 | ```
7 | {
8 | "image_id": str,
9 | "pose": [float x 16],
10 | "included": boolean,
11 | "visible": [boolean x num_viewpoints],
12 | "unobstructed": [boolean x num_viewpoints],
13 | "height": float
14 | }
15 | ```
16 | - `image_id`: Matterport skybox prefix
17 | - `pose`: 4x4 matrix in row-major order that transforms Matterport skyboxes to global coordinates (z-up). Pose matrices are based on the assumption that the camera is facing skybox image 3.
18 | - `included`: whether viewpoint is included in the simulator. Some overlapping viewpoints are excluded.
19 | - `visible`: indicates other viewpoints that can be seen from this viewpoint.
20 | - `unobstructed`: indicates transitions to other viewpoints that are considered navigable for an agent.
21 | - `height`: estimated height of the viewpoint above the floor. Not required for the simulator.
22 |
23 | Units are in metres.
24 |
25 | `scans.txt` contains a list of all the scan ids in the dataset.
26 |
--------------------------------------------------------------------------------
/connectivity/YmJkqBEsHnH_connectivity.json:
--------------------------------------------------------------------------------
1 | [{"image_id":"006933a75f764c5485cf284bea0ded0b","pose":[0.210914,-0.00824746,-0.977469,-7.64722,0.977278,0.0232484,0.210677,-2.15553,0.0209873,-0.999695,0.0129646,1.56695,0,0,0,1],"included":true,"visible":[false,false,true,false,true,true,false,true,true,true,false],"unobstructed":[false,false,false,false,false,false,false,true,true,false,false],"height":1.524793092035509},{"image_id":"e4ede0695e4e4a77aae8537abb9f11d3","pose":[-0.0422212,-0.0176246,-0.998952,-0.133122,0.998904,0.0194092,-0.0425613,-0.0184591,0.0201393,-0.999656,0.016787,1.48352,0,0,0,1],"included":true,"visible":[false,false,true,true,false,false,false,false,false,false,false],"unobstructed":[false,false,true,false,false,true,false,false,false,false,false],"height":1.5227398475592409},{"image_id":"d471e89e00be49f49a7ecace814d60bf","pose":[0.426939,-0.00370058,-0.904272,-0.421886,0.904055,0.0239963,0.426739,-2.12366,0.0201203,-0.999705,0.0135916,1.49477,0,0,0,1],"included":true,"visible":[true,true,false,true,true,true,false,true,true,true,false],"unobstructed":[false,true,false,true,false,true,false,false,false,false,false],"height":1.5263900136377955},{"image_id":"b34af02ce9b642ebbd0c7e9e0ba3b553","pose":[0.960272,0.00870611,-0.278924,-0.0905727,0.278755,0.0168277,0.960214,-3.55265,0.0130537,-0.99982,0.0137334,1.49061,0,0,0,1],"included":true,"visible":[true,true,true,false,false,false,false,false,false,false,false],"unobstructed":[false,false,true,false,false,true,false,false,false,false,false],"height":1.5323637229797105},{"image_id":"01c80b5f8fbd4c969ee0bc03f1ec7a6c","pose":[0.359562,-0.0105291,-0.933061,-3.77309,0.932771,0.0313799,0.359097,-2.1838,0.0254987,-0.999452,0.0211054,1.53932,0,0,0,1],"included":true,"visible":[true,false,true,false,false,true,false,true,true,true,false],"unobstructed":[false,false,false,false,false,true,false,true,false,false,false],"height":1.5286629461398107},{"image_id":"82ea5baa30f945fe98f6cad3064af847","pose":[0.0376233,-0.0115611,-0.999224,-2.01669,0.998821
,0.0310955,0.0372487,-2.16965,0.030641,-0.999449,0.0127185,1.50807,0,0,0,1],"included":true,"visible":[true,true,true,true,true,false,false,true,true,true,false],"unobstructed":[false,true,true,true,true,false,false,false,false,false,false],"height":1.5253207999550662},{"image_id":"aecbb791f30b452a9236c5a8c7030663","pose":[0.296076,-0.0242641,-0.954855,-13.5955,0.955111,0.0179483,0.2957,-2.22547,0.00996343,-0.999544,0.0284901,1.59272,0,0,0,1],"included":true,"visible":[true,false,true,false,true,true,false,true,true,true,true],"unobstructed":[false,false,false,false,false,false,false,false,false,true,true],"height":1.7557263982456066},{"image_id":"d841f7b710f9470796d55561f8f524db","pose":[0.270437,0.002913,-0.962732,-5.77716,0.962325,0.0284129,0.27041,-2.21321,0.028142,-0.999591,0.00488176,1.55947,0,0,0,1],"included":true,"visible":[true,false,true,false,true,true,false,false,true,true,false],"unobstructed":[true,false,false,false,true,false,false,false,false,false,false],"height":1.5357935019251416},{"image_id":"8e38fdd81c7949db9646968bafbbdcfc","pose":[-0.00277118,-0.0169575,-0.999852,-9.93905,0.999791,0.020127,-0.00311204,-2.17463,0.0201771,-0.999653,0.0168993,1.60592,0,0,0,1],"included":true,"visible":[true,false,true,false,true,true,false,true,false,true,true],"unobstructed":[true,false,false,false,false,false,false,false,false,true,false],"height":1.5208970888736792},{"image_id":"20fd759be0b64fc9aa96d290f0a704ec","pose":[0.227815,0.0117555,-0.973633,-12.1161,0.973367,0.0235263,0.228037,-2.15724,0.025587,-0.999654,-0.00608172,1.59969,0,0,0,1],"included":true,"visible":[true,false,true,false,true,true,true,true,true,false,true],"unobstructed":[false,false,false,false,false,false,true,false,true,false,false],"height":1.5261379179165138},{"image_id":"d838acff82244c2da0cf2651e54966cb","pose":[0.310234,-0.0632421,-0.948553,-15.2317,0.950604,0.0313736,0.308813,-2.28133,0.0102298,-0.997504,0.0698525,0.902626,0,0,0,1],"included":true,"visible":[true,false,true,false,tr
ue,true,true,true,true,true,false],"unobstructed":[false,false,false,false,false,false,true,false,false,false,false],"height":1.558854711359605}]
--------------------------------------------------------------------------------
/connectivity/gZ6f7yhEvPG_connectivity.json:
--------------------------------------------------------------------------------
1 | [{"image_id":"80929af5cf234ae38ac3a2a4e60e4342","pose":[0.983395,0.00450812,-0.181418,-2.79247,0.181442,-0.00570068,0.983385,-1.38801,0.00339928,-0.999973,-0.00642298,1.42676,0,0,0,1],"included":true,"visible":[false,true,true,false,false,true,false,false],"unobstructed":[false,true,false,true,false,true,false,false],"height":1.4191402375960298},{"image_id":"ba27da20782d4e1a825f0a133ad84da9","pose":[-0.7605,-0.0115739,-0.649234,-2.38988,0.648885,0.0237502,-0.760515,-0.0538717,0.0242219,-0.999651,-0.0105509,1.4341,0,0,0,1],"included":true,"visible":[true,false,true,true,false,true,false,true],"unobstructed":[true,false,false,false,false,true,false,true],"height":1.424939020658826},{"image_id":"46cecea0b30e4786b673f5e951bf82d4","pose":[0.593129,0.0137361,-0.80499,0.99933,0.804932,0.010707,0.59327,1.17558,0.0167685,-0.999848,-0.00470498,1.41684,0,0,0,1],"included":true,"visible":[false,false,false,true,true,false,true,true],"unobstructed":[false,false,false,true,true,false,true,true],"height":1.4252108727703763},{"image_id":"bda7a9e6d1d94b3aa8ff491beb158f3a","pose":[-0.378592,-0.0208239,0.925329,-0.182918,-0.925433,-0.00820128,-0.37882,-1.72967,0.0154776,-0.999749,-0.0161651,1.42205,0,0,0,1],"included":true,"visible":[true,false,true,false,true,false,true,true],"unobstructed":[true,false,true,false,true,false,false,true],"height":1.42983949725488},{"image_id":"dbb2f8000bc04b3ebcd0a55112786149","pose":[-0.595363,0.00457706,-0.803444,1.10196,0.803383,0.0168543,-0.595222,-1.10724,0.0108174,-0.999847,-0.0137106,1.41536,0,0,0,1],"included":true,"visible":[false,false,true,true,false,false,true,true],"unobstructed":[false,false,true,true,false,false,true,true],"height":1.4186255623107038},{"image_id":"29b20fa80dcd4771974303c1ccd8953f","pose":[0.292738,0.0164579,-0.956051,-2.77306,0.956096,0.0090939,0.292909,1.55377,0.0135152,-0.999823,-0.0130722,1.43367,0,0,0,1],"included":true,"visible":[true,true,true,false,true,false,false,false],"unobstructed":[true,true,false,false,
false,false,false,false],"height":1.4237594118402337},{"image_id":"0ee20663dfa34b438d48750ddcd7366c","pose":[-0.75968,-0.0019971,-0.650293,-0.111567,0.650131,0.0201598,-0.759554,1.31337,0.014627,-0.999794,-0.0140156,1.42291,0,0,0,1],"included":true,"visible":[false,false,true,true,true,false,false,true],"unobstructed":[false,false,true,false,true,false,false,true],"height":1.4276556862049736},{"image_id":"47d8a8282c1c4a7fb3eeeacc45e9d959","pose":[-0.0254788,0.00643152,-0.999654,-0.0034508,0.999603,0.0120797,-0.0253995,0.0112371,0.0119124,-0.999906,-0.00673574,1.42388,0,0,0,1],"included":true,"visible":[true,true,true,true,true,false,true,false],"unobstructed":[false,true,true,true,true,false,true,false],"height":1.4268855357216241}]
--------------------------------------------------------------------------------
/connectivity/pLe4wQe7qrG_connectivity.json:
--------------------------------------------------------------------------------
1 | [{"image_id":"e4c0a4ec08104bf5ada134b123fa53e7","pose":[-0.133089,0.0111501,-0.991041,1.16811,0.991028,0.0137789,-0.132932,-2.20571,0.0121736,-0.999843,-0.0128829,1.54855,0,0,0,1],"included":true,"visible":[false,false,true,false,true,false,true,true,false,true,false,false,true,false,true,false,false,false,false,false,false,true,false,true,true,true,true,true,false,false,true],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,true,false,false,true,false,false,false,false,false,false],"height":1.5280399019555968},{"image_id":"959ea6af304a4339bbc5d97f044d11c3","pose":[0.312992,0.0130519,-0.949666,2.47951,0.948724,0.0422726,0.313263,-2.23387,0.0442338,-0.999021,0.000849325,1.58243,0,0,0,1],"included":true,"visible":[false,false,true,true,false,false,false,false,false,true,false,true,true,false,true,true,false,false,false,false,false,false,true,true,true,true,true,false,true,false,true],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,true,false,false],"height":1.5361363756730164},{"image_id":"ffe0e6835287419c9cfe343e9d613d87","pose":[-0.802259,-0.00971694,-0.596896,5.96539,0.59688,0.00470064,-0.802316,-2.03323,0.0106021,-0.999941,0.00202973,1.57957,0,0,0,1],"included":true,"visible":[false,true,false,false,false,false,true,false,false,false,false,false,true,true,false,true,false,false,false,false,false,true,true,false,false,false,true,false,false,true,true],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,true,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true],"height":1.518586128876891},{"image_id":"47a69295198f4265958b9b1d497c328d","pose":[-0.90497,-0.00981301,-0.42536,2.46799,0.425363,0.00186582,-0.90502,2.04203,0.00967489,-0.99995,0.0024866,1.55214
,0,0,0,1],"included":true,"visible":[false,true,false,false,false,true,true,false,true,false,false,true,false,false,false,true,false,false,true,true,true,false,false,false,true,false,false,true,true,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false],"height":1.5121750884423606},{"image_id":"3dfe07714b2f49d88bd4c8749e8bb0b7","pose":[-0.979561,-0.00709476,0.201019,-1.64821,-0.200975,-0.00640329,-0.979575,0.566531,0.0082373,-0.999954,0.00484756,1.56065,0,0,0,1],"included":true,"visible":[true,false,false,false,false,false,true,true,false,false,false,true,false,true,false,true,true,false,false,false,false,true,true,true,true,true,false,true,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,true,false,false,false,false,true,false,false,true,true,false,true,false,false,false],"height":1.5157095354765127},{"image_id":"87407bb6ed614926b91fc3e27eab766e","pose":[0.22909,0.0301697,-0.972937,4.56488,0.973286,0.00848048,0.229435,2.04904,0.0151732,-0.999508,-0.02742,1.5442,0,0,0,1],"included":true,"visible":[false,false,false,true,false,false,true,false,false,true,false,false,false,false,true,true,false,false,true,true,true,false,true,false,false,false,false,false,true,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,false,false,false,false,false,false,false,false,false,false],"height":1.5111934219678684},{"image_id":"530f8e4126b14082a5c4ff6c3f6ae7cd","pose":[-0.172634,-0.00379856,-0.984978,8.51758,0.984978,0.00322887,-0.172647,0.14365,0.00383645,-0.999987,0.0031851,1.4578,0,0,0,1],"included":true,"visible":[false,false,true,false,true,false,false,false,false,false,true,false,false,true,false,false,false,false,false,false,false,false,true,
true,false,false,false,false,false,true,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,true,false],"height":1.5362285111230571},{"image_id":"96782d3925ec4088ab224cdc92a4fd6a","pose":[-0.216113,-0.00838211,-0.976332,1.24213,0.976316,0.00844697,-0.216182,2.38931,0.0100594,-0.999929,0.00635911,1.53856,0,0,0,1],"included":true,"visible":[true,false,false,false,true,true,true,false,true,false,false,false,false,true,false,false,true,false,true,true,false,true,true,true,false,false,false,true,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,true,false,false,false,false,false,true,false,false,false],"height":1.5135335729735602},{"image_id":"2dcc9c6ca2d44d5080a0a7e7b7fb9c4d","pose":[-0.951188,-0.00996649,-0.308449,-1.21085,0.308409,0.00538007,-0.951238,2.40322,0.0111403,-0.999936,-0.00204269,1.55952,0,0,0,1],"included":true,"visible":[false,false,false,false,false,false,false,true,false,true,false,false,false,false,true,false,true,true,true,false,false,true,false,false,false,true,false,true,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,true,false,true,false,false,false],"height":1.5317176811699114},{"image_id":"0d704acada9041c48621c5d01d775da0","pose":[0.884279,0.0143861,0.466735,-1.34535,-0.466608,-0.0113974,0.88439,-2.3821,0.0180428,-0.999831,-0.00336482,1.52522,0,0,0,1],"included":true,"visible":[true,false,false,false,false,false,false,false,true,false,false,false,false,false,true,false,true,true,false,false,false,true,false,false,true,true,false,false,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,f
alse,false,false,false,false,false,false,false,true,true,false,false,false,false,false],"height":1.5405532836763522},{"image_id":"2cbd295d838b4c51b5590dcf2a37fba0","pose":[0.246342,0.0412581,-0.968304,4.76599,0.96868,0.0216735,0.247362,0.169153,0.0311925,-0.998913,-0.0346258,1.42661,0,0,0,1],"included":true,"visible":[false,false,false,false,false,false,true,false,false,false,false,false,false,true,false,false,false,false,false,false,false,true,true,true,false,false,false,false,true,true,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,true,true,false,false,false,false,false,false,false,true,false,true,false,false,false,false,true,true,false],"height":1.5180090338091925},{"image_id":"6fbd170d8df746b0b10e3801e2dad706","pose":[-0.872353,-0.0000202749,0.488874,3.49156,-0.488854,-0.00892582,-0.872319,0.121306,0.00438157,-0.99996,0.00777758,1.41535,0,0,0,1],"included":true,"visible":[false,true,false,false,true,false,false,false,false,false,false,false,false,false,false,true,false,true,false,false,false,true,false,false,false,true,false,false,true,false,false],"unobstructed":[false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false],"height":1.5371204380160495},{"image_id":"31d308fee8284a168c28e238cf814363","pose":[0.998122,0.0164352,-0.0590029,6.9369,0.0592246,-0.0133283,0.998155,-2.13031,0.0156188,-0.999776,-0.0142757,1.58199,0,0,0,1],"included":true,"visible":[false,true,true,false,false,false,false,false,false,false,true,false,false,true,false,true,false,false,true,false,false,true,true,true,false,false,true,false,false,true,true],"unobstructed":[false,false,true,false,false,false,false,false,false,false,true,false,false,true,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false],"height":1.5115252320863801},{"image_id":"789faffd87b949fd9ed7e6df4fadc2f
1","pose":[0.998352,0.0156401,-0.0551931,6.89589,0.0551612,0.00248225,0.998474,-1.07864,0.0157535,-0.999874,0.00161644,1.58253,0,0,0,1],"included":true,"visible":[false,false,true,false,true,false,true,false,false,false,true,false,true,false,false,true,false,false,true,false,false,true,true,true,false,false,false,false,false,true,false],"unobstructed":[false,false,true,false,false,false,true,false,false,false,true,false,true,false,false,false,false,false,false,false,false,false,true,true,false,false,false,false,false,true,false],"height":1.5156362905724483},{"image_id":"a26b0e83785f45d484e5f9b83fdb4df3","pose":[0.784717,-0.00024993,0.619854,-0.356288,-0.619842,-0.00640294,0.7847,-1.3696,0.00377304,-0.999979,-0.0051784,1.5663,0,0,0,1],"included":true,"visible":[true,false,true,false,true,false,false,false,false,true,false,false,true,false,false,true,true,true,false,false,false,true,false,false,true,true,false,true,false,false,false],"unobstructed":[true,false,false,false,true,false,false,false,false,true,false,false,false,false,false,true,true,true,false,false,false,true,false,false,true,true,false,false,false,false,false],"height":1.5217725369665362},{"image_id":"df0b69b34d04453691b72a6c16923756","pose":[0.00951654,-0.00498874,-0.999942,2.41189,0.999919,0.00833186,0.00947506,0.0914117,0.00828438,-0.999952,0.00506864,1.42153,0,0,0,1],"included":true,"visible":[false,true,false,true,true,false,false,false,false,false,true,true,false,true,true,false,true,true,false,false,false,true,false,false,false,true,false,false,true,false,false],"unobstructed":[false,false,false,true,false,false,false,false,false,false,false,true,false,false,true,false,true,false,false,false,false,true,false,false,false,false,false,false,true,false,false],"height":1.5270023190896223},{"image_id":"d7d0e431bbfa40429a561060150f24cb","pose":[0.999351,0.0057182,0.0355512,-0.337565,-0.0355828,0.00559738,0.999351,1.14528,0.00551577,-0.999968,0.00579823,1.55634,0,0,0,1],"included":true,"visible":[false,fa
lse,false,false,true,false,true,true,true,false,false,false,false,true,true,true,false,true,false,false,false,true,true,false,true,false,false,true,false,false,false],"unobstructed":[false,false,false,false,true,false,false,true,true,false,false,false,false,false,true,true,false,true,false,false,false,true,false,false,false,false,false,true,false,false,false],"height":1.5126864275679581},{"image_id":"8f17854feb134826ae42e16b303e7445","pose":[-0.04737,0.0249555,-0.998565,-0.00382618,0.998875,0.00294013,-0.0473109,-0.017549,0.00175551,-0.999684,-0.0250657,1.55087,0,0,0,1],"included":true,"visible":[false,false,false,false,false,false,false,false,true,true,false,true,false,true,true,true,true,false,false,false,false,true,false,false,true,false,false,true,false,false,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,true,false,false,false,false,true,false,false,false,false,false,true,false,false,false],"height":1.5136058544662168},{"image_id":"d0584db5d0ba41ee955f6c91195afcb3","pose":[-0.0387735,-0.000627238,0.999248,6.85886,-0.999187,-0.0109357,-0.0387783,2.09848,0.0109521,-0.99994,-0.000201698,1.56982,0,0,0,1],"included":true,"visible":[false,false,false,true,false,true,false,false,false,false,false,false,true,true,true,true,false,false,false,true,true,true,true,true,false,false,false,false,false,true,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,true,false,false,false,false,false,true,false],"height":1.5123722877852799},{"image_id":"87491cd48b094270a2a1aa682b8a770c","pose":[0.995378,0.0106665,0.0954335,5.60063,-0.0953334,-0.00948957,0.9954,2.17887,0.0115233,-0.999898,-0.00842783,1.55259,0,0,0,1],"included":true,"visible":[false,false,false,true,false,true,true,false,false,false,true,true,true,true,false,true,false,false,true,false,true,true,true,true,false,false,false,false,true,false,fal
se],"unobstructed":[false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,false,true,false],"height":1.5096271733017124},{"image_id":"8a65d3586fed4c5f9e0f28fc184b3ff2","pose":[0.999328,0.0243579,-0.0273564,3.25097,0.0277536,-0.016113,0.999485,2.12641,0.0239048,-0.999573,-0.0167772,1.55627,0,0,0,1],"included":true,"visible":[false,false,false,true,false,true,true,false,true,false,true,true,true,true,false,false,false,false,true,true,false,false,false,true,false,false,false,false,false,false,false],"unobstructed":[false,false,false,true,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false],"height":1.5216447032258948},{"image_id":"eb464984cc4847d2a61eab27e3e31e51","pose":[0.317487,0.0187868,-0.948076,1.37215,0.94826,-0.0045702,0.317459,0.120026,0.0016314,-0.999813,-0.0192648,1.55431,0,0,0,1],"included":true,"visible":[true,false,false,false,true,false,false,true,true,true,false,true,false,true,true,true,true,true,false,false,false,false,false,false,true,true,false,false,false,false,false],"unobstructed":[true,false,false,false,true,false,false,true,false,false,true,false,false,false,true,true,true,true,false,false,false,false,false,false,true,true,false,false,false,false,false],"height":1.5187432392237161},{"image_id":"ce103547e620457f935a63050cea57b3","pose":[-0.926095,-0.0151941,-0.376983,7.37065,0.376978,0.00327303,-0.926216,0.160002,0.0153072,-0.999879,0.00269771,1.43016,0,0,0,1],"included":true,"visible":[false,false,true,false,true,false,true,false,false,false,true,false,true,true,false,false,false,false,true,false,false,false,false,true,false,true,false,false,false,true,false],"unobstructed":[false,false,false,false,false,false,true,false,false,false,false,false,false,true,false,false,false,false,false,false,false,false,false,true,false,false,fa
lse,false,false,true,false],"height":1.5228214121764414},{"image_id":"fa48c6f958304aa8a8f765a72fe7e8d5","pose":[-0.994837,-0.00721806,0.101218,6.07693,-0.101252,0.00455002,-0.99485,0.0491342,0.00672061,-0.999963,-0.00525636,1.42403,0,0,0,1],"included":true,"visible":[false,false,false,false,true,false,true,false,false,false,true,false,true,true,false,false,false,false,false,false,false,false,true,false,false,true,false,false,false,true,false],"unobstructed":[false,false,false,false,false,false,false,false,false,false,true,false,true,true,false,false,false,false,true,false,false,false,true,false,false,false,false,false,false,true,false],"height":1.520425902170783},{"image_id":"50be95bc6efb466c90867d52cf32ba3f","pose":[0.803639,0.00102907,-0.595115,-0.280264,0.595001,0.0182495,0.803517,-2.40583,0.0116877,-0.999833,0.0140547,1.54308,0,0,0,1],"included":true,"visible":[true,false,true,false,true,false,false,false,false,true,false,false,true,false,true,false,true,true,false,false,false,true,true,false,false,true,false,true,false,false,false],"unobstructed":[true,false,false,false,true,false,false,false,false,true,false,false,false,false,true,false,false,false,false,false,false,true,false,false,false,true,false,false,false,false,false],"height":1.5259856691595353},{"image_id":"91d1554c155e4185a8c69636d47fd58d","pose":[0.7634,0.00593063,0.645898,-1.49105,-0.645812,-0.0117048,0.763406,-0.563949,0.0120878,-0.999914,-0.00510434,1.56479,0,0,0,1],"included":true,"visible":[true,false,false,false,true,false,true,true,true,true,false,true,false,false,true,true,false,false,false,false,false,true,true,true,true,false,false,true,false,false,false],"unobstructed":[false,false,false,false,true,false,false,false,true,true,false,false,false,false,true,false,false,false,false,false,false,true,false,false,true,false,false,true,false,false,false],"height":1.5123581928141085},{"image_id":"5d4349e09ada47b0aa8b20a0d22c54ca","pose":[0.0797542,0.0285043,-0.996407,3.62156,0.996744,0.00951931,0.0
80054,-2.10242,0.0117672,-0.999548,-0.0276513,1.56537,0,0,0,1],"included":true,"visible":[false,true,true,false,false,false,false,false,false,true,true,true,true,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false,true,false,true],"unobstructed":[false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,true],"height":1.5223704869964667},{"image_id":"298e09e5e1144e7b9762747370ca68a5","pose":[0.31306,-0.00832259,-0.949696,0.0361493,0.949732,0.00181293,0.313056,2.42577,-0.000883427,-0.999963,0.0084728,1.55565,0,0,0,1],"included":true,"visible":[true,false,false,false,true,false,true,true,true,false,false,false,false,true,true,false,true,true,true,true,false,false,false,true,true,true,false,false,false,false,false],"unobstructed":[false,false,false,false,true,false,false,true,true,false,false,false,false,false,false,false,true,true,false,false,false,false,false,false,false,true,false,false,false,false,false],"height":1.5224640014863746},{"image_id":"f8e13e216dd6477ea05e694e2f1478d9","pose":[0.998766,0.0109404,-0.0484187,2.48582,0.0482994,0.0109393,0.998773,-1.19789,0.0114569,-0.99988,0.0103984,1.57265,0,0,0,1],"included":true,"visible":[false,true,false,true,true,false,true,false,true,true,true,true,false,false,false,true,true,true,false,false,false,false,true,true,true,false,true,false,false,false,true],"unobstructed":[false,true,false,false,false,false,false,false,false,false,true,true,false,false,false,true,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false],"height":1.5206684141424807},{"image_id":"e5f7cab8517b47399eda8866f0e30ab3","pose":[-0.660778,-0.00608519,-0.750556,7.08848,0.750578,-0.00299603,-0.660773,1.44662,0.00177251,-0.999977,0.00654814,1.57334,0,0,0,1],"included":true,"visible":[false,false,false,false,false,true,true,false,false,false,true,false,true,true,fals
e,false,false,false,true,true,false,false,true,true,false,true,false,false,true,false,false],"unobstructed":[false,false,false,false,false,false,true,false,false,false,true,false,false,true,false,false,false,false,true,true,false,false,true,true,false,false,false,false,false,false,false],"height":1.5050461478205863},{"image_id":"a924a5855b954d68b26ebe82ab61c71d","pose":[-0.120428,-0.000846936,-0.992721,4.79789,0.992705,0.00559062,-0.12043,-2.05172,0.0056522,-0.999984,0.000168504,1.57612,0,0,0,1],"included":true,"visible":[false,true,true,false,false,false,true,false,true,false,true,false,true,false,false,false,true,false,false,false,false,true,true,true,false,false,true,false,false,true,false],"unobstructed":[false,false,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,false,false,false],"height":1.5244946264278192}]
--------------------------------------------------------------------------------
/connectivity/scans.txt:
--------------------------------------------------------------------------------
1 | 17DRP5sb8fy
2 | 1LXtFkjw3qL
3 | 1pXnuDYAj8r
4 | 29hnd4uzFmX
5 | 2azQ1b91cZZ
6 | 2n8kARJN3HM
7 | 2t7WUuJeko7
8 | 5LpN3gDmAk7
9 | 5q7pvUzZiYa
10 | 5ZKStnWn8Zo
11 | 759xd9YjKW5
12 | 7y3sRwLe3Va
13 | 8194nk5LbLH
14 | 82sE5b5pLXE
15 | 8WUmhLawc2A
16 | aayBHfsNo7d
17 | ac26ZMwG7aT
18 | ARNzJeq3xxb
19 | B6ByNegPMKs
20 | b8cTxDM8gDG
21 | cV4RVeZvu5T
22 | D7G3Y4RVNrH
23 | D7N2EKCX4Sj
24 | dhjEzFoUFzH
25 | E9uDoFAP3SH
26 | e9zR4mvMWw7
27 | EDJbREhghzL
28 | EU6Fwq7SyZv
29 | fzynW3qQPVF
30 | GdvgFV5R1Z5
31 | gTV8FGcVJC9
32 | gxdoqLR6rwA
33 | gYvKGZ5eRqb
34 | gZ6f7yhEvPG
35 | HxpKQynjfin
36 | i5noydFURQK
37 | JeFG25nYj2p
38 | JF19kD82Mey
39 | jh4fc5c5qoQ
40 | JmbYfDe2QKZ
41 | jtcxE69GiFV
42 | kEZ7cmS4wCh
43 | mJXqzFtmKg4
44 | oLBMNvg9in8
45 | p5wJjkQkbXX
46 | pa4otMbVnkk
47 | pLe4wQe7qrG
48 | Pm6F8kyY3z2
49 | pRbA3pwrgk9
50 | PuKPg4mmafe
51 | PX4nDJXEHrG
52 | q9vSo1VnCiC
53 | qoiz87JEwZ2
54 | QUCTc6BB5sX
55 | r1Q1Z4BcV1o
56 | r47D5H71a5s
57 | rPc6DW4iMge
58 | RPmz2sHmrrY
59 | rqfALeAoiTq
60 | s8pcmisQ38h
61 | S9hNv5qa7GM
62 | sKLMLpTHeUy
63 | SN83YJsR3w2
64 | sT4fr6TAbpF
65 | TbHJrupSAjP
66 | ULsKaCPVFJR
67 | uNb9QFRL6hY
68 | ur6pFq6Qu1A
69 | UwV83HsGsw3
70 | Uxmj2M2itWa
71 | V2XKFyX4ASd
72 | VFuaQ6m2Qom
73 | VLzqgDo317F
74 | Vt2qJdWjCF2
75 | VVfe2KiqLaN
76 | Vvot9Ly1tCj
77 | vyrNrziPKCB
78 | VzqfbhrpDEA
79 | wc2JMjhGNzB
80 | WYY7iVyf5p8
81 | X7HyMhZNoso
82 | x8F5xyUWy9e
83 | XcA2TqTSSAj
84 | YFuZgdQ5vWj
85 | YmJkqBEsHnH
86 | yqstnuAEVhm
87 | YVUC4YcDtcY
88 | Z6MFQCViBuw
89 | ZMojNkEp431
90 | zsNo4HB9uLZ
91 |
--------------------------------------------------------------------------------
/include/Benchmark.hpp:
--------------------------------------------------------------------------------
1 | #ifndef MATTERSIM_BENCHMARK
2 | #define MATTERSIM_BENCHMARK
3 |
4 | #include <chrono>
5 |
6 | namespace mattersim {
7 |
8 | class Timer { // Stopwatch backed by std::chrono::steady_clock (monotonic, immune to wall-clock changes).
9 | public:
10 | Timer();
11 | virtual void Start(); // Begin (or resume) timing — implementation not visible here.
12 | virtual void Stop(); // Pause timing.
13 | virtual void Reset(); // Discard any accumulated elapsed time.
14 | virtual float MilliSeconds(); // Elapsed time in milliseconds.
15 | virtual float MicroSeconds(); // Elapsed time in microseconds.
16 | virtual float Seconds(); // Elapsed time in seconds.
17 | inline bool running() { return running_; } // True while the timer is currently running.
18 |
19 | protected:
20 | bool running_; // Presumably toggled by Start()/Stop(); defined in the .cpp.
21 | std::chrono::steady_clock::time_point start_; // Timestamp captured at the last Start().
22 | std::chrono::steady_clock::duration elapsed_; // Accumulated elapsed duration.
23 | };
24 | }
25 |
26 | #endif // MATTERSIM_BENCHMARK
27 |
--------------------------------------------------------------------------------
/include/MatterSim.hpp:
--------------------------------------------------------------------------------
1 | #ifndef MATTERSIM_HPP
2 | #define MATTERSIM_HPP
3 |
4 | #include <memory>
5 | #include <vector>
6 | #include <string>
7 | #include <cmath>
8 | #include <random>
9 |
10 | #include <opencv2/opencv.hpp>
11 |
12 | #ifdef OSMESA_RENDERING
13 | #define GL_GLEXT_PROTOTYPES
14 | #include <GL/gl.h>
15 | #include <GL/osmesa.h>
16 | #else
17 | #include <GL/glew.h>
18 | #endif
19 |
20 | #define GLM_FORCE_RADIANS
21 | #include <glm/glm.hpp>
22 | #include <glm/gtc/matrix_transform.hpp>
23 | #include <glm/gtc/type_ptr.hpp>
24 |
25 | #include "Benchmark.hpp"
26 |
27 | namespace mattersim {
28 | struct Viewpoint {
29 | //! Viewpoint identifier
30 | std::string viewpointId;
31 | //! Viewpoint index into connectivity graph
32 | unsigned int ix;
33 | //! 3D position in world coordinates
34 | cv::Point3f point;
35 | //! Heading relative to the camera
36 | double rel_heading;
37 | //! Elevation relative to the camera
38 | double rel_elevation;
39 | //! Distance from the agent
40 | double rel_distance;
41 | };
42 |
43 | typedef std::shared_ptr<Viewpoint> ViewpointPtr;
44 | struct ViewpointPtrComp {
45 | inline bool operator() (const ViewpointPtr& l, const ViewpointPtr& r){
46 | return sqrt(l->rel_heading*l->rel_heading+l->rel_elevation*l->rel_elevation)
47 | < sqrt(r->rel_heading*r->rel_heading+r->rel_elevation*r->rel_elevation);
48 | }
49 | };
50 |
51 | /**
52 | * Simulator state class.
53 | */
54 | struct SimState {
55 | //! Building / scan environment identifier
56 | std::string scanId;
57 | //! Number of frames since the last newEpisode() call
58 | unsigned int step = 0;
59 | //! RGB image taken from the agent's current viewpoint
60 | cv::Mat rgb;
61 | //! Depth image taken from the agent's current viewpoint (not implemented)
62 | cv::Mat depth;
63 | //! Agent's current 3D location
64 | ViewpointPtr location;
65 | //! Agent's current camera heading in radians
66 | double heading = 0;
67 | //! Agent's current camera elevation in radians
68 | double elevation = 0;
69 | //! Agent's current view [0-35] (set only when viewing angles are discretized)
70 | //! [0-11] looking down, [12-23] looking at horizon, [24-35] looking up
71 | unsigned int viewIndex = 0;
72 | //! Vector of nearby navigable locations representing state-dependent action candidates, i.e.
73 | //! viewpoints you can move to. Index 0 is always to remain at the current viewpoint.
74 | //! The remaining viewpoints are sorted by their angular distance from the centre of the image.
75 | std::vector<ViewpointPtr> navigableLocations;
76 | };
77 |
78 | typedef std::shared_ptr<SimState> SimStatePtr;
79 |
80 | /**
81 | * Internal class for representing nearby candidate locations that can be moved to.
82 | */
83 | struct Location {
84 | //! True if viewpoint is included in the simulator. Sometimes duplicated viewpoints have been excluded.
85 | bool included;
86 | //! Unique Matterport identifier for every pano location
87 | std::string viewpointId;
88 | //! Rotation component
89 | glm::mat4 rot;
90 | //! Translation component
91 | glm::vec3 pos;
92 | std::vector<bool> unobstructed;
93 | GLuint cubemap_texture;
94 | };
95 |
96 | typedef std::shared_ptr<Location> LocationPtr;
97 |
98 | /**
99 | * Main class for accessing an instance of the simulator environment.
100 | */
101 | class Simulator {
102 | friend class SimulatorPython;
103 | public:
104 | Simulator();
105 |
106 | ~Simulator();
107 |
108 | /**
109 | * Sets camera resolution. Default is 320 x 240.
110 | */
111 | void setCameraResolution(int width, int height);
112 |
113 | /**
114 | * Sets camera vertical field-of-view in radians. Default is 0.8, approx 46 degrees.
115 | */
116 | void setCameraVFOV(double vfov);
117 |
118 | /**
119 | * Enable or disable rendering. Useful for testing. Default is true (enabled).
120 | */
121 | void setRenderingEnabled(bool value);
122 |
123 | /**
124 | * Enable or disable discretized viewing angles. When enabled, heading and
125 | * elevation changes will be restricted to 30 degree increments from zero,
126 | * with left/right/up/down movement triggered by the sign of the makeAction
127 | * heading and elevation parameters. Default is false (disabled).
128 | */
129 | void setDiscretizedViewingAngles(bool value);
130 |
131 | /**
132 | * Initialize the simulator. Further camera configuration won't take any effect from now on.
133 | */
134 | void init();
135 |
136 | /**
137 | * Set a non-standard path to the Matterport3D dataset.
138 | * The provided directory must contain subdirectories of the form:
139 | * "<datasetPath>/v1/scans/<scanId>/matterport_skybox_images/". Default is "./data" (expected location of dataset symlink).
140 | */
141 | void setDatasetPath(const std::string& path);
142 |
143 | /**
144 | * Set a non-standard path to the viewpoint connectivity graphs. The provided directory must contain files
145 | * of the form "<navGraphPath>/<scanId>_connectivity.json". Default is "./connectivity" (the graphs provided
146 | * by this repo).
147 | */
148 | void setNavGraphPath(const std::string& path);
149 |
150 | /**
151 | * Set the random seed for episodes where viewpoint is not provided.
152 | */
153 | void setSeed(int seed) { generator.seed(seed); };
154 |
155 | /**
156 | * Set the camera elevation min and max limits in radians. Default is +-0.94 radians.
157 | * @return true if successful.
158 | */
159 | bool setElevationLimits(double min, double max);
160 |
161 | /**
162 | * Starts a new episode. If a viewpoint is not provided initialization will be random.
163 | * @param scanId - sets which scene is used, e.g. "2t7WUuJeko7"
164 | * @param viewpointId - sets the initial viewpoint location, e.g. "cc34e9176bfe47ebb23c58c165203134"
165 | * @param heading - set the agent's initial camera heading in radians. With z-axis up,
166 | * heading is defined relative to the y-axis (turning right is positive).
167 | * @param elevation - set the initial camera elevation in radians, measured from the horizon
168 | * defined by the x-y plane (up is positive).
169 | */
170 | void newEpisode(const std::string& scanId, const std::string& viewpointId=std::string(),
171 | double heading=0, double elevation=0);
172 |
173 | /**
174 | * Returns the current environment state including RGB image and available actions.
175 | */
176 | SimStatePtr getState();
177 |
178 | /** @brief Select an action.
179 | *
180 | * An RL agent will sample an action here. A task-specific reward can be determined
181 | * based on the location, heading, elevation, etc. of the resulting state.
182 | * @param index - an index into the set of feasible actions defined by getState()->navigableLocations.
183 | * @param heading - desired heading change in radians. With z-axis up, heading is defined
184 | * relative to the y-axis (turning right is positive).
185 | * @param elevation - desired elevation change in radians, measured from the horizon defined
186 | * by the x-y plane (up is positive).
187 | */
188 | void makeAction(int index, double heading, double elevation);
189 |
190 | /**
191 | * Closes the environment and releases underlying texture resources, OpenGL contexts, etc.
192 | */
193 | void close();
194 | private:
195 | const int headingCount = 12; // 12 heading values in discretized views
196 | const double elevationIncrement = M_PI/6.0; // 30 degrees discretized up/down
197 | void loadLocationGraph();
198 | void clearLocationGraph();
199 | void populateNavigable();
200 | void loadTexture(int locationId);
201 | void setHeadingElevation(double heading, double elevation);
202 | void renderScene();
203 | #ifdef OSMESA_RENDERING
204 | void *buffer;
205 | OSMesaContext ctx;
206 | #else
207 | GLuint FramebufferName;
208 | #endif
209 | SimStatePtr state;
210 | bool initialized;
211 | bool renderingEnabled;
212 | bool discretizeViews;
213 | int width;
214 | int height;
215 | double vfov;
216 | double minElevation;
217 | double maxElevation;
218 | glm::mat4 Projection;
219 | glm::mat4 View;
220 | glm::mat4 Model;
221 | glm::mat4 Scale;
222 | glm::mat4 RotateX;
223 | glm::mat4 RotateZ;
224 | GLint PVM;
225 | GLint vertex;
226 | GLuint ibo_cube_indices;
227 | GLuint vbo_cube_vertices;
228 | GLuint glProgram;
229 | GLuint glShaderV;
230 | GLuint glShaderF;
231 | std::string datasetPath;
232 | std::string navGraphPath;
233 | std::map<std::string, std::vector<LocationPtr> > scanLocations;
234 | std::default_random_engine generator;
235 | Timer cpuLoadTimer;
236 | Timer gpuLoadTimer;
237 | Timer renderTimer;
238 | Timer totalTimer;
239 | };
240 | }
241 |
242 | #endif
243 |
--------------------------------------------------------------------------------
/python_requirements.txt:
--------------------------------------------------------------------------------
1 | torch
2 | torchvision
3 | numpy
4 | networkx
5 | tensorboardX
6 | tqdm
7 |
--------------------------------------------------------------------------------
/r2r_src/bleu.py:
--------------------------------------------------------------------------------
1 | # Copyright 2017 Google Inc. All Rights Reserved.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | # ==============================================================================
15 |
16 | """Python implementation of BLEU and smooth-BLEU.
17 |
18 | This module provides a Python implementation of BLEU and smooth-BLEU.
19 | Smooth BLEU is computed following the method outlined in the paper:
20 | Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
21 | evaluation metrics for machine translation. COLING 2004.
22 | """
23 |
24 | import collections
25 | import math
26 |
27 |
28 | def _get_ngrams(segment, max_order):
29 | """Extracts all n-grams upto a given maximum order from an input segment.
30 |
31 | Args:
32 | segment: text segment from which n-grams will be extracted.
33 | max_order: maximum length in tokens of the n-grams returned by this
34 | methods.
35 |
36 | Returns:
37 | The Counter containing all n-grams upto max_order in segment
38 | with a count of how many times each n-gram occurred.
39 | """
40 | ngram_counts = collections.Counter()
41 | for order in range(1, max_order + 1):
42 | for i in range(0, len(segment) - order + 1):
43 | ngram = tuple(segment[i:i+order])
44 | ngram_counts[ngram] += 1
45 | return ngram_counts
46 |
47 |
48 | def compute_bleu(reference_corpus, translation_corpus, max_order=4,
49 | smooth=False):
50 | """Computes BLEU score of translated segments against one or more references.
51 |
52 | Args:
53 | reference_corpus: list of lists of references for each translation. Each
54 | reference should be tokenized into a list of tokens.
55 | translation_corpus: list of translations to score. Each translation
56 | should be tokenized into a list of tokens.
57 | max_order: Maximum n-gram order to use when computing BLEU score.
58 | smooth: Whether or not to apply Lin et al. 2004 smoothing.
59 |
60 | Returns:
61 | 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
62 | precisions and brevity penalty.
63 | """
64 | matches_by_order = [0] * max_order
65 | possible_matches_by_order = [0] * max_order
66 | reference_length = 0
67 | translation_length = 0
68 | for (references, translation) in zip(reference_corpus,
69 | translation_corpus):
70 | reference_length += min(len(r) for r in references)
71 | translation_length += len(translation)
72 |
73 | merged_ref_ngram_counts = collections.Counter()
74 | for reference in references:
75 | merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
76 | translation_ngram_counts = _get_ngrams(translation, max_order)
77 | overlap = translation_ngram_counts & merged_ref_ngram_counts
78 | for ngram in overlap:
79 | matches_by_order[len(ngram)-1] += overlap[ngram]
80 | for order in range(1, max_order+1):
81 | possible_matches = len(translation) - order + 1
82 | if possible_matches > 0:
83 | possible_matches_by_order[order-1] += possible_matches
84 |
85 | precisions = [0] * max_order
86 | for i in range(0, max_order):
87 | if smooth:
88 | precisions[i] = ((matches_by_order[i] + 1.) /
89 | (possible_matches_by_order[i] + 1.))
90 | else:
91 | if possible_matches_by_order[i] > 0:
92 | precisions[i] = (float(matches_by_order[i]) /
93 | possible_matches_by_order[i])
94 | else:
95 | precisions[i] = 0.0
96 |
97 | if min(precisions) > 0:
98 | p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
99 | geo_mean = math.exp(p_log_sum)
100 | else:
101 | geo_mean = 0
102 |
103 | ratio = float(translation_length) / reference_length
104 |
105 | if ratio > 1.0:
106 | bp = 1.
107 | elif ratio == 0.:
108 | bp = 0.
109 | else:
110 | bp = math.exp(1 - 1. / ratio)
111 |
112 | bleu = geo_mean * bp
113 |
114 | return (bleu, precisions, bp, ratio, translation_length, reference_length)
--------------------------------------------------------------------------------
/r2r_src/env.py:
--------------------------------------------------------------------------------
1 | ''' Batched Room-to-Room navigation environment '''
2 |
3 | import sys
4 | sys.path.append('buildpy36')
5 | import MatterSim
6 | import csv
7 | import numpy as np
8 | import math
9 | import base64
10 | import utils
11 | import json
12 | import os
13 | import random
14 | import networkx as nx
15 | from param import args
16 |
17 | from utils import load_datasets, load_nav_graphs, Tokenizer
18 |
19 | csv.field_size_limit(sys.maxsize)
20 |
21 |
22 | class EnvBatch():
23 | ''' A simple wrapper for a batch of MatterSim environments,
24 | using discretized viewpoints and pretrained features '''
25 |
26 | def __init__(self, feature_store=None, batch_size=100):
27 | """
28 | 1. Load pretrained image feature
29 | 2. Init the Simulator.
30 | :param feature_store: The name of file stored the feature.
31 | :param batch_size: Used to create the simulator list.
32 | """
33 | if feature_store:
34 | if type(feature_store) is dict: # A silly way to avoid multiple reading
35 | self.features = feature_store
36 | self.image_w = 640
37 | self.image_h = 480
38 | self.vfov = 60
39 | self.feature_size = next(iter(self.features.values())).shape[-1]
40 | print('The feature size is %d' % self.feature_size)
41 | else:
42 | print('Image features not provided')
43 | self.features = None
44 | self.image_w = 640
45 | self.image_h = 480
46 | self.vfov = 60
47 | self.featurized_scans = set([key.split("_")[0] for key in list(self.features.keys())])
48 | self.sims = []
49 | for i in range(batch_size):
50 | sim = MatterSim.Simulator()
51 | sim.setRenderingEnabled(False)
52 | sim.setDiscretizedViewingAngles(True) # Set increment/decrement to 30 degree. (otherwise by radians)
53 | sim.setCameraResolution(self.image_w, self.image_h)
54 | sim.setCameraVFOV(math.radians(self.vfov))
55 | sim.init()
56 | self.sims.append(sim)
57 |
58 | def _make_id(self, scanId, viewpointId):
59 | return scanId + '_' + viewpointId
60 |
61 | def newEpisodes(self, scanIds, viewpointIds, headings):
62 | for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
63 | # print("New episode %d" % i)
64 | # sys.stdout.flush()
65 | self.sims[i].newEpisode(scanId, viewpointId, heading, 0)
66 |
67 | def getStates(self):
68 | """
69 | Get list of states augmented with precomputed image features. rgb field will be empty.
70 | Agent's current view [0-35] (set only when viewing angles are discretized)
71 | [0-11] looking down, [12-23] looking at horizon, [24-35] looking up
72 | :return: [ ((30, 2048), sim_state) ] * batch_size
73 | """
74 | feature_states = []
75 | for i, sim in enumerate(self.sims):
76 | state = sim.getState()
77 |
78 | long_id = self._make_id(state.scanId, state.location.viewpointId)
79 | if self.features:
80 | feature = self.features[long_id] # Get feature for
81 | feature_states.append((feature, state))
82 | else:
83 | feature_states.append((None, state))
84 | return feature_states
85 |
86 | def makeActions(self, actions):
87 | ''' Take an action using the full state dependent action interface (with batched input).
88 | Every action element should be an (index, heading, elevation) tuple. '''
89 | for i, (index, heading, elevation) in enumerate(actions):
90 | self.sims[i].makeAction(index, heading, elevation)
91 |
92 | class R2RBatch():
93 | ''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''
94 |
95 | def __init__(self, feature_store, batch_size=100, seed=10, splits=['train'], tokenizer=None,
96 | name=None, load_target_and_candidate_word=False):
97 | self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
98 | if feature_store:
99 | self.feature_size = self.env.feature_size
100 | self.data = []
101 | if tokenizer:
102 | self.tok = tokenizer
103 | scans = []
104 | for split in splits:
105 | for item in load_datasets([split]):
106 |
107 | # Split multiple instructions into separate entries
108 | for j,instr in enumerate(item['instructions']):
109 | if item['scan'] not in self.env.featurized_scans: # For fast training
110 | continue
111 | new_item = dict(item)
112 | new_item['instr_id'] = '%s_%d' % (item['path_id'], j)
113 | new_item['instructions'] = instr
114 |
115 | if load_target_and_candidate_word:
116 | #get target word
117 | new_item['target_word_no_repeat'] = item['target_word_no_repeat'][j]
118 | #get target word position
119 | new_item['target_word_position'] = item['target_word_position'][j]
120 | #get candidate substitution word
121 | #new_item['candidate_word'] = item['candidate_substitution_word'][j]
122 | #get candidate substitution word position
123 | new_item['candidate_word_position'] = item['candidate_substitution_word_position'][j]
124 |
125 |
126 | if tokenizer:
127 | new_item['instr_encoding'], new_item['sentence_tok'] = tokenizer.encode_sentence(instr)
128 |
129 | if not tokenizer or new_item['instr_encoding'] is not None: # Filter the wrong data
130 | if load_target_and_candidate_word:
131 | if len(new_item["target_word_no_repeat"]) >= 2:
132 | self.data.append(new_item)
133 | scans.append(item['scan'])
134 | else:
135 | self.data.append(new_item)
136 | scans.append(item['scan'])
137 | if name is None:
138 | self.name = splits[0] if len(splits) > 0 else "FAKE"
139 | else:
140 | self.name = name
141 |
142 | self.scans = set(scans)
143 | self.splits = splits
144 | self.seed = seed
145 | random.seed(self.seed)
146 | random.shuffle(self.data)
147 |
148 | self.ix = 0
149 | self.batch_size = batch_size
150 | self._load_nav_graphs()
151 |
152 | self.angle_feature = utils.get_all_point_angle_feature()
153 | self.sim = utils.new_simulator()
154 | self.buffered_state_dict = {}
155 |
156 | # It means that the fake data is equals to data in the supervised setup
157 | self.fake_data = self.data
158 | print('R2RBatch loaded with %d instructions, using splits: %s' % (len(self.data), ",".join(splits)))
159 |
160 | def size(self):
161 | return len(self.data)
162 |
163 | def _load_nav_graphs(self):
164 | """
165 | load graph from self.scan,
166 | Store the graph {scan_id: graph} in self.graphs
167 | Store the shortest path {scan_id: {view_id_x: {view_id_y: [path]} } } in self.paths
168 | Store the distances in self.distances. (Structure see above)
169 | Load connectivity graph for each scan, useful for reasoning about shortest paths
170 | :return: None
171 | """
172 | print('Loading navigation graphs for %d scans' % len(self.scans))
173 | self.graphs = load_nav_graphs(self.scans)
174 | self.paths = {}
175 | for scan, G in self.graphs.items(): # compute all shortest paths
176 | self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
177 | self.distances = {}
178 | for scan, G in self.graphs.items(): # compute all shortest paths
179 | self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
180 |
181 | def _next_minibatch(self, tile_one=False, batch_size=None, **kwargs):
182 | """
183 | Store the minibach in 'self.batch'
184 | :param tile_one: Tile the one into batch_size
185 | :return: None
186 | """
187 | if batch_size is None:
188 | batch_size = self.batch_size
189 | if tile_one:
190 | batch = [self.data[self.ix]] * batch_size
191 | self.ix += 1
192 | if self.ix >= len(self.data):
193 | random.shuffle(self.data)
194 | self.ix -= len(self.data)
195 | else:
196 | batch = self.data[self.ix: self.ix+batch_size]
197 | if len(batch) < batch_size:
198 | random.shuffle(self.data)
199 | self.ix = batch_size - len(batch)
200 | batch += self.data[:self.ix]
201 | else:
202 | self.ix += batch_size
203 | self.batch = batch
204 |
205 | def reset_epoch(self, shuffle=False):
206 | ''' Reset the data index to beginning of epoch. Primarily for testing.
207 | You must still call reset() for a new episode. '''
208 | if shuffle:
209 | random.shuffle(self.data)
210 | self.ix = 0
211 |
212 | def _shortest_path_action(self, state, goalViewpointId):
213 | ''' Determine next action on the shortest path to goal, for supervised training. '''
214 | if state.location.viewpointId == goalViewpointId:
215 | return goalViewpointId # Just stop here
216 | path = self.paths[state.scanId][state.location.viewpointId][goalViewpointId]
217 | nextViewpointId = path[1]
218 | return nextViewpointId
219 |
220 | def make_candidate(self, feature, scanId, viewpointId, viewId):
221 | def _loc_distance(loc):
222 | return np.sqrt(loc.rel_heading ** 2 + loc.rel_elevation ** 2)
223 | base_heading = (viewId % 12) * math.radians(30)
224 | adj_dict = {}
225 | long_id = "%s_%s" % (scanId, viewpointId)
226 | if long_id not in self.buffered_state_dict:
227 | for ix in range(36):
228 | if ix == 0:
229 | self.sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
230 | elif ix % 12 == 0:
231 | self.sim.makeAction(0, 1.0, 1.0)
232 | else:
233 | self.sim.makeAction(0, 1.0, 0)
234 |
235 | state = self.sim.getState()
236 | assert state.viewIndex == ix
237 |
238 | # Heading and elevation for the viewpoint center
239 | heading = state.heading - base_heading
240 | elevation = state.elevation
241 |
242 | visual_feat = feature[ix]
243 |
244 | # get adjacent locations
245 | for j, loc in enumerate(state.navigableLocations[1:]):
246 | # if a loc is visible from multiple view, use the closest
247 | # view (in angular distance) as its representation
248 | distance = _loc_distance(loc)
249 |
250 | # Heading and elevation for for the loc
251 | loc_heading = heading + loc.rel_heading
252 | loc_elevation = elevation + loc.rel_elevation
253 | angle_feat = utils.angle_feature(loc_heading, loc_elevation)
254 | if (loc.viewpointId not in adj_dict or
255 | distance < adj_dict[loc.viewpointId]['distance']):
256 | adj_dict[loc.viewpointId] = {
257 | 'heading': loc_heading,
258 | 'elevation': loc_elevation,
259 | "normalized_heading": state.heading + loc.rel_heading,
260 | 'scanId':scanId,
261 | 'viewpointId': loc.viewpointId, # Next viewpoint id
262 | 'pointId': ix,
263 | 'distance': distance,
264 | 'idx': j + 1,
265 | 'feature': np.concatenate((visual_feat, angle_feat), -1)
266 | }
267 | candidate = list(adj_dict.values())
268 | self.buffered_state_dict[long_id] = [
269 | {key: c[key]
270 | for key in
271 | ['normalized_heading', 'elevation', 'scanId', 'viewpointId',
272 | 'pointId', 'idx']}
273 | for c in candidate
274 | ]
275 | return candidate
276 | else:
277 | candidate = self.buffered_state_dict[long_id]
278 | candidate_new = []
279 | for c in candidate:
280 | c_new = c.copy()
281 | ix = c_new['pointId']
282 | normalized_heading = c_new['normalized_heading']
283 | visual_feat = feature[ix]
284 | loc_heading = normalized_heading - base_heading
285 | c_new['heading'] = loc_heading
286 | angle_feat = utils.angle_feature(c_new['heading'], c_new['elevation'])
287 | c_new['feature'] = np.concatenate((visual_feat, angle_feat), -1)
288 | c_new.pop('normalized_heading')
289 | candidate_new.append(c_new)
290 | return candidate_new
291 |
292 | def _get_obs(self):
293 | obs = []
294 | for i, (feature, state) in enumerate(self.env.getStates()):
295 | item = self.batch[i]
296 | base_view_id = state.viewIndex
297 |
298 | # Full features
299 | candidate = self.make_candidate(feature, state.scanId, state.location.viewpointId, state.viewIndex)
300 |
301 | # (visual_feature, angel_feature) for views
302 | feature = np.concatenate((feature, self.angle_feature[base_view_id]), -1)
303 | if args.pretrain_attacker:
304 | obs.append({
305 | 'instr_id' : item['instr_id'],
306 | 'scan' : state.scanId,
307 | 'viewpoint' : state.location.viewpointId,
308 | 'viewIndex' : state.viewIndex,
309 | 'heading' : state.heading,
310 | 'elevation' : state.elevation,
311 | 'feature' : feature,
312 | 'candidate': candidate,
313 | 'navigableLocations' : state.navigableLocations,
314 | 'instructions' : item['instructions'],
315 | 'teacher' : self._shortest_path_action(state, item['path'][-1]),
316 | 'path_id' : item['path_id'],
317 | # new keys
318 | 'target_word_position': item['target_word_position'],
319 | 'candidate_word_position': item['candidate_word_position'],
320 | 'sentence_tok': item['sentence_tok']
321 | })
322 | else:
323 | obs.append({
324 | 'instr_id' : item['instr_id'],
325 | 'scan' : state.scanId,
326 | 'viewpoint' : state.location.viewpointId,
327 | 'viewIndex' : state.viewIndex,
328 | 'heading' : state.heading,
329 | 'elevation' : state.elevation,
330 | 'feature' : feature,
331 | 'candidate': candidate,
332 | 'navigableLocations' : state.navigableLocations,
333 | 'instructions' : item['instructions'],
334 | 'teacher' : self._shortest_path_action(state, item['path'][-1]),
335 | 'path_id' : item['path_id']
336 | })
337 | if 'instr_encoding' in item:
338 | obs[-1]['instr_encoding'] = item['instr_encoding']
339 | # A2C reward. The negative distance between the state and the final state
340 | obs[-1]['distance'] = self.distances[state.scanId][state.location.viewpointId][item['path'][-1]]
341 | return obs
342 |
343 | def reset(self, batch=None, inject=False, **kwargs):
344 | ''' Load a new minibatch / episodes. '''
345 | if batch is None: # Allow the user to explicitly define the batch
346 | self._next_minibatch(**kwargs)
347 | else:
348 | if inject: # Inject the batch into the next minibatch
349 | self._next_minibatch(**kwargs)
350 | self.batch[:len(batch)] = batch
351 | else: # Else set the batch to the current batch
352 | self.batch = batch
353 | scanIds = [item['scan'] for item in self.batch]
354 | viewpointIds = [item['path'][0] for item in self.batch]
355 | headings = [item['heading'] for item in self.batch]
356 | self.env.newEpisodes(scanIds, viewpointIds, headings)
357 | return self._get_obs()
358 |
359 | def step(self, actions):
360 | ''' Take action (same interface as makeActions) '''
361 | self.env.makeActions(actions)
362 | return self._get_obs()
363 |
364 | def get_statistics(self):
365 | stats = {}
366 | length = 0
367 | path = 0
368 | for datum in self.data:
369 | length += len(self.tok.split_sentence(datum['instructions']))
370 | path += self.distances[datum['scan']][datum['path'][0]][datum['path'][-1]]
371 | stats['length'] = length / len(self.data)
372 | stats['path'] = path / len(self.data)
373 | return stats
374 |
375 |
376 |
--------------------------------------------------------------------------------
/r2r_src/eval.py:
--------------------------------------------------------------------------------
1 | ''' Evaluation of agent trajectories '''
2 |
3 | import json
4 | import os
5 | import sys
6 | from collections import defaultdict
7 | import networkx as nx
8 | import numpy as np
9 | import pprint
10 | pp = pprint.PrettyPrinter(indent=4)
11 |
12 | from env import R2RBatch
13 | from utils import load_datasets, load_nav_graphs
14 | from agent import BaseAgent
15 |
16 |
17 | class Evaluation(object):
18 | ''' Results submission format: [{'instr_id': string, 'trajectory':[(viewpoint_id, heading_rads, elevation_rads),] } ] '''
19 |
20 | def __init__(self, splits, scans, tok):
21 | self.error_margin = 3.0
22 | self.splits = splits
23 | self.tok = tok
24 | self.gt = {}
25 | self.instr_ids = []
26 | self.scans = []
27 | for split in splits:
28 | for item in load_datasets([split]):
29 | if scans is not None and item['scan'] not in scans:
30 | continue
31 | self.gt[str(item['path_id'])] = item
32 | self.scans.append(item['scan'])
33 | self.instr_ids += ['%s_%d' % (item['path_id'], i) for i in range(len(item['instructions']))]
34 | self.scans = set(self.scans)
35 | self.instr_ids = set(self.instr_ids)
36 | self.graphs = load_nav_graphs(self.scans)
37 | self.distances = {}
38 | for scan,G in self.graphs.items(): # compute all shortest paths
39 | self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
40 |
41 | def _get_nearest(self, scan, goal_id, path):
42 | near_id = path[0][0]
43 | near_d = self.distances[scan][near_id][goal_id]
44 | for item in path:
45 | d = self.distances[scan][item[0]][goal_id]
46 | if d < near_d:
47 | near_id = item[0]
48 | near_d = d
49 | return near_id
50 |
51 | def _score_item(self, instr_id, path):
52 | ''' Calculate error based on the final position in trajectory, and also
53 | the closest position (oracle stopping rule).
54 | The path contains [view_id, angle, vofv] '''
55 | gt = self.gt[instr_id.split('_')[-2]]
56 | start = gt['path'][0]
57 | assert start == path[0][0], 'Result trajectories should include the start position'
58 | goal = gt['path'][-1]
59 | final_position = path[-1][0] # the first of [view_id, angle, vofv]
60 | nearest_position = self._get_nearest(gt['scan'], goal, path)
61 | self.scores['nav_errors'].append(self.distances[gt['scan']][final_position][goal])
62 | self.scores['oracle_errors'].append(self.distances[gt['scan']][nearest_position][goal])
63 | self.scores['trajectory_steps'].append(len(path)-1)
64 | distance = 0 # Work out the length of the path in meters
65 | prev = path[0]
66 | for curr in path[1:]:
67 | distance += self.distances[gt['scan']][prev[0]][curr[0]]
68 | prev = curr
69 | self.scores['trajectory_lengths'].append(distance)
70 | self.scores['shortest_lengths'].append(
71 | self.distances[gt['scan']][start][goal]
72 | )
73 |
74 | def score(self, output_file):
75 | ''' Evaluate each agent trajectory based on how close it got to the goal location '''
76 | self.scores = defaultdict(list)
77 | instr_ids = set(self.instr_ids)
78 |
79 | if type(output_file) is str:
80 | with open(output_file) as f:
81 | results = json.load(f)
82 | else:
83 | results = output_file
84 |
85 | for item in results:
86 | # Check against expected ids
87 | if item['instr_id'] in instr_ids:
88 | instr_ids.remove(item['instr_id'])
89 | self._score_item(item['instr_id'], item['trajectory'])
90 |
91 | #if 'train' not in self.splits: # Exclude the training from this. (Because training eval may be partial)
92 | #assert len(instr_ids) == 0, 'Missing %d of %d instruction ids from %s - not in %s'\
93 | # % (len(instr_ids), len(self.instr_ids), ",".join(self.splits), output_file)
94 | #assert len(self.scores['nav_errors']) == len(self.instr_ids)
95 |
96 | score_summary = {
97 | 'nav_error': np.average(self.scores['nav_errors']),
98 | 'oracle_error': np.average(self.scores['oracle_errors']),
99 | 'steps': np.average(self.scores['trajectory_steps']),
100 | 'lengths': np.average(self.scores['trajectory_lengths'])
101 | }
102 | num_successes = len([i for i in self.scores['nav_errors'] if i < self.error_margin])
103 | score_summary['success_rate'] = float(num_successes)/float(len(self.scores['nav_errors']))
104 | oracle_successes = len([i for i in self.scores['oracle_errors'] if i < self.error_margin])
105 | score_summary['oracle_rate'] = float(oracle_successes)/float(len(self.scores['oracle_errors']))
106 |
107 | spl = [float(error < self.error_margin) * l / max(l, p, 0.01)
108 | for error, p, l in
109 | zip(self.scores['nav_errors'], self.scores['trajectory_lengths'], self.scores['shortest_lengths'])
110 | ]
111 | score_summary['spl'] = np.average(spl)
112 |
113 | return score_summary, self.scores
114 |
115 | def bleu_score(self, path2inst):
116 | from bleu import compute_bleu
117 | refs = []
118 | candidates = []
119 | for path_id, inst in path2inst.items():
120 | path_id = str(path_id)
121 | assert path_id in self.gt
122 | # There are three references
123 | refs.append([self.tok.split_sentence(sent) for sent in self.gt[path_id]['instructions']])
124 | candidates.append([self.tok.index_to_word[word_id] for word_id in inst])
125 |
126 | tuple = compute_bleu(refs, candidates, smooth=False)
127 | bleu_score = tuple[0]
128 | precisions = tuple[1]
129 |
130 | return bleu_score, precisions
131 |
132 |
133 | RESULT_DIR = 'tasks/R2R/results/'
134 |
def eval_simple_agents():
    ''' Run simple baselines on each split. '''
    for split in ['train', 'val_seen', 'val_unseen', 'test']:
        # One environment / evaluator pair per split.
        batch_env = R2RBatch(None, batch_size=1, splits=[split])
        evaluator = Evaluation([split])

        for agent_type in ['Stop', 'Shortest', 'Random']:
            results_path = '%s%s_%s_agent.json' % (RESULT_DIR, split, agent_type.lower())
            baseline = BaseAgent.get_agent(agent_type)(batch_env, results_path)
            baseline.test()
            baseline.write_results()
            summary, _ = evaluator.score(results_path)
            print('\n%s' % agent_type)
            pp.pprint(summary)
149 |
150 |
def eval_seq2seq():
    ''' Eval sequence to sequence models on val splits (iteration selected from training error) '''
    # Result-file templates; '%s' is filled with the split name below.
    result_templates = [
        RESULT_DIR + 'seq2seq_teacher_imagenet_%s_iter_5000.json',
        RESULT_DIR + 'seq2seq_sample_imagenet_%s_iter_20000.json'
    ]
    for outfile in result_templates:
        for split in ['val_seen', 'val_unseen']:
            evaluator = Evaluation([split])
            summary, _ = evaluator.score(outfile % split)
            print('\n%s' % outfile)
            pp.pprint(summary)
163 |
164 |
if __name__ == '__main__':
    # Entry point: run the Stop/Shortest/Random baselines on every split.
    # (eval_seq2seq above is available but not invoked by default.)
    eval_simple_agents()
167 |
168 |
169 |
170 |
171 |
172 |
173 |
--------------------------------------------------------------------------------
/r2r_src/param.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import torch
4 |
5 |
class Param:
    """Command-line configuration for training/evaluation.

    All flags are declared and parsed at construction time into ``self.args``.
    ``--optim`` is additionally resolved to a torch optimizer class stored on
    ``self.args.optimizer`` ('rms' -> RMSprop, 'adam' -> Adam, 'sgd' -> SGD).
    """

    def __init__(self):
        self.parser = argparse.ArgumentParser(description="")

        # General
        self.parser.add_argument('--iters', type=int, default=100000)
        self.parser.add_argument('--name', type=str, default='default')
        self.parser.add_argument('--train', type=str, default='speaker')

        # Data preparation
        self.parser.add_argument('--maxInput', type=int, default=80, help="max input instruction")
        # Fixed help text: this caps the decoded (generated) instruction
        # length, not the input (was a copy-paste of --maxInput's help).
        self.parser.add_argument('--maxDecode', type=int, default=120, help="max length of decoded instruction")
        self.parser.add_argument('--maxAction', type=int, default=20, help='Max Action sequence')
        self.parser.add_argument('--batchSize', type=int, default=64)
        self.parser.add_argument('--ignoreid', type=int, default=-100)
        self.parser.add_argument('--feature_size', type=int, default=2048)
        self.parser.add_argument("--loadOptim", action="store_const", default=False, const=True)

        # Load the model from
        self.parser.add_argument("--speaker", default=None)
        self.parser.add_argument("--listener", default=None)
        self.parser.add_argument("--load", type=str, default=None)

        # More Paths from
        self.parser.add_argument("--aug", default=None)

        # Listener Model Config
        self.parser.add_argument("--zeroInit", dest='zero_init', action='store_const', default=False, const=True)
        self.parser.add_argument("--mlWeight", dest='ml_weight', type=float, default=0.)
        self.parser.add_argument("--teacherWeight", dest='teacher_weight', type=float, default=1.)
        self.parser.add_argument("--accumulateGrad", dest='accumulate_grad', action='store_const', default=False, const=True)
        self.parser.add_argument("--features", type=str, default='imagenet')

        # Env Dropout Param
        self.parser.add_argument('--featdropout', type=float, default=0.3)

        # SSL configuration
        self.parser.add_argument("--selfTrain", dest='self_train', action='store_const', default=False, const=True)

        # Submission configuration
        self.parser.add_argument("--candidates", type=int, default=1)
        self.parser.add_argument("--paramSearch", dest='param_search', action='store_const', default=False, const=True)
        self.parser.add_argument("--submit", action='store_const', default=False, const=True)
        self.parser.add_argument("--beam", action="store_const", default=False, const=True)
        self.parser.add_argument("--alpha", type=float, default=0.5)

        # Training Configurations
        self.parser.add_argument('--optim', type=str, default='rms')    # rms, adam
        self.parser.add_argument('--lr', type=float, default=0.0001, help="The learning rate")
        self.parser.add_argument('--decay', dest='weight_decay', type=float, default=0.)
        self.parser.add_argument('--dropout', type=float, default=0.5)
        self.parser.add_argument('--feedback', type=str, default='sample',
                                 help='How to choose next position, one of ``teacher``, ``sample`` and ``argmax``')
        self.parser.add_argument('--teacher', type=str, default='final',
                                 help="How to get supervision. one of ``next`` and ``final`` ")
        self.parser.add_argument('--epsilon', type=float, default=0.1)

        # Model hyper params:
        self.parser.add_argument('--rnnDim', dest="rnn_dim", type=int, default=512)
        self.parser.add_argument('--wemb', type=int, default=256)
        self.parser.add_argument('--aemb', type=int, default=64)
        self.parser.add_argument('--proj', type=int, default=512)
        self.parser.add_argument("--fast", dest="fast_train", action="store_const", default=False, const=True)
        self.parser.add_argument("--valid", action="store_const", default=False, const=True)
        self.parser.add_argument("--candidate", dest="candidate_mask",
                                 action="store_const", default=False, const=True)

        # NOTE(review): argparse `type=bool` treats any non-empty string as
        # True ("--bidir False" still yields True). Left as-is because
        # callers may rely on the flag's current (always-default) behavior.
        self.parser.add_argument("--bidir", type=bool, default=True)    # This is not full option
        self.parser.add_argument("--encode", type=str, default="word")  # sub, word, sub_ctx
        self.parser.add_argument("--subout", dest="sub_out", type=str, default="tanh")  # tanh, max
        self.parser.add_argument("--attn", type=str, default="soft")    # soft, mono, shift, dis_shift

        self.parser.add_argument("--angleFeatSize", dest="angle_feat_size", type=int, default=4)

        # A2C
        self.parser.add_argument("--gamma", default=0.9, type=float)
        self.parser.add_argument("--normalize", dest="normalize_loss", default="total", type=str, help='batch or total')

        #### new argument ####
        self.parser.add_argument('--maxTWnumber', type=int, default=25, help="max number of target word")
        self.parser.add_argument('--maxCWnumber', type=int, default=25, help="max number of candidate word")

        self.parser.add_argument('--itersAlterNav', dest="iters_alter_nav", type=int, default=4000)
        self.parser.add_argument('--itersAlterAtt', dest="iters_alter_att", type=int, default=4000)

        self.parser.add_argument('--feedbackAttacker', dest="feedback_attacker", type=str, default='sample',
                                 help='How to choose next action, one of ``sample`` and ``argmax``')

        # training setting
        self.parser.add_argument("--pretrainAgent", dest="pretrain_agent", action='store_true',
                                 help="pretrain agent")
        self.parser.add_argument("--pretrainAttacker", dest="pretrain_attacker", action='store_true',
                                 help="pretrain attacker")
        self.parser.add_argument("--advTrain", dest="adv_train", action='store_true',
                                 help="adversarial training")
        self.parser.add_argument("--finetuneAgent", dest="finetune_agent", action='store_true',
                                 help="finetune agent")
        self.parser.add_argument("--ifSelfSupervised", dest="if_self_supervised", action='store_true',
                                 help="self-supervised reasoning task")

        # pretrained model path
        self.parser.add_argument("--loadAttacker", dest="load_attacker", type=str, default=None)

        self.args = self.parser.parse_args()

        # Resolve the optimizer name to a torch optimizer class.
        if self.args.optim == 'rms':
            print("Optimizer: Using RMSProp")
            self.args.optimizer = torch.optim.RMSprop
        elif self.args.optim == 'adam':
            print("Optimizer: Using Adam")
            self.args.optimizer = torch.optim.Adam
        elif self.args.optim == 'sgd':
            print("Optimizer: sgd")
            self.args.optimizer = torch.optim.SGD
        else:
            # Was `assert False`, which is silently stripped under `python -O`;
            # fail loudly with the offending value instead.
            raise ValueError("Unknown optimizer: %s" % self.args.optim)
123 |
# Module-level singleton: argv is parsed once at import time so every module
# can simply do `from param import args`.
param = Param()
args = param.args
# Vocabulary file locations (built from the R2R train / train+val splits).
args.TRAIN_VOCAB = 'tasks/R2R/data/train_vocab.txt'
args.TRAINVAL_VOCAB = 'tasks/R2R/data/trainval_vocab.txt'

# Precomputed ResNet-152 image-feature TSVs.
args.IMAGENET_FEATURES = 'img_features/ResNet-152-imagenet.tsv'
args.CANDIDATE_FEATURES = 'img_features/ResNet-152-candidate.tsv'
args.features_fast = 'img_features/ResNet-152-imagenet-fast.tsv'
# Snapshot/log directory derived from the run name (--name).
args.log_dir = 'snap/%s' % args.name

# Side effect at import: create the log directory if it does not exist.
if not os.path.exists(args.log_dir):
    os.makedirs(args.log_dir)
#DEBUG_FILE = open(os.path.join('snap', args.name, "debug.log"), 'w')
137 |
138 |
--------------------------------------------------------------------------------
/r2r_src/speaker.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 | from param import args
4 | import os
5 | import utils
6 | import model
7 | import torch.nn.functional as F
8 |
9 |
class Speaker():
    """Speaker model: encodes a visual trajectory (shortest path) with an RNN
    encoder and decodes a natural-language instruction describing it.

    NOTE(review): several token lookups in this copy appear as empty strings
    (e.g. ``self.tok.word_to_index['']``) and the env_actions dict below has
    duplicate '' keys; in the upstream codebase these are special tokens such
    as <BOS>/<EOS>/<PAD>/<UNK> that look stripped from this dump -- confirm
    against utils.Tokenizer before relying on them.
    """

    # Atomic simulator actions as (forward-step, heading-delta, elevation-delta).
    env_actions = {
        'left': (0,-1, 0), # left
        'right': (0, 1, 0), # right
        'up': (0, 0, 1), # up
        'down': (0, 0,-1), # down
        'forward': (1, 0, 0), # forward
        '': (0, 0, 0), #
        '': (0, 0, 0), #
        '': (0, 0, 0) #
    }

    def __init__(self, env, listener, tok):
        """
        :param env: navigation environment providing observations / simulators.
        :param listener: listener agent; only its _feature_variable is used here.
        :param tok: tokenizer (word_to_index, vocab_size, shrink, finalize).
        """
        self.env = env
        self.feature_size = self.env.feature_size
        self.tok = tok
        self.tok.finalize()
        self.listener = listener

        # Model
        print("VOCAB_SIZE", self.tok.vocab_size())
        self.encoder = model.SpeakerEncoder(self.feature_size+args.angle_feat_size, args.rnn_dim, args.dropout, bidirectional=args.bidir).cuda()
        self.decoder = model.SpeakerDecoder(self.tok.vocab_size(), args.wemb, self.tok.word_to_index[''],
                            args.rnn_dim, args.dropout).cuda()
        self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)
        self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)

        # Evaluation
        self.softmax_loss = torch.nn.CrossEntropyLoss(ignore_index=self.tok.word_to_index[''])

        # Will be used in beam search
        # NOTE(review): size_average/reduce are deprecated CrossEntropyLoss
        # kwargs; modern PyTorch uses reduction='none'.
        self.nonreduced_softmax_loss = torch.nn.CrossEntropyLoss(
            ignore_index=self.tok.word_to_index[''],
            size_average=False,
            reduce=False
        )

    def train(self, iters):
        """Run `iters` teacher-forcing updates (one env batch each) on the
        encoder/decoder, clipping gradients at norm 40."""
        for i in range(iters):
            self.env.reset()

            self.encoder_optimizer.zero_grad()
            self.decoder_optimizer.zero_grad()

            loss = self.teacher_forcing(train=True)

            loss.backward()
            # NOTE(review): clip_grad_norm is deprecated in favor of
            # clip_grad_norm_ in modern PyTorch.
            torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)
            torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)
            self.encoder_optimizer.step()
            self.decoder_optimizer.step()

    def get_insts(self, wrapper=(lambda x: x)):
        """Generate one instruction per path over the whole dataset.

        :param wrapper: optional iterator wrapper (e.g. tqdm) for progress.
        :return: dict path_id -> shrunk word-id list.
        """
        # Get the caption for all the data
        self.env.reset_epoch(shuffle=True)
        path2inst = {}
        total = self.env.size()
        for _ in wrapper(range(total // self.env.batch_size + 1)):  # Guarantee that all the data are processed
            obs = self.env.reset()
            insts = self.infer_batch()  # Get the insts of the result
            path_ids = [ob['path_id'] for ob in obs]  # Gather the path ids
            for path_id, inst in zip(path_ids, insts):
                if path_id not in path2inst:
                    path2inst[path_id] = self.tok.shrink(inst)  # Shrink the words
        return path2inst

    def valid(self, *aargs, **kwargs):
        """

        :param iters:
        :return: path2inst: path_id --> inst (the number from  to )
                 loss: The XE loss
                 word_accu: per word accuracy
                 sent_accu: per sent accuracy
        """
        path2inst = self.get_insts(*aargs, **kwargs)

        # Calculate the teacher-forcing metrics
        self.env.reset_epoch(shuffle=True)
        N = 1 if args.fast_train else 3     # Set the iter to 1 if the fast_train (o.w. the problem occurs)
        metrics = np.zeros(3)
        for i in range(N):
            self.env.reset()
            metrics += np.array(self.teacher_forcing(train=False))
        metrics /= N

        # Returns (path2inst, loss, word_accu, sent_accu).
        return (path2inst, *metrics)

    def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):
        """Replay high-level candidate actions `a_t` as sequences of atomic
        simulator actions (up/down/right turns, then the final move).

        :param a_t: per-item candidate index, or -1 for stop/ignore.
        :param perm_obs: observations, possibly permuted.
        :param perm_idx: mapping from perm_obs index to simulator index.
        :param traj: optional trajectory list; visited states are appended.
        """
        def take_action(i, idx, name):
            if type(name) is int:       # Go to the next view
                self.env.env.sims[idx].makeAction(name, 0, 0)
            else:                       # Adjust
                self.env.env.sims[idx].makeAction(*self.env_actions[name])
            state = self.env.env.sims[idx].getState()
            if traj is not None:
                traj[i]['path'].append((state.location.viewpointId, state.heading, state.elevation))
        if perm_idx is None:
            perm_idx = range(len(perm_obs))
        for i, idx in enumerate(perm_idx):
            action = a_t[i]
            if action != -1:            # -1 is the  action
                select_candidate = perm_obs[i]['candidate'][action]
                src_point = perm_obs[i]['viewIndex']
                trg_point = select_candidate['pointId']
                # 12 headings per elevation level; // 12 yields the level.
                src_level = (src_point) // 12   # The point idx started from 0
                trg_level = (trg_point) // 12
                while src_level < trg_level:    # Tune up
                    take_action(i, idx, 'up')
                    src_level += 1
                    # print("UP")
                while src_level > trg_level:    # Tune down
                    take_action(i, idx, 'down')
                    src_level -= 1
                    # print("DOWN")
                while self.env.env.sims[idx].getState().viewIndex != trg_point:    # Turn right until the target
                    take_action(i, idx, 'right')
                    # print("RIGHT")
                    # print(self.env.env.sims[idx].getState().viewIndex, trg_point)
                assert select_candidate['viewpointId'] == \
                       self.env.env.sims[idx].getState().navigableLocations[select_candidate['idx']].viewpointId
                take_action(i, idx, select_candidate['idx'])

    def _teacher_action(self, obs, ended, tracker=None):
        """
        Extract teacher actions into variable.
        :param obs: The observation.
        :param ended: Whether the action seq is ended
        :return: LongTensor (cuda) of candidate indices; args.ignoreid for
                 ended items, len(candidate) for "stay here".
        """
        a = np.zeros(len(obs), dtype=np.int64)
        for i, ob in enumerate(obs):
            if ended[i]:                                 # Just ignore this index
                a[i] = args.ignoreid
            else:
                for k, candidate in enumerate(ob['candidate']):
                    if candidate['viewpointId'] == ob['teacher']:   # Next view point
                        a[i] = k
                        break
                else:   # Stop here
                    assert ob['teacher'] == ob['viewpoint']         # The teacher action should be "STAY HERE"
                    a[i] = len(ob['candidate'])
        return torch.from_numpy(a).cuda()

    def _candidate_variable(self, obs, actions):
        """Gather the feature of the chosen candidate per observation; a zero
        vector is used for stop/ignore (-1) actions."""
        candidate_feat = np.zeros((len(obs), self.feature_size + args.angle_feat_size), dtype=np.float32)
        for i, (ob, act) in enumerate(zip(obs, actions)):
            if act == -1:  # Ignore or Stop --> Just use zero vector as the feature
                pass
            else:
                c = ob['candidate'][act]
                candidate_feat[i, :] = c['feature'] # Image feat
        return torch.from_numpy(candidate_feat).cuda()

    def from_shortest_path(self, viewpoints=None, get_first_feat=False):
        """
        Roll out the teacher (shortest-path) actions and collect features.
        :param viewpoints: [[], [], ....(batch_size)]. Only for dropout viewpoint
        :param get_first_feat: whether output the first feat
        :return: ((img_feats, can_feats[, first_feat]), length)
        """
        obs = self.env._get_obs()
        ended = np.array([False] * len(obs)) # Indices match permuation of the model, not env
        length = np.zeros(len(obs), np.int64)
        img_feats = []
        can_feats = []
        first_feat = np.zeros((len(obs), self.feature_size+args.angle_feat_size), np.float32)
        for i, ob in enumerate(obs):
            # Only the angle slice is filled for the first feature.
            first_feat[i, -args.angle_feat_size:] = utils.angle_feature(ob['heading'], ob['elevation'])
        first_feat = torch.from_numpy(first_feat).cuda()
        while not ended.all():
            if viewpoints is not None:
                for i, ob in enumerate(obs):
                    viewpoints[i].append(ob['viewpoint'])
            img_feats.append(self.listener._feature_variable(obs))
            teacher_action = self._teacher_action(obs, ended)
            teacher_action = teacher_action.cpu().numpy()
            for i, act in enumerate(teacher_action):
                if act < 0 or act == len(obs[i]['candidate']):  # Ignore or Stop
                    teacher_action[i] = -1                      # Stop Action
            can_feats.append(self._candidate_variable(obs, teacher_action))
            self.make_equiv_action(teacher_action, obs)
            length += (1 - ended)   # Only count steps for still-active items.
            ended[:] = np.logical_or(ended, (teacher_action == -1))
            obs = self.env._get_obs()
        img_feats = torch.stack(img_feats, 1).contiguous()  # batch_size, max_len, 36, 2052
        can_feats = torch.stack(can_feats, 1).contiguous()  # batch_size, max_len, 2052
        if get_first_feat:
            return (img_feats, can_feats, first_feat), length
        else:
            return (img_feats, can_feats), length

    def gt_words(self, obs):
        """
        See "utils.Tokenizer.encode_sentence(...)" for "instr_encoding" details
        """
        seq_tensor = np.array([ob['instr_encoding'] for ob in obs])
        return torch.from_numpy(seq_tensor).cuda()

    def teacher_forcing(self, train=True, features=None, insts=None, for_listener=False):
        """Compute the XE loss of decoding instructions given trajectory features.

        :param train: train mode (loss tensor returned) vs eval mode
                      (loss value + word/sentence accuracy returned).
        :param features: optional precomputed ((img_feats, can_feats), lengths);
                         used when scoring in beam search. If None, features
                         come from the teacher's shortest path on the env.
        :param insts: optional gt word-id tensor; required when features given.
        :param for_listener: if True, return the per-element (non-reduced) loss.
        """
        if train:
            self.encoder.train()
            self.decoder.train()
        else:
            self.encoder.eval()
            self.decoder.eval()

        # Get Image Input & Encode
        if features is not None:
            # It is used in calulating the speaker score in beam-search
            assert insts is not None
            (img_feats, can_feats), lengths = features
            ctx = self.encoder(can_feats, img_feats, lengths)
            batch_size = len(lengths)
        else:
            obs = self.env._get_obs()
            batch_size = len(obs)
            (img_feats, can_feats), lengths = self.from_shortest_path()      # Image Feature (from the shortest path)
            ctx = self.encoder(can_feats, img_feats, lengths)
        h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        ctx_mask = utils.length2mask(lengths)

        # Get Language Input
        if insts is None:
            insts = self.gt_words(obs)                                       # Language Feature

        # Decode
        logits, _, _ = self.decoder(insts, ctx, ctx_mask, h_t, c_t)

        # Because the softmax_loss only allow dim-1 to be logit,
        # So permute the output (batch_size, length, logit) --> (batch_size, logit, length)
        logits = logits.permute(0, 2, 1).contiguous()
        loss = self.softmax_loss(
            input  = logits[:, :, :-1],         # -1 for aligning
            target = insts[:, 1:]               # "1:" to ignore the  word
        )

        if for_listener:
            return self.nonreduced_softmax_loss(
                input  = logits[:, :, :-1],         # -1 for aligning
                target = insts[:, 1:]               # "1:" to ignore the  word
            )

        if train:
            return loss
        else:
            # Evaluation
            _, predict = logits.max(dim=1)                                  # BATCH, LENGTH
            gt_mask = (insts != self.tok.word_to_index[''])
            correct = (predict[:, :-1] == insts[:, 1:]) * gt_mask[:, 1:]    # Not pad and equal to gt
            correct, gt_mask = correct.type(torch.LongTensor), gt_mask.type(torch.LongTensor)
            word_accu = correct.sum().item() / gt_mask[:, 1:].sum().item()     # Exclude
            sent_accu = (correct.sum(dim=1) == gt_mask[:, 1:].sum(dim=1)).sum().item() / batch_size  # Exclude
            return loss.item(), word_accu, sent_accu

    def infer_batch(self, sampling=False, train=False, featdropmask=None):
        """

        :param sampling: if not, use argmax. else use softmax_multinomial
        :param train: Whether in the train mode
        :param featdropmask: optional multiplicative mask applied to the
                             non-angle slice of the image/candidate features.
        :return: if sampling: return insts(np, [batch, max_len]),
                     log_probs(torch, requires_grad, [batch,max_len])
                     hiddens(torch, requires_grad, [batch, max_len, dim})
                     And if train: the log_probs and hiddens are detached
                 if not sampling: returns insts(np, [batch, max_len])
        """
        if train:
            self.encoder.train()
            self.decoder.train()
        else:
            self.encoder.eval()
            self.decoder.eval()

        # Image Input for the Encoder
        obs = self.env._get_obs()
        batch_size = len(obs)
        viewpoints_list = [list() for _ in range(batch_size)]

        # Get feature
        (img_feats, can_feats), lengths = self.from_shortest_path(viewpoints=viewpoints_list)      # Image Feature (from the shortest path)

        # This code block is only used for the featdrop.
        if featdropmask is not None:
            img_feats[..., :-args.angle_feat_size] *= featdropmask
            can_feats[..., :-args.angle_feat_size] *= featdropmask

        # Encoder
        ctx = self.encoder(can_feats, img_feats, lengths,
                           already_dropfeat=(featdropmask is not None))
        ctx_mask = utils.length2mask(lengths)

        # Decoder
        words = []
        log_probs = []
        hidden_states = []
        entropies = []
        h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()
        # NOTE(review): np.bool is removed in NumPy >= 1.24; use bool/np.bool_.
        ended = np.zeros(len(obs), np.bool)
        word = np.ones(len(obs), np.int64) * self.tok.word_to_index['']     # First word is
        word = torch.from_numpy(word).view(-1, 1).cuda()
        for i in range(args.maxDecode):
            # Decode Step
            logits, h_t, c_t = self.decoder(word, ctx, ctx_mask, h_t, c_t)      # Decode, logits: (b, 1, vocab_size)

            # Select the word
            # NOTE(review): squeeze() with batch_size == 1 would also drop the
            # batch dim -- presumably batches here are always > 1; verify.
            logits = logits.squeeze()                                           # logits: (b, vocab_size)
            logits[:, self.tok.word_to_index['']] = -float("inf")               # No  in infer
            if sampling:
                probs = F.softmax(logits, -1)
                m = torch.distributions.Categorical(probs)
                word = m.sample()
                log_prob = m.log_prob(word)
                if train:
                    log_probs.append(log_prob)
                    hidden_states.append(h_t.squeeze())
                    entropies.append(m.entropy())
                else:
                    log_probs.append(log_prob.detach())
                    hidden_states.append(h_t.squeeze().detach())
                    entropies.append(m.entropy().detach())
            else:
                values, word = logits.max(1)

            # Append the word
            cpu_word = word.cpu().numpy()
            cpu_word[ended] = self.tok.word_to_index['']
            words.append(cpu_word)

            # Prepare the shape for next step
            word = word.view(-1, 1)

            # End?
            ended = np.logical_or(ended, cpu_word == self.tok.word_to_index[''])
            if ended.all():
                break

        if train and sampling:
            return np.stack(words, 1), torch.stack(log_probs, 1), torch.stack(hidden_states, 1), torch.stack(entropies, 1)
        else:
            return np.stack(words, 1)       # [(b), (b), (b), ...] --> [b, l]

    def save(self, epoch, path):
        ''' Snapshot models '''
        the_dir, _ = os.path.split(path)
        os.makedirs(the_dir, exist_ok=True)
        states = {}
        # Bundle model + optimizer state under one name.
        def create_state(name, model, optimizer):
            states[name] = {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
        all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
                     ("decoder", self.decoder, self.decoder_optimizer)]
        for param in all_tuple:
            create_state(*param)
        torch.save(states, path)

    def load(self, path):
        ''' Loads parameters (but not training state) '''
        print("Load the speaker's state dict from %s" % path)
        states = torch.load(path)
        def recover_state(name, model, optimizer):
            # print(name)
            # print(list(model.state_dict().keys()))
            # for key in list(model.state_dict().keys()):
            #     print(key, model.state_dict()[key].size())
            # Merge into the current state dict so extra/missing keys in the
            # checkpoint don't break loading.
            state = model.state_dict()
            state.update(states[name]['state_dict'])
            model.load_state_dict(state)
            if args.loadOptim:
                optimizer.load_state_dict(states[name]['optimizer'])
        all_tuple = [("encoder", self.encoder, self.encoder_optimizer),
                     ("decoder", self.decoder, self.decoder_optimizer)]
        for param in all_tuple:
            recover_state(*param)
        # Epoch is stored as epoch+1 in save(); undo that offset here.
        return states['encoder']['epoch'] - 1
388 |
389 |
--------------------------------------------------------------------------------
/run/adv_train.bash:
--------------------------------------------------------------------------------
# Adversarial training stage: alternates navigator and attacker updates
# (--advTrain, --itersAlterNav/--itersAlterAtt) with the self-supervised
# task enabled, starting from the pretrained agent and attacker checkpoints.
# Usage: bash run/adv_train.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=adv_train
flag="--attn soft --train auginslistener
--featdropout 0.3
--angleFeatSize 128
--feedback sample
--feedbackAttacker sample
--mlWeight 0.2
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 40000 --maxAction 35
--advTrain
--ifSelfSupervised
--itersAlterNav 3000
--itersAlterAtt 1000
--load tasks/R2R/snapshots/snap/pretrain/state_dict/best_val_unseen
--loadAttacker tasks/R2R/snapshots/snap/attack/state_dict/best_val_unseen_attacker"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/run/attack.bash:
--------------------------------------------------------------------------------
# Attacker pretraining stage (--pretrainAttacker): trains the attacker against
# a frozen pretrained agent checkpoint, with no imitation loss (--mlWeight 0.).
# Usage: bash run/attack.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=attack
flag="--attn soft --train listener
--featdropout 0.3
--angleFeatSize 128
--feedback argmax
--feedbackAttacker sample
--mlWeight 0.
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 10000 --maxAction 35
--load tasks/R2R/snapshots/snap/pretrain/state_dict/best_val_unseen
--pretrainAttacker"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
--------------------------------------------------------------------------------
/run/finetune.bash:
--------------------------------------------------------------------------------
# Final finetuning stage (--finetuneAgent): continues training the agent from
# the adversarial-training checkpoint.
# Usage: bash run/finetune.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=finetune
flag="--attn soft --train auginslistener
--featdropout 0.3
--angleFeatSize 128
--feedback sample
--mlWeight 0.2
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 300000 --maxAction 35
--finetuneAgent
--load tasks/R2R/snapshots/snap/adv_train/state_dict/best_val_unseen"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
12 |
13 |
--------------------------------------------------------------------------------
/run/pretrain.bash:
--------------------------------------------------------------------------------
# Agent pretraining stage (--pretrainAgent): trains the navigator from scratch
# with augmented-instruction listener training.
# Usage: bash run/pretrain.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=pretrain
flag="--attn soft --train auginslistener
--featdropout 0.3
--angleFeatSize 128
--feedback sample
--mlWeight 0.2
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 40000 --maxAction 35
--pretrainAgent"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
11 |
--------------------------------------------------------------------------------
/run/quick_start.bash:
--------------------------------------------------------------------------------
# Quick-start: same as the finetune stage but loads the released
# adv_train_checkpoint instead of a locally trained one.
# Usage: bash run/quick_start.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=quick_start
flag="--attn soft --train auginslistener
--featdropout 0.3
--angleFeatSize 128
--feedback sample
--mlWeight 0.2
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 300000 --maxAction 35
--finetuneAgent
--load tasks/R2R/snapshots/snap/adv_train_checkpoint/best_val_unseen"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
12 |
13 |
--------------------------------------------------------------------------------
/run/test_agent.bash:
--------------------------------------------------------------------------------
# Evaluation: runs the validation listener (--train validlistener) with
# --submit on the finetuned checkpoint.
# Usage: bash run/test_agent.bash <gpu_id>   ($1 -> CUDA_VISIBLE_DEVICES)
name=test_agent
flag="--attn soft --train validlistener
--featdropout 0.3
--angleFeatSize 128
--feedback sample
--mlWeight 0.2
--submit
--subout max --dropout 0.5 --optim rms --lr 1e-4 --iters 300000 --maxAction 35
--load tasks/R2R/snapshots/snap/finetune_checkpoint/best_val_unseen"
# Create the snapshot directory for this run.
mkdir -p snap/$name
# $flag is intentionally unquoted so it word-splits into separate arguments.
CUDA_VISIBLE_DEVICES=$1 python3.6 r2r_src/train.py $flag --name $name
12 |
13 |
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/0.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/1.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/10.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/11.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/12.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/13.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/14.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/15.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/15.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/16.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/17.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/17.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/18.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/18.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/19.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/19.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/2.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/20.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/21.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/21.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/22.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/22.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/23.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/24.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/24.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/25.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/25.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/26.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/27.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/27.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/28.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/28.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/29.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/29.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/3.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/30.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/30.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/31.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/31.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/32.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/33.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/33.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/34.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/34.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/35.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/35.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/4.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/5.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/6.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/7.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/8.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c/9.png
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/0.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/1.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/10.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/10.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/11.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/11.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/12.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/12.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/13.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/13.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/14.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/14.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/15.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/15.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/16.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/16.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/17.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/17.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/18.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/18.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/19.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/19.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/2.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/20.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/20.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/21.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/21.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/22.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/22.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/23.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/23.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/24.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/24.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/25.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/25.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/26.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/26.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/27.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/27.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/28.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/28.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/29.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/29.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/3.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/30.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/30.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/31.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/31.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/32.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/32.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/33.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/33.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/34.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/34.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/35.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/35.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/4.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/5.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/6.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/7.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/8.jpg
--------------------------------------------------------------------------------
/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/17DRP5sb8fy/10c252c90fa24ef3b698c6f54d984c5c_rgb/9.jpg
--------------------------------------------------------------------------------
/semantic_views/Matterport3D_agreement.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/expectorlin/DR-Attacker/81c5c6324d793c96c94dd229bd26aee9a21651e1/semantic_views/Matterport3D_agreement.pdf
--------------------------------------------------------------------------------
/semantic_views/Matterport3D_license.txt:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Angel Chang, Angela Dai, Thomas Funkhouser, Maciej Halber, Matthias Nießner, Manolis Savva, Shuran Song, Andy Zeng, Yinda Zhang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/semantic_views/label2color.json:
--------------------------------------------------------------------------------
1 | {
2 | "void": {
3 | "R": 128,
4 | "G": 128,
5 | "B": 128
6 | },
7 | "wall": {
8 | "R": 0,
9 | "G": 0,
10 | "B": 255
11 | },
12 | "floor": {
13 | "R": 0,
14 | "G": 255,
15 | "B": 0
16 | },
17 | "chair": {
18 | "R": 0,
19 | "G": 255,
20 | "B": 255
21 | },
22 | "door": {
23 | "R": 255,
24 | "G": 0,
25 | "B": 255
26 | },
27 | "table": {
28 | "R": 255,
29 | "G": 128,
30 | "B": 0
31 | },
32 | "picture": {
33 | "R": 0,
34 | "G": 255,
35 | "B": 128
36 | },
37 | "cabinet": {
38 | "R": 128,
39 | "G": 0,
40 | "B": 255
41 | },
42 | "cushion": {
43 | "R": 128,
44 | "G": 255,
45 | "B": 0
46 | },
47 | "window": {
48 | "R": 0,
49 | "G": 128,
50 | "B": 255
51 | },
52 | "sofa": {
53 | "R": 255,
54 | "G": 0,
55 | "B": 128
56 | },
57 | "bed": {
58 | "R": 128,
59 | "G": 0,
60 | "B": 0
61 | },
62 | "curtain": {
63 | "R": 0,
64 | "G": 128,
65 | "B": 0
66 | },
67 | "chest_of_drawers": {
68 | "R": 0,
69 | "G": 0,
70 | "B": 128
71 | },
72 | "plant": {
73 | "R": 128,
74 | "G": 128,
75 | "B": 0
76 | },
77 | "sink": {
78 | "R": 0,
79 | "G": 128,
80 | "B": 128
81 | },
82 | "stairs": {
83 | "R": 128,
84 | "G": 0,
85 | "B": 128
86 | },
87 | "ceiling": {
88 | "R": 178,
89 | "G": 0,
90 | "B": 0
91 | },
92 | "toilet": {
93 | "R": 0,
94 | "G": 178,
95 | "B": 0
96 | },
97 | "stool": {
98 | "R": 0,
99 | "G": 0,
100 | "B": 178
101 | },
102 | "towel": {
103 | "R": 178,
104 | "G": 178,
105 | "B": 0
106 | },
107 | "mirror": {
108 | "R": 0,
109 | "G": 178,
110 | "B": 178
111 | },
112 | "tv_monitor": {
113 | "R": 178,
114 | "G": 0,
115 | "B": 178
116 | },
117 | "shower": {
118 | "R": 178,
119 | "G": 76,
120 | "B": 0
121 | },
122 | "column": {
123 | "R": 0,
124 | "G": 178,
125 | "B": 76
126 | },
127 | "bathtub": {
128 | "R": 76,
129 | "G": 0,
130 | "B": 178
131 | },
132 | "counter": {
133 | "R": 76,
134 | "G": 178,
135 | "B": 0
136 | },
137 | "fireplace": {
138 | "R": 0,
139 | "G": 76,
140 | "B": 178
141 | },
142 | "lighting": {
143 | "R": 178,
144 | "G": 0,
145 | "B": 76
146 | },
147 | "beam": {
148 | "R": 76,
149 | "G": 0,
150 | "B": 0
151 | },
152 | "railing": {
153 | "R": 0,
154 | "G": 76,
155 | "B": 0
156 | },
157 | "shelving": {
158 | "R": 0,
159 | "G": 0,
160 | "B": 76
161 | },
162 | "blinds": {
163 | "R": 76,
164 | "G": 76,
165 | "B": 0
166 | },
167 | "gym_equipment": {
168 | "R": 0,
169 | "G": 76,
170 | "B": 76
171 | },
172 | "seating": {
173 | "R": 76,
174 | "G": 0,
175 | "B": 76
176 | },
177 | "board_panel": {
178 | "R": 255,
179 | "G": 76,
180 | "B": 76
181 | },
182 | "furniture": {
183 | "R": 76,
184 | "G": 255,
185 | "B": 76
186 | },
187 | "appliances": {
188 | "R": 76,
189 | "G": 76,
190 | "B": 255
191 | },
192 | "clothes": {
193 | "R": 255,
194 | "G": 255,
195 | "B": 76
196 | },
197 | "objects": {
198 | "R": 76,
199 | "G": 255,
200 | "B": 255
201 | },
202 | "misc": {
203 | "R": 255,
204 | "G": 76,
205 | "B": 255
206 | },
207 | "unlabeled": {
208 | "R": 0,
209 | "G": 0,
210 | "B": 0
211 | }
212 | }
213 |
--------------------------------------------------------------------------------
/src/driver/mattersim_main.cpp:
--------------------------------------------------------------------------------
1 | #include <iostream>
2 | #include <opencv2/opencv.hpp>
3 |
4 | #include "MatterSim.hpp"
5 |
6 | using namespace mattersim;
7 |
8 | #define WIDTH 1280
9 | #define HEIGHT 720
10 |
11 | #ifndef M_PI
12 | #define M_PI (3.14159265358979323846)
13 | #endif
14 |
15 | int main(int argc, char *argv[]) {
16 | cv::namedWindow("displaywin");
17 |
18 | Simulator *sim = new Simulator();
19 |
20 | // Sets resolution. Default is 320X240
21 | sim->setCameraResolution(640,480);
22 |
23 | // Initialize the simulator. Further camera configuration won't take any effect from now on.
24 | sim->init();
25 |
26 | // Run this many episodes
27 | int episodes = 10;
28 |
29 | for (int i = 0; i < episodes; ++i) {
30 | std::cout << "Episode #" << i + 1 << "\n";
31 |
32 | // Starts a new episode. It is not needed right after init() but it doesn't cost much and the loop is nicer.
33 | sim->newEpisode("2t7WUuJeko7"); // Take optional viewpoint_id argument, otherwise launches at a random location
34 |
35 | while (true) {
36 |
37 | // Get the state
38 | SimStatePtr state = sim->getState(); // SimStatePtr is std::shared_ptr<SimState>
39 |
40 | // Which consists of:
41 | unsigned int n = state->step;
42 | cv::Mat rgb = state->rgb; // Use OpenCV CV_8UC3 type (i.e. 8bit color rgb)
43 | cv::Mat depth = state->depth; // ignore for now
44 | ViewpointPtr location = state->location; // Need a class to hold viewpoint id, and cv::Point3 for x,y,z location of a viewpoint
45 | float heading = state->heading;
46 | float elevation = state->elevation; // camera parameters
47 | std::vector<ViewpointPtr> reachable = state->navigableLocations; // Where we can move to,
48 | int locationIdx = 0;
49 | float headingChange = 0;
50 | float elevationChange = 0;
51 |
52 | cv::imshow("displaywin", rgb);
53 | // Make action (index into reachable, heading change in rad, elevation change in rad)
54 | // E.g. an RL agent will sample an action here. A reward can be determined based on location, heading, elevation but that is dataset dependent
55 | int key = cv::waitKey(1);
56 |
57 | switch (key) {
58 | case -1:
59 | continue;
60 | case '1':
61 | case '2':
62 | case '3':
63 | case '4':
64 | case '5':
65 | case '6':
66 | case '7':
67 | case '8':
68 | case '9':
69 | locationIdx = key - '0';
70 | if (locationIdx >= reachable.size()) {
71 | locationIdx = 0;
72 | }
73 | break;
74 | case 'q':
75 | return 0;
76 | break;
77 | case 81:
78 | headingChange = -M_PI / 180;
79 | break;
80 | case 82:
81 | elevationChange = M_PI / 180;
82 | break;
83 | case 83:
84 | headingChange = M_PI / 180;
85 | break;
86 | case 84:
87 | elevationChange = -M_PI / 180;
88 | break;
89 | }
90 | sim->makeAction(locationIdx, headingChange, elevationChange);
91 |
92 | }
93 | std::cout << "Episode finished.\n";
94 | }
95 |
96 | // It will be done automatically in destructor but after close You can init it again with different settings.
97 | sim->close();
98 | delete sim;
99 |
100 | return 0;
101 | }
102 |
--------------------------------------------------------------------------------
/src/driver/random_agent.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 |
6 | #include
7 |
8 | #include "MatterSim.hpp"
9 |
10 | using namespace mattersim;
11 |
12 | #ifndef M_PI
13 | #define M_PI (3.14159265358979323846)
14 | #endif
15 |
16 | int main(int argc, char *argv[]) {
17 | // std::mt19937 random_source(0);
18 | std::minstd_rand random_source(0);
19 | std::uniform_real_distribution angledist(-M_PI / 4, M_PI / 4);
20 | Simulator *sim = new Simulator();
21 |
22 | // Sets resolution. Default is 320X240
23 | sim->setCameraResolution(640,480);
24 |
25 | // Initialize the simulator. Further camera configuration won't take any effect from now on.
26 | sim->init();
27 |
28 | // Run this many episodes
29 | int episodes = 10;
30 |
31 | for (int i = 0; i < episodes; ++i) {
32 |
33 | // Starts a new episode. It is not needed right after init() but it doesn't cost much and the loop is nicer.
34 | sim->newEpisode("2t7WUuJeko7"); // Take optional viewpoint_id argument, otherwise launches at a random location
35 |
36 | for (int step = 0;step < 10;++step) {
37 | // Get the state
38 | SimStatePtr state = sim->getState(); // SimStatePtr is std::shared_ptr
39 |
40 | // Which consists of:
41 | unsigned int n = state->step;
42 | cv::Mat rgb = state->rgb; // Use OpenCV CV_8UC3 type (i.e. 8bit color rgb)
43 | cv::Mat depth = state->depth; // ignore for now
44 | ViewpointPtr location = state->location; // Need a class to hold viewpoint id, and cv::Point3 for x,y,z location of a viewpoint
45 | float heading = state->heading;
46 | float elevation = state->elevation; // camera parameters
47 | std::vector reachable = state->navigableLocations; // Where we can move to,
48 | std::uniform_int_distribution<> distribution(0, reachable.size() - 1);
49 | int locationIdx = distribution(random_source);
50 | float headingChange = angledist(random_source);
51 | float elevationChange = angledist(random_source);
52 |
53 | /* std::stringstream ss;
54 | ss << "rgb_" << i << "_" << step << ".png";
55 | cv::imwrite(ss.str(), rgb);*/
56 |
57 | // Make action (index into reachable, heading change in rad, elevation change in rad)
58 | // E.g. an RL agent will sample an action here. A reward can be determined based on location, heading, elevation but that is dataset dependent
59 | sim->makeAction(locationIdx, headingChange, elevationChange);
60 | std::this_thread::sleep_for(std::chrono::milliseconds{100});
61 | }
62 | }
63 |
64 | // It will be done automatically in destructor but after close You can init it again with different settings.
65 | sim->close();
66 | delete sim;
67 |
68 | return 0;
69 | }
70 |
--------------------------------------------------------------------------------
/src/lib/Benchmark.cpp:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | #include "Benchmark.hpp"
4 |
5 | namespace mattersim {
6 |
7 | Timer::Timer()
8 | : running_(false),
9 | elapsed_(0) {}
10 |
11 | void Timer::Start() {
12 | if (!running()) {
13 | start_ = std::chrono::steady_clock::now();
14 | running_ = true;
15 | }
16 | }
17 |
18 | void Timer::Stop() {
19 | if (running()) {
20 | elapsed_ += std::chrono::steady_clock::now() - start_;
21 | running_ = false;
22 | }
23 | }
24 |
25 | void Timer::Reset() {
26 | if (running()) {
27 | running_ = false;
28 | }
29 | elapsed_ = std::chrono::steady_clock::duration(0);
30 | }
31 |
32 | float Timer::MicroSeconds() {
33 | return std::chrono::duration_cast(elapsed_).count();
34 | }
35 |
36 | float Timer::MilliSeconds() {
37 | return std::chrono::duration_cast(elapsed_).count();
38 | }
39 |
40 | float Timer::Seconds() {
41 | return std::chrono::duration_cast(elapsed_).count();
42 | }
43 |
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/src/lib/fragment.sh:
--------------------------------------------------------------------------------
1 | #version 120
2 |
3 | varying vec3 texCoord;
4 | uniform samplerCube cubemap;
5 |
6 | void main (void) {
7 | gl_FragColor = textureCube(cubemap, vec3(-texCoord.x, texCoord.y, texCoord.z));
8 | }
9 |
--------------------------------------------------------------------------------
/src/lib/vertex.sh:
--------------------------------------------------------------------------------
1 | #version 120
2 |
3 | attribute vec3 vertex;
4 | varying vec3 texCoord;
5 | uniform mat4 PVM;
6 |
7 | void main() {
8 | gl_Position = PVM * vec4(vertex, 1.0);
9 | texCoord = vertex;
10 | }
11 |
--------------------------------------------------------------------------------
/src/lib_python/MatterSimPython.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include "MatterSim.hpp"
6 |
7 | namespace py = pybind11;
8 |
9 | namespace mattersim {
10 | class ViewPointPython {
11 | public:
12 | ViewPointPython(ViewpointPtr locptr) {
13 | viewpointId = locptr->viewpointId;
14 | ix = locptr->ix;
15 | point.append(locptr->point.x);
16 | point.append(locptr->point.y);
17 | point.append(locptr->point.z);
18 | rel_heading = locptr->rel_heading;
19 | rel_elevation = locptr->rel_elevation;
20 | rel_distance = locptr->rel_distance;
21 | }
22 | std::string viewpointId;
23 | unsigned int ix;
24 | py::list point;
25 | double rel_heading;
26 | double rel_elevation;
27 | double rel_distance;
28 | };
29 |
30 | class SimStatePython {
31 | public:
32 | SimStatePython(SimStatePtr state, bool renderingEnabled)
33 | : step{state->step},
34 | viewIndex{state->viewIndex},
35 | location{state->location},
36 | heading{state->heading},
37 | elevation{state->elevation} {
38 | if (renderingEnabled) {
39 | npy_intp colorShape[3] {state->rgb.rows, state->rgb.cols, 3};
40 | rgb = matToNumpyArray(3, colorShape, NPY_UBYTE, (void*)state->rgb.data);
41 | }
42 | scanId = state->scanId;
43 | for (auto viewpoint : state->navigableLocations) {
44 | navigableLocations.append(ViewPointPython{viewpoint});
45 | }
46 | }
47 | std::string scanId;
48 | unsigned int step;
49 | unsigned int viewIndex;
50 | py::object rgb;
51 | ViewPointPython location;
52 | double heading;
53 | double elevation;
54 | py::list navigableLocations;
55 | private:
56 | py::object matToNumpyArray(int dims, npy_intp *shape, int type, void *data) {
57 | //colorDims, this->colorShape, NPY_UBYTE, this->state->screenBuffer->data());
58 | PyObject *pyArray = PyArray_SimpleNewFromData(dims, shape, type, data);
59 | /* This line makes a copy: */
60 | PyObject *pyArrayCopied = PyArray_FROM_OTF(pyArray, type, NPY_ARRAY_ENSURECOPY | NPY_ARRAY_ENSUREARRAY);
61 | /* And this line gets rid of the old object which caused a memory leak: */
62 | Py_DECREF(pyArray);
63 |
64 | py::handle numpyArrayHandle = py::handle(pyArrayCopied);
65 | py::object numpyArray = py::reinterpret_steal(numpyArrayHandle);
66 |
67 | return numpyArray;
68 | }
69 | };
70 | #if PY_MAJOR_VERSION >= 3
71 | void* init_numpy() {
72 | import_array();
73 | return nullptr;
74 | }
75 | #else
76 | void init_numpy() {
77 | import_array();
78 | }
79 | #endif
80 | class SimulatorPython {
81 | public:
82 | SimulatorPython() {
83 | init_numpy();
84 | }
85 | void setDatasetPath(std::string path) {
86 | sim.setDatasetPath(path);
87 | }
88 | void setNavGraphPath(std::string path) {
89 | sim.setNavGraphPath(path);
90 | }
91 | void setCameraResolution(int width, int height) {
92 | sim.setCameraResolution(width, height);
93 | }
94 | void setCameraVFOV(double vfov) {
95 | sim.setCameraVFOV(vfov);
96 | }
97 | void setRenderingEnabled(bool value){
98 | sim.setRenderingEnabled(value);
99 | }
100 | void setDiscretizedViewingAngles(bool value){
101 | sim.setDiscretizedViewingAngles(value);
102 | }
103 | void init() {
104 | sim.init();
105 | }
106 | void setSeed(int seed) {
107 | sim.setSeed(seed);
108 | }
109 | bool setElevationLimits(double min, double max) {
110 | return sim.setElevationLimits(min, max);
111 | }
112 | void newEpisode(const std::string& scanId, const std::string& viewpointId=std::string(),
113 | double heading=0, double elevation=0) {
114 | sim.newEpisode(scanId, viewpointId, heading, elevation);
115 | }
116 | SimStatePython *getState() {
117 | return new SimStatePython(sim.getState(), sim.renderingEnabled);
118 | }
119 | void makeAction(int index, double heading, double elevation) {
120 | sim.makeAction(index, heading, elevation);
121 | }
122 | void close() {
123 | sim.close();
124 | }
125 | private:
126 | Simulator sim;
127 | };
128 | }
129 |
130 | using namespace mattersim;
131 |
132 | PYBIND11_MODULE(MatterSim, m) {
133 | py::class_(m, "ViewPoint")
134 | .def_readonly("viewpointId", &ViewPointPython::viewpointId)
135 | .def_readonly("ix", &ViewPointPython::ix)
136 | .def_readonly("point", &ViewPointPython::point)
137 | .def_readonly("rel_heading", &ViewPointPython::rel_heading)
138 | .def_readonly("rel_elevation", &ViewPointPython::rel_elevation)
139 | .def_readonly("rel_distance", &ViewPointPython::rel_distance);
140 | py::class_(m, "SimState")
141 | .def_readonly("scanId", &SimStatePython::scanId)
142 | .def_readonly("step", &SimStatePython::step)
143 | .def_readonly("rgb", &SimStatePython::rgb)
144 | .def_readonly("location", &SimStatePython::location)
145 | .def_readonly("heading", &SimStatePython::heading)
146 | .def_readonly("elevation", &SimStatePython::elevation)
147 | .def_readonly("viewIndex", &SimStatePython::viewIndex)
148 | .def_readonly("navigableLocations", &SimStatePython::navigableLocations);
149 | py::class_(m, "Simulator")
150 | .def(py::init<>())
151 | .def("setDatasetPath", &SimulatorPython::setDatasetPath)
152 | .def("setNavGraphPath", &SimulatorPython::setNavGraphPath)
153 | .def("setCameraResolution", &SimulatorPython::setCameraResolution)
154 | .def("setCameraVFOV", &SimulatorPython::setCameraVFOV)
155 | .def("setRenderingEnabled", &SimulatorPython::setRenderingEnabled)
156 | .def("setDiscretizedViewingAngles", &SimulatorPython::setDiscretizedViewingAngles)
157 | .def("init", &SimulatorPython::init)
158 | .def("setSeed", &SimulatorPython::setSeed)
159 | .def("setElevationLimits", &SimulatorPython::setElevationLimits)
160 | .def("newEpisode", &SimulatorPython::newEpisode)
161 | .def("getState", &SimulatorPython::getState, py::return_value_policy::take_ownership)
162 | .def("makeAction", &SimulatorPython::makeAction)
163 | .def("close", &SimulatorPython::close);
164 | }
165 |
166 |
167 |
--------------------------------------------------------------------------------
/src/test/main.cpp:
--------------------------------------------------------------------------------
1 | #define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
2 |
3 | #include
4 | #include
5 | #include
6 | #include
7 |
8 | #include
9 | #include
10 |
11 | #include "Catch.hpp"
12 | #include "MatterSim.hpp"
13 |
14 |
15 | using namespace mattersim;
16 |
17 | #ifndef M_PI
18 | #define M_PI (3.14159265358979323846)
19 | #endif
20 |
21 | // Note: tests tagged with 'Rendering' will require the dataset to be installed
22 | // To run tests without the dataset installed:
23 | // $ ./build/tests exclude:[Rendering]
24 |
25 |
26 | double radians(double deg) {
27 | return deg * M_PI / 180.0;
28 | }
29 |
30 | double degrees(double rad) {
31 | return (rad * 180.0) / M_PI;
32 | }
33 |
34 | float heading[10] = { 10, 350, 350, 1, 90, 180, 90, 270, 90, 270 };
35 | float heading_chg[10] = { -20, -360, 371, 89, 90, -90, -180, -180, -180, 0 };
36 | float discreteHeading[10] = { 0, 330, 300, 330, 0, 30, 0, 330, 300, 270 };
37 | float elevation[10] = { 10, 10, -26, -40, -40, -40, 50, 50, 40, 0 };
38 | float elevation_chg[10] = { 0, -36, -30, -10, 0, 90, 5, -10, -40, 0 };
39 | float discreteElevation[10] = { 0, 0, -30, -30, -30, -30, 0, 30, 0, -30 };
40 | unsigned int viewIndex[10] = { 12, 23, 10, 11, 0, 1, 12, 35, 22, 9 };
41 |
42 | TEST_CASE( "Continuous Motion", "[Actions]" ) {
43 |
44 | std::vector scanIds {"2t7WUuJeko7", "17DRP5sb8fy"};
45 | std::vector viewpointIds {"cc34e9176bfe47ebb23c58c165203134", "5b9b2794954e4694a45fc424a8643081"};
46 | Simulator sim;
47 | sim.setCameraResolution(200,100); // width,height
48 | sim.setCameraVFOV(radians(45)); // 45deg vfov, 90deg hfov
49 | sim.setRenderingEnabled(false);
50 | CHECK(sim.setElevationLimits(radians(-40),radians(50)));
51 | REQUIRE_NOTHROW(sim.init());
52 | for (int i = 0; i < scanIds.size(); ++i) {
53 | std::string scanId = scanIds[i];
54 | std::string viewpointId = viewpointIds[i];
55 | REQUIRE_NOTHROW(sim.newEpisode(scanId, viewpointId, radians(heading[0]), radians(elevation[0])));
56 | for (int t = 0; t < 10; ++t ) {
57 | INFO("i=" << i << ", t=" << t);
58 | SimStatePtr state = sim.getState();
59 | CHECK( state->scanId == scanId );
60 | CHECK( state->step == t );
61 | CHECK( state->heading == Approx(radians(heading[t])) );
62 | CHECK( state->elevation == Approx(radians(elevation[t])) );
63 | CHECK( state->rgb.rows == 100 );
64 | CHECK( state->rgb.cols == 200 );
65 | CHECK( state->location->viewpointId == viewpointId );
66 | CHECK( state->viewIndex == 0 ); // not active
67 | std::vector actions = state->navigableLocations;
68 | int ix = t % actions.size(); // select an action
69 | sim.makeAction(ix, radians(heading_chg[t]), radians(elevation_chg[t]));
70 | viewpointId = actions[ix]->viewpointId;
71 | }
72 | }
73 | REQUIRE_NOTHROW(sim.close());
74 | }
75 |
76 | TEST_CASE( "Discrete Motion", "[Actions]" ) {
77 |
78 | std::vector scanIds {"2t7WUuJeko7", "17DRP5sb8fy"};
79 | std::vector viewpointIds {"cc34e9176bfe47ebb23c58c165203134", "5b9b2794954e4694a45fc424a8643081"};
80 | Simulator sim;
81 | sim.setCameraResolution(200,100); // width,height
82 | sim.setCameraVFOV(radians(45)); // 45deg vfov, 90deg hfov
83 | sim.setRenderingEnabled(false);
84 | sim.setDiscretizedViewingAngles(true);
85 | CHECK(sim.setElevationLimits(radians(-10),radians(10))); // should be disregarded
86 | REQUIRE_NOTHROW(sim.init());
87 | for (int i = 0; i < scanIds.size(); ++i) {
88 | std::string scanId = scanIds[i];
89 | std::string viewpointId = viewpointIds[i];
90 | REQUIRE_NOTHROW(sim.newEpisode(scanId, viewpointId, radians(heading[0]), radians(elevation[0])));
91 | for (int t = 0; t < 10; ++t ) {
92 | INFO("i=" << i << ", t=" << t);
93 | SimStatePtr state = sim.getState();
94 | CHECK( state->scanId == scanId );
95 | CHECK( state->step == t );
96 | CHECK( state->heading == Approx(radians(discreteHeading[t])) );
97 | CHECK( state->elevation == Approx(radians(discreteElevation[t])) );
98 | CHECK( state->rgb.rows == 100 );
99 | CHECK( state->rgb.cols == 200 );
100 | CHECK( state->location->viewpointId == viewpointId );
101 | CHECK( state->viewIndex == viewIndex[t] );
102 | std::vector actions = state->navigableLocations;
103 | int ix = t % actions.size(); // select an action
104 | sim.makeAction(ix, radians(heading_chg[t]), radians(elevation_chg[t]));
105 | viewpointId = actions[ix]->viewpointId;
106 | }
107 | }
108 | REQUIRE_NOTHROW(sim.close());
109 | }
110 |
111 | TEST_CASE( "Robot Relative Coords", "[Actions]" ) {
112 |
113 | std::vector scanIds {"2t7WUuJeko7", "17DRP5sb8fy"};
114 | std::vector viewpointIds {"cc34e9176bfe47ebb23c58c165203134", "5b9b2794954e4694a45fc424a8643081"};
115 | Simulator sim;
116 | sim.setCameraResolution(200,100); // width,height
117 | sim.setCameraVFOV(radians(45)); // 45deg vfov, 90deg hfov
118 | sim.setRenderingEnabled(false);
119 | CHECK(sim.setElevationLimits(radians(-40),radians(50)));
120 | REQUIRE_NOTHROW(sim.init());
121 | for (int i = 0; i < scanIds.size(); ++i) {
122 | std::string scanId = scanIds[i];
123 | std::string viewpointId = viewpointIds[i];
124 | REQUIRE_NOTHROW(sim.newEpisode(scanId, viewpointId, radians(heading[0]), radians(elevation[0])));
125 | for (int t = 0; t < 10; ++t ) {
126 | INFO("i=" << i << ", t=" << t);
127 | SimStatePtr state = sim.getState();
128 | cv::Point3f curr = state->location->point;
129 | std::vector actions = state->navigableLocations;
130 | double last_angle = 0.0;
131 | int k = 0;
132 | for (auto loc: actions ){
133 | if (k == 0) {
134 | CHECK(state->location->rel_heading == Approx(0));
135 | CHECK(state->location->rel_elevation == Approx(0));
136 | CHECK(state->location->rel_distance == Approx(0));
137 | k++;
138 | continue;
139 | }
140 | double curr_angle = sqrt(loc->rel_heading*loc->rel_heading + loc->rel_elevation*loc->rel_elevation);
141 | // Should be getting further from the centre of the image
142 | CHECK(curr_angle >= last_angle);
143 | last_angle = curr_angle;
144 | // Robot rel coordinates should describe the position
145 | double h = state->heading + loc->rel_heading;
146 | double e = state->elevation + loc->rel_elevation;
147 | INFO("k="<< k << ", heading=" << degrees(state->heading) << ", rel_heading=" << degrees(loc->rel_heading));
148 | INFO("elevation=" << degrees(state->elevation) << ", rel_elevation=" << degrees(loc->rel_elevation));
149 | INFO("rel_distance=" << loc->rel_distance);
150 | INFO("curr=(" << curr.x << ", " << curr.y << ", " << curr.z << ")");
151 | INFO("targ=(" << loc->point.x << ", " << loc->point.y << ", " << loc->point.z << ")");
152 | INFO("diff=(" << loc->point.x-curr.x << ", " << loc->point.y-curr.y << ", " << loc->point.z-curr.z << ")");
153 | cv::Point3f offset(sin(h)*cos(e)*loc->rel_distance, cos(h)*cos(e)*loc->rel_distance, sin(e)*loc->rel_distance);
154 | INFO("calc diff=(" << offset.x << ", " << offset.y << ", " << offset.z << ")");
155 | cv::Point3f target = curr + offset;
156 | REQUIRE(loc->point.x == Approx(target.x));
157 | REQUIRE(loc->point.y == Approx(target.y));
158 | REQUIRE(loc->point.z == Approx(target.z));
159 | k++;
160 | }
161 | int ix = t % actions.size(); // select an action
162 | sim.makeAction(ix, radians(heading_chg[t]), radians(elevation_chg[t]));
163 | viewpointId = actions[ix]->viewpointId;
164 | }
165 | }
166 | REQUIRE_NOTHROW(sim.close());
167 | }
168 |
169 | TEST_CASE( "Navigable Locations", "[Actions]" ) {
170 |
171 | std::vector scanIds;
172 | std::ifstream infile ("./connectivity/scans.txt", std::ios_base::in);
173 | std::string scanId;
174 | while (infile >> scanId) {
175 | scanIds.push_back (scanId);
176 | }
177 | Simulator sim;
178 | sim.setCameraResolution(20,20); // don't really care about the image
179 | sim.setCameraVFOV(radians(90)); // 90deg vfov, 90deg hfov
180 | double half_hfov = M_PI/4;
181 | sim.setRenderingEnabled(false);
182 | sim.setSeed(1);
183 | REQUIRE_NOTHROW(sim.init());
184 | for (auto scanId : scanIds) {
185 | REQUIRE_NOTHROW(sim.newEpisode(scanId)); // start anywhere, but repeatably so
186 |
187 | // Load connectivity graph
188 | Json::Value root;
189 | auto navGraphFile = "./connectivity/" + scanId + "_connectivity.json";
190 | std::ifstream ifs(navGraphFile, std::ifstream::in);
191 | ifs >> root;
192 | // Find included viewpoints
193 | std::vector included;
194 | SimStatePtr state = sim.getState();
195 | for (auto viewpoint : root) {
196 | included.push_back(viewpoint["included"].asBool());
197 | if (viewpoint["image_id"].asString() == state->location->viewpointId) {
198 | INFO("Don't let newEpisode spawn at an excluded viewpoint");
199 | CHECK(included.back());
200 | }
201 | }
202 |
203 | // Check a short episode
204 | for (int t = 0; t < 10; ++t ) {
205 | state = sim.getState();
206 | CHECK( state->scanId == scanId );
207 | CHECK( state->step == t );
208 |
209 | // navigableLocations from sim into a map
210 | std::unordered_map locs;
211 | for (auto v : state->navigableLocations) {
212 | locs[v->viewpointId] = v;
213 | }
214 |
215 | // Find current viewpoint in json file
216 | Json::Value currentViewpoint;
217 | for (auto viewpoint : root) {
218 | auto viewpointId = viewpoint["image_id"].asString();
219 | if (viewpointId == state->location->viewpointId) {
220 | currentViewpoint = viewpoint;
221 | break;
222 | }
223 | }
224 | REQUIRE_FALSE(currentViewpoint.empty());
225 |
226 | // Check navigableLocations are correct
227 | int navigableCount = 0;
228 | for (int i = 0; i < included.size(); ++i) {
229 | std::string curr = currentViewpoint["image_id"].asString();
230 | std::string target = root[i]["image_id"].asString();
231 | // Read current position
232 | float x = currentViewpoint["pose"][3].asFloat();
233 | float y = currentViewpoint["pose"][7].asFloat();
234 | float z = currentViewpoint["pose"][11].asFloat();
235 | // Read target position
236 | float tar_x = root[i]["pose"][3].asFloat();
237 | float tar_y = root[i]["pose"][7].asFloat();
238 | float tar_z = root[i]["pose"][11].asFloat();
239 | if (curr == target) {
240 | INFO("Viewpoint " << target << " must be self-reachable");
241 | // Every viewpoint must be self reachable
242 | REQUIRE(locs.find(target) != locs.end());
243 | // We should never be at a not included viewpoint
244 | CHECK(included[i]);
245 | ViewpointPtr target_viewpoint = locs[target];
246 | CHECK(target_viewpoint->point.x == Approx(tar_x));
247 | CHECK(target_viewpoint->point.y == Approx(tar_y));
248 | CHECK(target_viewpoint->point.z == Approx(tar_z));
249 | navigableCount++;
250 | } else if (!currentViewpoint["unobstructed"][i].asBool()) {
251 | // obstructed
252 | INFO("Viewpoint " << target << " is obstructed from "
253 | << curr << ", can't be a navigableLocation");
254 | CHECK(locs.find(target) == locs.end());
255 | } else if (!included[i]) {
256 | INFO("Viewpoint " << target << " is excluded,"
257 | << " can't be a navigableLocation");
258 | CHECK(locs.find(target) == locs.end());
259 | } else {
260 | // check if this viewpoint is visible
261 | INFO("atan2 " << atan2(tar_y - y, tar_x - x));
262 | float viewpointHeading = M_PI/2 - atan2(tar_y - y, tar_x - x);
263 | // convert interval [-0.5pi, 1.5pi] to interval [0, 2pi]
264 | if (viewpointHeading < 0) {
265 | viewpointHeading += 2*M_PI;
266 | }
267 | bool visible = fabs(state->heading - viewpointHeading) <= half_hfov ||
268 | fabs(state->heading + 2.0 * M_PI - viewpointHeading) <= half_hfov ||
269 | fabs(state->heading - (viewpointHeading + 2.0 * M_PI)) <= half_hfov;
270 | INFO("Estimated heading " << viewpointHeading << ", agent heading " << state->heading
271 | << ", visible " << visible);
272 | if (visible) {
273 | INFO("Viewpoint " << target << " (" << tar_x << ", " << tar_y << ", " << tar_z
274 | << ") should be reachable from " << curr << " (" << x << ", " << y << ", " << z
275 | << ") with heading " << state->heading);
276 | REQUIRE(locs.find(target) != locs.end());
277 | ViewpointPtr target_viewpoint = locs[target];
278 | CHECK(target_viewpoint->point.x == Approx(tar_x));
279 | CHECK(target_viewpoint->point.y == Approx(tar_y));
280 | CHECK(target_viewpoint->point.z == Approx(tar_z));
281 | navigableCount++;
282 | } else {
283 | INFO("Viewpoint " << target << " (" << tar_x << ", " << tar_y << ", " << tar_z
284 | << ") is not visible in camera from " << curr << " (" << x << ", " << y << ", " << z
285 | << ") with heading " << state->heading << ", can't be a navigableLocation");
286 | REQUIRE(locs.find(target) == locs.end());
287 | }
288 | }
289 | }
290 | CHECK(navigableCount == state->navigableLocations.size());
291 |
292 | // Move somewhere else
293 | std::vector actions = state->navigableLocations;
294 | int ix = t % actions.size(); // select an action
295 | sim.makeAction(ix, radians(heading_chg[t]), radians(elevation_chg[t]));
296 | }
297 | }
298 | REQUIRE_NOTHROW(sim.close());
299 | }
300 |
301 |
302 | TEST_CASE( "RGB Image", "[Rendering]" ) {
303 |
304 | Simulator sim;
305 | sim.setCameraResolution(640,480); // width,height
306 | sim.setCameraVFOV(radians(60)); // 60deg vfov, 80deg hfov
307 | CHECK(sim.setElevationLimits(radians(-40),radians(50)));
308 | REQUIRE_NOTHROW(sim.init());
309 | Json::Value root;
310 | std::string testSpecFile{"src/test/rendertest_spec.json"};
311 | std::ifstream ifs(testSpecFile, std::ifstream::in);
312 | if (ifs.fail()){
313 | throw std::invalid_argument( "Could not open test spec file: " + testSpecFile );
314 | }
315 | ifs >> root;
316 | for (auto testcase : root) {
317 | auto imgfile = testcase["reference_image"].asString();
318 | auto scanId = testcase["scanId"].asString();
319 | auto viewpointId = testcase["viewpointId"].asString();
320 | auto heading = testcase["heading"].asFloat();
321 | auto elevation = testcase["elevation"].asFloat();
322 |
323 | REQUIRE_NOTHROW(sim.newEpisode(scanId, viewpointId, heading, elevation));
324 |
325 | SimStatePtr state = sim.getState();
326 | auto reference_image = cv::imread("webgl_imgs/"+imgfile);
327 | cv::imwrite("sim_imgs/"+imgfile, state->rgb); // save for later comparison
328 |
329 | //cv::imshow("WebGL", reference_image);
330 | //cv::imshow("MatterSim", state->rgb);
331 | //int key = cv::waitKey(100);
332 |
333 | double err = cv::norm(reference_image, state->rgb, CV_L2);
334 | err /= reference_image.rows * reference_image.cols;
335 | CHECK(err < 0.15);
336 | }
337 | REQUIRE_NOTHROW(sim.close());
338 | }
339 |
--------------------------------------------------------------------------------
/src/test/rendertest_spec.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "scanId": "17DRP5sb8fy",
4 | "viewpointId": "85c23efeaecd4d43a7dcd5b90137179e",
5 | "elevation": 0.008557380839564054,
6 | "heading": 2.551961945320492,
7 | "reference_image": "17DRP5sb8fy_85c23efeaecd4d43a7dcd5b90137179e_2.551961945320492_0.008557380839564054.png"
8 | },
9 | {
10 | "scanId": "1LXtFkjw3qL",
11 | "viewpointId": "187589bb7d4644f2943079fb949c0be9",
12 | "elevation": 0.0004921836022802584,
13 | "heading": 1.8699330579409539,
14 | "reference_image": "1LXtFkjw3qL_187589bb7d4644f2943079fb949c0be9_1.8699330579409539_0.0004921836022802584.png"
15 | },
16 | {
17 | "scanId": "1pXnuDYAj8r",
18 | "viewpointId": "163d61ac7edb43fb958c5d9e69ae11ad",
19 | "elevation": -0.02444352614304746,
20 | "heading": 4.626331047551077,
21 | "reference_image": "1pXnuDYAj8r_163d61ac7edb43fb958c5d9e69ae11ad_4.626331047551077_-0.02444352614304746.png"
22 | },
23 | {
24 | "scanId": "29hnd4uzFmX",
25 | "viewpointId": "1576d62e7bbb45e8a5ef9e7bb37b1839",
26 | "elevation": -0.0006838914039405167,
27 | "heading": 5.844119909926444,
28 | "reference_image": "29hnd4uzFmX_1576d62e7bbb45e8a5ef9e7bb37b1839_5.844119909926444_-0.0006838914039405167.png"
29 | },
30 | {
31 | "scanId": "2azQ1b91cZZ",
32 | "viewpointId": "3daad58ad53742038e50d62e91f84e7b",
33 | "elevation": 0.016732869758208434,
34 | "heading": 3.1736484087962933,
35 | "reference_image": "2azQ1b91cZZ_3daad58ad53742038e50d62e91f84e7b_3.1736484087962933_0.016732869758208434.png"
36 | },
37 | {
38 | "scanId": "2n8kARJN3HM",
39 | "viewpointId": "94ac3cea52ec455993f8562f78da3be1",
40 | "elevation": -0.0009188787844489273,
41 | "heading": 2.604601935142565,
42 | "reference_image": "2n8kARJN3HM_94ac3cea52ec455993f8562f78da3be1_2.604601935142565_-0.0009188787844489273.png"
43 | },
44 | {
45 | "scanId": "2t7WUuJeko7",
46 | "viewpointId": "529f006f8293406da0b506defd2891a5",
47 | "elevation": -0.013788837143969411,
48 | "heading": 0.032985516949381344,
49 | "reference_image": "2t7WUuJeko7_529f006f8293406da0b506defd2891a5_0.032985516949381344_-0.013788837143969411.png"
50 | },
51 | {
52 | "scanId": "5LpN3gDmAk7",
53 | "viewpointId": "bda8025f20404048a77381e9e0dc0ccf",
54 | "elevation": -0.01083211073205187,
55 | "heading": 5.325207878739601,
56 | "reference_image": "5LpN3gDmAk7_bda8025f20404048a77381e9e0dc0ccf_5.325207878739601_-0.01083211073205187.png"
57 | },
58 | {
59 | "scanId": "5q7pvUzZiYa",
60 | "viewpointId": "397403366d784caf804d741f32fd68b9",
61 | "elevation": -0.0007063598518199811,
62 | "heading": 2.8746465006968234,
63 | "reference_image": "5q7pvUzZiYa_397403366d784caf804d741f32fd68b9_2.8746465006968234_-0.0007063598518199811.png"
64 | },
65 | {
66 | "scanId": "5ZKStnWn8Zo",
67 | "viewpointId": "c76b52856e7c4f2a9a4419000c8e646a",
68 | "elevation": -0.02922217527541366,
69 | "heading": 4.13470589902238,
70 | "reference_image": "5ZKStnWn8Zo_c76b52856e7c4f2a9a4419000c8e646a_4.13470589902238_-0.02922217527541366.png"
71 | },
72 | {
73 | "scanId": "759xd9YjKW5",
74 | "viewpointId": "2343ef3bf04a4433af62f0d527d7512a",
75 | "elevation": -0.016938006310169448,
76 | "heading": 3.5451019786019264,
77 | "reference_image": "759xd9YjKW5_2343ef3bf04a4433af62f0d527d7512a_3.5451019786019264_-0.016938006310169448.png"
78 | },
79 | {
80 | "scanId": "7y3sRwLe3Va",
81 | "viewpointId": "9bbf903d50da4ffd9e5d1fb7c9f4d69b",
82 | "elevation": 0.008361841032265524,
83 | "heading": 1.7348660165523566,
84 | "reference_image": "7y3sRwLe3Va_9bbf903d50da4ffd9e5d1fb7c9f4d69b_1.7348660165523566_0.008361841032265524.png"
85 | },
86 | {
87 | "scanId": "8194nk5LbLH",
88 | "viewpointId": "c9e8dc09263e4d0da77d16de0ecddd39",
89 | "elevation": 0.008533161479170466,
90 | "heading": 4.05504292862083,
91 | "reference_image": "8194nk5LbLH_c9e8dc09263e4d0da77d16de0ecddd39_4.05504292862083_0.008533161479170466.png"
92 | },
93 | {
94 | "scanId": "82sE5b5pLXE",
95 | "viewpointId": "056a491afa534b17bac36f4f5898462a",
96 | "elevation": -0.0037883068413356496,
97 | "heading": 1.689393931320027,
98 | "reference_image": "82sE5b5pLXE_056a491afa534b17bac36f4f5898462a_1.689393931320027_-0.0037883068413356496.png"
99 | },
100 | {
101 | "scanId": "8WUmhLawc2A",
102 | "viewpointId": "d21aae0b5d944f27a0074525c803fc9f",
103 | "elevation": -0.04510889155759994,
104 | "heading": 3.047458184407221,
105 | "reference_image": "8WUmhLawc2A_d21aae0b5d944f27a0074525c803fc9f_3.047458184407221_-0.04510889155759994.png"
106 | },
107 | {
108 | "scanId": "ac26ZMwG7aT",
109 | "viewpointId": "efeef7cc82c84690addb0bf415f075ea",
110 | "elevation": -0.013447513736072197,
111 | "heading": 0.07434352566701552,
112 | "reference_image": "ac26ZMwG7aT_efeef7cc82c84690addb0bf415f075ea_0.07434352566701552_-0.013447513736072197.png"
113 | },
114 | {
115 | "scanId": "ARNzJeq3xxb",
116 | "viewpointId": "9a671e6915de4eb897f45fee8bf2031d",
117 | "elevation": 0.02583868533558965,
118 | "heading": 5.616355886953764,
119 | "reference_image": "ARNzJeq3xxb_9a671e6915de4eb897f45fee8bf2031d_5.616355886953764_0.02583868533558965.png"
120 | },
121 | {
122 | "scanId": "B6ByNegPMKs",
123 | "viewpointId": "e3a65955df26467581c32613c4e9f865",
124 | "elevation": 0.007265625492957138,
125 | "heading": 5.230794959607039,
126 | "reference_image": "B6ByNegPMKs_e3a65955df26467581c32613c4e9f865_5.230794959607039_0.007265625492957138.png"
127 | },
128 | {
129 | "scanId": "b8cTxDM8gDG",
130 | "viewpointId": "f2944e0b66b9461994a7f757582f9bc3",
131 | "elevation": -0.007543204141144086,
132 | "heading": 0.0853092784395515,
133 | "reference_image": "b8cTxDM8gDG_f2944e0b66b9461994a7f757582f9bc3_0.0853092784395515_-0.007543204141144086.png"
134 | },
135 | {
136 | "scanId": "cV4RVeZvu5T",
137 | "viewpointId": "1b321779a4374c2b952c51820daa9e6c",
138 | "elevation": 0.07914721704610106,
139 | "heading": 6.266463179566256,
140 | "reference_image": "cV4RVeZvu5T_1b321779a4374c2b952c51820daa9e6c_6.266463179566256_0.07914721704610106.png"
141 | }
142 | ]
--------------------------------------------------------------------------------
/tasks/R2R/data/download.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | wget https://www.dropbox.com/s/hh5qec8o5urcztn/R2R_train.json -P tasks/R2R/data/
4 | wget https://www.dropbox.com/s/8ye4gqce7v8yzdm/R2R_val_seen.json -P tasks/R2R/data/
5 | wget https://www.dropbox.com/s/p6hlckr70a07wka/R2R_val_unseen.json -P tasks/R2R/data/
6 | wget https://www.dropbox.com/s/w4pnbwqamwzdwd1/R2R_test.json -P tasks/R2R/data/
7 | wget http://www.cs.unc.edu/~airsplay/aug_paths.json -P tasks/R2R/data/
8 |
--------------------------------------------------------------------------------
/tasks/R2R/data/train_vocab.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | the
5 | .
6 | and
7 | walk
8 | turn
9 | left
10 | right
11 | to
12 | go
13 | of
14 | ,
15 | stop
16 | on
17 | room
18 | stairs
19 | into
20 | door
21 | in
22 | down
23 | wait
24 | through
25 | a
26 | at
27 | past
28 | you
29 | hallway
30 | doorway
31 | bedroom
32 | straight
33 | exit
34 | table
35 | enter
36 | then
37 | bathroom
38 | up
39 | with
40 | your
41 | towards
42 | front
43 | take
44 | hall
45 | kitchen
46 | out
47 | forward
48 | around
49 | next
50 | near
51 | by
52 | first
53 | doors
54 | continue
55 | top
56 | bed
57 | chairs
58 | steps
59 | end
60 | make
61 | area
62 | dining
63 | until
64 | there
65 | pass
66 | once
67 | wall
68 | outside
69 | glass
70 | head
71 | open
72 | that
73 | leave
74 | two
75 | chair
76 | inside
77 | couch
78 | toward
79 | living
80 | across
81 | bottom
82 | just
83 | white
84 | when
85 | side
86 | get
87 | double
88 | sink
89 | ahead
90 | before
91 | reach
92 | large
93 | from
94 | second
95 | staircase
96 | step
97 | between
98 | after
99 | fireplace
100 | pool
101 | closet
102 | way
103 | stand
104 | are
105 | house
106 | passed
107 | behind
108 | is
109 | going
110 | it
111 | rug
112 | entrance
113 | set
114 | counter
115 | move
116 | walking
117 | patio
118 | office
119 | landing
120 | '
121 | all
122 | desk
123 | wooden
124 | other
125 | black
126 | small
127 | painting
128 | keep
129 | corner
130 | facing
131 | floor
132 | mirror
133 | red
134 | again
135 | sitting
136 | three
137 | slight
138 | middle
139 | along
140 | bar
141 | laundry
142 | couches
143 | slightly
144 | another
145 | passing
146 | stair
147 | blue
148 | archway
149 | leading
150 | see
151 | entryway
152 | onto
153 | round
154 | window
155 | long
156 | foot
157 | last
158 | an
159 | follow
160 | will
161 | building
162 | carpet
163 | hard
164 | brown
165 | plant
166 | balcony
167 | be
168 | stopping
169 | sliding
170 | piano
171 | few
172 | opening
173 | away
174 | wood
175 | railing
176 | one
177 | island
178 | tv
179 | sign
180 | bench
181 | directly
182 | face
183 | sofa
184 | beside
185 | using
186 | flight
187 | proceed
188 | tub
189 | tables
190 | far
191 | entry
192 | opposite
193 | back
194 | refrigerator
195 | this
196 | light
197 | third
198 | shower
199 | picture
200 | windows
201 | where
202 | "
203 | main
204 | toilet
205 | -
206 | vase
207 | as
208 | ll
209 | under
210 | walkway
211 | sharp
212 | leads
213 | foyer
214 | green
215 | over
216 | entering
217 | corridor
218 | cross
219 | massage
220 | lounge
221 | stairwell
222 | degrees
223 | here
224 | bathtub
225 | s
226 | veer
227 | stairway
228 | path
229 | cabinet
230 | statue
231 | porch
232 | stove
233 | upstairs
234 | lamp
235 | them
236 | which
237 | re
238 | art
239 | dinning
240 | downstairs
241 | television
242 | beds
243 | leather
244 | immediate
245 | garage
246 | potted
247 | hanging
248 | so
249 | big
250 | immediately
251 | four
252 | marble
253 | climb
254 | fridge
255 | has
256 | home
257 | base
258 | grey
259 | coffee
260 | pictures
261 | can
262 | 2
263 | exercise
264 | tile
265 | stools
266 | stone
267 | more
268 | arched
269 | flowers
270 | plants
271 | tree
272 | center
273 | dresser
274 | banister
275 | start
276 | edge
277 | pillar
278 | standing
279 | deck
280 | stepping
281 | wine
282 | water
283 | case
284 | paintings
285 | shelves
286 | mat
287 | walls
288 | empty
289 | very
290 | hand
291 | sinks
292 | cabinets
293 | machine
294 | nearest
295 | shelf
296 | metal
297 | screen
298 | feet
299 | bookshelf
300 | christmas
301 | looking
302 | closest
303 | heading
304 | chandelier
305 | washer
306 | halfway
307 | off
308 | fourth
309 | narrow
310 | some
311 | /
312 | clock
313 | elevator
314 | have
315 | dryer
316 | travel
317 | direction
318 | short
319 | purple
320 | bookcase
321 | switch
322 | circular
323 | sculpture
324 | should
325 | yellow
326 | thermostat
327 | about
328 | though
329 | arch
330 | circle
331 | five
332 | stay
333 | above
334 | gym
335 | threshold
336 | come
337 | theater
338 | indoors
339 | pillars
340 | taking
341 | column
342 | furniture
343 | fire
344 | rope
345 | framed
346 | couple
347 | exiting
348 | keeping
349 | brick
350 | desks
351 | level
352 | oven
353 | descend
354 | turning
355 | little
356 | rest
357 | via
358 | gray
359 | spiral
360 | columns
361 | orange
362 | gate
363 | artwork
364 | outdoor
365 | reaching
366 | pantry
367 | need
368 | frame
369 | doorways
370 | library
371 | but
372 | tall
373 | those
374 | closed
375 | lobby
376 | benches
377 | study
378 | ceiling
379 | half
380 | sets
381 | ninety
382 | hot
383 | restroom
384 | striped
385 | following
386 | 3
387 | seating
388 | washing
389 | bookshelves
390 | ottoman
391 | he
392 | locker
393 | sofas
394 | for
395 | wicker
396 | fountain
397 | rocking
398 | rail
399 | ping
400 | pong
401 | 180
402 | gold
403 | both
404 | intersection
405 | display
406 | tiled
407 | reception
408 | french
409 | pink
410 | pathway
411 | now
412 | extinguisher
413 | flower
414 | sauna
415 | moving
416 | 2nd
417 | bath
418 | guitar
419 | square
420 | equipment
421 | u
422 | switches
423 | partition
424 | conference
425 | treadmill
426 | while
427 | single
428 | (
429 | bear
430 | bike
431 | work
432 | zebra
433 | ascend
434 | stool
435 | carpeted
436 | suite
437 | curved
438 | )
439 | lead
440 | silver
441 | seat
442 | destination
443 | located
444 | beige
445 | piece
446 | against
447 | flooring
448 | lockers
449 | space
450 | pot
451 | dark
452 | animal
453 | armchairs
454 | horse
455 | built
456 | arm
457 | nightstand
458 | leftmost
459 | farthest
460 | pews
461 | 90
462 | soon
463 | game
464 | covered
465 | mosaic
466 | book
467 | upwards
468 | each
469 | recliner
470 | welcome
471 | quick
472 | cloth
473 | reached
474 | elevators
475 | aisle
476 | master
477 | divider
478 | not
479 | grand
480 | rack
481 | hardwood
482 | curtains
483 | do
484 | vanity
485 | says
486 | till
487 | storage
488 | finish
489 | find
490 | six
491 | ve
492 | man
493 | 4
494 | t
495 | den
496 | tapestry
497 | forwards
498 | clear
499 | look
500 | arcade
501 | adjacent
502 | even
503 | cooler
504 | mirrors
505 | fifth
506 | armchair
507 | staircases
508 | outdoors
509 | flights
510 | concrete
511 | opens
512 | smaller
513 | great
514 | hammock
515 | en
516 | beach
517 | jacuzzi
518 | iron
519 | entered
520 | o
521 | phone
522 | row
523 | curve
524 | giant
525 | trees
526 | continuing
527 | degree
528 | oval
529 | close
530 | planter
531 | position
532 | machines
533 | parallel
534 | mini
535 | place
536 | colorful
537 | exterior
538 | marked
539 | platform
540 | breakfast
541 | ground
542 | photos
543 | ovens
544 | fitness
545 | fan
546 | faucet
547 | alongside
548 | poster
549 | locate
550 | ballroom
551 | vent
552 | almost
553 | utility
554 | counters
555 | visible
556 | rocks
557 | doormat
558 | staying
559 | drawers
560 | underneath
561 | towel
562 | heater
563 | alcove
564 | portrait
565 | movie
566 | decoration
567 | shaped
568 | either
569 | cellar
570 | entire
571 | length
572 | rooms
573 | midway
574 | ladder
575 | dog
576 | towels
577 | microwave
578 | box
579 | basket
580 | ..
581 | buffet
582 | prior
583 | or
584 | holes
585 | pillows
586 | colored
587 | beyond
588 | beginning
589 | kitchenette
590 | tan
591 | books
592 | drum
593 | rectangular
594 | seats
595 | men
596 | floors
597 | runner
598 | roped
599 | abstract
600 | sidewalk
601 | arrive
602 | rock
603 | downwards
604 | these
605 | same
606 | grass
607 | further
608 | grill
609 | marilyn
610 | meeting
611 | bit
612 | leaving
613 | panel
614 | 1
615 | sectional
616 | candles
617 | upon
618 | well
619 | workout
620 | railings
621 | turns
622 | immediatly
623 | rightmost
624 | 5
625 | family
626 | dishwasher
627 | print
628 | stones
629 | monroe
630 | lower
631 | radiator
632 | garden
633 | chaise
634 | curtain
635 | 45
636 | console
637 | hang
638 | frosted
639 | passageway
640 | diagonally
641 | basketball
642 | ropes
643 | board
644 | countertop
645 | vases
646 | checkered
647 | barber
648 | without
649 | nook
650 | section
651 | flag
652 | coat
653 | its
654 | part
655 | loveseat
656 | barrier
657 | ornate
658 | computer
659 | ends
660 | wet
661 | like
662 | guardrail
663 | wardrobe
664 | sconce
665 | air
666 | several
667 | photo
668 | lamps
669 | support
670 | making
671 | spa
672 | floral
673 | wide
674 | begin
675 | climbing
676 | unfinished
677 | sixth
678 | floored
679 | trash
680 | mounted
681 | court
682 | patterned
683 | church
684 | frames
685 | line
686 | bedrooms
687 | let
688 | clockwise
689 | women
690 | arches
691 | final
692 | granite
693 | lights
694 | break
695 | velvet
696 | dots
697 | loft
698 | angle
699 | post
700 | love
701 | altar
702 | shoes
703 | opened
704 | 3rd
705 | swing
706 | many
707 | full
708 | fancy
709 | hit
710 | dot
711 | recreation
712 | waiting
713 | baskets
714 | pots
715 | pattern
716 | statues
717 | painted
718 | lattice
719 | below
720 | cactus
721 | planters
722 | decorative
723 | blanket
724 | bust
725 | bush
726 | approach
727 | descending
728 | tennis
729 | cheetah
730 | upper
731 | pillow
732 | credenza
733 | labeled
734 | copy
735 | winding
736 | froward
737 | mirrored
738 | washroom
739 | put
740 | american
741 | stall
742 | arc
743 | #
744 | folding
745 | bridal
746 | remaining
747 | furthest
748 | drinking
749 | ;
750 | started
751 | meet
752 | curving
753 | looks
754 | beneath
755 | tot
756 | our
757 | curves
758 | swinging
759 | host
760 | swimming
761 | placed
762 | clothes
763 | golden
764 | arrow
765 | comforter
766 | distance
767 | chest
768 | tiles
769 | seven
770 | containing
771 | tops
772 | old
773 | no
774 | goes
775 | sailboat
776 | eighty
777 | still
778 | girl
779 | yourself
780 | pas
781 | bowl
782 | design
783 | flat
784 | backyard
785 | rounded
786 | bigger
787 | hundred
788 | also
789 | archways
790 | screened
791 | garbage
792 | posters
793 | structure
794 | walked
795 | extreme
796 | larger
797 | easel
798 | footstool
799 | promptly
800 | eight
801 | photographs
802 | recliners
803 | hallways
804 | getting
805 | kids
806 | changes
807 | thing
808 | salon
809 | giraffe
810 | infront
811 | object
812 | . .
813 | don
814 | pocket
815 | holding
816 | plaque
817 | openings
818 | dinner
819 | fork
820 | topped
821 | filled
822 | shelving
823 | tablecloth
824 | backside
825 | interior
826 | cushioned
827 | stained
828 | only
829 | telephone
830 | nest
831 | ladies
832 | low
833 | stars
834 | dogleg
835 | children
836 | woman
837 | armoire
838 | paper
839 | veering
840 | bushes
841 | number
842 | rugs
843 | wreath
844 | bin
845 | overlooking
846 | loungers
847 | use
848 | sit
849 | arrangement
850 | billiard
851 | oriental
852 | billiards
853 | gallery
854 | urn
855 | settee
856 | areas
857 | closets
858 | ledge
859 | foosball
860 | avoid
861 | panes
862 | construction
863 | cylinder
864 | ascending
865 | fence
866 | too
867 | cushions
868 | watermelon
869 | if
870 | bottles
871 | night
872 | ship
873 | passage
874 | current
875 | barstools
876 | drawer
877 | terrace
878 | than
879 | 11
880 | reading
881 | nearby
882 | twin
883 | bathrooms
884 | fruit
885 | restrooms
886 | rear
887 | central
888 | pivot
889 | lounging
890 | pedestal
891 | brass
892 | starting
893 | dressing
894 | grandfather
895 | saxophone
896 | burgundy
897 | doorframe
898 | followed
899 | goal
900 | doorknob
901 | alter
902 | banquet
903 | point
904 | cement
905 | lady
906 | palm
907 | cupboards
908 | gravel
909 | jump
910 | entertainment
911 | decorations
912 | steel
913 | indoor
914 | handle
915 | beam
916 | polka
917 | feather
918 | ball
919 | thought
920 | mats
921 | made
922 | straw
923 | unit
924 | hutch
925 | roof
926 | dividers
927 | keyboard
928 | entrances
929 | they
930 | approximately
931 | fiction
932 | portraits
933 | zebras
934 | padded
935 | faced
936 | handing
937 | wrap
938 | paneled
939 | carpeting
940 | skin
941 | cow
942 | finally
943 | guitars
944 | woven
945 | fake
946 | projector
947 | marbled
948 | car
949 | huge
950 | 4th
951 | talk
952 | view
953 | were
954 | any
955 | theatre
956 | pair
957 | sunset
958 | stacked
959 | pole
960 | pulp
961 | deer
962 | teh
963 | enerskin
964 | trunk
965 | 6
966 | surrounded
967 | enclosed
968 | lit
969 | plywood
970 | leopard
971 | arrived
972 | formal
973 | candle
974 | lounger
975 | sides
976 | pipes
977 | bird
978 | body
979 | plate
980 | elliptical
981 | hangings
982 | lines
983 | crossing
984 | spot
985 | pgi
986 | son
987 | pause
988 | powder
989 | grate
990 | pig
991 | treadmills
992 |
--------------------------------------------------------------------------------
/tasks/R2R/data/trainval_vocab.txt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | the
5 | .
6 | and
7 | walk
8 | turn
9 | left
10 | right
11 | to
12 | go
13 | of
14 | ,
15 | stop
16 | on
17 | room
18 | stairs
19 | into
20 | door
21 | down
22 | in
23 | wait
24 | through
25 | a
26 | at
27 | past
28 | you
29 | hallway
30 | doorway
31 | straight
32 | bedroom
33 | exit
34 | table
35 | enter
36 | then
37 | up
38 | bathroom
39 | with
40 | your
41 | towards
42 | take
43 | front
44 | hall
45 | kitchen
46 | out
47 | next
48 | forward
49 | around
50 | by
51 | near
52 | first
53 | doors
54 | continue
55 | bed
56 | steps
57 | top
58 | end
59 | chairs
60 | area
61 | dining
62 | until
63 | make
64 | there
65 | once
66 | pass
67 | wall
68 | outside
69 | head
70 | that
71 | open
72 | glass
73 | living
74 | leave
75 | bottom
76 | two
77 | couch
78 | chair
79 | inside
80 | across
81 | toward
82 | just
83 | side
84 | when
85 | get
86 | white
87 | ahead
88 | double
89 | before
90 | sink
91 | reach
92 | second
93 | from
94 | large
95 | step
96 | staircase
97 | between
98 | after
99 | pool
100 | way
101 | fireplace
102 | closet
103 | stand
104 | are
105 | passed
106 | rug
107 | is
108 | house
109 | going
110 | it
111 | behind
112 | entrance
113 | set
114 | counter
115 | move
116 | all
117 | wooden
118 | red
119 | walking
120 | patio
121 | '
122 | landing
123 | floor
124 | other
125 | corner
126 | small
127 | office
128 | sitting
129 | black
130 | painting
131 | keep
132 | mirror
133 | desk
134 | facing
135 | again
136 | three
137 | along
138 | bar
139 | another
140 | middle
141 | slight
142 | carpet
143 | archway
144 | couches
145 | passing
146 | laundry
147 | slightly
148 | stair
149 | leading
150 | blue
151 | see
152 | follow
153 | window
154 | onto
155 | entryway
156 | long
157 | round
158 | an
159 | foot
160 | last
161 | hard
162 | will
163 | brown
164 | plant
165 | building
166 | sliding
167 | stopping
168 | island
169 | be
170 | piano
171 | few
172 | balcony
173 | wood
174 | opening
175 | directly
176 | one
177 | railing
178 | away
179 | tv
180 | sign
181 | beside
182 | flight
183 | sofa
184 | far
185 | face
186 | tub
187 | using
188 | proceed
189 | entry
190 | bench
191 | refrigerator
192 | back
193 | this
194 | opposite
195 | toilet
196 | shower
197 | third
198 | windows
199 | picture
200 | "
201 | tables
202 | light
203 | where
204 | vase
205 | under
206 | -
207 | sharp
208 | ll
209 | leads
210 | massage
211 | as
212 | foyer
213 | cabinet
214 | over
215 | main
216 | entering
217 | walkway
218 | green
219 | stove
220 | here
221 | degrees
222 | corridor
223 | cross
224 | four
225 | path
226 | stairway
227 | stairwell
228 | statue
229 | downstairs
230 | lamp
231 | bathtub
232 | s
233 | veer
234 | porch
235 | lounge
236 | big
237 | upstairs
238 | art
239 | television
240 | dinning
241 | fridge
242 | hanging
243 | coffee
244 | potted
245 | immediate
246 | arched
247 | them
248 | which
249 | re
250 | has
251 | leather
252 | tile
253 | more
254 | immediately
255 | so
256 | garage
257 | edge
258 | exercise
259 | climb
260 | home
261 | marble
262 | grey
263 | wine
264 | beds
265 | stools
266 | base
267 | banister
268 | pictures
269 | center
270 | flowers
271 | 2
272 | start
273 | standing
274 | stone
275 | can
276 | plants
277 | dresser
278 | clock
279 | pillar
280 | deck
281 | shelves
282 | case
283 | tree
284 | stepping
285 | nearest
286 | mat
287 | very
288 | shelf
289 | paintings
290 | looking
291 | some
292 | cabinets
293 | arch
294 | walls
295 | empty
296 | off
297 | hand
298 | short
299 | chandelier
300 | direction
301 | halfway
302 | bookshelf
303 | oven
304 | travel
305 | water
306 | circular
307 | machine
308 | closest
309 | /
310 | feet
311 | pantry
312 | sinks
313 | though
314 | should
315 | heading
316 | metal
317 | have
318 | above
319 | narrow
320 | circle
321 | bookcase
322 | christmas
323 | sculpture
324 | washer
325 | fourth
326 | screen
327 | yellow
328 | dryer
329 | framed
330 | couple
331 | thermostat
332 | switch
333 | threshold
334 | furniture
335 | fire
336 | come
337 | pillars
338 | purple
339 | level
340 | exiting
341 | five
342 | gym
343 | brick
344 | rope
345 | about
346 | theater
347 | keeping
348 | elevator
349 | gray
350 | sauna
351 | column
352 | stay
353 | descend
354 | little
355 | outdoor
356 | via
357 | indoors
358 | taking
359 | turning
360 | reaching
361 | frame
362 | columns
363 | lobby
364 | library
365 | display
366 | sets
367 | doorways
368 | for
369 | desks
370 | sofas
371 | tall
372 | those
373 | rest
374 | need
375 | closed
376 | ninety
377 | bath
378 | artwork
379 | 3
380 | intersection
381 | spiral
382 | ceiling
383 | french
384 | study
385 | hot
386 | following
387 | orange
388 | but
389 | both
390 | striped
391 | gate
392 | half
393 | 180
394 | ottoman
395 | he
396 | tiled
397 | treadmill
398 | pathway
399 | seating
400 | rocking
401 | bookshelves
402 | moving
403 | benches
404 | wicker
405 | pink
406 | flower
407 | fountain
408 | gold
409 | locker
410 | ping
411 | square
412 | washing
413 | pong
414 | rail
415 | now
416 | restroom
417 | extinguisher
418 | seat
419 | switches
420 | bear
421 | stool
422 | beige
423 | u
424 | destination
425 | carpeted
426 | suite
427 | lead
428 | reception
429 | dark
430 | fitness
431 | single
432 | 90
433 | against
434 | 2nd
435 | equipment
436 | while
437 | located
438 | animal
439 | ve
440 | each
441 | man
442 | guitar
443 | pot
444 | nightstand
445 | leftmost
446 | covered
447 | (
448 | book
449 | work
450 | silver
451 | partition
452 | welcome
453 | lockers
454 | ascend
455 | )
456 | bike
457 | counters
458 | flooring
459 | armchairs
460 | vases
461 | zebra
462 | grass
463 | reached
464 | pews
465 | mosaic
466 | upwards
467 | conference
468 | storage
469 | arm
470 | curved
471 | piece
472 | platform
473 | not
474 | den
475 | rack
476 | farthest
477 | ladder
478 | microwave
479 | space
480 | soon
481 | finish
482 | cellar
483 | console
484 | master
485 | do
486 | quick
487 | till
488 | ground
489 | built
490 | flights
491 | game
492 | degree
493 | hardwood
494 | en
495 | parallel
496 | aisle
497 | ropes
498 | entered
499 | vanity
500 | says
501 | divider
502 | t
503 | horse
504 | find
505 | look
506 | cloth
507 | six
508 | curtain
509 | elevators
510 | jacuzzi
511 | curtains
512 | even
513 | place
514 | recliner
515 | phone
516 | drawers
517 | photos
518 | clear
519 | grand
520 | bit
521 | position
522 | visible
523 | adjacent
524 | roped
525 | row
526 | mirrors
527 | breakfast
528 | 4
529 | curve
530 | forwards
531 | outdoors
532 | either
533 | opens
534 | close
535 | beaded
536 | basket
537 | underneath
538 | colored
539 | grill
540 | tapestry
541 | beyond
542 | giant
543 | concrete
544 | continuing
545 | oval
546 | rooms
547 | dog
548 | doormat
549 | o
550 | fifth
551 | armchair
552 | ovens
553 | staircases
554 | fan
555 | locate
556 | smaller
557 | planter
558 | midway
559 | iron
560 | railings
561 | exterior
562 | immediatly
563 | staying
564 | these
565 | alongside
566 | trees
567 | arcade
568 | great
569 | length
570 | drum
571 | vent
572 | beach
573 | cooler
574 | post
575 | sidewalk
576 | marked
577 | downwards
578 | towel
579 | alcove
580 | shaped
581 | frames
582 | tan
583 | hammock
584 | almost
585 | utility
586 | machines
587 | mini
588 | runner
589 | ..
590 | prior
591 | without
592 | pillows
593 | lower
594 | radiator
595 | poster
596 | books
597 | pots
598 | rectangular
599 | well
600 | seats
601 | like
602 | colorful
603 | towels
604 | photo
605 | arrive
606 | rightmost
607 | buffet
608 | or
609 | holes
610 | further
611 | its
612 | beginning
613 | faucet
614 | part
615 | barrier
616 | portrait
617 | movie
618 | decoration
619 | entire
620 | leaving
621 | panel
622 | ballroom
623 | candles
624 | ends
625 | upon
626 | men
627 | workout
628 | rocks
629 | turns
630 | box
631 | love
632 | countertop
633 | same
634 | heater
635 | 5
636 | kitchenette
637 | waiting
638 | 1
639 | yard
640 | placed
641 | passageway
642 | diagonally
643 | floors
644 | several
645 | support
646 | abstract
647 | making
648 | climbing
649 | nook
650 | design
651 | family
652 | patterned
653 | loveseat
654 | marilyn
655 | clockwise
656 | footstool
657 | 45
658 | hang
659 | loft
660 | air
661 | board
662 | old
663 | eye
664 | altar
665 | spa
666 | floral
667 | rock
668 | mirrored
669 | section
670 | hit
671 | meeting
672 | monroe
673 | ornate
674 | garden
675 | let
676 | baskets
677 | sectional
678 | pattern
679 | statues
680 | guardrail
681 | frosted
682 | wardrobe
683 | bust
684 | angle
685 | lamps
686 | wide
687 | begin
688 | checkered
689 | dressing
690 | dishwasher
691 | mounted
692 | coat
693 | print
694 | stones
695 | church
696 | line
697 | recreation
698 | women
699 | looks
700 | chaise
701 | below
702 | decorative
703 | blanket
704 | basketball
705 | goes
706 | pillow
707 | labeled
708 | sailboat
709 | girl
710 | 3rd
711 | pas
712 | barber
713 | unfinished
714 | sixth
715 | floored
716 | starting
717 | washroom
718 | flag
719 | also
720 | bedrooms
721 | ;
722 | promptly
723 | eight
724 | computer
725 | painted
726 | lattice
727 | wet
728 | hallways
729 | fence
730 | sconce
731 | chest
732 | approach
733 | if
734 | tops
735 | descending
736 | upper
737 | credenza
738 | shoes
739 | yourself
740 | cushioned
741 | many
742 | telephone
743 | nest
744 | trash
745 | full
746 | court
747 | backyard
748 | bigger
749 | rugs
750 | dot
751 | started
752 | arches
753 | meet
754 | tot
755 | final
756 | urn
757 | beam
758 | swimming
759 | made
760 | granite
761 | cactus
762 | lights
763 | thing
764 | break
765 | velvet
766 | curio
767 | tiles
768 | dots
769 | cheetah
770 | still
771 | opened
772 | swing
773 | stars
774 | armoire
775 | alter
776 | fancy
777 | veering
778 | put
779 | american
780 | stall
781 | chart
782 | #
783 | remaining
784 | furthest
785 | drinking
786 | extreme
787 | larger
788 | curving
789 | billiard
790 | our
791 | oriental
792 | getting
793 | clothes
794 | planters
795 | golden
796 | arrow
797 | comforter
798 | distance
799 | infront
800 | . .
801 | bush
802 | seven
803 | night
804 | dinner
805 | tennis
806 | no
807 | barstools
808 | backside
809 | copy
810 | winding
811 | interior
812 | froward
813 | bowl
814 | low
815 | cream
816 | dogleg
817 | woman
818 | followed
819 | rounded
820 | bushes
821 | archways
822 | arc
823 | folding
824 | point
825 | bridal
826 | bin
827 | posters
828 | walked
829 | loungers
830 | easel
831 | beneath
832 | curves
833 | indoor
834 | swinging
835 | recliners
836 | ledge
837 | foosball
838 | host
839 | star
840 | bannister
841 | hutch
842 | cushions
843 | don
844 | holding
845 | containing
846 | plaque
847 | openings
848 | current
849 | filled
850 | eighty
851 | tablecloth
852 | talk
853 | terrace
854 | than
855 | 11
856 | only
857 | pivot
858 | pedestal
859 | children
860 | trunk
861 | flat
862 | paper
863 | number
864 | rails
865 | hundred
866 | please
867 | screened
868 | formal
869 | palm
870 | garbage
871 | structure
872 | overlooking
873 | sit
874 | arrangement
875 | entertainment
876 | decorations
877 | billiards
878 | gallery
879 | antelope
880 | photographs
881 | areas
882 | feather
883 | splits
884 | closets
885 | ball
886 | mats
887 | panes
888 | throught
889 | kids
890 | changes
891 | atrium
892 | violin
893 | salon
894 | giraffe
895 | object
896 | pocket
897 | watermelon
898 | bottles
899 | boxes
900 | fork
901 | topped
902 | passage
903 | woven
904 | change
905 | shelving
906 | stuffed
907 | stained
908 | nearby
909 | twin
910 | fruit
911 | rear
912 | pole
913 | central
914 | ladies
915 | deer
916 | brass
917 | weight
918 | lit
919 | banquet
920 | wreath
921 | pew
922 | sunken
923 | electrical
924 | cupboards
925 | use
926 | globe
927 | steel
928 | settee
929 | handle
930 | polka
931 | thought
932 | avoid
933 | letters
934 | construction
935 | urns
936 | cylinder
937 | unit
938 | would
939 | dividers
940 | ascending
941 | approximately
942 | too
943 | faced
944 | people
945 | mantle
946 | ship
947 | skin
948 | finally
949 | bedside
950 | fake
951 | marbled
952 | drawer
953 | 4th
954 | view
955 | any
956 | pair
957 | sunset
958 | reading
959 | bathrooms
960 | being
961 | restrooms
962 | stationary
963 | lounging
964 | grandfather
965 | saxophone
966 | teh
967 | burgundy
968 | unicycle
969 | doorframe
970 | 6
971 | goal
972 | doorknob
973 | cement
974 | arrived
975 | lady
976 | candle
977 | block
978 | rows
979 | sides
980 | gravel
981 | jump
982 | bird
983 | butler
984 | high
985 | lines
986 | bathing
987 | beads
988 | crossing
989 | spot
990 | completely
991 | bookcases
992 | pgi
993 | powder
994 | int
995 | treadmills
996 | projection
997 | straw
998 | roof
999 | keyboard
1000 | entrances
1001 | they
1002 | fiction
1003 | reclining
1004 | portraits
1005 | zebras
1006 | padded
1007 | rectangle
1008 | new
1009 | lots
1010 | handrail
1011 | handing
1012 | vaulted
1013 | landscape
1014 | lips
1015 | playroom
1016 | wrap
1017 | class
1018 | paneled
1019 | crystal
1020 | carpeting
1021 | cow
1022 | guitars
1023 | run
1024 | thirty
1025 | projector
1026 | cases
1027 | car
1028 | counting
1029 | suitcases
1030 | huge
1031 | halt
1032 | were
1033 | theatre
1034 | lawn
1035 | shoe
1036 | lighted
1037 | outlet
1038 | exam
1039 | ways
1040 | vertical
1041 | tun
1042 | stacked
1043 | media
1044 | stepped
1045 | throw
1046 | pulp
1047 | ten
1048 | enerskin
1049 | stick
1050 | horizontal
1051 | signs
1052 | style
1053 | houseplant
1054 | surrounded
1055 | enclosed
1056 | dividing
1057 | barn
1058 | plywood
1059 | leopard
1060 | lounger
1061 | e
1062 | barbecue
1063 | pipes
1064 | body
1065 | plate
1066 | elliptical
1067 | instead
1068 | hangings
1069 | sculptures
1070 | diagonal
1071 | pendant
1072 | slider
1073 | intersecting
1074 | mid
1075 | son
1076 | pause
1077 | grain
1078 | gone
1079 | homes
1080 | ski
1081 | grate
1082 | pig
1083 |
--------------------------------------------------------------------------------