├── .github
│   └── workflows
│       └── codeql-analysis.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── DSAEncoder
│   └── drn_d_22-4bd2f8ea.pth
├── DSAModules.py
├── LICENSE
├── README.md
├── SECURITY.md
├── SUPPORT.md
├── datasets
│   ├── kitti
│   │   ├── test.txt
│   │   ├── train.txt
│   │   └── val.txt
│   └── vkitti
│       ├── test.txt
│       ├── train.txt
│       └── val.txt
├── img
│   ├── intro.PNG
│   └── overview.png
├── loaddata.py
├── models
│   └── modules.py
├── test.py
├── train.py
├── train_loss.py
├── transform.py
└── utils.py
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ main ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ main ]
20 | schedule:
21 | - cron: '26 22 * * 4'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'python' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
37 | # Learn more:
38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
39 |
40 | steps:
41 | - name: Checkout repository
42 | uses: actions/checkout@v2
43 |
44 | # Initializes the CodeQL tools for scanning.
45 | - name: Initialize CodeQL
46 | uses: github/codeql-action/init@v1
47 | with:
48 | languages: ${{ matrix.language }}
49 | # If you wish to specify custom queries, you can do so here or in a config file.
50 | # By default, queries listed here will override any specified in a config file.
51 | # Prefix the list here with "+" to use these queries and those in the config file.
52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
53 |
54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
55 | # If this step fails, then you should remove it and run the build manually (see below)
56 | - name: Autobuild
57 | uses: github/codeql-action/autobuild@v1
58 |
59 | # ℹ️ Command-line programs to run using the OS shell.
60 | # 📚 https://git.io/JvXDl
61 |
62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
63 | # and modify them (or add more) to build your code if your project
64 | # uses a compiled language
65 |
66 | #- run: |
67 | # make bootstrap
68 | # make release
69 |
70 | - name: Perform CodeQL Analysis
71 | uses: github/codeql-action/analyze@v1
72 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Ignore Visual Studio temporary files, build results, and
2 | ## files generated by popular Visual Studio add-ons.
3 | ##
4 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
5 |
6 | # User-specific files
7 | *.rsuser
8 | *.suo
9 | *.user
10 | *.userosscache
11 | *.sln.docstates
12 |
13 | # User-specific files (MonoDevelop/Xamarin Studio)
14 | *.userprefs
15 |
16 | # Mono auto generated files
17 | mono_crash.*
18 |
19 | # Build results
20 | [Dd]ebug/
21 | [Dd]ebugPublic/
22 | [Rr]elease/
23 | [Rr]eleases/
24 | x64/
25 | x86/
26 | [Aa][Rr][Mm]/
27 | [Aa][Rr][Mm]64/
28 | bld/
29 | [Bb]in/
30 | [Oo]bj/
31 | [Ll]og/
32 | [Ll]ogs/
33 |
34 | # Visual Studio 2015/2017 cache/options directory
35 | .vs/
36 | # Uncomment if you have tasks that create the project's static files in wwwroot
37 | #wwwroot/
38 |
39 | # Visual Studio 2017 auto generated files
40 | Generated\ Files/
41 |
42 | # MSTest test Results
43 | [Tt]est[Rr]esult*/
44 | [Bb]uild[Ll]og.*
45 |
46 | # NUnit
47 | *.VisualState.xml
48 | TestResult.xml
49 | nunit-*.xml
50 |
51 | # Build Results of an ATL Project
52 | [Dd]ebugPS/
53 | [Rr]eleasePS/
54 | dlldata.c
55 |
56 | # Benchmark Results
57 | BenchmarkDotNet.Artifacts/
58 |
59 | # .NET Core
60 | project.lock.json
61 | project.fragment.lock.json
62 | artifacts/
63 |
64 | # StyleCop
65 | StyleCopReport.xml
66 |
67 | # Files built by Visual Studio
68 | *_i.c
69 | *_p.c
70 | *_h.h
71 | *.ilk
72 | *.meta
73 | *.obj
74 | *.iobj
75 | *.pch
76 | *.pdb
77 | *.ipdb
78 | *.pgc
79 | *.pgd
80 | *.rsp
81 | *.sbr
82 | *.tlb
83 | *.tli
84 | *.tlh
85 | *.tmp
86 | *.tmp_proj
87 | *_wpftmp.csproj
88 | *.log
89 | *.vspscc
90 | *.vssscc
91 | .builds
92 | *.pidb
93 | *.svclog
94 | *.scc
95 |
96 | # Chutzpah Test files
97 | _Chutzpah*
98 |
99 | # Visual C++ cache files
100 | ipch/
101 | *.aps
102 | *.ncb
103 | *.opendb
104 | *.opensdf
105 | *.sdf
106 | *.cachefile
107 | *.VC.db
108 | *.VC.VC.opendb
109 |
110 | # Visual Studio profiler
111 | *.psess
112 | *.vsp
113 | *.vspx
114 | *.sap
115 |
116 | # Visual Studio Trace Files
117 | *.e2e
118 |
119 | # TFS 2012 Local Workspace
120 | $tf/
121 |
122 | # Guidance Automation Toolkit
123 | *.gpState
124 |
125 | # ReSharper is a .NET coding add-in
126 | _ReSharper*/
127 | *.[Rr]e[Ss]harper
128 | *.DotSettings.user
129 |
130 | # TeamCity is a build add-in
131 | _TeamCity*
132 |
133 | # DotCover is a Code Coverage Tool
134 | *.dotCover
135 |
136 | # AxoCover is a Code Coverage Tool
137 | .axoCover/*
138 | !.axoCover/settings.json
139 |
140 | # Visual Studio code coverage results
141 | *.coverage
142 | *.coveragexml
143 |
144 | # NCrunch
145 | _NCrunch_*
146 | .*crunch*.local.xml
147 | nCrunchTemp_*
148 |
149 | # MightyMoose
150 | *.mm.*
151 | AutoTest.Net/
152 |
153 | # Web workbench (sass)
154 | .sass-cache/
155 |
156 | # Installshield output folder
157 | [Ee]xpress/
158 |
159 | # DocProject is a documentation generator add-in
160 | DocProject/buildhelp/
161 | DocProject/Help/*.HxT
162 | DocProject/Help/*.HxC
163 | DocProject/Help/*.hhc
164 | DocProject/Help/*.hhk
165 | DocProject/Help/*.hhp
166 | DocProject/Help/Html2
167 | DocProject/Help/html
168 |
169 | # Click-Once directory
170 | publish/
171 |
172 | # Publish Web Output
173 | *.[Pp]ublish.xml
174 | *.azurePubxml
175 | # Note: Comment the next line if you want to checkin your web deploy settings,
176 | # but database connection strings (with potential passwords) will be unencrypted
177 | *.pubxml
178 | *.publishproj
179 |
180 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
181 | # checkin your Azure Web App publish settings, but sensitive information contained
182 | # in these scripts will be unencrypted
183 | PublishScripts/
184 |
185 | # NuGet Packages
186 | *.nupkg
187 | # NuGet Symbol Packages
188 | *.snupkg
189 | # The packages folder can be ignored because of Package Restore
190 | **/[Pp]ackages/*
191 | # except build/, which is used as an MSBuild target.
192 | !**/[Pp]ackages/build/
193 | # Uncomment if necessary however generally it will be regenerated when needed
194 | #!**/[Pp]ackages/repositories.config
195 | # NuGet v3's project.json files produces more ignorable files
196 | *.nuget.props
197 | *.nuget.targets
198 |
199 | # Microsoft Azure Build Output
200 | csx/
201 | *.build.csdef
202 |
203 | # Microsoft Azure Emulator
204 | ecf/
205 | rcf/
206 |
207 | # Windows Store app package directories and files
208 | AppPackages/
209 | BundleArtifacts/
210 | Package.StoreAssociation.xml
211 | _pkginfo.txt
212 | *.appx
213 | *.appxbundle
214 | *.appxupload
215 |
216 | # Visual Studio cache files
217 | # files ending in .cache can be ignored
218 | *.[Cc]ache
219 | # but keep track of directories ending in .cache
220 | !?*.[Cc]ache/
221 |
222 | # Others
223 | ClientBin/
224 | ~$*
225 | *~
226 | *.dbmdl
227 | *.dbproj.schemaview
228 | *.jfm
229 | *.pfx
230 | *.publishsettings
231 | orleans.codegen.cs
232 |
233 | # Including strong name files can present a security risk
234 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
235 | #*.snk
236 |
237 | # Since there are multiple workflows, uncomment next line to ignore bower_components
238 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
239 | #bower_components/
240 |
241 | # RIA/Silverlight projects
242 | Generated_Code/
243 |
244 | # Backup & report files from converting an old project file
245 | # to a newer Visual Studio version. Backup files are not needed,
246 | # because we have git ;-)
247 | _UpgradeReport_Files/
248 | Backup*/
249 | UpgradeLog*.XML
250 | UpgradeLog*.htm
251 | ServiceFabricBackup/
252 | *.rptproj.bak
253 |
254 | # SQL Server files
255 | *.mdf
256 | *.ldf
257 | *.ndf
258 |
259 | # Business Intelligence projects
260 | *.rdl.data
261 | *.bim.layout
262 | *.bim_*.settings
263 | *.rptproj.rsuser
264 | *- [Bb]ackup.rdl
265 | *- [Bb]ackup ([0-9]).rdl
266 | *- [Bb]ackup ([0-9][0-9]).rdl
267 |
268 | # Microsoft Fakes
269 | FakesAssemblies/
270 |
271 | # GhostDoc plugin setting file
272 | *.GhostDoc.xml
273 |
274 | # Node.js Tools for Visual Studio
275 | .ntvs_analysis.dat
276 | node_modules/
277 |
278 | # Visual Studio 6 build log
279 | *.plg
280 |
281 | # Visual Studio 6 workspace options file
282 | *.opt
283 |
284 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
285 | *.vbw
286 |
287 | # Visual Studio LightSwitch build output
288 | **/*.HTMLClient/GeneratedArtifacts
289 | **/*.DesktopClient/GeneratedArtifacts
290 | **/*.DesktopClient/ModelManifest.xml
291 | **/*.Server/GeneratedArtifacts
292 | **/*.Server/ModelManifest.xml
293 | _Pvt_Extensions
294 |
295 | # Paket dependency manager
296 | .paket/paket.exe
297 | paket-files/
298 |
299 | # FAKE - F# Make
300 | .fake/
301 |
302 | # CodeRush personal settings
303 | .cr/personal
304 |
305 | # Python Tools for Visual Studio (PTVS)
306 | __pycache__/
307 | *.pyc
308 |
309 | # Cake - Uncomment if you are using it
310 | # tools/**
311 | # !tools/packages.config
312 |
313 | # Tabs Studio
314 | *.tss
315 |
316 | # Telerik's JustMock configuration file
317 | *.jmconfig
318 |
319 | # BizTalk build output
320 | *.btp.cs
321 | *.btm.cs
322 | *.odx.cs
323 | *.xsd.cs
324 |
325 | # OpenCover UI analysis results
326 | OpenCover/
327 |
328 | # Azure Stream Analytics local run output
329 | ASALocalRun/
330 |
331 | # MSBuild Binary and Structured Log
332 | *.binlog
333 |
334 | # NVidia Nsight GPU debugger configuration file
335 | *.nvuser
336 |
337 | # MFractors (Xamarin productivity tool) working folder
338 | .mfractor/
339 |
340 | # Local History for Visual Studio
341 | .localhistory/
342 |
343 | # BeatPulse healthcheck temp database
344 | healthchecksdb
345 |
346 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
347 | MigrationBackup/
348 |
349 | # Ionide (cross platform F# VS Code tools) working folder
350 | .ionide/
351 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Microsoft Open Source Code of Conduct
2 |
3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
4 |
5 | Resources:
6 |
7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns
10 |
--------------------------------------------------------------------------------
/DSAEncoder/drn_d_22-4bd2f8ea.pth:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/S2R-DepthNet/aebc931c7e8c7baad4dec2a0fd8643244741c52e/DSAEncoder/drn_d_22-4bd2f8ea.pth
--------------------------------------------------------------------------------
/DSAModules.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import torch.nn as nn
3 | import math
4 | import torch.utils.model_zoo as model_zoo
5 | import torch.nn.functional as F
6 | import torch
7 | BatchNorm = nn.BatchNorm2d
8 |
9 |
10 | # __all__ = ['DRN', 'drn26', 'drn42', 'drn58']
11 |
12 |
13 | webroot = 'https://tigress-web.princeton.edu/~fy/drn/models/'
14 |
15 | model_urls = {
16 | 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
17 | 'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
18 | 'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
19 | 'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
20 | 'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
21 | 'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
22 | 'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
23 | 'drn-d-105': webroot + 'drn_d_105-12b40979.pth',
24 | 'drn-d-105_ms': webroot + 'drn-d-105_ms_cityscapes.pth'
25 | }
26 |
27 |
28 | def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
29 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
30 | padding=padding, bias=False, dilation=dilation)
31 |
32 |
33 | class BasicBlock(nn.Module):
34 | expansion = 1
35 |
36 | def __init__(self, inplanes, planes, stride=1, downsample=None,
37 | dilation=(1, 1), residual=True):
38 | super(BasicBlock, self).__init__()
39 | self.conv1 = conv3x3(inplanes, planes, stride,
40 | padding=dilation[0], dilation=dilation[0])
41 | self.bn1 = BatchNorm(planes)
42 | self.relu = nn.ReLU(inplace=True)
43 | self.conv2 = conv3x3(planes, planes,
44 | padding=dilation[1], dilation=dilation[1])
45 | self.bn2 = BatchNorm(planes)
46 | self.downsample = downsample
47 | self.stride = stride
48 | self.residual = residual
49 |
50 | def forward(self, x):
51 | residual = x
52 |
53 | out = self.conv1(x) # 32/64 1/2
54 | out = self.bn1(out)
55 | out = self.relu(out)
56 |
57 | out = self.conv2(out)
58 | out = self.bn2(out)
59 |
60 | if self.downsample is not None:
61 | residual = self.downsample(x)
62 | if self.residual:
63 | out += residual
64 | out = self.relu(out)
65 |
66 | return out
67 |
68 |
69 | class Bottleneck(nn.Module):
70 | expansion = 4
71 |
72 | def __init__(self, inplanes, planes, stride=1, downsample=None,
73 | dilation=(1, 1), residual=True):
74 | super(Bottleneck, self).__init__()
75 | self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
76 | self.bn1 = BatchNorm(planes)
77 | self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
78 | padding=dilation[1], bias=False,
79 | dilation=dilation[1])
80 | self.bn2 = BatchNorm(planes)
81 | self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
82 | self.bn3 = BatchNorm(planes * 4)
83 | self.relu = nn.ReLU(inplace=True)
84 | self.downsample = downsample
85 | self.stride = stride
86 |
87 | def forward(self, x):
88 | residual = x
89 |
90 | out = self.conv1(x)
91 | out = self.bn1(out)
92 | out = self.relu(out)
93 |
94 | out = self.conv2(out)
95 | out = self.bn2(out)
96 | out = self.relu(out)
97 |
98 | out = self.conv3(out)
99 | out = self.bn3(out)
100 |
101 | if self.downsample is not None:
102 | residual = self.downsample(x)
103 |
104 | out += residual
105 | out = self.relu(out)
106 |
107 | return out
108 |
109 |
110 | class DRN(nn.Module):
111 | # BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', **kwargs
112 | def __init__(self, block, layers, num_classes=1000,
113 | channels=(16, 32, 64, 128, 256, 512, 512, 512),
114 | out_map=False, out_middle=False, pool_size=28, arch='D'):
115 | super(DRN, self).__init__()
116 | self.inplanes = channels[0] # 16
117 | self.out_map = out_map
118 | self.out_dim = channels[-1]
119 | self.out_middle = out_middle
120 | self.arch = arch
121 |
122 | if arch == 'C':
123 | self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
124 | padding=3, bias=False)
125 | self.bn1 = BatchNorm(channels[0])
126 | self.relu = nn.ReLU(inplace=True)
127 |
128 | self.layer1 = self._make_layer(
129 | BasicBlock, channels[0], layers[0], stride=1)
130 | self.layer2 = self._make_layer(
131 | BasicBlock, channels[1], layers[1], stride=2)
132 | elif arch == 'D':
133 | self.layer0 = nn.Sequential(
134 | nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3, # 3/16 7x7
135 | bias=False),
136 | BatchNorm(channels[0]),
137 | nn.ReLU(inplace=True)
138 | )
139 |
140 | self.layer1 = self._make_conv_layers(
141 | channels[0], layers[0], stride=1) # (16, 1, stride=1)
142 | self.layer2 = self._make_conv_layers(
143 | channels[1], layers[1], stride=2) # (32, 1, stride=2)
144 |
145 | self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2) # (64, 2)
146 | self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2) # (128, 2)
147 | self.layer5 = self._make_layer(block, channels[4], layers[4],
148 | dilation=2, new_level=False) # (256, 2)
149 | self.layer6 = None if layers[5] == 0 else \
150 | self._make_layer(block, channels[5], layers[5], dilation=4, # (512, 2)
151 | new_level=False)
152 |
153 | if arch == 'C':
154 | self.layer7 = None if layers[6] == 0 else \
155 | self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
156 | new_level=False, residual=False)
157 | self.layer8 = None if layers[7] == 0 else \
158 | self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
159 | new_level=False, residual=False)
160 | elif arch == 'D':
161 | self.layer7 = None if layers[6] == 0 else \
162 | self._make_conv_layers(channels[6], layers[6], dilation=2) # (512,1)
163 | self.layer8 = None if layers[7] == 0 else \
164 | self._make_conv_layers(channels[7], layers[7], dilation=1) # (512,1)
165 |
166 | if num_classes > 0:
167 | self.avgpool = nn.AvgPool2d(pool_size)
168 | self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
169 | stride=1, padding=0, bias=True)
170 | for m in self.modules():
171 | if isinstance(m, nn.Conv2d):
172 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
173 | m.weight.data.normal_(0, math.sqrt(2. / n))
174 | elif isinstance(m, BatchNorm):
175 | m.weight.data.fill_(1)
176 | m.bias.data.zero_()
177 |
178 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
179 | new_level=True, residual=True):
180 | assert dilation == 1 or dilation % 2 == 0
181 | downsample = None
182 |
183 | if stride != 1 or self.inplanes != planes * block.expansion:
184 | # 32/64 stride=2
185 | # 128/256
186 | downsample = nn.Sequential(
187 | nn.Conv2d(self.inplanes, planes * block.expansion, #
188 | kernel_size=1, stride=stride, bias=False),
189 | BatchNorm(planes * block.expansion),
190 | )
191 | ## 32/64
192 | ## 128/256 dilation=(2,2)
193 | layers = list()
194 | layers.append(block(
195 | self.inplanes, planes, stride, downsample,
196 | dilation=(1, 1) if dilation == 1 else (
197 | dilation // 2 if new_level else dilation, dilation),
198 | residual=residual))
199 | self.inplanes = planes * block.expansion
200 | for i in range(1, blocks):
201 | layers.append(block(self.inplanes, planes, residual=residual,
202 | dilation=(dilation, dilation)))
203 |
204 | return nn.Sequential(*layers)
205 |
206 | def _make_conv_layers(self, channels, convs, stride=1, dilation=1):
207 | modules = []
208 | # 16
209 | for i in range(convs):
210 | modules.extend([
211 | nn.Conv2d(self.inplanes, channels, kernel_size=3,
212 | stride=stride if i == 0 else 1,
213 | padding=dilation, bias=False, dilation=dilation), # 16/16 3x3 16/32
214 | BatchNorm(channels),
215 | nn.ReLU(inplace=True)])
216 | self.inplanes = channels
217 | return nn.Sequential(*modules)
218 |
219 | def forward(self, x):
220 | y = list()
221 |
222 | if self.arch == 'C':
223 | x = self.conv1(x)
224 | x = self.bn1(x)
225 | x = self.relu(x)
226 | elif self.arch == 'D':
227 | x = self.layer0(x) # 3/16 7x7
228 |
229 | x = self.layer1(x) # 16/16
230 | y.append(x)
231 | x = self.layer2(x) # 16/32 (1/2)
232 | y.append(x)
233 |
234 | x = self.layer3(x) # 32/64 (1/4)
235 | y.append(x)
236 |
237 | x = self.layer4(x) # 64/128 (1/8)
238 | y.append(x)
239 |
240 | x = self.layer5(x) # 128/256 (1/8)
241 | y.append(x)
242 |
243 | if self.layer6 is not None:
244 | x = self.layer6(x) # 256/512 (1/8)
245 | y.append(x)
246 |
247 | if self.layer7 is not None:
248 | x = self.layer7(x) # 512/512 (1/8)
249 | y.append(x)
250 |
251 | if self.layer8 is not None:
252 | x = self.layer8(x) # 512/512 (1/8)
253 | y.append(x)
254 |
255 | if self.out_map:
256 | x = self.fc(x)
257 | else:
258 | x = self.avgpool(x) #
259 | x = self.fc(x)
260 | x = x.view(x.size(0), -1)
261 |
262 | if self.out_middle:
263 | return x, y
264 | else:
265 | return x
266 |
267 | class _UpProjection(nn.Sequential):
268 |
269 | def __init__(self, num_input_features, num_output_features):
270 | super(_UpProjection, self).__init__()
271 | #self.reflect_pad_1 = nn.ReflectionPad2d(2)
272 | self.conv1 = nn.Conv2d(num_input_features, num_output_features,
273 | kernel_size=5, stride=1, padding=2, bias=False)
274 | self.bn1 = nn.BatchNorm2d(num_output_features)
275 | self.relu = nn.ReLU(inplace=True)
276 |
277 | #self.reflect_pad_2 = nn.ReflectionPad2d(1)
278 | self.conv1_2 = nn.Conv2d(num_output_features, num_output_features,
279 | kernel_size=3, stride=1, padding=1, bias=False)
280 | self.bn1_2 = nn.BatchNorm2d(num_output_features)
281 |
282 | #self.reflect_pad_3 = nn.ReflectionPad2d(2)
283 | self.conv2 = nn.Conv2d(num_input_features, num_output_features,
284 | kernel_size=5, stride=1, padding=2, bias=False)
285 | self.bn2 = nn.BatchNorm2d(num_output_features)
286 |
287 | def forward(self, x, size):
288 | x = F.interpolate(x, size=size, mode='bilinear',align_corners=True)
289 | #x = F.upsample(x, size=size, mode='bilinear')
290 | x_conv1 = self.relu(self.bn1(self.conv1(x)))
291 | bran1 = self.bn1_2(self.conv1_2(x_conv1))
292 | bran2 = self.bn2(self.conv2(x))
293 |
294 | out = self.relu(bran1 + bran2)
295 |
296 | return out
297 |
298 |
299 |
300 | class AutoED(nn.Module):
301 | def __init__(self, model):
302 | super(AutoED, self).__init__()
303 | self.E = _E(model)
304 | self.D = _D()
305 |
306 | def freeze_bn(self):
307 | for m in self.modules():
308 | if isinstance(m, nn.BatchNorm2d):
309 | m.eval()
310 |
311 |
312 | def forward(self, x):
313 | x_, size1, size2, size3 = self.E(x)
314 | out = self.D(x_, size1, size2, size3)
315 |
316 | return out
317 |
318 |
319 | class _E(nn.Module):
320 | def __init__(self, model):
321 | super(_E, self).__init__()
322 | self.base = nn.Sequential(*list(model.children())[:-2])
323 |
324 | def forward(self, x):
325 | x1 = self.base[0](x)
326 | x2 = self.base[1](x1)
327 | x3 = self.base[2](x2)
328 | x4 = self.base[3](x3)
329 | x5 = self.base[4](x4)
330 | x6 = self.base[5](x5)
331 | x7 = self.base[6](x6)
332 | x8 = self.base[7](x7)
333 | x9 = self.base[8](x8)
334 |
335 | return x9,[x4.size(2),x4.size(3)],[x3.size(2),x3.size(3)], [x.size(2),x.size(3)]
336 |
337 |
338 | class _D(nn.Module):
339 | def __init__(self):
340 | super(_D, self).__init__()
341 | num_features = 512
342 | self.up1 = _UpProjection(num_input_features=num_features, num_output_features=num_features // 2)
343 | num_features = num_features // 2
344 |
345 | self.up2 = _UpProjection(num_input_features=num_features, num_output_features=num_features // 2)
346 | num_features = num_features // 2
347 |
348 | self.up3 = _UpProjection(num_input_features=num_features, num_output_features=num_features // 2)
349 | num_features = num_features // 2
350 |
351 | self.conv = nn.Conv2d(num_features, 1, kernel_size=3, stride=1, bias=True)
352 |
353 | self.reflect_pad = nn.ReflectionPad2d(1)
354 |
355 | def forward(self, x, size1, size2, size3):
356 |
357 |
358 | u1 = self.up1(x, size1)
359 | u2 = self.up2(u1, size2)
360 | u3 = self.up3(u2, size3)
361 | u4 = self.reflect_pad(u3)
362 |
363 | out = torch.sigmoid(self.conv(u4))
364 | return out
365 |
366 |
367 |
368 |
369 | class DRN_A(nn.Module):
370 |
371 | def __init__(self, block, layers, num_classes=1000):
372 | self.inplanes = 64
373 | super(DRN_A, self).__init__()
374 | self.out_dim = 512 * block.expansion
375 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
376 | bias=False)
377 | self.bn1 = nn.BatchNorm2d(64)
378 | self.relu = nn.ReLU(inplace=True)
379 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
380 | self.layer1 = self._make_layer(block, 64, layers[0])
381 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
382 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
383 | dilation=2)
384 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
385 | dilation=4)
386 | self.avgpool = nn.AvgPool2d(28, stride=1)
387 | self.fc = nn.Linear(512 * block.expansion, num_classes)
388 |
389 | for m in self.modules():
390 | if isinstance(m, nn.Conv2d):
391 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
392 | m.weight.data.normal_(0, math.sqrt(2. / n))
393 | elif isinstance(m, BatchNorm):
394 | m.weight.data.fill_(1)
395 | m.bias.data.zero_()
396 |
397 | # for m in self.modules():
398 | # if isinstance(m, nn.Conv2d):
399 | # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
400 | # elif isinstance(m, nn.BatchNorm2d):
401 | # nn.init.constant_(m.weight, 1)
402 | # nn.init.constant_(m.bias, 0)
403 |
404 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
405 | downsample = None
406 | if stride != 1 or self.inplanes != planes * block.expansion:
407 | downsample = nn.Sequential(
408 | nn.Conv2d(self.inplanes, planes * block.expansion,
409 | kernel_size=1, stride=stride, bias=False),
410 | nn.BatchNorm2d(planes * block.expansion),
411 | )
412 |
413 | layers = []
414 | layers.append(block(self.inplanes, planes, stride, downsample))
415 | self.inplanes = planes * block.expansion
416 | for i in range(1, blocks):
417 | layers.append(block(self.inplanes, planes,
418 | dilation=(dilation, dilation)))
419 |
420 | return nn.Sequential(*layers)
421 |
422 | def forward(self, x):
423 | x = self.conv1(x)
424 | x = self.bn1(x)
425 | x = self.relu(x)
426 | x = self.maxpool(x)
427 |
428 | x = self.layer1(x)
429 | x = self.layer2(x)
430 | x = self.layer3(x)
431 | x = self.layer4(x)
432 |
433 | x = self.avgpool(x)
434 | x = x.view(x.size(0), -1)
435 | x = self.fc(x)
436 |
437 | return x
438 |
439 |
440 | def drn_a_50(pretrained=False, **kwargs):
441 | model = DRN_A(Bottleneck, [3, 4, 6, 3], **kwargs)
442 | if pretrained:
443 | model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
444 | return model
445 |
446 |
447 | def drn_c_26(pretrained=False, **kwargs):
448 | model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', **kwargs)
449 | if pretrained:
450 | model.load_state_dict(model_zoo.load_url(model_urls['drn-c-26']))
451 | return model
452 |
453 |
454 | def drn_c_42(pretrained=False, **kwargs):
455 | model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
456 | if pretrained:
457 | model.load_state_dict(model_zoo.load_url(model_urls['drn-c-42']))
458 | return model
459 |
460 |
461 | def drn_c_58(pretrained=False, **kwargs):
462 | model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
463 | if pretrained:
464 | model.load_state_dict(model_zoo.load_url(model_urls['drn-c-58']))
465 | return model
466 |
467 |
468 | def drn_d_22(pretrained=False, **kwargs):
469 | model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', **kwargs)
470 | if pretrained:
471 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-22'], 'DSAEncoder'))
472 | return model
473 |
474 |
475 | def drn_d_24(pretrained=False, **kwargs):
476 | model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', **kwargs)
477 | if pretrained:
478 |         model.load_state_dict(model_zoo.load_url(model_urls['drn-d-24'], 'DSAEncoder'))
479 | return model
480 |
481 |
482 | def drn_d_38(pretrained=False, **kwargs):
483 | model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
484 | if pretrained:
485 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-38'], 'DSAEncoder'))
486 | return model
487 |
488 |
489 | def drn_d_40(pretrained=False, **kwargs):
490 | model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
491 | if pretrained:
492 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-40'], 'DSAEncoder'))
493 | return model
494 |
495 |
496 | def drn_d_54(pretrained=False, **kwargs):
497 | model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
498 | if pretrained:
499 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-54'], 'DSAEncoder'))
500 | return model
501 |
502 |
503 | def drn_d_56(pretrained=False, **kwargs):
504 | model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
505 | if pretrained:
506 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-56'], 'DSAEncoder'))
507 | return model
508 |
509 |
510 | def drn_d_105(pretrained=False, **kwargs):
511 | model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', **kwargs)
512 | if pretrained:
513 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-105'], 'DSAEncoder'))
514 | return model
515 |
516 |
517 | def drn_d_107(pretrained=False, **kwargs):
518 | model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 2, 2], arch='D', **kwargs)
519 | if pretrained:
520 | model.load_state_dict(model_zoo.load_url(model_urls['drn-d-107'], 'DSAEncoder'))
521 | return model
522 |
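# Example usage (a minimal sketch, not the training wiring in train.py):
# the DSA module is obtained by wrapping the DRN backbone defined above
# with the AutoED encoder/decoder.  With pretrained=True, drn_d_22
# downloads drn_d_22-4bd2f8ea.pth into the local DSAEncoder directory.
if __name__ == '__main__':
    backbone = drn_d_22(pretrained=False)   # DRN-D-22 backbone
    dsa = AutoED(backbone)                  # encoder/decoder attention network
    dummy = torch.randn(1, 3, 192, 640)     # loadSize used in the README
    attn = dsa(dummy)                       # sigmoid attention map in [0, 1]
    print(attn.shape)                       # torch.Size([1, 1, 192, 640])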
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) Microsoft Corporation.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # S2R-DepthNet: Learning a Generalizable Depth-specific Structural Representation
2 |
3 | This is the official PyTorch implementation of the paper [***S2R-DepthNet: Learning a Generalizable Depth-specific Structural Representation***](https://arxiv.org/pdf/2104.00877v2.pdf), ***CVPR 2021 (Oral), Xiaotian Chen, Yuwang Wang, Xuejin Chen, and Wenjun Zeng.***
4 |
5 |
6 | ## Citation
7 |
8 | ```
9 | @inproceedings{Chen2021S2R-DepthNet,
10 | title = {S2R-DepthNet: Learning a Generalizable Depth-specific Structural Representation},
11 | author = {Chen, Xiaotian and Wang, Yuwang and Chen, Xuejin and Zeng, Wenjun},
12 | booktitle = {IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
13 | year = {2021}
14 | }
15 | ```
16 |
17 | ## Introduction
18 | Humans can infer the 3D geometry of a scene from a sketch instead of a realistic image, which indicates that spatial structure plays a fundamental role in understanding the depth of scenes. We are the first to explore the learning of a depth-specific structural representation, which captures the essential features for depth estimation and ignores irrelevant style information. Our S2R-DepthNet (Synthetic to Real DepthNet) generalizes well to unseen real-world data even though it is trained only on synthetic data. S2R-DepthNet consists of: a) a Structure Extraction (STE) module, which extracts a domain-invariant structural representation from an image by disentangling the image into domain-invariant structure and domain-specific style components; b) a Depth-specific Attention (DSA) module, which learns task-specific knowledge to suppress depth-irrelevant structures for better depth estimation and generalization; and c) a Depth Prediction (DP) module, which predicts depth from the depth-specific representation. Without access to any real-world images, our method even outperforms state-of-the-art unsupervised domain adaptation methods that use real-world images of the target domain for training. In addition, when using a small amount of labeled real-world data, we achieve state-of-the-art performance under the semi-supervised setting.
19 |
20 | #### The following figure shows the overview of S2R-DepthNet.
21 | 
22 |
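The sketch below illustrates how the three modules are chained at inference time. The class and variable names are placeholders (the actual implementations live in `modules.py` and `DSAModules.py`, and the exact wiring is in `train.py`/`test.py`); in particular, fusing the structure map with the DSA attention map by element-wise multiplication is an assumption based on the description above, not a copy of the repository code.

```python
import torch.nn as nn

class S2RDepthNetSketch(nn.Module):
    """Hypothetical wrapper showing the STE -> DSA -> DP data flow."""
    def __init__(self, struct_encoder, struct_decoder, dsa_module, depth_net):
        super().__init__()
        self.struct_encoder = struct_encoder  # STE encoder: image -> structure code
        self.struct_decoder = struct_decoder  # STE decoder: code -> structure map
        self.dsa_module = dsa_module          # DSA: image -> depth-specific attention map
        self.depth_net = depth_net            # DP: attended structure -> depth map

    def forward(self, image):
        structure = self.struct_decoder(self.struct_encoder(image))
        attention = self.dsa_module(image)
        # The attention map suppresses depth-irrelevant structures
        # before depth prediction.
        return self.depth_net(structure * attention)
```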
23 | #### Examples of Depth-specific Structural Representation.
24 |

25 |
26 |
27 | ## Usage
28 | ### Dependencies
29 | - [Python 3.6.9](https://www.python.org/downloads/)
30 | - [PyTorch(1.7.1)](https://pytorch.org/)
31 | - [torchvision(0.8.2+cu101)](https://pypi.org/project/torchvision/)
32 | - [TensorboardX(2.1)](https://pypi.org/project/tensorboardX/)
33 | - [matplotlib(3.3.4)](https://matplotlib.org/)
34 | - [Pillow(8.1.2)](https://pypi.org/project/Pillow/)
35 |
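The packages above can be installed in one go, for example as follows (the exact pins are inferred from the versions listed; the `+cu101` build of torchvision requires the matching CUDA 10.1 wheels from the PyTorch download page):

    pip install torch==1.7.1 torchvision==0.8.2
    pip install tensorboardX==2.1 matplotlib==3.3.4 Pillow==8.1.2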
36 | ## Datasets
37 | The outdoor synthetic dataset is [vKITTI](http://www.europe.naverlabs.com/Research/Computer-Vision/Proxy-Virtual-Worlds), and the outdoor real dataset is [KITTI](http://www.cvlibs.net/datasets/kitti/).
38 |
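The split files under `datasets/` contain one sample per line in the form `<rgb path> <depth path>`, relative to the dataset root passed via `--syn_root`/`--root`. A minimal sketch of how such a file can be parsed (the repository's own loader is `loaddata.py`; the helper below is only illustrative):

```python
def read_split(datafile, root):
    """Parse a datasets/<name>/<split>.txt file into (rgb, depth) path pairs."""
    pairs = []
    with open(datafile) as f:
        for line in f:
            if not line.strip():
                continue
            rgb, depth = line.split()
            pairs.append((f"{root}/{rgb}", f"{root}/{depth}"))
    return pairs

# e.g. read_split("datasets/vkitti/train.txt", "/path/to/vkitti")
```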
39 | ## TODO
40 |
41 | - [x] Training Structure Encoder
42 | ## Pretrained Models
43 | We also provide our trained models for inference (outdoor and indoor scenes). [Models Link](https://1drv.ms/u/s!AlwUS_9Kpa8qgkb9ldQbMAMgNd_I?e=aX2PWY)
44 |
45 | ### Train
46 | As an example, use the following commands to train S2R-DepthNet on vKITTI.
47 | #### *Train Structure Decoder*
48 |
49 | python train.py --syn_dataset VKITTI \
50 | --syn_root "the path of vKITTI dataset" \
51 | --syn_train_datafile datasets/vkitti/train.txt \
52 | --batchSize 32 \
53 | --loadSize 192 640 \
54 | --Shared_Struct_Encoder_path "the path of pretrained Struct encoder(.pth)" \
55 | --train_stage TrainStructDecoder
56 |
57 | #### *Train DSA Module and DP module*
58 |
59 | python train.py --syn_dataset VKITTI \
60 | --syn_root "the path of vKITTI dataset" \
61 | --syn_train_datafile datasets/vkitti/train.txt \
62 | --batchSize 32 \
63 | --loadSize 192 640 \
64 | --Shared_Struct_Encoder_path "the path of pretrained Struct encoder(.pth)" \
65 | --Struct_Decoder_path "the path of pretrained Structure decoder(.pth)" \
66 | --train_stage TrainDSAandDPModule
67 |
68 | ### Evaluation
69 | Use the following command to evaluate the trained S2R-DepthNet on KITTI test data.
70 |
71 | python test.py --dataset KITTI --root "the path of kitti dataset" --test_datafile datasets/kitti/test.txt --loadSize 192 640 --Shared_Struct_Encoder_path "the path of pretrained Struct encoder(.pth)" --Struct_Decoder_path "the path of pretrained Structure decoder(.pth)" --DSAModle_path "the path of pretrained DSAModle(.pth)" --DepthNet_path "the path of pretrained DepthNet(.pth)" --out_dir "Path to save results"
72 |
73 | Use the following command to evaluate the trained S2R-DepthNet on NYUD-v2 test data.
74 |
75 | python test.py --dataset NYUD_V2 --root "the path of NYUD_V2 dataset" --test_datafile datasets/nyudv2/nyu2_test.csv --loadSize 192 256 --Shared_Struct_Encoder_path "the path of pretrained Struct encoder(.pth)" --Struct_Decoder_path "the path of pretrained Structure decoder(.pth)" --DSAModle_path "the path of pretrained DSAModle(.pth)" --DepthNet_path "the path of pretrained DepthNet(.pth)" --out_dir "Path to save results"
76 |
77 | ## Acknowledgement
78 | We borrowed code from [GASDA](https://github.com/sshan-zhao/GASDA) and [VisualizationOC](https://github.com/JunjH/Visualizing-CNNs-for-monocular-depth-estimation).
79 |
80 |
81 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ## Security
4 |
5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
6 |
7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below.
8 |
9 | ## Reporting Security Issues
10 |
11 | **Please do not report security vulnerabilities through public GitHub issues.**
12 |
13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report).
14 |
15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc).
16 |
17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).
18 |
19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
20 |
21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
22 | * Full paths of source file(s) related to the manifestation of the issue
23 | * The location of the affected source code (tag/branch/commit or direct URL)
24 | * Any special configuration required to reproduce the issue
25 | * Step-by-step instructions to reproduce the issue
26 | * Proof-of-concept or exploit code (if possible)
27 | * Impact of the issue, including how an attacker might exploit the issue
28 |
29 | This information will help us triage your report more quickly.
30 |
31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs.
32 |
33 | ## Preferred Languages
34 |
35 | We prefer all communications to be in English.
36 |
37 | ## Policy
38 |
39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd).
40 |
41 |
--------------------------------------------------------------------------------
/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # TODO: The maintainer of this repo has not yet edited this file
2 |
3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project?
4 |
5 | - **No CSS support:** Fill out this template with information about how to file issues and get help.
6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/spot](https://aka.ms/spot). CSS will work with/help you to determine next steps. More details also available at [aka.ms/onboardsupport](https://aka.ms/onboardsupport).
7 | - **Not sure?** Fill out a SPOT intake as though the answer were "Yes". CSS will help you decide.
8 |
9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.*
10 |
11 | # Support
12 |
13 | ## How to file issues and get help
14 |
15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing
16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or
17 | feature request as a new Issue.
18 |
19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE
20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER
21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**.
22 |
23 | ## Microsoft Support Policy
24 |
25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above.
26 |
--------------------------------------------------------------------------------
/datasets/vkitti/test.txt:
--------------------------------------------------------------------------------
1 | rgb/0006/clone/00051.png depth/0006/clone/00051.png
2 | rgb/0006/clone/00163.png depth/0006/clone/00163.png
3 | rgb/0006/clone/00150.png depth/0006/clone/00150.png
4 | rgb/0006/clone/00104.png depth/0006/clone/00104.png
5 | rgb/0006/clone/00208.png depth/0006/clone/00208.png
6 | rgb/0006/clone/00238.png depth/0006/clone/00238.png
7 | rgb/0006/clone/00228.png depth/0006/clone/00228.png
8 | rgb/0006/clone/00241.png depth/0006/clone/00241.png
9 | rgb/0006/15-deg-left/00011.png depth/0006/15-deg-left/00011.png
10 | rgb/0006/15-deg-left/00208.png depth/0006/15-deg-left/00208.png
11 | rgb/0006/15-deg-left/00181.png depth/0006/15-deg-left/00181.png
12 | rgb/0006/sunset/00157.png depth/0006/sunset/00157.png
13 | rgb/0006/sunset/00190.png depth/0006/sunset/00190.png
14 | rgb/0006/sunset/00173.png depth/0006/sunset/00173.png
15 | rgb/0006/sunset/00222.png depth/0006/sunset/00222.png
16 | rgb/0006/sunset/00023.png depth/0006/sunset/00023.png
17 | rgb/0006/sunset/00207.png depth/0006/sunset/00207.png
18 | rgb/0006/sunset/00153.png depth/0006/sunset/00153.png
19 | rgb/0006/sunset/00169.png depth/0006/sunset/00169.png
20 | rgb/0006/rain/00067.png depth/0006/rain/00067.png
21 | rgb/0006/rain/00032.png depth/0006/rain/00032.png
22 | rgb/0006/rain/00014.png depth/0006/rain/00014.png
23 | rgb/0006/rain/00061.png depth/0006/rain/00061.png
24 | rgb/0006/morning/00258.png depth/0006/morning/00258.png
25 | rgb/0006/morning/00212.png depth/0006/morning/00212.png
26 | rgb/0006/morning/00032.png depth/0006/morning/00032.png
27 | rgb/0006/morning/00172.png depth/0006/morning/00172.png
28 | rgb/0006/morning/00193.png depth/0006/morning/00193.png
29 | rgb/0006/morning/00071.png depth/0006/morning/00071.png
30 | rgb/0006/morning/00250.png depth/0006/morning/00250.png
31 | rgb/0006/15-deg-right/00245.png depth/0006/15-deg-right/00245.png
32 | rgb/0006/15-deg-right/00173.png depth/0006/15-deg-right/00173.png
33 | rgb/0006/15-deg-right/00162.png depth/0006/15-deg-right/00162.png
34 | rgb/0006/15-deg-right/00149.png depth/0006/15-deg-right/00149.png
35 | rgb/0006/15-deg-right/00074.png depth/0006/15-deg-right/00074.png
36 | rgb/0006/15-deg-right/00075.png depth/0006/15-deg-right/00075.png
37 | rgb/0006/15-deg-right/00011.png depth/0006/15-deg-right/00011.png
38 | rgb/0006/15-deg-right/00215.png depth/0006/15-deg-right/00215.png
39 | rgb/0006/15-deg-right/00182.png depth/0006/15-deg-right/00182.png
40 | rgb/0006/15-deg-right/00105.png depth/0006/15-deg-right/00105.png
41 | rgb/0006/15-deg-right/00243.png depth/0006/15-deg-right/00243.png
42 | rgb/0006/30-deg-right/00028.png depth/0006/30-deg-right/00028.png
43 | rgb/0006/30-deg-right/00067.png depth/0006/30-deg-right/00067.png
44 | rgb/0006/30-deg-right/00120.png depth/0006/30-deg-right/00120.png
45 | rgb/0006/30-deg-right/00229.png depth/0006/30-deg-right/00229.png
46 | rgb/0006/30-deg-right/00236.png depth/0006/30-deg-right/00236.png
47 | rgb/0006/30-deg-right/00030.png depth/0006/30-deg-right/00030.png
48 | rgb/0006/fog/00160.png depth/0006/fog/00160.png
49 | rgb/0006/30-deg-left/00210.png depth/0006/30-deg-left/00210.png
50 | rgb/0006/30-deg-left/00051.png depth/0006/30-deg-left/00051.png
51 | rgb/0006/30-deg-left/00111.png depth/0006/30-deg-left/00111.png
52 | rgb/0006/30-deg-left/00094.png depth/0006/30-deg-left/00094.png
53 | rgb/0006/30-deg-left/00168.png depth/0006/30-deg-left/00168.png
54 | rgb/0006/30-deg-left/00030.png depth/0006/30-deg-left/00030.png
55 | rgb/0006/30-deg-left/00263.png depth/0006/30-deg-left/00263.png
56 | rgb/0006/30-deg-left/00071.png depth/0006/30-deg-left/00071.png
57 | rgb/0006/30-deg-left/00262.png depth/0006/30-deg-left/00262.png
58 | rgb/0006/overcast/00008.png depth/0006/overcast/00008.png
59 | rgb/0006/overcast/00220.png depth/0006/overcast/00220.png
60 | rgb/0006/overcast/00064.png depth/0006/overcast/00064.png
61 | rgb/0006/overcast/00147.png depth/0006/overcast/00147.png
62 | rgb/0006/overcast/00237.png depth/0006/overcast/00237.png
63 | rgb/0006/overcast/00241.png depth/0006/overcast/00241.png
64 | rgb/0018/clone/00176.png depth/0018/clone/00176.png
65 | rgb/0018/clone/00106.png depth/0018/clone/00106.png
66 | rgb/0018/clone/00080.png depth/0018/clone/00080.png
67 | rgb/0018/clone/00067.png depth/0018/clone/00067.png
68 | rgb/0018/clone/00029.png depth/0018/clone/00029.png
69 | rgb/0018/clone/00306.png depth/0018/clone/00306.png
70 | rgb/0018/clone/00070.png depth/0018/clone/00070.png
71 | rgb/0018/clone/00130.png depth/0018/clone/00130.png
72 | rgb/0018/15-deg-left/00318.png depth/0018/15-deg-left/00318.png
73 | rgb/0018/15-deg-left/00244.png depth/0018/15-deg-left/00244.png
74 | rgb/0018/15-deg-left/00301.png depth/0018/15-deg-left/00301.png
75 | rgb/0018/15-deg-left/00102.png depth/0018/15-deg-left/00102.png
76 | rgb/0018/15-deg-left/00315.png depth/0018/15-deg-left/00315.png
77 | rgb/0018/15-deg-left/00127.png depth/0018/15-deg-left/00127.png
78 | rgb/0018/15-deg-left/00177.png depth/0018/15-deg-left/00177.png
79 | rgb/0018/15-deg-left/00132.png depth/0018/15-deg-left/00132.png
80 | rgb/0018/15-deg-left/00118.png depth/0018/15-deg-left/00118.png
81 | rgb/0018/15-deg-left/00297.png depth/0018/15-deg-left/00297.png
82 | rgb/0018/15-deg-left/00105.png depth/0018/15-deg-left/00105.png
83 | rgb/0018/15-deg-left/00021.png depth/0018/15-deg-left/00021.png
84 | rgb/0018/15-deg-left/00062.png depth/0018/15-deg-left/00062.png
85 | rgb/0018/sunset/00097.png depth/0018/sunset/00097.png
86 | rgb/0018/sunset/00266.png depth/0018/sunset/00266.png
87 | rgb/0018/sunset/00302.png depth/0018/sunset/00302.png
88 | rgb/0018/sunset/00115.png depth/0018/sunset/00115.png
89 | rgb/0018/sunset/00140.png depth/0018/sunset/00140.png
90 | rgb/0018/sunset/00146.png depth/0018/sunset/00146.png
91 | rgb/0018/sunset/00207.png depth/0018/sunset/00207.png
92 | rgb/0018/sunset/00029.png depth/0018/sunset/00029.png
93 | rgb/0018/sunset/00211.png depth/0018/sunset/00211.png
94 | rgb/0018/sunset/00061.png depth/0018/sunset/00061.png
95 | rgb/0018/rain/00253.png depth/0018/rain/00253.png
96 | rgb/0018/rain/00292.png depth/0018/rain/00292.png
97 | rgb/0018/rain/00238.png depth/0018/rain/00238.png
98 | rgb/0018/rain/00191.png depth/0018/rain/00191.png
99 | rgb/0018/rain/00272.png depth/0018/rain/00272.png
100 | rgb/0018/rain/00130.png depth/0018/rain/00130.png
101 | rgb/0018/rain/00250.png depth/0018/rain/00250.png
102 | rgb/0018/morning/00176.png depth/0018/morning/00176.png
103 | rgb/0018/morning/00024.png depth/0018/morning/00024.png
104 | rgb/0018/morning/00076.png depth/0018/morning/00076.png
105 | rgb/0018/morning/00214.png depth/0018/morning/00214.png
106 | rgb/0018/morning/00110.png depth/0018/morning/00110.png
107 | rgb/0018/morning/00316.png depth/0018/morning/00316.png
108 | rgb/0018/morning/00131.png depth/0018/morning/00131.png
109 | rgb/0018/morning/00105.png depth/0018/morning/00105.png
110 | rgb/0018/15-deg-right/00148.png depth/0018/15-deg-right/00148.png
111 | rgb/0018/15-deg-right/00206.png depth/0018/15-deg-right/00206.png
112 | rgb/0018/15-deg-right/00210.png depth/0018/15-deg-right/00210.png
113 | rgb/0018/15-deg-right/00181.png depth/0018/15-deg-right/00181.png
114 | rgb/0018/15-deg-right/00216.png depth/0018/15-deg-right/00216.png
115 | rgb/0018/15-deg-right/00084.png depth/0018/15-deg-right/00084.png
116 | rgb/0018/15-deg-right/00330.png depth/0018/15-deg-right/00330.png
117 | rgb/0018/15-deg-right/00042.png depth/0018/15-deg-right/00042.png
118 | rgb/0018/15-deg-right/00130.png depth/0018/15-deg-right/00130.png
119 | rgb/0018/15-deg-right/00319.png depth/0018/15-deg-right/00319.png
120 | rgb/0018/30-deg-right/00311.png depth/0018/30-deg-right/00311.png
121 | rgb/0018/30-deg-right/00072.png depth/0018/30-deg-right/00072.png
122 | rgb/0018/30-deg-right/00217.png depth/0018/30-deg-right/00217.png
123 | rgb/0018/30-deg-right/00318.png depth/0018/30-deg-right/00318.png
124 | rgb/0018/30-deg-right/00036.png depth/0018/30-deg-right/00036.png
125 | rgb/0018/30-deg-right/00282.png depth/0018/30-deg-right/00282.png
126 | rgb/0018/30-deg-right/00123.png depth/0018/30-deg-right/00123.png
127 | rgb/0018/30-deg-right/00204.png depth/0018/30-deg-right/00204.png
128 | rgb/0018/30-deg-right/00035.png depth/0018/30-deg-right/00035.png
129 | rgb/0018/fog/00203.png depth/0018/fog/00203.png
130 | rgb/0018/fog/00016.png depth/0018/fog/00016.png
131 | rgb/0018/fog/00141.png depth/0018/fog/00141.png
132 | rgb/0018/fog/00275.png depth/0018/fog/00275.png
133 | rgb/0018/fog/00299.png depth/0018/fog/00299.png
134 | rgb/0018/fog/00192.png depth/0018/fog/00192.png
135 | rgb/0018/30-deg-left/00046.png depth/0018/30-deg-left/00046.png
136 | rgb/0018/30-deg-left/00253.png depth/0018/30-deg-left/00253.png
137 | rgb/0018/30-deg-left/00223.png depth/0018/30-deg-left/00223.png
138 | rgb/0018/30-deg-left/00038.png depth/0018/30-deg-left/00038.png
139 | rgb/0018/30-deg-left/00009.png depth/0018/30-deg-left/00009.png
140 | rgb/0018/30-deg-left/00104.png depth/0018/30-deg-left/00104.png
141 | rgb/0018/30-deg-left/00087.png depth/0018/30-deg-left/00087.png
142 | rgb/0018/30-deg-left/00127.png depth/0018/30-deg-left/00127.png
143 | rgb/0018/30-deg-left/00193.png depth/0018/30-deg-left/00193.png
144 | rgb/0018/30-deg-left/00133.png depth/0018/30-deg-left/00133.png
145 | rgb/0018/30-deg-left/00069.png depth/0018/30-deg-left/00069.png
146 | rgb/0018/30-deg-left/00235.png depth/0018/30-deg-left/00235.png
147 | rgb/0018/30-deg-left/00108.png depth/0018/30-deg-left/00108.png
148 | rgb/0018/overcast/00206.png depth/0018/overcast/00206.png
149 | rgb/0018/overcast/00210.png depth/0018/overcast/00210.png
150 | rgb/0018/overcast/00066.png depth/0018/overcast/00066.png
151 | rgb/0018/overcast/00303.png depth/0018/overcast/00303.png
152 | rgb/0018/overcast/00174.png depth/0018/overcast/00174.png
153 | rgb/0018/overcast/00014.png depth/0018/overcast/00014.png
154 | rgb/0018/overcast/00132.png depth/0018/overcast/00132.png
155 | rgb/0018/overcast/00168.png depth/0018/overcast/00168.png
156 | rgb/0018/overcast/00035.png depth/0018/overcast/00035.png
157 | rgb/0001/clone/00085.png depth/0001/clone/00085.png
158 | rgb/0001/clone/00082.png depth/0001/clone/00082.png
159 | rgb/0001/clone/00161.png depth/0001/clone/00161.png
160 | rgb/0001/clone/00074.png depth/0001/clone/00074.png
161 | rgb/0001/clone/00356.png depth/0001/clone/00356.png
162 | rgb/0001/clone/00355.png depth/0001/clone/00355.png
163 | rgb/0001/clone/00261.png depth/0001/clone/00261.png
164 | rgb/0001/clone/00362.png depth/0001/clone/00362.png
165 | rgb/0001/clone/00284.png depth/0001/clone/00284.png
166 | rgb/0001/clone/00349.png depth/0001/clone/00349.png
167 | rgb/0001/15-deg-left/00025.png depth/0001/15-deg-left/00025.png
168 | rgb/0001/15-deg-left/00221.png depth/0001/15-deg-left/00221.png
169 | rgb/0001/15-deg-left/00218.png depth/0001/15-deg-left/00218.png
170 | rgb/0001/15-deg-left/00018.png depth/0001/15-deg-left/00018.png
171 | rgb/0001/15-deg-left/00320.png depth/0001/15-deg-left/00320.png
172 | rgb/0001/15-deg-left/00216.png depth/0001/15-deg-left/00216.png
173 | rgb/0001/15-deg-left/00303.png depth/0001/15-deg-left/00303.png
174 | rgb/0001/15-deg-left/00359.png depth/0001/15-deg-left/00359.png
175 | rgb/0001/15-deg-left/00283.png depth/0001/15-deg-left/00283.png
176 | rgb/0001/15-deg-left/00315.png depth/0001/15-deg-left/00315.png
177 | rgb/0001/15-deg-left/00410.png depth/0001/15-deg-left/00410.png
178 | rgb/0001/15-deg-left/00017.png depth/0001/15-deg-left/00017.png
179 | rgb/0001/sunset/00080.png depth/0001/sunset/00080.png
180 | rgb/0001/sunset/00038.png depth/0001/sunset/00038.png
181 | rgb/0001/sunset/00121.png depth/0001/sunset/00121.png
182 | rgb/0001/sunset/00376.png depth/0001/sunset/00376.png
183 | rgb/0001/sunset/00207.png depth/0001/sunset/00207.png
184 | rgb/0001/sunset/00029.png depth/0001/sunset/00029.png
185 | rgb/0001/sunset/00132.png depth/0001/sunset/00132.png
186 | rgb/0001/sunset/00167.png depth/0001/sunset/00167.png
187 | rgb/0001/rain/00294.png depth/0001/rain/00294.png
188 | rgb/0001/rain/00239.png depth/0001/rain/00239.png
189 | rgb/0001/rain/00266.png depth/0001/rain/00266.png
190 | rgb/0001/rain/00268.png depth/0001/rain/00268.png
191 | rgb/0001/rain/00270.png depth/0001/rain/00270.png
192 | rgb/0001/rain/00238.png depth/0001/rain/00238.png
193 | rgb/0001/rain/00342.png depth/0001/rain/00342.png
194 | rgb/0001/rain/00055.png depth/0001/rain/00055.png
195 | rgb/0001/morning/00423.png depth/0001/morning/00423.png
196 | rgb/0001/morning/00429.png depth/0001/morning/00429.png
197 | rgb/0001/morning/00080.png depth/0001/morning/00080.png
198 | rgb/0001/morning/00408.png depth/0001/morning/00408.png
199 | rgb/0001/morning/00068.png depth/0001/morning/00068.png
200 | rgb/0001/morning/00406.png depth/0001/morning/00406.png
201 | rgb/0001/morning/00167.png depth/0001/morning/00167.png
202 | rgb/0001/morning/00133.png depth/0001/morning/00133.png
203 | rgb/0001/morning/00436.png depth/0001/morning/00436.png
204 | rgb/0001/morning/00071.png depth/0001/morning/00071.png
205 | rgb/0001/15-deg-right/00415.png depth/0001/15-deg-right/00415.png
206 | rgb/0001/15-deg-right/00325.png depth/0001/15-deg-right/00325.png
207 | rgb/0001/15-deg-right/00163.png depth/0001/15-deg-right/00163.png
208 | rgb/0001/15-deg-right/00089.png depth/0001/15-deg-right/00089.png
209 | rgb/0001/15-deg-right/00003.png depth/0001/15-deg-right/00003.png
210 | rgb/0001/15-deg-right/00257.png depth/0001/15-deg-right/00257.png
211 | rgb/0001/15-deg-right/00050.png depth/0001/15-deg-right/00050.png
212 | rgb/0001/15-deg-right/00054.png depth/0001/15-deg-right/00054.png
213 | rgb/0001/15-deg-right/00069.png depth/0001/15-deg-right/00069.png
214 | rgb/0001/15-deg-right/00037.png depth/0001/15-deg-right/00037.png
215 | rgb/0001/15-deg-right/00255.png depth/0001/15-deg-right/00255.png
216 | rgb/0001/30-deg-right/00253.png depth/0001/30-deg-right/00253.png
217 | rgb/0001/30-deg-right/00429.png depth/0001/30-deg-right/00429.png
218 | rgb/0001/30-deg-right/00159.png depth/0001/30-deg-right/00159.png
219 | rgb/0001/30-deg-right/00325.png depth/0001/30-deg-right/00325.png
220 | rgb/0001/30-deg-right/00165.png depth/0001/30-deg-right/00165.png
221 | rgb/0001/30-deg-right/00160.png depth/0001/30-deg-right/00160.png
222 | rgb/0001/30-deg-right/00368.png depth/0001/30-deg-right/00368.png
223 | rgb/0001/30-deg-right/00029.png depth/0001/30-deg-right/00029.png
224 | rgb/0001/30-deg-right/00390.png depth/0001/30-deg-right/00390.png
225 | rgb/0001/30-deg-right/00017.png depth/0001/30-deg-right/00017.png
226 | rgb/0001/30-deg-right/00341.png depth/0001/30-deg-right/00341.png
227 | rgb/0001/fog/00326.png depth/0001/fog/00326.png
228 | rgb/0001/fog/00246.png depth/0001/fog/00246.png
229 | rgb/0001/fog/00090.png depth/0001/fog/00090.png
230 | rgb/0001/fog/00063.png depth/0001/fog/00063.png
231 | rgb/0001/fog/00233.png depth/0001/fog/00233.png
232 | rgb/0001/fog/00419.png depth/0001/fog/00419.png
233 | rgb/0001/fog/00352.png depth/0001/fog/00352.png
234 | rgb/0001/fog/00244.png depth/0001/fog/00244.png
235 | rgb/0001/fog/00404.png depth/0001/fog/00404.png
236 | rgb/0001/fog/00273.png depth/0001/fog/00273.png
237 | rgb/0001/fog/00019.png depth/0001/fog/00019.png
238 | rgb/0001/fog/00104.png depth/0001/fog/00104.png
239 | rgb/0001/fog/00344.png depth/0001/fog/00344.png
240 | rgb/0001/fog/00367.png depth/0001/fog/00367.png
241 | rgb/0001/fog/00312.png depth/0001/fog/00312.png
242 | rgb/0001/fog/00227.png depth/0001/fog/00227.png
243 | rgb/0001/30-deg-left/00348.png depth/0001/30-deg-left/00348.png
244 | rgb/0001/30-deg-left/00067.png depth/0001/30-deg-left/00067.png
245 | rgb/0001/30-deg-left/00038.png depth/0001/30-deg-left/00038.png
246 | rgb/0001/30-deg-left/00347.png depth/0001/30-deg-left/00347.png
247 | rgb/0001/30-deg-left/00345.png depth/0001/30-deg-left/00345.png
248 | rgb/0001/30-deg-left/00222.png depth/0001/30-deg-left/00222.png
249 | rgb/0001/30-deg-left/00270.png depth/0001/30-deg-left/00270.png
250 | rgb/0001/30-deg-left/00014.png depth/0001/30-deg-left/00014.png
251 | rgb/0001/30-deg-left/00134.png depth/0001/30-deg-left/00134.png
252 | rgb/0001/30-deg-left/00015.png depth/0001/30-deg-left/00015.png
253 | rgb/0001/overcast/00403.png depth/0001/overcast/00403.png
254 | rgb/0001/overcast/00377.png depth/0001/overcast/00377.png
255 | rgb/0001/overcast/00393.png depth/0001/overcast/00393.png
256 | rgb/0001/overcast/00120.png depth/0001/overcast/00120.png
257 | rgb/0001/overcast/00300.png depth/0001/overcast/00300.png
258 | rgb/0001/overcast/00003.png depth/0001/overcast/00003.png
259 | rgb/0001/overcast/00301.png depth/0001/overcast/00301.png
260 | rgb/0001/overcast/00141.png depth/0001/overcast/00141.png
261 | rgb/0002/clone/00152.png depth/0002/clone/00152.png
262 | rgb/0002/clone/00188.png depth/0002/clone/00188.png
263 | rgb/0002/clone/00162.png depth/0002/clone/00162.png
264 | rgb/0002/clone/00075.png depth/0002/clone/00075.png
265 | rgb/0002/clone/00032.png depth/0002/clone/00032.png
266 | rgb/0002/clone/00202.png depth/0002/clone/00202.png
267 | rgb/0002/clone/00187.png depth/0002/clone/00187.png
268 | rgb/0002/clone/00084.png depth/0002/clone/00084.png
269 | rgb/0002/clone/00141.png depth/0002/clone/00141.png
270 | rgb/0002/clone/00225.png depth/0002/clone/00225.png
271 | rgb/0002/clone/00201.png depth/0002/clone/00201.png
272 | rgb/0002/clone/00136.png depth/0002/clone/00136.png
273 | rgb/0002/clone/00065.png depth/0002/clone/00065.png
274 | rgb/0002/clone/00035.png depth/0002/clone/00035.png
275 | rgb/0002/15-deg-left/00046.png depth/0002/15-deg-left/00046.png
276 | rgb/0002/15-deg-left/00000.png depth/0002/15-deg-left/00000.png
277 | rgb/0002/15-deg-left/00190.png depth/0002/15-deg-left/00190.png
278 | rgb/0002/15-deg-left/00088.png depth/0002/15-deg-left/00088.png
279 | rgb/0002/15-deg-left/00059.png depth/0002/15-deg-left/00059.png
280 | rgb/0002/15-deg-left/00198.png depth/0002/15-deg-left/00198.png
281 | rgb/0002/15-deg-left/00168.png depth/0002/15-deg-left/00168.png
282 | rgb/0002/15-deg-left/00192.png depth/0002/15-deg-left/00192.png
283 | rgb/0002/15-deg-left/00026.png depth/0002/15-deg-left/00026.png
284 | rgb/0002/15-deg-left/00006.png depth/0002/15-deg-left/00006.png
285 | rgb/0002/15-deg-left/00096.png depth/0002/15-deg-left/00096.png
286 | rgb/0002/sunset/00148.png depth/0002/sunset/00148.png
287 | rgb/0002/sunset/00187.png depth/0002/sunset/00187.png
288 | rgb/0002/rain/00004.png depth/0002/rain/00004.png
289 | rgb/0002/rain/00203.png depth/0002/rain/00203.png
290 | rgb/0002/rain/00220.png depth/0002/rain/00220.png
291 | rgb/0002/rain/00137.png depth/0002/rain/00137.png
292 | rgb/0002/rain/00100.png depth/0002/rain/00100.png
293 | rgb/0002/morning/00157.png depth/0002/morning/00157.png
294 | rgb/0002/morning/00206.png depth/0002/morning/00206.png
295 | rgb/0002/morning/00081.png depth/0002/morning/00081.png
296 | rgb/0002/morning/00060.png depth/0002/morning/00060.png
297 | rgb/0002/morning/00100.png depth/0002/morning/00100.png
298 | rgb/0002/15-deg-right/00176.png depth/0002/15-deg-right/00176.png
299 | rgb/0002/15-deg-right/00076.png depth/0002/15-deg-right/00076.png
300 | rgb/0002/15-deg-right/00221.png depth/0002/15-deg-right/00221.png
301 | rgb/0002/15-deg-right/00045.png depth/0002/15-deg-right/00045.png
302 | rgb/0002/15-deg-right/00055.png depth/0002/15-deg-right/00055.png
303 | rgb/0002/30-deg-right/00112.png depth/0002/30-deg-right/00112.png
304 | rgb/0002/30-deg-right/00107.png depth/0002/30-deg-right/00107.png
305 | rgb/0002/30-deg-right/00069.png depth/0002/30-deg-right/00069.png
306 | rgb/0002/fog/00115.png depth/0002/fog/00115.png
307 | rgb/0002/fog/00104.png depth/0002/fog/00104.png
308 | rgb/0002/fog/00219.png depth/0002/fog/00219.png
309 | rgb/0002/fog/00058.png depth/0002/fog/00058.png
310 | rgb/0002/30-deg-left/00085.png depth/0002/30-deg-left/00085.png
311 | rgb/0002/30-deg-left/00081.png depth/0002/30-deg-left/00081.png
312 | rgb/0002/30-deg-left/00164.png depth/0002/30-deg-left/00164.png
313 | rgb/0002/30-deg-left/00232.png depth/0002/30-deg-left/00232.png
314 | rgb/0002/30-deg-left/00003.png depth/0002/30-deg-left/00003.png
315 | rgb/0002/30-deg-left/00094.png depth/0002/30-deg-left/00094.png
316 | rgb/0002/overcast/00098.png depth/0002/overcast/00098.png
317 | rgb/0002/overcast/00190.png depth/0002/overcast/00190.png
318 | rgb/0002/overcast/00081.png depth/0002/overcast/00081.png
319 | rgb/0002/overcast/00164.png depth/0002/overcast/00164.png
320 | rgb/0002/overcast/00029.png depth/0002/overcast/00029.png
321 | rgb/0002/overcast/00192.png depth/0002/overcast/00192.png
322 | rgb/0002/overcast/00056.png depth/0002/overcast/00056.png
323 | rgb/0002/overcast/00204.png depth/0002/overcast/00204.png
324 | rgb/0002/overcast/00035.png depth/0002/overcast/00035.png
325 | rgb/0020/clone/00640.png depth/0020/clone/00640.png
326 | rgb/0020/clone/00081.png depth/0020/clone/00081.png
327 | rgb/0020/clone/00418.png depth/0020/clone/00418.png
328 | rgb/0020/clone/00439.png depth/0020/clone/00439.png
329 | rgb/0020/clone/00741.png depth/0020/clone/00741.png
330 | rgb/0020/clone/00091.png depth/0020/clone/00091.png
331 | rgb/0020/clone/00712.png depth/0020/clone/00712.png
332 | rgb/0020/clone/00166.png depth/0020/clone/00166.png
333 | rgb/0020/clone/00198.png depth/0020/clone/00198.png
334 | rgb/0020/clone/00363.png depth/0020/clone/00363.png
335 | rgb/0020/clone/00124.png depth/0020/clone/00124.png
336 | rgb/0020/clone/00700.png depth/0020/clone/00700.png
337 | rgb/0020/clone/00748.png depth/0020/clone/00748.png
338 | rgb/0020/clone/00751.png depth/0020/clone/00751.png
339 | rgb/0020/15-deg-left/00046.png depth/0020/15-deg-left/00046.png
340 | rgb/0020/15-deg-left/00594.png depth/0020/15-deg-left/00594.png
341 | rgb/0020/15-deg-left/00732.png depth/0020/15-deg-left/00732.png
342 | rgb/0020/15-deg-left/00212.png depth/0020/15-deg-left/00212.png
343 | rgb/0020/15-deg-left/00488.png depth/0020/15-deg-left/00488.png
344 | rgb/0020/15-deg-left/00630.png depth/0020/15-deg-left/00630.png
345 | rgb/0020/15-deg-left/00601.png depth/0020/15-deg-left/00601.png
346 | rgb/0020/15-deg-left/00578.png depth/0020/15-deg-left/00578.png
347 | rgb/0020/15-deg-left/00628.png depth/0020/15-deg-left/00628.png
348 | rgb/0020/15-deg-left/00691.png depth/0020/15-deg-left/00691.png
349 | rgb/0020/15-deg-left/00453.png depth/0020/15-deg-left/00453.png
350 | rgb/0020/15-deg-left/00370.png depth/0020/15-deg-left/00370.png
351 | rgb/0020/15-deg-left/00776.png depth/0020/15-deg-left/00776.png
352 | rgb/0020/15-deg-left/00582.png depth/0020/15-deg-left/00582.png
353 | rgb/0020/15-deg-left/00168.png depth/0020/15-deg-left/00168.png
354 | rgb/0020/15-deg-left/00521.png depth/0020/15-deg-left/00521.png
355 | rgb/0020/15-deg-left/00773.png depth/0020/15-deg-left/00773.png
356 | rgb/0020/15-deg-left/00714.png depth/0020/15-deg-left/00714.png
357 | rgb/0020/15-deg-left/00481.png depth/0020/15-deg-left/00481.png
358 | rgb/0020/15-deg-left/00561.png depth/0020/15-deg-left/00561.png
359 | rgb/0020/15-deg-left/00047.png depth/0020/15-deg-left/00047.png
360 | rgb/0020/15-deg-left/00396.png depth/0020/15-deg-left/00396.png
361 | rgb/0020/sunset/00805.png depth/0020/sunset/00805.png
362 | rgb/0020/sunset/00514.png depth/0020/sunset/00514.png
363 | rgb/0020/sunset/00592.png depth/0020/sunset/00592.png
364 | rgb/0020/sunset/00557.png depth/0020/sunset/00557.png
365 | rgb/0020/sunset/00163.png depth/0020/sunset/00163.png
366 | rgb/0020/sunset/00183.png depth/0020/sunset/00183.png
367 | rgb/0020/sunset/00686.png depth/0020/sunset/00686.png
368 | rgb/0020/sunset/00715.png depth/0020/sunset/00715.png
369 | rgb/0020/sunset/00411.png depth/0020/sunset/00411.png
370 | rgb/0020/sunset/00768.png depth/0020/sunset/00768.png
371 | rgb/0020/sunset/00682.png depth/0020/sunset/00682.png
372 | rgb/0020/sunset/00146.png depth/0020/sunset/00146.png
373 | rgb/0020/sunset/00644.png depth/0020/sunset/00644.png
374 | rgb/0020/sunset/00338.png depth/0020/sunset/00338.png
375 | rgb/0020/sunset/00056.png depth/0020/sunset/00056.png
376 | rgb/0020/sunset/00664.png depth/0020/sunset/00664.png
377 | rgb/0020/sunset/00297.png depth/0020/sunset/00297.png
378 | rgb/0020/sunset/00093.png depth/0020/sunset/00093.png
379 | rgb/0020/sunset/00706.png depth/0020/sunset/00706.png
380 | rgb/0020/sunset/00618.png depth/0020/sunset/00618.png
381 | rgb/0020/rain/00640.png depth/0020/rain/00640.png
382 | rgb/0020/rain/00324.png depth/0020/rain/00324.png
383 | rgb/0020/rain/00139.png depth/0020/rain/00139.png
384 | rgb/0020/rain/00066.png depth/0020/rain/00066.png
385 | rgb/0020/rain/00126.png depth/0020/rain/00126.png
386 | rgb/0020/rain/00146.png depth/0020/rain/00146.png
387 | rgb/0020/rain/00344.png depth/0020/rain/00344.png
388 | rgb/0020/rain/00440.png depth/0020/rain/00440.png
389 | rgb/0020/rain/00773.png depth/0020/rain/00773.png
390 | rgb/0020/rain/00795.png depth/0020/rain/00795.png
391 | rgb/0020/rain/00777.png depth/0020/rain/00777.png
392 | rgb/0020/rain/00675.png depth/0020/rain/00675.png
393 | rgb/0020/rain/00544.png depth/0020/rain/00544.png
394 | rgb/0020/rain/00319.png depth/0020/rain/00319.png
395 | rgb/0020/rain/00012.png depth/0020/rain/00012.png
396 | rgb/0020/morning/00654.png depth/0020/morning/00654.png
397 | rgb/0020/morning/00473.png depth/0020/morning/00473.png
398 | rgb/0020/morning/00259.png depth/0020/morning/00259.png
399 | rgb/0020/morning/00082.png depth/0020/morning/00082.png
400 | rgb/0020/morning/00090.png depth/0020/morning/00090.png
401 | rgb/0020/morning/00645.png depth/0020/morning/00645.png
402 | rgb/0020/morning/00447.png depth/0020/morning/00447.png
403 | rgb/0020/morning/00336.png depth/0020/morning/00336.png
404 | rgb/0020/morning/00278.png depth/0020/morning/00278.png
405 | rgb/0020/morning/00438.png depth/0020/morning/00438.png
406 | rgb/0020/morning/00691.png depth/0020/morning/00691.png
407 | rgb/0020/morning/00171.png depth/0020/morning/00171.png
408 | rgb/0020/morning/00605.png depth/0020/morning/00605.png
409 | rgb/0020/morning/00435.png depth/0020/morning/00435.png
410 | rgb/0020/morning/00409.png depth/0020/morning/00409.png
411 | rgb/0020/morning/00169.png depth/0020/morning/00169.png
412 | rgb/0020/morning/00669.png depth/0020/morning/00669.png
413 | rgb/0020/morning/00138.png depth/0020/morning/00138.png
414 | rgb/0020/morning/00572.png depth/0020/morning/00572.png
415 | rgb/0020/morning/00598.png depth/0020/morning/00598.png
416 | rgb/0020/15-deg-right/00584.png depth/0020/15-deg-right/00584.png
417 | rgb/0020/15-deg-right/00148.png depth/0020/15-deg-right/00148.png
418 | rgb/0020/15-deg-right/00709.png depth/0020/15-deg-right/00709.png
419 | rgb/0020/15-deg-right/00223.png depth/0020/15-deg-right/00223.png
420 | rgb/0020/15-deg-right/00650.png depth/0020/15-deg-right/00650.png
421 | rgb/0020/15-deg-right/00581.png depth/0020/15-deg-right/00581.png
422 | rgb/0020/15-deg-right/00537.png depth/0020/15-deg-right/00537.png
423 | rgb/0020/15-deg-right/00505.png depth/0020/15-deg-right/00505.png
424 | rgb/0020/15-deg-right/00339.png depth/0020/15-deg-right/00339.png
425 | rgb/0020/15-deg-right/00218.png depth/0020/15-deg-right/00218.png
426 | rgb/0020/15-deg-right/00068.png depth/0020/15-deg-right/00068.png
427 | rgb/0020/15-deg-right/00487.png depth/0020/15-deg-right/00487.png
428 | rgb/0020/15-deg-right/00477.png depth/0020/15-deg-right/00477.png
429 | rgb/0020/15-deg-right/00273.png depth/0020/15-deg-right/00273.png
430 | rgb/0020/15-deg-right/00578.png depth/0020/15-deg-right/00578.png
431 | rgb/0020/15-deg-right/00682.png depth/0020/15-deg-right/00682.png
432 | rgb/0020/15-deg-right/00666.png depth/0020/15-deg-right/00666.png
433 | rgb/0020/15-deg-right/00767.png depth/0020/15-deg-right/00767.png
434 | rgb/0020/15-deg-right/00117.png depth/0020/15-deg-right/00117.png
435 | rgb/0020/15-deg-right/00522.png depth/0020/15-deg-right/00522.png
436 | rgb/0020/15-deg-right/00308.png depth/0020/15-deg-right/00308.png
437 | rgb/0020/15-deg-right/00127.png depth/0020/15-deg-right/00127.png
438 | rgb/0020/15-deg-right/00029.png depth/0020/15-deg-right/00029.png
439 | rgb/0020/15-deg-right/00497.png depth/0020/15-deg-right/00497.png
440 | rgb/0020/15-deg-right/00026.png depth/0020/15-deg-right/00026.png
441 | rgb/0020/15-deg-right/00265.png depth/0020/15-deg-right/00265.png
442 | rgb/0020/15-deg-right/00802.png depth/0020/15-deg-right/00802.png
443 | rgb/0020/15-deg-right/00396.png depth/0020/15-deg-right/00396.png
444 | rgb/0020/30-deg-right/00622.png depth/0020/30-deg-right/00622.png
445 | rgb/0020/30-deg-right/00415.png depth/0020/30-deg-right/00415.png
446 | rgb/0020/30-deg-right/00514.png depth/0020/30-deg-right/00514.png
447 | rgb/0020/30-deg-right/00333.png depth/0020/30-deg-right/00333.png
448 | rgb/0020/30-deg-right/00611.png depth/0020/30-deg-right/00611.png
449 | rgb/0020/30-deg-right/00223.png depth/0020/30-deg-right/00223.png
450 | rgb/0020/30-deg-right/00472.png depth/0020/30-deg-right/00472.png
451 | rgb/0020/30-deg-right/00526.png depth/0020/30-deg-right/00526.png
452 | rgb/0020/30-deg-right/00734.png depth/0020/30-deg-right/00734.png
453 | rgb/0020/30-deg-right/00484.png depth/0020/30-deg-right/00484.png
454 | rgb/0020/30-deg-right/00120.png depth/0020/30-deg-right/00120.png
455 | rgb/0020/30-deg-right/00758.png depth/0020/30-deg-right/00758.png
456 | rgb/0020/30-deg-right/00556.png depth/0020/30-deg-right/00556.png
457 | rgb/0020/30-deg-right/00014.png depth/0020/30-deg-right/00014.png
458 | rgb/0020/30-deg-right/00198.png depth/0020/30-deg-right/00198.png
459 | rgb/0020/30-deg-right/00813.png depth/0020/30-deg-right/00813.png
460 | rgb/0020/30-deg-right/00797.png depth/0020/30-deg-right/00797.png
461 | rgb/0020/30-deg-right/00748.png depth/0020/30-deg-right/00748.png
462 | rgb/0020/30-deg-right/00209.png depth/0020/30-deg-right/00209.png
463 | rgb/0020/30-deg-right/00047.png depth/0020/30-deg-right/00047.png
464 | rgb/0020/30-deg-right/00237.png depth/0020/30-deg-right/00237.png
465 | rgb/0020/30-deg-right/00101.png depth/0020/30-deg-right/00101.png
466 | rgb/0020/30-deg-right/00389.png depth/0020/30-deg-right/00389.png
467 | rgb/0020/30-deg-right/00071.png depth/0020/30-deg-right/00071.png
468 | rgb/0020/30-deg-right/00757.png depth/0020/30-deg-right/00757.png
469 | rgb/0020/30-deg-right/00096.png depth/0020/30-deg-right/00096.png
470 | rgb/0020/30-deg-right/00824.png depth/0020/30-deg-right/00824.png
471 | rgb/0020/30-deg-right/00108.png depth/0020/30-deg-right/00108.png
472 | rgb/0020/fog/00806.png depth/0020/fog/00806.png
473 | rgb/0020/fog/00369.png depth/0020/fog/00369.png
474 | rgb/0020/fog/00473.png depth/0020/fog/00473.png
475 | rgb/0020/fog/00781.png depth/0020/fog/00781.png
476 | rgb/0020/fog/00183.png depth/0020/fog/00183.png
477 | rgb/0020/fog/00269.png depth/0020/fog/00269.png
478 | rgb/0020/fog/00735.png depth/0020/fog/00735.png
479 | rgb/0020/fog/00273.png depth/0020/fog/00273.png
480 | rgb/0020/fog/00229.png depth/0020/fog/00229.png
481 | rgb/0020/fog/00542.png depth/0020/fog/00542.png
482 | rgb/0020/fog/00219.png depth/0020/fog/00219.png
483 | rgb/0020/fog/00406.png depth/0020/fog/00406.png
484 | rgb/0020/fog/00580.png depth/0020/fog/00580.png
485 | rgb/0020/fog/00635.png depth/0020/fog/00635.png
486 | rgb/0020/fog/00110.png depth/0020/fog/00110.png
487 | rgb/0020/fog/00427.png depth/0020/fog/00427.png
488 | rgb/0020/fog/00820.png depth/0020/fog/00820.png
489 | rgb/0020/fog/00101.png depth/0020/fog/00101.png
490 | rgb/0020/fog/00201.png depth/0020/fog/00201.png
491 | rgb/0020/fog/00536.png depth/0020/fog/00536.png
492 | rgb/0020/fog/00704.png depth/0020/fog/00704.png
493 | rgb/0020/fog/00061.png depth/0020/fog/00061.png
494 | rgb/0020/30-deg-left/00294.png depth/0020/30-deg-left/00294.png
495 | rgb/0020/30-deg-left/00384.png depth/0020/30-deg-left/00384.png
496 | rgb/0020/30-deg-left/00031.png depth/0020/30-deg-left/00031.png
497 | rgb/0020/30-deg-left/00804.png depth/0020/30-deg-left/00804.png
498 | rgb/0020/30-deg-left/00335.png depth/0020/30-deg-left/00335.png
499 | rgb/0020/30-deg-left/00476.png depth/0020/30-deg-left/00476.png
500 | rgb/0020/30-deg-left/00285.png depth/0020/30-deg-left/00285.png
501 |
--------------------------------------------------------------------------------
/datasets/vkitti/val.txt:
--------------------------------------------------------------------------------
1 | rgb/0006/clone/00051.png depth/0006/clone/00051.png
2 | rgb/0006/clone/00163.png depth/0006/clone/00163.png
3 | rgb/0006/clone/00150.png depth/0006/clone/00150.png
4 | rgb/0006/clone/00104.png depth/0006/clone/00104.png
5 | rgb/0006/clone/00208.png depth/0006/clone/00208.png
6 | rgb/0006/clone/00238.png depth/0006/clone/00238.png
7 | rgb/0006/clone/00228.png depth/0006/clone/00228.png
8 | rgb/0006/clone/00241.png depth/0006/clone/00241.png
9 | rgb/0006/15-deg-left/00011.png depth/0006/15-deg-left/00011.png
10 | rgb/0006/15-deg-left/00208.png depth/0006/15-deg-left/00208.png
11 | rgb/0006/15-deg-left/00181.png depth/0006/15-deg-left/00181.png
12 | rgb/0006/sunset/00157.png depth/0006/sunset/00157.png
13 | rgb/0006/sunset/00190.png depth/0006/sunset/00190.png
14 | rgb/0006/sunset/00173.png depth/0006/sunset/00173.png
15 | rgb/0006/sunset/00222.png depth/0006/sunset/00222.png
16 | rgb/0006/sunset/00023.png depth/0006/sunset/00023.png
17 | rgb/0006/sunset/00207.png depth/0006/sunset/00207.png
18 | rgb/0006/sunset/00153.png depth/0006/sunset/00153.png
19 | rgb/0006/sunset/00169.png depth/0006/sunset/00169.png
20 | rgb/0006/rain/00067.png depth/0006/rain/00067.png
21 | rgb/0006/rain/00032.png depth/0006/rain/00032.png
22 | rgb/0006/rain/00014.png depth/0006/rain/00014.png
23 | rgb/0006/rain/00061.png depth/0006/rain/00061.png
24 | rgb/0006/morning/00258.png depth/0006/morning/00258.png
25 | rgb/0006/morning/00212.png depth/0006/morning/00212.png
26 | rgb/0006/morning/00032.png depth/0006/morning/00032.png
27 | rgb/0006/morning/00172.png depth/0006/morning/00172.png
28 | rgb/0006/morning/00193.png depth/0006/morning/00193.png
29 | rgb/0006/morning/00071.png depth/0006/morning/00071.png
30 | rgb/0006/morning/00250.png depth/0006/morning/00250.png
31 | rgb/0006/15-deg-right/00245.png depth/0006/15-deg-right/00245.png
32 | rgb/0006/15-deg-right/00173.png depth/0006/15-deg-right/00173.png
33 | rgb/0006/15-deg-right/00162.png depth/0006/15-deg-right/00162.png
34 | rgb/0006/15-deg-right/00149.png depth/0006/15-deg-right/00149.png
35 | rgb/0006/15-deg-right/00074.png depth/0006/15-deg-right/00074.png
36 | rgb/0006/15-deg-right/00075.png depth/0006/15-deg-right/00075.png
37 | rgb/0006/15-deg-right/00011.png depth/0006/15-deg-right/00011.png
38 | rgb/0006/15-deg-right/00215.png depth/0006/15-deg-right/00215.png
39 | rgb/0006/15-deg-right/00182.png depth/0006/15-deg-right/00182.png
40 | rgb/0006/15-deg-right/00105.png depth/0006/15-deg-right/00105.png
41 | rgb/0006/15-deg-right/00243.png depth/0006/15-deg-right/00243.png
42 | rgb/0006/30-deg-right/00028.png depth/0006/30-deg-right/00028.png
43 | rgb/0006/30-deg-right/00067.png depth/0006/30-deg-right/00067.png
44 | rgb/0006/30-deg-right/00120.png depth/0006/30-deg-right/00120.png
45 | rgb/0006/30-deg-right/00229.png depth/0006/30-deg-right/00229.png
46 | rgb/0006/30-deg-right/00236.png depth/0006/30-deg-right/00236.png
47 | rgb/0006/30-deg-right/00030.png depth/0006/30-deg-right/00030.png
48 | rgb/0006/fog/00160.png depth/0006/fog/00160.png
49 | rgb/0006/30-deg-left/00210.png depth/0006/30-deg-left/00210.png
50 | rgb/0006/30-deg-left/00051.png depth/0006/30-deg-left/00051.png
51 | rgb/0006/30-deg-left/00111.png depth/0006/30-deg-left/00111.png
52 | rgb/0006/30-deg-left/00094.png depth/0006/30-deg-left/00094.png
53 | rgb/0006/30-deg-left/00168.png depth/0006/30-deg-left/00168.png
54 | rgb/0006/30-deg-left/00030.png depth/0006/30-deg-left/00030.png
55 | rgb/0006/30-deg-left/00263.png depth/0006/30-deg-left/00263.png
56 | rgb/0006/30-deg-left/00071.png depth/0006/30-deg-left/00071.png
57 | rgb/0006/30-deg-left/00262.png depth/0006/30-deg-left/00262.png
58 | rgb/0006/overcast/00008.png depth/0006/overcast/00008.png
59 | rgb/0006/overcast/00220.png depth/0006/overcast/00220.png
60 | rgb/0006/overcast/00064.png depth/0006/overcast/00064.png
61 | rgb/0006/overcast/00147.png depth/0006/overcast/00147.png
62 | rgb/0006/overcast/00237.png depth/0006/overcast/00237.png
63 | rgb/0006/overcast/00241.png depth/0006/overcast/00241.png
64 | rgb/0018/clone/00176.png depth/0018/clone/00176.png
65 | rgb/0018/clone/00106.png depth/0018/clone/00106.png
66 | rgb/0018/clone/00080.png depth/0018/clone/00080.png
67 | rgb/0018/clone/00067.png depth/0018/clone/00067.png
68 | rgb/0018/clone/00029.png depth/0018/clone/00029.png
69 | rgb/0018/clone/00306.png depth/0018/clone/00306.png
70 | rgb/0018/clone/00070.png depth/0018/clone/00070.png
71 | rgb/0018/clone/00130.png depth/0018/clone/00130.png
72 | rgb/0018/15-deg-left/00318.png depth/0018/15-deg-left/00318.png
73 | rgb/0018/15-deg-left/00244.png depth/0018/15-deg-left/00244.png
74 | rgb/0018/15-deg-left/00301.png depth/0018/15-deg-left/00301.png
75 | rgb/0018/15-deg-left/00102.png depth/0018/15-deg-left/00102.png
76 | rgb/0018/15-deg-left/00315.png depth/0018/15-deg-left/00315.png
77 | rgb/0018/15-deg-left/00127.png depth/0018/15-deg-left/00127.png
78 | rgb/0018/15-deg-left/00177.png depth/0018/15-deg-left/00177.png
79 | rgb/0018/15-deg-left/00132.png depth/0018/15-deg-left/00132.png
80 | rgb/0018/15-deg-left/00118.png depth/0018/15-deg-left/00118.png
81 | rgb/0018/15-deg-left/00297.png depth/0018/15-deg-left/00297.png
82 | rgb/0018/15-deg-left/00105.png depth/0018/15-deg-left/00105.png
83 | rgb/0018/15-deg-left/00021.png depth/0018/15-deg-left/00021.png
84 | rgb/0018/15-deg-left/00062.png depth/0018/15-deg-left/00062.png
85 | rgb/0018/sunset/00097.png depth/0018/sunset/00097.png
86 | rgb/0018/sunset/00266.png depth/0018/sunset/00266.png
87 | rgb/0018/sunset/00302.png depth/0018/sunset/00302.png
88 | rgb/0018/sunset/00115.png depth/0018/sunset/00115.png
89 | rgb/0018/sunset/00140.png depth/0018/sunset/00140.png
90 | rgb/0018/sunset/00146.png depth/0018/sunset/00146.png
91 | rgb/0018/sunset/00207.png depth/0018/sunset/00207.png
92 | rgb/0018/sunset/00029.png depth/0018/sunset/00029.png
93 | rgb/0018/sunset/00211.png depth/0018/sunset/00211.png
94 | rgb/0018/sunset/00061.png depth/0018/sunset/00061.png
95 | rgb/0018/rain/00253.png depth/0018/rain/00253.png
96 | rgb/0018/rain/00292.png depth/0018/rain/00292.png
97 | rgb/0018/rain/00238.png depth/0018/rain/00238.png
98 | rgb/0018/rain/00191.png depth/0018/rain/00191.png
99 | rgb/0018/rain/00272.png depth/0018/rain/00272.png
100 | rgb/0018/rain/00130.png depth/0018/rain/00130.png
101 | rgb/0018/rain/00250.png depth/0018/rain/00250.png
102 | rgb/0018/morning/00176.png depth/0018/morning/00176.png
103 | rgb/0018/morning/00024.png depth/0018/morning/00024.png
104 | rgb/0018/morning/00076.png depth/0018/morning/00076.png
105 | rgb/0018/morning/00214.png depth/0018/morning/00214.png
106 | rgb/0018/morning/00110.png depth/0018/morning/00110.png
107 | rgb/0018/morning/00316.png depth/0018/morning/00316.png
108 | rgb/0018/morning/00131.png depth/0018/morning/00131.png
109 | rgb/0018/morning/00105.png depth/0018/morning/00105.png
110 | rgb/0018/15-deg-right/00148.png depth/0018/15-deg-right/00148.png
111 | rgb/0018/15-deg-right/00206.png depth/0018/15-deg-right/00206.png
112 | rgb/0018/15-deg-right/00210.png depth/0018/15-deg-right/00210.png
113 | rgb/0018/15-deg-right/00181.png depth/0018/15-deg-right/00181.png
114 | rgb/0018/15-deg-right/00216.png depth/0018/15-deg-right/00216.png
115 | rgb/0018/15-deg-right/00084.png depth/0018/15-deg-right/00084.png
116 | rgb/0018/15-deg-right/00330.png depth/0018/15-deg-right/00330.png
117 | rgb/0018/15-deg-right/00042.png depth/0018/15-deg-right/00042.png
118 | rgb/0018/15-deg-right/00130.png depth/0018/15-deg-right/00130.png
119 | rgb/0018/15-deg-right/00319.png depth/0018/15-deg-right/00319.png
120 | rgb/0018/30-deg-right/00311.png depth/0018/30-deg-right/00311.png
121 | rgb/0018/30-deg-right/00072.png depth/0018/30-deg-right/00072.png
122 | rgb/0018/30-deg-right/00217.png depth/0018/30-deg-right/00217.png
123 | rgb/0018/30-deg-right/00318.png depth/0018/30-deg-right/00318.png
124 | rgb/0018/30-deg-right/00036.png depth/0018/30-deg-right/00036.png
125 | rgb/0018/30-deg-right/00282.png depth/0018/30-deg-right/00282.png
126 | rgb/0018/30-deg-right/00123.png depth/0018/30-deg-right/00123.png
127 | rgb/0018/30-deg-right/00204.png depth/0018/30-deg-right/00204.png
128 | rgb/0018/30-deg-right/00035.png depth/0018/30-deg-right/00035.png
129 | rgb/0018/fog/00203.png depth/0018/fog/00203.png
130 | rgb/0018/fog/00016.png depth/0018/fog/00016.png
131 | rgb/0018/fog/00141.png depth/0018/fog/00141.png
132 | rgb/0018/fog/00275.png depth/0018/fog/00275.png
133 | rgb/0018/fog/00299.png depth/0018/fog/00299.png
134 | rgb/0018/fog/00192.png depth/0018/fog/00192.png
135 | rgb/0018/30-deg-left/00046.png depth/0018/30-deg-left/00046.png
136 | rgb/0018/30-deg-left/00253.png depth/0018/30-deg-left/00253.png
137 | rgb/0018/30-deg-left/00223.png depth/0018/30-deg-left/00223.png
138 | rgb/0018/30-deg-left/00038.png depth/0018/30-deg-left/00038.png
139 | rgb/0018/30-deg-left/00009.png depth/0018/30-deg-left/00009.png
140 | rgb/0018/30-deg-left/00104.png depth/0018/30-deg-left/00104.png
141 | rgb/0018/30-deg-left/00087.png depth/0018/30-deg-left/00087.png
142 | rgb/0018/30-deg-left/00127.png depth/0018/30-deg-left/00127.png
143 | rgb/0018/30-deg-left/00193.png depth/0018/30-deg-left/00193.png
144 | rgb/0018/30-deg-left/00133.png depth/0018/30-deg-left/00133.png
145 | rgb/0018/30-deg-left/00069.png depth/0018/30-deg-left/00069.png
146 | rgb/0018/30-deg-left/00235.png depth/0018/30-deg-left/00235.png
147 | rgb/0018/30-deg-left/00108.png depth/0018/30-deg-left/00108.png
148 | rgb/0018/overcast/00206.png depth/0018/overcast/00206.png
149 | rgb/0018/overcast/00210.png depth/0018/overcast/00210.png
150 | rgb/0018/overcast/00066.png depth/0018/overcast/00066.png
151 | rgb/0018/overcast/00303.png depth/0018/overcast/00303.png
152 | rgb/0018/overcast/00174.png depth/0018/overcast/00174.png
153 | rgb/0018/overcast/00014.png depth/0018/overcast/00014.png
154 | rgb/0018/overcast/00132.png depth/0018/overcast/00132.png
155 | rgb/0018/overcast/00168.png depth/0018/overcast/00168.png
156 | rgb/0018/overcast/00035.png depth/0018/overcast/00035.png
157 | rgb/0001/clone/00085.png depth/0001/clone/00085.png
158 | rgb/0001/clone/00082.png depth/0001/clone/00082.png
159 | rgb/0001/clone/00161.png depth/0001/clone/00161.png
160 | rgb/0001/clone/00074.png depth/0001/clone/00074.png
161 | rgb/0001/clone/00356.png depth/0001/clone/00356.png
162 | rgb/0001/clone/00355.png depth/0001/clone/00355.png
163 | rgb/0001/clone/00261.png depth/0001/clone/00261.png
164 | rgb/0001/clone/00362.png depth/0001/clone/00362.png
165 | rgb/0001/clone/00284.png depth/0001/clone/00284.png
166 | rgb/0001/clone/00349.png depth/0001/clone/00349.png
167 | rgb/0001/15-deg-left/00025.png depth/0001/15-deg-left/00025.png
168 | rgb/0001/15-deg-left/00221.png depth/0001/15-deg-left/00221.png
169 | rgb/0001/15-deg-left/00218.png depth/0001/15-deg-left/00218.png
170 | rgb/0001/15-deg-left/00018.png depth/0001/15-deg-left/00018.png
171 | rgb/0001/15-deg-left/00320.png depth/0001/15-deg-left/00320.png
172 | rgb/0001/15-deg-left/00216.png depth/0001/15-deg-left/00216.png
173 | rgb/0001/15-deg-left/00303.png depth/0001/15-deg-left/00303.png
174 | rgb/0001/15-deg-left/00359.png depth/0001/15-deg-left/00359.png
175 | rgb/0001/15-deg-left/00283.png depth/0001/15-deg-left/00283.png
176 | rgb/0001/15-deg-left/00315.png depth/0001/15-deg-left/00315.png
177 | rgb/0001/15-deg-left/00410.png depth/0001/15-deg-left/00410.png
178 | rgb/0001/15-deg-left/00017.png depth/0001/15-deg-left/00017.png
179 | rgb/0001/sunset/00080.png depth/0001/sunset/00080.png
180 | rgb/0001/sunset/00038.png depth/0001/sunset/00038.png
181 | rgb/0001/sunset/00121.png depth/0001/sunset/00121.png
182 | rgb/0001/sunset/00376.png depth/0001/sunset/00376.png
183 | rgb/0001/sunset/00207.png depth/0001/sunset/00207.png
184 | rgb/0001/sunset/00029.png depth/0001/sunset/00029.png
185 | rgb/0001/sunset/00132.png depth/0001/sunset/00132.png
186 | rgb/0001/sunset/00167.png depth/0001/sunset/00167.png
187 | rgb/0001/rain/00294.png depth/0001/rain/00294.png
188 | rgb/0001/rain/00239.png depth/0001/rain/00239.png
189 | rgb/0001/rain/00266.png depth/0001/rain/00266.png
190 | rgb/0001/rain/00268.png depth/0001/rain/00268.png
191 | rgb/0001/rain/00270.png depth/0001/rain/00270.png
192 | rgb/0001/rain/00238.png depth/0001/rain/00238.png
193 | rgb/0001/rain/00342.png depth/0001/rain/00342.png
194 | rgb/0001/rain/00055.png depth/0001/rain/00055.png
195 | rgb/0001/morning/00423.png depth/0001/morning/00423.png
196 | rgb/0001/morning/00429.png depth/0001/morning/00429.png
197 | rgb/0001/morning/00080.png depth/0001/morning/00080.png
198 | rgb/0001/morning/00408.png depth/0001/morning/00408.png
199 | rgb/0001/morning/00068.png depth/0001/morning/00068.png
200 | rgb/0001/morning/00406.png depth/0001/morning/00406.png
201 | rgb/0001/morning/00167.png depth/0001/morning/00167.png
202 | rgb/0001/morning/00133.png depth/0001/morning/00133.png
203 | rgb/0001/morning/00436.png depth/0001/morning/00436.png
204 | rgb/0001/morning/00071.png depth/0001/morning/00071.png
205 | rgb/0001/15-deg-right/00415.png depth/0001/15-deg-right/00415.png
206 | rgb/0001/15-deg-right/00325.png depth/0001/15-deg-right/00325.png
207 | rgb/0001/15-deg-right/00163.png depth/0001/15-deg-right/00163.png
208 | rgb/0001/15-deg-right/00089.png depth/0001/15-deg-right/00089.png
209 | rgb/0001/15-deg-right/00003.png depth/0001/15-deg-right/00003.png
210 | rgb/0001/15-deg-right/00257.png depth/0001/15-deg-right/00257.png
211 | rgb/0001/15-deg-right/00050.png depth/0001/15-deg-right/00050.png
212 | rgb/0001/15-deg-right/00054.png depth/0001/15-deg-right/00054.png
213 | rgb/0001/15-deg-right/00069.png depth/0001/15-deg-right/00069.png
214 | rgb/0001/15-deg-right/00037.png depth/0001/15-deg-right/00037.png
215 | rgb/0001/15-deg-right/00255.png depth/0001/15-deg-right/00255.png
216 | rgb/0001/30-deg-right/00253.png depth/0001/30-deg-right/00253.png
217 | rgb/0001/30-deg-right/00429.png depth/0001/30-deg-right/00429.png
218 | rgb/0001/30-deg-right/00159.png depth/0001/30-deg-right/00159.png
219 | rgb/0001/30-deg-right/00325.png depth/0001/30-deg-right/00325.png
220 | rgb/0001/30-deg-right/00165.png depth/0001/30-deg-right/00165.png
221 | rgb/0001/30-deg-right/00160.png depth/0001/30-deg-right/00160.png
222 | rgb/0001/30-deg-right/00368.png depth/0001/30-deg-right/00368.png
223 | rgb/0001/30-deg-right/00029.png depth/0001/30-deg-right/00029.png
224 | rgb/0001/30-deg-right/00390.png depth/0001/30-deg-right/00390.png
225 | rgb/0001/30-deg-right/00017.png depth/0001/30-deg-right/00017.png
226 | rgb/0001/30-deg-right/00341.png depth/0001/30-deg-right/00341.png
227 | rgb/0001/fog/00326.png depth/0001/fog/00326.png
228 | rgb/0001/fog/00246.png depth/0001/fog/00246.png
229 | rgb/0001/fog/00090.png depth/0001/fog/00090.png
230 | rgb/0001/fog/00063.png depth/0001/fog/00063.png
231 | rgb/0001/fog/00233.png depth/0001/fog/00233.png
232 | rgb/0001/fog/00419.png depth/0001/fog/00419.png
233 | rgb/0001/fog/00352.png depth/0001/fog/00352.png
234 | rgb/0001/fog/00244.png depth/0001/fog/00244.png
235 | rgb/0001/fog/00404.png depth/0001/fog/00404.png
236 | rgb/0001/fog/00273.png depth/0001/fog/00273.png
237 | rgb/0001/fog/00019.png depth/0001/fog/00019.png
238 | rgb/0001/fog/00104.png depth/0001/fog/00104.png
239 | rgb/0001/fog/00344.png depth/0001/fog/00344.png
240 | rgb/0001/fog/00367.png depth/0001/fog/00367.png
241 | rgb/0001/fog/00312.png depth/0001/fog/00312.png
242 | rgb/0001/fog/00227.png depth/0001/fog/00227.png
243 | rgb/0001/30-deg-left/00348.png depth/0001/30-deg-left/00348.png
244 | rgb/0001/30-deg-left/00067.png depth/0001/30-deg-left/00067.png
245 | rgb/0001/30-deg-left/00038.png depth/0001/30-deg-left/00038.png
246 | rgb/0001/30-deg-left/00347.png depth/0001/30-deg-left/00347.png
247 | rgb/0001/30-deg-left/00345.png depth/0001/30-deg-left/00345.png
248 | rgb/0001/30-deg-left/00222.png depth/0001/30-deg-left/00222.png
249 | rgb/0001/30-deg-left/00270.png depth/0001/30-deg-left/00270.png
250 | rgb/0001/30-deg-left/00014.png depth/0001/30-deg-left/00014.png
251 | rgb/0001/30-deg-left/00134.png depth/0001/30-deg-left/00134.png
252 | rgb/0001/30-deg-left/00015.png depth/0001/30-deg-left/00015.png
253 | rgb/0001/overcast/00403.png depth/0001/overcast/00403.png
254 | rgb/0001/overcast/00377.png depth/0001/overcast/00377.png
255 | rgb/0001/overcast/00393.png depth/0001/overcast/00393.png
256 | rgb/0001/overcast/00120.png depth/0001/overcast/00120.png
257 | rgb/0001/overcast/00300.png depth/0001/overcast/00300.png
258 | rgb/0001/overcast/00003.png depth/0001/overcast/00003.png
259 | rgb/0001/overcast/00301.png depth/0001/overcast/00301.png
260 | rgb/0001/overcast/00141.png depth/0001/overcast/00141.png
261 | rgb/0002/clone/00152.png depth/0002/clone/00152.png
262 | rgb/0002/clone/00188.png depth/0002/clone/00188.png
263 | rgb/0002/clone/00162.png depth/0002/clone/00162.png
264 | rgb/0002/clone/00075.png depth/0002/clone/00075.png
265 | rgb/0002/clone/00032.png depth/0002/clone/00032.png
266 | rgb/0002/clone/00202.png depth/0002/clone/00202.png
267 | rgb/0002/clone/00187.png depth/0002/clone/00187.png
268 | rgb/0002/clone/00084.png depth/0002/clone/00084.png
269 | rgb/0002/clone/00141.png depth/0002/clone/00141.png
270 | rgb/0002/clone/00225.png depth/0002/clone/00225.png
271 | rgb/0002/clone/00201.png depth/0002/clone/00201.png
272 | rgb/0002/clone/00136.png depth/0002/clone/00136.png
273 | rgb/0002/clone/00065.png depth/0002/clone/00065.png
274 | rgb/0002/clone/00035.png depth/0002/clone/00035.png
275 | rgb/0002/15-deg-left/00046.png depth/0002/15-deg-left/00046.png
276 | rgb/0002/15-deg-left/00000.png depth/0002/15-deg-left/00000.png
277 | rgb/0002/15-deg-left/00190.png depth/0002/15-deg-left/00190.png
278 | rgb/0002/15-deg-left/00088.png depth/0002/15-deg-left/00088.png
279 | rgb/0002/15-deg-left/00059.png depth/0002/15-deg-left/00059.png
280 | rgb/0002/15-deg-left/00198.png depth/0002/15-deg-left/00198.png
281 | rgb/0002/15-deg-left/00168.png depth/0002/15-deg-left/00168.png
282 | rgb/0002/15-deg-left/00192.png depth/0002/15-deg-left/00192.png
283 | rgb/0002/15-deg-left/00026.png depth/0002/15-deg-left/00026.png
284 | rgb/0002/15-deg-left/00006.png depth/0002/15-deg-left/00006.png
285 | rgb/0002/15-deg-left/00096.png depth/0002/15-deg-left/00096.png
286 | rgb/0002/sunset/00148.png depth/0002/sunset/00148.png
287 | rgb/0002/sunset/00187.png depth/0002/sunset/00187.png
288 | rgb/0002/rain/00004.png depth/0002/rain/00004.png
289 | rgb/0002/rain/00203.png depth/0002/rain/00203.png
290 | rgb/0002/rain/00220.png depth/0002/rain/00220.png
291 | rgb/0002/rain/00137.png depth/0002/rain/00137.png
292 | rgb/0002/rain/00100.png depth/0002/rain/00100.png
293 | rgb/0002/morning/00157.png depth/0002/morning/00157.png
294 | rgb/0002/morning/00206.png depth/0002/morning/00206.png
295 | rgb/0002/morning/00081.png depth/0002/morning/00081.png
296 | rgb/0002/morning/00060.png depth/0002/morning/00060.png
297 | rgb/0002/morning/00100.png depth/0002/morning/00100.png
298 | rgb/0002/15-deg-right/00176.png depth/0002/15-deg-right/00176.png
299 | rgb/0002/15-deg-right/00076.png depth/0002/15-deg-right/00076.png
300 | rgb/0002/15-deg-right/00221.png depth/0002/15-deg-right/00221.png
301 | rgb/0002/15-deg-right/00045.png depth/0002/15-deg-right/00045.png
302 | rgb/0002/15-deg-right/00055.png depth/0002/15-deg-right/00055.png
303 | rgb/0002/30-deg-right/00112.png depth/0002/30-deg-right/00112.png
304 | rgb/0002/30-deg-right/00107.png depth/0002/30-deg-right/00107.png
305 | rgb/0002/30-deg-right/00069.png depth/0002/30-deg-right/00069.png
306 | rgb/0002/fog/00115.png depth/0002/fog/00115.png
307 | rgb/0002/fog/00104.png depth/0002/fog/00104.png
308 | rgb/0002/fog/00219.png depth/0002/fog/00219.png
309 | rgb/0002/fog/00058.png depth/0002/fog/00058.png
310 | rgb/0002/30-deg-left/00085.png depth/0002/30-deg-left/00085.png
311 | rgb/0002/30-deg-left/00081.png depth/0002/30-deg-left/00081.png
312 | rgb/0002/30-deg-left/00164.png depth/0002/30-deg-left/00164.png
313 | rgb/0002/30-deg-left/00232.png depth/0002/30-deg-left/00232.png
314 | rgb/0002/30-deg-left/00003.png depth/0002/30-deg-left/00003.png
315 | rgb/0002/30-deg-left/00094.png depth/0002/30-deg-left/00094.png
316 | rgb/0002/overcast/00098.png depth/0002/overcast/00098.png
317 | rgb/0002/overcast/00190.png depth/0002/overcast/00190.png
318 | rgb/0002/overcast/00081.png depth/0002/overcast/00081.png
319 | rgb/0002/overcast/00164.png depth/0002/overcast/00164.png
320 | rgb/0002/overcast/00029.png depth/0002/overcast/00029.png
321 | rgb/0002/overcast/00192.png depth/0002/overcast/00192.png
322 | rgb/0002/overcast/00056.png depth/0002/overcast/00056.png
323 | rgb/0002/overcast/00204.png depth/0002/overcast/00204.png
324 | rgb/0002/overcast/00035.png depth/0002/overcast/00035.png
325 | rgb/0020/clone/00640.png depth/0020/clone/00640.png
326 | rgb/0020/clone/00081.png depth/0020/clone/00081.png
327 | rgb/0020/clone/00418.png depth/0020/clone/00418.png
328 | rgb/0020/clone/00439.png depth/0020/clone/00439.png
329 | rgb/0020/clone/00741.png depth/0020/clone/00741.png
330 | rgb/0020/clone/00091.png depth/0020/clone/00091.png
331 | rgb/0020/clone/00712.png depth/0020/clone/00712.png
332 | rgb/0020/clone/00166.png depth/0020/clone/00166.png
333 | rgb/0020/clone/00198.png depth/0020/clone/00198.png
334 | rgb/0020/clone/00363.png depth/0020/clone/00363.png
335 | rgb/0020/clone/00124.png depth/0020/clone/00124.png
336 | rgb/0020/clone/00700.png depth/0020/clone/00700.png
337 | rgb/0020/clone/00748.png depth/0020/clone/00748.png
338 | rgb/0020/clone/00751.png depth/0020/clone/00751.png
339 | rgb/0020/15-deg-left/00046.png depth/0020/15-deg-left/00046.png
340 | rgb/0020/15-deg-left/00594.png depth/0020/15-deg-left/00594.png
341 | rgb/0020/15-deg-left/00732.png depth/0020/15-deg-left/00732.png
342 | rgb/0020/15-deg-left/00212.png depth/0020/15-deg-left/00212.png
343 | rgb/0020/15-deg-left/00488.png depth/0020/15-deg-left/00488.png
344 | rgb/0020/15-deg-left/00630.png depth/0020/15-deg-left/00630.png
345 | rgb/0020/15-deg-left/00601.png depth/0020/15-deg-left/00601.png
346 | rgb/0020/15-deg-left/00578.png depth/0020/15-deg-left/00578.png
347 | rgb/0020/15-deg-left/00628.png depth/0020/15-deg-left/00628.png
348 | rgb/0020/15-deg-left/00691.png depth/0020/15-deg-left/00691.png
349 | rgb/0020/15-deg-left/00453.png depth/0020/15-deg-left/00453.png
350 | rgb/0020/15-deg-left/00370.png depth/0020/15-deg-left/00370.png
351 | rgb/0020/15-deg-left/00776.png depth/0020/15-deg-left/00776.png
352 | rgb/0020/15-deg-left/00582.png depth/0020/15-deg-left/00582.png
353 | rgb/0020/15-deg-left/00168.png depth/0020/15-deg-left/00168.png
354 | rgb/0020/15-deg-left/00521.png depth/0020/15-deg-left/00521.png
355 | rgb/0020/15-deg-left/00773.png depth/0020/15-deg-left/00773.png
356 | rgb/0020/15-deg-left/00714.png depth/0020/15-deg-left/00714.png
357 | rgb/0020/15-deg-left/00481.png depth/0020/15-deg-left/00481.png
358 | rgb/0020/15-deg-left/00561.png depth/0020/15-deg-left/00561.png
359 | rgb/0020/15-deg-left/00047.png depth/0020/15-deg-left/00047.png
360 | rgb/0020/15-deg-left/00396.png depth/0020/15-deg-left/00396.png
361 | rgb/0020/sunset/00805.png depth/0020/sunset/00805.png
362 | rgb/0020/sunset/00514.png depth/0020/sunset/00514.png
363 | rgb/0020/sunset/00592.png depth/0020/sunset/00592.png
364 | rgb/0020/sunset/00557.png depth/0020/sunset/00557.png
365 | rgb/0020/sunset/00163.png depth/0020/sunset/00163.png
366 | rgb/0020/sunset/00183.png depth/0020/sunset/00183.png
367 | rgb/0020/sunset/00686.png depth/0020/sunset/00686.png
368 | rgb/0020/sunset/00715.png depth/0020/sunset/00715.png
369 | rgb/0020/sunset/00411.png depth/0020/sunset/00411.png
370 | rgb/0020/sunset/00768.png depth/0020/sunset/00768.png
371 | rgb/0020/sunset/00682.png depth/0020/sunset/00682.png
372 | rgb/0020/sunset/00146.png depth/0020/sunset/00146.png
373 | rgb/0020/sunset/00644.png depth/0020/sunset/00644.png
374 | rgb/0020/sunset/00338.png depth/0020/sunset/00338.png
375 | rgb/0020/sunset/00056.png depth/0020/sunset/00056.png
376 | rgb/0020/sunset/00664.png depth/0020/sunset/00664.png
377 | rgb/0020/sunset/00297.png depth/0020/sunset/00297.png
378 | rgb/0020/sunset/00093.png depth/0020/sunset/00093.png
379 | rgb/0020/sunset/00706.png depth/0020/sunset/00706.png
380 | rgb/0020/sunset/00618.png depth/0020/sunset/00618.png
381 | rgb/0020/rain/00640.png depth/0020/rain/00640.png
382 | rgb/0020/rain/00324.png depth/0020/rain/00324.png
383 | rgb/0020/rain/00139.png depth/0020/rain/00139.png
384 | rgb/0020/rain/00066.png depth/0020/rain/00066.png
385 | rgb/0020/rain/00126.png depth/0020/rain/00126.png
386 | rgb/0020/rain/00146.png depth/0020/rain/00146.png
387 | rgb/0020/rain/00344.png depth/0020/rain/00344.png
388 | rgb/0020/rain/00440.png depth/0020/rain/00440.png
389 | rgb/0020/rain/00773.png depth/0020/rain/00773.png
390 | rgb/0020/rain/00795.png depth/0020/rain/00795.png
391 | rgb/0020/rain/00777.png depth/0020/rain/00777.png
392 | rgb/0020/rain/00675.png depth/0020/rain/00675.png
393 | rgb/0020/rain/00544.png depth/0020/rain/00544.png
394 | rgb/0020/rain/00319.png depth/0020/rain/00319.png
395 | rgb/0020/rain/00012.png depth/0020/rain/00012.png
396 | rgb/0020/morning/00654.png depth/0020/morning/00654.png
397 | rgb/0020/morning/00473.png depth/0020/morning/00473.png
398 | rgb/0020/morning/00259.png depth/0020/morning/00259.png
399 | rgb/0020/morning/00082.png depth/0020/morning/00082.png
400 | rgb/0020/morning/00090.png depth/0020/morning/00090.png
401 | rgb/0020/morning/00645.png depth/0020/morning/00645.png
402 | rgb/0020/morning/00447.png depth/0020/morning/00447.png
403 | rgb/0020/morning/00336.png depth/0020/morning/00336.png
404 | rgb/0020/morning/00278.png depth/0020/morning/00278.png
405 | rgb/0020/morning/00438.png depth/0020/morning/00438.png
406 | rgb/0020/morning/00691.png depth/0020/morning/00691.png
407 | rgb/0020/morning/00171.png depth/0020/morning/00171.png
408 | rgb/0020/morning/00605.png depth/0020/morning/00605.png
409 | rgb/0020/morning/00435.png depth/0020/morning/00435.png
410 | rgb/0020/morning/00409.png depth/0020/morning/00409.png
411 | rgb/0020/morning/00169.png depth/0020/morning/00169.png
412 | rgb/0020/morning/00669.png depth/0020/morning/00669.png
413 | rgb/0020/morning/00138.png depth/0020/morning/00138.png
414 | rgb/0020/morning/00572.png depth/0020/morning/00572.png
415 | rgb/0020/morning/00598.png depth/0020/morning/00598.png
416 | rgb/0020/15-deg-right/00584.png depth/0020/15-deg-right/00584.png
417 | rgb/0020/15-deg-right/00148.png depth/0020/15-deg-right/00148.png
418 | rgb/0020/15-deg-right/00709.png depth/0020/15-deg-right/00709.png
419 | rgb/0020/15-deg-right/00223.png depth/0020/15-deg-right/00223.png
420 | rgb/0020/15-deg-right/00650.png depth/0020/15-deg-right/00650.png
421 | rgb/0020/15-deg-right/00581.png depth/0020/15-deg-right/00581.png
422 | rgb/0020/15-deg-right/00537.png depth/0020/15-deg-right/00537.png
423 | rgb/0020/15-deg-right/00505.png depth/0020/15-deg-right/00505.png
424 | rgb/0020/15-deg-right/00339.png depth/0020/15-deg-right/00339.png
425 | rgb/0020/15-deg-right/00218.png depth/0020/15-deg-right/00218.png
426 | rgb/0020/15-deg-right/00068.png depth/0020/15-deg-right/00068.png
427 | rgb/0020/15-deg-right/00487.png depth/0020/15-deg-right/00487.png
428 | rgb/0020/15-deg-right/00477.png depth/0020/15-deg-right/00477.png
429 | rgb/0020/15-deg-right/00273.png depth/0020/15-deg-right/00273.png
430 | rgb/0020/15-deg-right/00578.png depth/0020/15-deg-right/00578.png
431 | rgb/0020/15-deg-right/00682.png depth/0020/15-deg-right/00682.png
432 | rgb/0020/15-deg-right/00666.png depth/0020/15-deg-right/00666.png
433 | rgb/0020/15-deg-right/00767.png depth/0020/15-deg-right/00767.png
434 | rgb/0020/15-deg-right/00117.png depth/0020/15-deg-right/00117.png
435 | rgb/0020/15-deg-right/00522.png depth/0020/15-deg-right/00522.png
436 | rgb/0020/15-deg-right/00308.png depth/0020/15-deg-right/00308.png
437 | rgb/0020/15-deg-right/00127.png depth/0020/15-deg-right/00127.png
438 | rgb/0020/15-deg-right/00029.png depth/0020/15-deg-right/00029.png
439 | rgb/0020/15-deg-right/00497.png depth/0020/15-deg-right/00497.png
440 | rgb/0020/15-deg-right/00026.png depth/0020/15-deg-right/00026.png
441 | rgb/0020/15-deg-right/00265.png depth/0020/15-deg-right/00265.png
442 | rgb/0020/15-deg-right/00802.png depth/0020/15-deg-right/00802.png
443 | rgb/0020/15-deg-right/00396.png depth/0020/15-deg-right/00396.png
444 | rgb/0020/30-deg-right/00622.png depth/0020/30-deg-right/00622.png
445 | rgb/0020/30-deg-right/00415.png depth/0020/30-deg-right/00415.png
446 | rgb/0020/30-deg-right/00514.png depth/0020/30-deg-right/00514.png
447 | rgb/0020/30-deg-right/00333.png depth/0020/30-deg-right/00333.png
448 | rgb/0020/30-deg-right/00611.png depth/0020/30-deg-right/00611.png
449 | rgb/0020/30-deg-right/00223.png depth/0020/30-deg-right/00223.png
450 | rgb/0020/30-deg-right/00472.png depth/0020/30-deg-right/00472.png
451 | rgb/0020/30-deg-right/00526.png depth/0020/30-deg-right/00526.png
452 | rgb/0020/30-deg-right/00734.png depth/0020/30-deg-right/00734.png
453 | rgb/0020/30-deg-right/00484.png depth/0020/30-deg-right/00484.png
454 | rgb/0020/30-deg-right/00120.png depth/0020/30-deg-right/00120.png
455 | rgb/0020/30-deg-right/00758.png depth/0020/30-deg-right/00758.png
456 | rgb/0020/30-deg-right/00556.png depth/0020/30-deg-right/00556.png
457 | rgb/0020/30-deg-right/00014.png depth/0020/30-deg-right/00014.png
458 | rgb/0020/30-deg-right/00198.png depth/0020/30-deg-right/00198.png
459 | rgb/0020/30-deg-right/00813.png depth/0020/30-deg-right/00813.png
460 | rgb/0020/30-deg-right/00797.png depth/0020/30-deg-right/00797.png
461 | rgb/0020/30-deg-right/00748.png depth/0020/30-deg-right/00748.png
462 | rgb/0020/30-deg-right/00209.png depth/0020/30-deg-right/00209.png
463 | rgb/0020/30-deg-right/00047.png depth/0020/30-deg-right/00047.png
464 | rgb/0020/30-deg-right/00237.png depth/0020/30-deg-right/00237.png
465 | rgb/0020/30-deg-right/00101.png depth/0020/30-deg-right/00101.png
466 | rgb/0020/30-deg-right/00389.png depth/0020/30-deg-right/00389.png
467 | rgb/0020/30-deg-right/00071.png depth/0020/30-deg-right/00071.png
468 | rgb/0020/30-deg-right/00757.png depth/0020/30-deg-right/00757.png
469 | rgb/0020/30-deg-right/00096.png depth/0020/30-deg-right/00096.png
470 | rgb/0020/30-deg-right/00824.png depth/0020/30-deg-right/00824.png
471 | rgb/0020/30-deg-right/00108.png depth/0020/30-deg-right/00108.png
472 | rgb/0020/fog/00806.png depth/0020/fog/00806.png
473 | rgb/0020/fog/00369.png depth/0020/fog/00369.png
474 | rgb/0020/fog/00473.png depth/0020/fog/00473.png
475 | rgb/0020/fog/00781.png depth/0020/fog/00781.png
476 | rgb/0020/fog/00183.png depth/0020/fog/00183.png
477 | rgb/0020/fog/00269.png depth/0020/fog/00269.png
478 | rgb/0020/fog/00735.png depth/0020/fog/00735.png
479 | rgb/0020/fog/00273.png depth/0020/fog/00273.png
480 | rgb/0020/fog/00229.png depth/0020/fog/00229.png
481 | rgb/0020/fog/00542.png depth/0020/fog/00542.png
482 | rgb/0020/fog/00219.png depth/0020/fog/00219.png
483 | rgb/0020/fog/00406.png depth/0020/fog/00406.png
484 | rgb/0020/fog/00580.png depth/0020/fog/00580.png
485 | rgb/0020/fog/00635.png depth/0020/fog/00635.png
486 | rgb/0020/fog/00110.png depth/0020/fog/00110.png
487 | rgb/0020/fog/00427.png depth/0020/fog/00427.png
488 | rgb/0020/fog/00820.png depth/0020/fog/00820.png
489 | rgb/0020/fog/00101.png depth/0020/fog/00101.png
490 | rgb/0020/fog/00201.png depth/0020/fog/00201.png
491 | rgb/0020/fog/00536.png depth/0020/fog/00536.png
492 | rgb/0020/fog/00704.png depth/0020/fog/00704.png
493 | rgb/0020/fog/00061.png depth/0020/fog/00061.png
494 | rgb/0020/30-deg-left/00294.png depth/0020/30-deg-left/00294.png
495 | rgb/0020/30-deg-left/00384.png depth/0020/30-deg-left/00384.png
496 | rgb/0020/30-deg-left/00031.png depth/0020/30-deg-left/00031.png
497 | rgb/0020/30-deg-left/00804.png depth/0020/30-deg-left/00804.png
498 | rgb/0020/30-deg-left/00335.png depth/0020/30-deg-left/00335.png
499 | rgb/0020/30-deg-left/00476.png depth/0020/30-deg-left/00476.png
500 | rgb/0020/30-deg-left/00285.png depth/0020/30-deg-left/00285.png
501 |
--------------------------------------------------------------------------------
/img/intro.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/S2R-DepthNet/aebc931c7e8c7baad4dec2a0fd8643244741c52e/img/intro.PNG
--------------------------------------------------------------------------------
/img/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/microsoft/S2R-DepthNet/aebc931c7e8c7baad4dec2a0fd8643244741c52e/img/overview.png
--------------------------------------------------------------------------------
/loaddata.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import os
3 | import torch
4 | import random
5 | import transform
6 | import numpy as np
7 | import pandas as pd
8 | from PIL import Image
9 | from torch.utils import data
10 | from collections import Counter
11 | from torchvision.transforms import Compose, Normalize, ToTensor
12 |
13 |
14 |
15 |
16 |
17 |
 18 | def get_dataset(root, data_file='train.list',
 19 |                 dataset='vkitti', phase='train',
 20 |                 img_transform=None,
 21 |                 depth_transform=None,
 22 |                 joint_transform=None
 23 |                 ):
 24 |     DEFINED_DATASET = {'KITTI', 'VKITTI', 'SUNCG', 'NYUD_V2'}
 25 |     assert dataset.upper() in DEFINED_DATASET
 26 |     print("name:", dataset.upper())
 27 |     name2obj = {'KITTI': KittiDataset,
 28 |                 'VKITTI': VKittiDataset,
 29 |                 'SUNCG': SUNCGDataset,
 30 |                 'NYUD_V2': NYUD_V2Dataset
 31 |                 }
 32 |     return name2obj[dataset.upper()](root=root, data_file=data_file, phase=phase,
 33 |                                      img_transform=img_transform,
 34 |                                      depth_transform=depth_transform,
 35 |                                      joint_transform=joint_transform)
36 |
37 |
38 |
 39 | class KittiDataset(data.Dataset):
 40 |     def __init__(self, root='./datasets', data_file='tgt_train.list', phase='train',
 41 |                  img_transform=None, joint_transform=None, depth_transform=None):
 42 |         self.root = root
 43 |         self.data_file = data_file
 44 |         self.files = []
 45 |         self.phase = phase
 46 |         self.img_transform = img_transform
 47 |         self.joint_transform = joint_transform
 48 |         self.depth_transform = depth_transform
 49 |
 50 |         with open(self.data_file, 'r') as f:
 51 |             data_list = f.read().split('\n')
 52 |             for data in data_list:
 53 |                 if len(data) == 0:
 54 |                     continue
 55 |                 data_info = data.split(' ')
 56 |
 57 |                 self.files.append({
 58 |                     "l_rgb": data_info[0],
 59 |                     "r_rgb": data_info[1],
 60 |                     "cam_intrin": data_info[2],
 61 |                     "depth": data_info[3]
 62 |                 })
 63 |
 64 |     def __len__(self):
 65 |         return len(self.files)
 66 |
 67 |     def read_data(self, datafiles):
 68 |         assert os.path.exists(os.path.join(self.root, datafiles['l_rgb'])), "Image does not exist"
 69 |         l_rgb = Image.open(os.path.join(self.root, datafiles['l_rgb'])).convert('RGB')
 70 |         w = l_rgb.size[0]
 71 |         h = l_rgb.size[1]
 72 |         assert os.path.exists(os.path.join(self.root, datafiles['r_rgb'])), "Image does not exist"
 73 |         r_rgb = Image.open(os.path.join(self.root, datafiles['r_rgb'])).convert('RGB')
 74 |
 75 |         kitti = KITTI()
 76 |         assert os.path.exists(os.path.join(self.root, datafiles['cam_intrin'])), "Camera info does not exist"
 77 |         fb = kitti.get_fb(os.path.join(self.root, datafiles['cam_intrin'])) # get focal_length * baseline
 78 |         assert os.path.exists(os.path.join(self.root, datafiles['depth'])), "Depth does not exist"
 79 |
 80 |         depth, depth_interp = kitti.get_depth(os.path.join(self.root, datafiles['cam_intrin']),
 81 |                                               os.path.join(self.root, datafiles['depth']), [h, w], interp=True)
 82 |
 83 |         return l_rgb, r_rgb, fb, depth, depth_interp
 84 |
 85 |     def __getitem__(self, index):
 86 |         if self.phase == 'train':
 87 |             index = random.randint(0, len(self)-1)
 88 |         if index > len(self)-1:
 89 |             index = index % len(self)
 90 |         datafiles = self.files[index]
 91 |         l_img, r_img, fb, depth, depth_interp = self.read_data(datafiles)
 92 |
 93 |         if self.joint_transform is not None:
 94 |             if self.phase == 'train':
 95 |                 l_img, r_img, _, fb = self.joint_transform((l_img, r_img, None, 'train', fb))
 96 |             else:
 97 |                 l_img, r_img, _, fb = self.joint_transform((l_img, r_img, None, 'test', fb))
 98 |         if self.img_transform is not None:
 99 |             l_img = self.img_transform(l_img)
100 |             if r_img is not None:
101 |                 r_img = self.img_transform(r_img)
102 |
103 |         if self.phase == 'test':
104 |             data = {}
105 |             data['left_img'] = l_img
106 |             data['right_img'] = r_img
107 |             data['depth'] = depth
108 |             data['fb'] = fb
109 |             data['depth_interp'] = depth_interp
110 |             return data
111 |
112 |         data = {}
113 |         if l_img is not None:
114 |             data['left_img'] = l_img
115 |         if r_img is not None:
116 |             data['right_img'] = r_img
117 |         if fb is not None:
118 |             data['fb'] = fb
119 |
120 |         return {'tgt': data}
121 |
122 |
123 | class VKittiDataset(data.Dataset):
124 |     def __init__(self, root='./datasets', data_file='src_train.list',
125 |                  phase='train', img_transform=None, depth_transform=None,
126 |                  joint_transform=None):
127 |
128 |         self.root = root
129 |         self.data_file = data_file
130 |         self.files = []
131 |         self.phase = phase
132 |         self.img_transform = img_transform
133 |         self.depth_transform = depth_transform
134 |         self.joint_transform = joint_transform
135 |
136 |         with open(self.data_file, 'r') as f:
137 |             data_list = f.read().split('\n')
138 |             for data in data_list:
139 |                 if len(data) == 0:
140 |                     continue
141 |                 data_info = data.split(' ')
142 |
143 |                 self.files.append({
144 |                     "rgb": data_info[0],
145 |                     "depth": data_info[1]
146 |                 })
147 |
148 |     def __len__(self):
149 |         return len(self.files)
150 |
151 |     def read_data(self, datafiles):
152 |         assert os.path.exists(os.path.join(self.root, datafiles['rgb'])), "Image does not exist"
153 |         rgb = Image.open(os.path.join(self.root, datafiles['rgb'])).convert('RGB')
154 |         assert os.path.exists(os.path.join(self.root, datafiles['depth'])), 'Depth does not exist'
155 |         depth = Image.open(os.path.join(self.root, datafiles['depth']))
156 |
157 |         return rgb, depth
158 |
159 |     def __getitem__(self, index):
160 |         if self.phase == 'train':
161 |             index = random.randint(0, len(self)-1)
162 |         if index > len(self) - 1:
163 |             index = index % len(self)
164 |         datafiles = self.files[index]
165 |         img, depth = self.read_data(datafiles)
166 |
167 |         if self.joint_transform is not None:
168 |             if self.phase == 'train':
169 |                 img, _, depth, _ = self.joint_transform((img, None, depth, self.phase, None))
170 |             else:
171 |                 img, _, depth, _ = self.joint_transform((img, None, depth, 'test', None))
172 |
173 |         if self.img_transform is not None:
174 |             img = self.img_transform(img)
175 |
176 |         if self.depth_transform is not None:
177 |             depth = self.depth_transform(depth)
178 |
179 |         if self.phase == 'test':
180 |             data = {}
181 |             data['img'] = img
182 |             data['depth'] = depth
183 |             return data
184 |         data = {}
185 |
186 |         if img is not None:
187 |             data['img'] = img
188 |         if depth is not None:
189 |             data['depth'] = depth
190 |         return {'src': data}
191 |
192 | class NYUD_V2Dataset(data.Dataset):
193 |     def __init__(self, root='./datasets', data_file='nyu_data.txt', phase='train',
194 |                  img_transform=None, joint_transform=None, depth_transform=None):
195 |         self.root = root
196 |         self.data_file = data_file
197 |         self.phase = phase
198 |         self.img_transform = img_transform
199 |         self.joint_transform = joint_transform
200 |         self.depth_transform = depth_transform
201 |
202 |         self.frame = pd.read_csv(self.data_file, header=None)
203 |
204 |     def __len__(self):
205 |         return len(self.frame)
206 |
207 |     def read_data(self, datafiles):
208 |         assert os.path.exists(os.path.join(self.root, datafiles['rgb'])), "Image does not exist"
209 |         rgb = Image.open(os.path.join(self.root, datafiles['rgb'])).convert('RGB')
210 |         assert os.path.exists(os.path.join(self.root, datafiles['depth'])), 'Depth does not exist'
211 |         depth = Image.open(os.path.join(self.root, datafiles['depth']))
212 |
213 |         return rgb, depth
214 |
215 |     def __getitem__(self, index):
216 |         if self.phase == 'train':
217 |             index = random.randint(0, len(self)-1)
218 |         if index > len(self) - 1:
219 |             index = index % len(self)
220 |         image_name = self.frame.loc[index, 0]
221 |         depth_name = self.frame.loc[index, 1]
222 |         datafiles = {"rgb": image_name, "depth": depth_name}
223 |         img, depth = self.read_data(datafiles)
224 |
225 |         if self.joint_transform is not None:
226 |             if self.phase == 'train':
227 |                 img, _, depth, _ = self.joint_transform((img, None, depth, self.phase, None))
228 |             else:
229 |                 img, _, depth, _ = self.joint_transform((img, None, depth, 'test', None))
230 |
231 |         if self.img_transform is not None:
232 |             img = self.img_transform(img)
233 |
234 |         if self.depth_transform is not None:
235 |             depth = self.depth_transform(depth)
236 |
237 |         if self.phase == 'test':
238 |             data = {}
239 |             data['img'] = img
240 |             data['depth'] = depth
241 |             return data
242 |
243 |         data = {}
244 |         if img is not None:
245 |             data['img'] = img
246 |         if depth is not None:
247 |             data['depth'] = depth
248 |         return {'src': data}
249 |
250 |
251 | class SUNCGDataset(data.Dataset):
252 | def __init__(self, root='./suncg_datasets', data_file='suncg_train.txt',
253 | phase='train', img_transform=None, depth_transform=None,
254 | joint_transform=None):
255 |
256 | self.root = root
257 | self.data_file = data_file
258 | self.files = []
259 | self.phase = phase
260 | self.img_transform = img_transform ## img_transform
261 | self.depth_transform = depth_transform ## depth_transform
262 | self.joint_transform = joint_transform ## joint_transform
263 |
264 | with open(self.data_file, 'r') as f:
265 | data_list = f.read().split('\n')
266 | for data in data_list:
267 | if len(data) == 0:
268 | continue
269 | data_info = data.split(',')
270 | self.files.append({
271 | "rgb":data_info[0],
272 | "depth":data_info[1]
273 | })
274 |
275 | def __len__(self):
276 | return len(self.files)
277 |
278 | def read_data(self, datafiles):
279 | image_path = os.path.join(self.root, "trainA_SYN10/trainA_SYN10", datafiles['rgb'])
280 | depth_path = os.path.join(self.root, "trainC_SYN10/trainC_SYN10", datafiles['depth'])
281 |
282 | assert os.path.exists(image_path), "Image does not exist"
283 | rgb = Image.open(image_path).convert('RGB')
284 | assert os.path.exists(depth_path), 'Depth does not exist'
285 | depth = Image.open(depth_path)
286 | return rgb, depth
287 |
288 | def __getitem__(self, index):
289 | if self.phase == 'train':
290 | index = random.randint(0, len(self)-1)
291 | if index > len(self) - 1:
292 | index = index % len(self)
293 | datafiles = self.files[index]
294 | img, depth = self.read_data(datafiles)
295 |
296 | if self.joint_transform is not None:
297 | if self.phase == 'train':
298 | img, _, depth, _ = self.joint_transform((img, None, depth, self.phase, None))
299 | else:
300 | img, _, depth, _ = self.joint_transform((img, None, depth, 'test', None))
301 |
302 | if self.img_transform is not None:
303 | img = self.img_transform(img)
304 |
305 | if self.depth_transform is not None:
306 | depth = self.depth_transform(depth)
307 |
308 | if self.phase == 'test':
309 | data = {}
310 | data['img'] = img
311 | data['depth'] = depth
312 | return data
313 | data = {}
314 | if img is not None:
315 | data['img'] = img
316 | if depth is not None:
317 | data['depth'] = depth
318 | return {'src': data}
319 |
320 |
321 |
322 | class DepthToTensor(object):
323 | def __call__(self, input):
324 | arr_input = np.array(input)
325 | tensors = torch.from_numpy(arr_input.reshape((1, arr_input.shape[0], arr_input.shape[1]))).float()
326 | return tensors
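# Note: DepthToTensor only reshapes the (already transformed) depth map into a 1 x H x W float
# tensor; no value scaling happens here -- normalization is handled earlier by transform.RandomImgAugment.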
327 |
328 |
329 |
330 | def creat_train_dataloader(dataset, root, data_file, batchsize, nThreads,
331 | no_flip, no_rotation, no_augment, loadSize):
332 |
333 | joint_transform_list = [transform.RandomImgAugment(no_flip, no_rotation, no_augment, loadSize, dataset)]
334 |
335 | img_transform_list = [ToTensor(), Normalize([.5, .5, .5], [.5, .5, .5])]
336 | joint_transform = Compose(joint_transform_list)
337 | img_transform = Compose(img_transform_list)
338 | depth_transform = Compose([DepthToTensor()])
339 |
340 | dataset = get_dataset(root=root, data_file=data_file, phase='train',
341 | dataset=dataset,
342 | img_transform=img_transform, depth_transform=depth_transform,
343 | joint_transform=joint_transform)
344 | loader = torch.utils.data.DataLoader(dataset, batch_size=batchsize,
345 | shuffle=True, num_workers=int(nThreads),
346 | pin_memory=True)
347 | return loader
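# Note: each training batch is a nested dict -- the synthetic datasets above yield {'src': {...}}
# and the KITTI dataset yields {'tgt': {...}}; train.py indexes sample_batched['src']['img'] /
# ['depth'] accordingly.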
348 |
349 | def create_test_dataloader(dataset, root, data_file, batchsize, nThreads, loadSize):
350 |
351 |
352 | joint_transform_list = [transform.RandomImgAugment(True, True, True, loadSize, dataset)]
353 |
354 | img_transform_list = [ToTensor(), Normalize([.5, .5, .5], [.5, .5, .5])]
355 | joint_transform = Compose(joint_transform_list)
356 | img_transform = Compose(img_transform_list)
357 | depth_transform = Compose([DepthToTensor()])
358 |
359 | dataset = get_dataset(root=root, data_file=data_file, phase='test',
360 | dataset=dataset, img_transform=img_transform, depth_transform=depth_transform,
361 | joint_transform=joint_transform)
362 |
363 | loader = torch.utils.data.DataLoader(
364 | dataset,batch_size=1,
365 | shuffle=False,
366 | num_workers=int(nThreads),
367 | pin_memory=True)
368 | return loader
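# Note: the test loader is hard-coded to batch_size=1, so the batchsize argument is accepted
# but effectively unused here.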
369 |
370 |
371 |
372 |
373 |
374 |
375 |
376 | class KITTI:
377 | def read_calib_file(self, path):
378 | # taken from https://github.com/hunse/kitti
379 | float_chars = set("0123456789.e+- ")
380 | data = {}
381 | with open(path, 'r') as f:
382 | for line in f.readlines():
383 | key, value = line.split(':', 1)
384 | value = value.strip()
385 | data[key] = value
386 | if float_chars.issuperset(value):
387 | # try to cast to float array
388 | try:
389 | data[key] = np.array(list(map(float, value.split(' '))))
390 | except ValueError:
391 | # casting error: data[key] already holds the raw string value, so just pass
392 | pass
393 | return data
394 |
395 | def get_fb(self, calib_dir, cam=2):
396 | cam2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt'))
397 | P2_rect = cam2cam['P_rect_02'].reshape(3, 4) # Projection matrix of the left camera
398 | P3_rect = cam2cam['P_rect_03'].reshape(3, 4) # Projection matrix of the right camera
399 |
400 | # cam 2 (left color camera) sits ~6 cm to the left of cam 0
401 | # cam 3 (right color camera) sits to its right, giving a stereo baseline of ~54 cm
402 |
403 | b2 = P2_rect[0, 3] / -P2_rect[0,0] # offset of cam 2 relative to cam0
404 | b3 = P3_rect[0, 3] / -P3_rect[0,0] # offset of cam 3 relative to cam0
405 |
406 | baseline = b3 - b2
407 |
408 | if cam == 2:
409 | focal_length = P2_rect[0, 0] # focal_length of cam 2
410 | elif cam == 3:
411 | focal_length = P3_rect[0, 0] # focal_length of cam 3
412 | return focal_length * baseline
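# Note: focal_length * baseline is the usual stereo constant, so metric depth can be recovered
# from a disparity map as depth = fb / disparity.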
413 |
414 | def load_velodyne_points(self, file_name):
415 | # adapted from https://github.com/hunse/kitti
416 | points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
417 | points[:, 3] = 1.0
418 | return points
419 |
420 | def lin_interp(self, shape, xyd):
421 | # taken from https://github.com/hunse/kitti
422 | from scipy.interpolate import LinearNDInterpolator
423 | ## m=h, n=w xyd
424 | m, n = shape
425 | ij, d = xyd[:, 1::-1], xyd[:, 2]
426 | f = LinearNDInterpolator(ij, d, fill_value=0)
427 | # h, w
428 | J, I = np.meshgrid(np.arange(n), np.arange(m))
429 | IJ = np.vstack([I.flatten(), J.flatten()]).T
430 | disparity = f(IJ).reshape(shape)
431 | return disparity
432 |
433 | def sub2ind(self, matrixSize, rowSub, colSub):
434 | # m=h, n=w
435 | # rowsub y
436 | # colsub x
437 | m, n = matrixSize
438 |
439 | return rowSub * (n-1) + colSub - 1 # num
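# Note: this mirrors the 1-based MATLAB sub2ind helper used by the KITTI devkit; get_depth below
# only uses it to detect velodyne points that project onto the same pixel.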
440 |
441 | def get_depth(self, calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
442 | # load calibration files
443 | cam2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt'))
444 | velo2cam = self.read_calib_file(os.path.join(calib_dir, 'calib_velo_to_cam.txt'))
445 | velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
446 | velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0]))) # Projection matrix of Point cloud to cam
447 |
448 | # compute projection matrix velodyne --> image plane
449 | R_cam2rect = np.eye(4)
450 | R_cam2rect[:3,:3] = cam2cam['R_rect_00'].reshape(3, 3) # rectifying rotation of camera 0
451 | P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3, 4) # Projection matrix of the left camera
452 | P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
453 |
454 | # load velodyne points and remove all behind image plane (approximation)
455 | # each row of the velodyne data is forward, left, up, reflectance
456 | velo = self.load_velodyne_points(velo_file_name)
457 | velo = velo[velo[:, 0]>=0, :] # remove all behind image plane
458 |
459 | # project the points to camera
460 | velo_pts_im = np.dot(P_velo2im, velo.T).T
461 | velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis] # homogeneous -> pixel coordinates (divide by depth)
462 |
463 | if vel_depth:
464 | velo_pts_im[:, 2] = velo[:, 0]
465 |
466 | # check if in bounds
467 | # use minus 1 to get the exact same value as KITTI matlab code
468 |
469 | velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
470 | velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
471 | val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
472 | val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
473 | velo_pts_im = velo_pts_im[val_inds, :]
474 |
475 | # project to image
476 | depth = np.zeros((im_shape))
477 | depth[velo_pts_im[:, 1].astype(np.int32), velo_pts_im[:, 0].astype(np.int32)] = velo_pts_im[:, 2]
478 |
479 | # find the duplicate points and choose the closest depth
480 | # (Counter is expected to be imported at the top of this file: from collections import Counter)
481 | inds = self.sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
482 | dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
483 | for dd in dupe_inds:
484 | pts = np.where(inds==dd)[0]
485 | x_loc = int(velo_pts_im[pts[0], 0]) # x
486 | y_loc = int(velo_pts_im[pts[0], 1]) # y
487 | depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
488 | depth[depth<0] = 0
489 |
490 | if interp:
491 | # interpolate the depth map to fill in holes
492 | depth_interp = self.lin_interp(im_shape, velo_pts_im)
493 | return depth, depth_interp
494 | else:
495 | return depth
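# Usage sketch (paths are illustrative, not from the original file):
#   kitti = KITTI()
#   gt_depth = kitti.get_depth(calib_dir='2011_09_26',
#                              velo_file_name='.../velodyne_points/data/0000000000.bin',
#                              im_shape=(375, 1242), cam=2, interp=False)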
--------------------------------------------------------------------------------
/models/modules.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import torch
3 | import functools
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 |
9 |
10 | class AdaptiveInstanceNorm2d(nn.Module):
11 | def __init__(self, num_features, eps=1e-5, momentum=0.1):
12 | super(AdaptiveInstanceNorm2d, self).__init__()
13 | self.num_features = num_features
14 | self.eps = eps
15 | self.momentum = momentum
16 | # weight and bias are dynamically assigned
17 | self.weight = None
18 | self.bias = None
19 | # just dummy buffers, not used
20 | self.register_buffer('running_mean', torch.zeros(num_features))
21 | self.register_buffer('running_var', torch.ones(num_features))
22 |
23 | def forward(self, x):
24 | assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
25 | b, c = x.size(0), x.size(1)
26 | running_mean = self.running_mean.repeat(b)
27 | running_var = self.running_var.repeat(b)
28 |
29 | # Apply instance norm: reshape to (1, b*c, H, W) so batch_norm computes per-(sample, channel) statistics
30 | x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
31 |
32 | out = F.batch_norm(
33 | x_reshaped, running_mean, running_var, self.weight, self.bias,
34 | True, self.momentum, self.eps)
35 |
36 | return out.view(b, c, *x.size()[2:])
37 |
38 | def __repr__(self):
39 | return self.__class__.__name__ + '(' + str(self.num_features) + ')'
40 |
41 | class ResBlocks(nn.Module):
42 | def __init__(self, num_blocks, dim, norm='in', activation='relu', pad_type='zero'):
43 | super(ResBlocks, self).__init__()
44 | self.model = []
45 | # num_blocks=4
46 | #
47 | for i in range(num_blocks):
48 | self.model += [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type)]
49 | self.model = nn.Sequential(*self.model)
50 |
51 | def forward(self, x):
52 | return self.model(x)
53 |
54 |
55 | class ResBlock(nn.Module):
56 | def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
57 | super(ResBlock, self).__init__()
58 |
59 | # initialize padding
60 | padding = 1
61 | if pad_type == 'reflect':
62 | self.pad = nn.ReflectionPad2d(padding)
63 | elif pad_type == 'replicate':
64 | self.pad = nn.ReplicationPad2d(padding)
65 | elif pad_type == 'zero':
66 | self.pad = nn.ZeroPad2d(padding)
67 | else:
68 | assert 0, "Unsupported padding type: {}".format(pad_type)
69 |
70 | self.conv1 = nn.Conv2d(dim, dim, 3, 1, bias=True)
71 | #print("res_norm:", norm)
72 | if norm=="in":
73 | self.norm1 = nn.InstanceNorm2d(dim)
74 | elif norm=='adain':
75 | self.norm1 = AdaptiveInstanceNorm2d(dim)
76 | self.relu1 = nn.LeakyReLU(0.2, inplace=True)
77 |
78 | self.conv2 = nn.Conv2d(dim, dim, 3, 1, bias=True)
79 | if norm=="in":
80 | self.norm2 = nn.InstanceNorm2d(dim)
81 | elif norm=='adain':
82 | self.norm2 = AdaptiveInstanceNorm2d(dim)
83 |
84 | def forward(self, x):
85 | residual = x
86 |
87 | x = self.conv1(self.pad(x))
88 | x = self.norm1(x)
89 | x = self.relu1(x)
90 | x = self.conv2(self.pad(x))
91 | out = self.norm2(x)
92 |
93 | out += residual
94 | return out
95 |
96 | def get_nonlinearity_layer(activation_type='PReLU'):
97 | if activation_type == 'ReLU':
98 | nonlinearity_layer = nn.ReLU(True)
99 | elif activation_type == 'SELU':
100 | nonlinearity_layer = nn.SELU(True)
101 | elif activation_type == 'LeakyReLU':
102 | nonlinearity_layer = nn.LeakyReLU(0.1, True)
103 | elif activation_type == 'PReLU':
104 | nonlinearity_layer = nn.PReLU()
105 | else:
106 | raise NotImplementedError('activation layer [%s] is not found' % activation_type)
107 | return nonlinearity_layer
108 |
109 | class Conv2dBlock(nn.Module):
110 | # input_dim=3, dim=64, 7, 1, 3, none, lrelu, reflect
111 | def __init__(self, input_dim ,output_dim, kernel_size, stride,
112 | padding=0, norm='none', activation='relu', pad_type='zero'):
113 | super(Conv2dBlock, self).__init__()
114 | self.use_bias = True
115 | # initialize padding
116 | if pad_type == 'reflect':
117 | self.pad = nn.ReflectionPad2d(padding)
118 | elif pad_type == 'replicate':
119 | self.pad = nn.ReplicationPad2d(padding)
120 | elif pad_type == 'zero':
121 | self.pad = nn.ZeroPad2d(padding)
122 | else:
123 | assert 0, "Unsupported padding type: {}".format(pad_type)
124 |
125 | # initialize normalization
126 | norm_dim = output_dim # 64
127 | #print("norm_dim:", norm_dim)
128 | if norm == 'bn':
129 | self.norm = nn.BatchNorm2d(norm_dim)
130 | elif norm == 'in':
131 | #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
132 | self.norm = nn.InstanceNorm2d(norm_dim)
133 | elif norm == 'ln':
134 | self.norm = LayerNorm(norm_dim)
135 | elif norm == 'adain':
136 | self.norm = AdaptiveInstanceNorm2d(norm_dim)
137 | elif norm == 'none' or norm == 'sn':
138 | self.norm = None
139 | else:
140 | assert 0, "Unsupported normalization: {}".format(norm)
141 |
142 | # initialize activation
143 | if activation == 'relu':
144 | self.activation = nn.ReLU(inplace=True)
145 | elif activation == 'lrelu':
146 | self.activation = nn.LeakyReLU(0.2, inplace=True)
147 | elif activation == 'prelu':
148 | self.activation = nn.PReLU()
149 | elif activation == 'selu':
150 | self.activation = nn.SELU(inplace=True)
151 | elif activation == 'tanh':
152 | self.activation = nn.Tanh()
153 | elif activation == 'none':
154 | self.activation = None
155 | else:
156 | assert 0, "Unsupported activation: {}".format(activation)
157 |
158 | # initialize convolution
159 | if norm == 'sn':
160 | self.conv = nn.utils.spectral_norm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)) # PyTorch's built-in spectral norm; no standalone SpectralNorm class is defined in this file
161 | else:
162 | self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
163 |
164 | def forward(self, x):
165 | x = self.pad(x)
166 | x = self.conv(x)
167 | if self.norm:
168 | x = self.norm(x)
169 | if self.activation:
170 | x = self.activation(x)
171 | return x
172 |
173 | class LayerNorm(nn.Module):
174 | def __init__(self, num_features, eps=1e-5, affine=True):
175 | super(LayerNorm, self).__init__()
176 | self.num_features = num_features
177 | self.affine = affine
178 | self.eps = eps
179 |
180 | if self.affine:
181 | self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
182 | self.beta = nn.Parameter(torch.zeros(num_features))
183 |
184 | def forward(self, x):
185 | shape = [-1] + [1] * (x.dim() - 1)
186 | # print(x.size())
187 | if x.size(0) == 1:
188 | # These two lines run much faster in pytorch 0.4 than the two lines listed below.
189 | mean = x.view(-1).mean().view(*shape)
190 | std = x.view(-1).std().view(*shape)
191 | else:
192 | mean = x.view(x.size(0), -1).mean(1).view(*shape)
193 | std = x.view(x.size(0), -1).std(1).view(*shape)
194 |
195 | x = (x - mean) / (std + self.eps)
196 |
197 | if self.affine:
198 | shape = [1, -1] + [1] * (x.dim() - 2)
199 | x = x * self.gamma.view(*shape) + self.beta.view(*shape)
200 | return x
201 |
202 |
203 |
204 | class Struct_Encoder(nn.Module):
205 | def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type):
206 | super(Struct_Encoder, self).__init__()
207 | self.conv = Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)
208 | self.model = []
209 | self.conv1 = Conv2dBlock(dim, 2*dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)
210 | dim *= 2
211 | self.conv2 = Conv2dBlock(dim, 2*dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)
212 | dim *= 2
213 | self.resblock = ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type)
214 | self.output_dim = dim
215 |
216 | def forward(self, x):
217 | x1 = self.conv(x)
218 | x2 = self.conv1(x1)
219 | x3 = self.conv2(x2)
220 | x4 = self.resblock(x3)
221 | return x4
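# Note: with the settings used in train.py/test.py (dim=64 and the two stride-2 convs above),
# the returned structure code has 256 channels at 1/4 of the input resolution.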
222 |
223 |
224 | class _UpProjection(nn.Sequential):
225 |
226 | def __init__(self, num_input_features, num_output_features):
227 | super(_UpProjection, self).__init__()
228 |
229 | self.conv1 = nn.Conv2d(num_input_features, num_output_features,
230 | kernel_size=5, stride=1, padding=2, bias=False)
231 | self.bn1 = nn.BatchNorm2d(num_output_features)
232 | self.relu = nn.ReLU(inplace=True)
233 | self.conv1_2 = nn.Conv2d(num_output_features, num_output_features,
234 | kernel_size=3, stride=1, padding=1, bias=False)
235 | self.bn1_2 = nn.BatchNorm2d(num_output_features)
236 |
237 | self.conv2 = nn.Conv2d(num_input_features, num_output_features,
238 | kernel_size=5, stride=1, padding=2, bias=False)
239 | self.bn2 = nn.BatchNorm2d(num_output_features)
240 |
241 | def forward(self, x, size):
242 | x = F.interpolate(x, size=size, mode='bilinear',align_corners=True)
243 | #x = F.upsample(x, size=size, mode='bilinear')
244 | x_conv1 = self.relu(self.bn1(self.conv1(x)))
245 | bran1 = self.bn1_2(self.conv1_2(x_conv1))
246 | bran2 = self.bn2(self.conv2(x))
247 |
248 | out = self.relu(bran1 + bran2)
249 |
250 | return out
251 |
252 |
253 | class _EncoderBlock(nn.Module):
254 | def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
255 | super(_EncoderBlock, self).__init__()
256 |
257 | model = [
258 | nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
259 | norm_layer(middle_nc),
260 | nonlinearity,
261 | nn.Conv2d(middle_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
262 | norm_layer(output_nc),
263 | nonlinearity
264 | ]
265 |
266 | self.model = nn.Sequential(*model)
267 |
268 | def forward(self, x):
269 | return self.model(x)
270 |
271 | class _InceptionBlock(nn.Module):
272 | def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), width=1, drop_rate=0, use_bias=False):
273 | super(_InceptionBlock, self).__init__()
274 |
275 | self.width = width
276 | self.drop_rate = drop_rate
277 |
278 | for i in range(width): # 0, 1, 2
279 | layer = nn.Sequential(
280 | nn.ReflectionPad2d(i*2+1),
281 | nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=0, dilation=i*2+1, bias=use_bias)
282 | )
283 | setattr(self, 'layer'+str(i), layer)
284 |
285 | self.norm1 = norm_layer(output_nc*width)
286 | self.norm2 = norm_layer(output_nc)
287 | self.nonlinearity = nonlinearity
288 | self.branch1x1 = nn.Sequential(
289 | nn.ReflectionPad2d(1),
290 | nn.Conv2d(output_nc*width, output_nc, kernel_size=3, padding=0, bias=use_bias)
291 | )
292 | def forward(self, x):
293 | result = []
294 |
295 | for i in range(self.width):
296 | layer = getattr(self, 'layer'+str(i))
297 | result.append(layer(x))
298 | output = torch.cat(result, 1)
299 | output = self.nonlinearity(self.norm1(output))
300 | output = self.norm2(self.branch1x1(output))
301 | if self.drop_rate > 0:
302 | output = F.dropout(output, p=self.drop_rate, training=self.training)
303 |
304 | return self.nonlinearity(output+x)
305 |
306 | class GaussianNoiseLayer(nn.Module):
307 | def __init__(self):
308 | super(GaussianNoiseLayer, self).__init__()
309 |
310 | def forward(self, x):
311 | if not self.training:
312 | return x
313 | noise = (torch.randn(x.size(), device=x.device) - 0.5) / 10.0
314 | return x+noise
315 |
316 |
317 | class _DecoderUpBlock(nn.Module):
318 | def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
319 | super(_DecoderUpBlock, self).__init__()
320 |
321 | model = [
322 | nn.ReflectionPad2d(1),
323 | nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=0, bias=use_bias),
324 | norm_layer(middle_nc),
325 | nonlinearity,
326 | nn.ConvTranspose2d(middle_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1),
327 | norm_layer(output_nc),
328 | nonlinearity
329 | ]
330 |
331 | self.model = nn.Sequential(*model)
332 |
333 | def forward(self, x):
334 | return self.model(x)
335 |
336 | class _OutputBlock(nn.Module):
337 | def __init__(self, input_nc, output_nc, kernel_size=3, use_bias=False):
338 | super(_OutputBlock, self).__init__()
339 | model = [
340 | nn.ReflectionPad2d(int(kernel_size/2)),
341 | nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size, padding=0, bias=use_bias),
342 | nn.Tanh()
343 | ]
344 |
345 | self.model = nn.Sequential(*model)
346 |
347 | def forward(self, x):
348 | return self.model(x)
349 |
350 |
351 | class Struct_Decoder(nn.Module):
352 | def __init__(self):
353 | super(Struct_Decoder, self).__init__()
354 | num_features = 256
355 |
356 | self.up1 = _UpProjection(num_input_features=num_features, num_output_features=num_features // 2)
357 | num_features = num_features // 2 # 128
358 |
359 | self.up2 = _UpProjection(num_input_features=num_features, num_output_features=num_features // 2)
360 | num_features = num_features // 2 # 64
361 |
362 | self.conv = nn.Conv2d(num_features, 1, kernel_size=3, stride=1, bias=True)
363 | self.reflect_pad = nn.ReflectionPad2d(1)
364 |
365 |
366 | def forward(self, x):
367 | u1 = self.up1(x, (x.size(2)*2, x.size(3)*2))
368 | u2 = self.up2(u1, (x.size(2)*4, x.size(3)*4))
369 | u3 = self.reflect_pad(u2)
370 | out = torch.sigmoid(self.conv(u3))
371 | return out
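# Note: the two _UpProjection steps bring the 1/4-resolution structure code back to the input
# resolution, and the sigmoid keeps the single-channel structure map in (0, 1).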
372 |
373 | def get_norm_layer(norm_type='instance'):
374 | if norm_type == 'batch':
375 | norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
376 | elif norm_type == 'instance':
377 | norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
378 | elif norm_type == 'none':
379 | norm_layer = None
380 | else:
381 | raise NotImplementedError('normalization layer [%s] is not found'% norm_type)
382 | return norm_layer
383 |
384 |
385 | class Depth_Net(nn.Module):
386 | def __init__(self, input_nc=1, output_nc=1, ngf=64, layers=4, norm='batch', drop_rate=0, add_noise=False, weight=0.1):
387 | super(Depth_Net, self).__init__()
388 |
389 | self.layers = layers
390 | self.weight = weight
391 | norm_layer = get_norm_layer(norm_type=norm)
392 | nonlinearity = get_nonlinearity_layer(activation_type='PReLU')
393 |
394 | if type(norm_layer) == functools.partial:
395 | use_bias = norm_layer.func == nn.InstanceNorm2d
396 | else:
397 | use_bias = norm_layer == nn.InstanceNorm2d
398 |
399 | # encoder part
400 | self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
401 | self.conv1 = nn.Sequential(nn.ReflectionPad2d(3),
402 | nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
403 | norm_layer(ngf),
404 | nonlinearity)
405 | self.conv2 = _EncoderBlock(ngf, ngf*2, ngf*2, norm_layer, nonlinearity, use_bias) # 64/128/128
406 | self.conv3 = _EncoderBlock(ngf*2, ngf*4, ngf*4, norm_layer, nonlinearity, use_bias) # 128/256/256
407 | self.conv4 = _EncoderBlock(ngf*4, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias) # 256/512/512
408 |
409 | for i in range(layers - 4):
410 | conv = _EncoderBlock(ngf*8, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
411 | setattr(self, 'down'+str(i), conv.model)
412 |
413 | center = []
414 | for i in range(7 - layers): # 0, 1, 2
415 | center += [
416 | _InceptionBlock(ngf*8, ngf*8, norm_layer, nonlinearity, 7-layers, drop_rate, use_bias)
417 | ]
418 |
419 | center += [
420 | _DecoderUpBlock(ngf*8, ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
421 | ]
422 |
423 | if add_noise:
424 | center += [GaussianNoiseLayer()]
425 | self.center = nn.Sequential(*center)
426 |
427 | for i in range(layers-4):
428 | upconv = _DecoderUpBlock(ngf*(8+4), ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
429 | setattr(self, 'up'+str(i), upconv.model)
430 |
431 | self.deconv4 = _DecoderUpBlock(ngf*(4+4), ngf*8, ngf*2, norm_layer, nonlinearity, use_bias)
432 | self.deconv3 = _DecoderUpBlock(ngf*(2+2)+output_nc, ngf*4, ngf, norm_layer, nonlinearity, use_bias)
433 | self.deconv2 = _DecoderUpBlock(ngf*(1+1)+output_nc, ngf*2, int(ngf/2), norm_layer, nonlinearity, use_bias)
434 |
435 | self.output4 = _OutputBlock(ngf*(4+4), output_nc, 3, use_bias)
436 | self.output3 = _OutputBlock(ngf*(2+2)+output_nc, output_nc, 3, use_bias)
437 | self.output2 = _OutputBlock(ngf*(1+1)+output_nc, output_nc, 3, use_bias)
438 | self.output1 = _OutputBlock(int(ngf/2)+output_nc, output_nc, 7, use_bias)
439 |
440 | self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
441 |
442 | def forward(self, input):
443 | conv1 = self.pool(self.conv1(input)) # 3/64 1/2
444 | conv2 = self.pool(self.conv2.forward(conv1)) # 64/128 1/4
445 | conv3 = self.pool(self.conv3.forward(conv2)) # 128/256 1/8
446 |
447 | center_in = self.pool(self.conv4.forward(conv3)) # 256/512 1/16
448 |
449 | middle = [center_in]
450 | for i in range(self.layers-4):
451 | model = getattr(self, 'down'+str(i))
452 | center_in = self.pool(model.forward(center_in))
453 | middle.append(center_in)
454 | center_out = self.center.forward(center_in)
455 |
456 | for i in range(self.layers-4):
457 | model = getattr(self, 'up'+str(i))
458 | center_out = model.forward(torch.cat([center_out, middle[self.layers-5-i]], 1))
459 |
460 | scale = 1.0
461 | result= []
462 | deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * self.weight], 1))
463 | output4 = scale * self.output4.forward(torch.cat([center_out, conv3 * self.weight], 1))
464 | result.append(output4)
465 | deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
466 | output3 = scale * self.output3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
467 | result.append(output3)
468 | deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
469 | output2 = scale * self.output2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
470 | result.append(output2)
471 | output1 = scale * self.output1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
472 | result.append(output1)
473 |
474 | return result
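# Note: result is ordered coarse-to-fine (roughly 1/8, 1/4, 1/2 and full input resolution);
# test.py uses result[-1] as the final prediction, while the loss in train_loss.py supervises
# every scale.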
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import os
3 | import torch
4 | import argparse
5 | import loaddata
6 | import matplotlib
7 | import numpy as np
8 | from utils import *
9 | import matplotlib.cm
10 | import torch.nn as nn
11 | import DSAModules
12 |
13 | import torch.nn.parallel
14 | import matplotlib as mpl
15 | from models import modules
16 | import matplotlib.pyplot as plt
17 | import matplotlib.image as mpimg
18 |
19 | from torch.autograd import Variable
20 | import torch.backends.cudnn as cudnn
21 |
22 | plt.switch_backend('agg')
23 | plt.set_cmap("jet")
24 |
25 | ## =========================== Parameters =================
26 | parser = argparse.ArgumentParser(description="S2R-DepthNet-Test.")
27 | parser.add_argument('--dataset', type=str, default='VKITTI', help='synthetic domain') # *******
28 | parser.add_argument('--root', type=str, default='', help='path to source dataset.') # *******
29 | parser.add_argument('--test_datafile', type=str, default='', help='stores data list, in syn_root') # *******
30 | parser.add_argument('--batchSize', type=int, default=1, help='input batch size') # *******
31 | parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data') # *******
32 | parser.add_argument('--loadSize', nargs='+', type=int, default=286, help='scale images to this size') # *******
33 | parser.add_argument('--out_dir', type=str, default="out", help="the path of log")
34 | parser.add_argument('--Shared_Struct_Encoder_path', type=str, default="", help='the path of Shared_Struct_Encoder models')
35 | parser.add_argument('--Struct_Decoder_path', type=str, default="", help='the path of Struct_Decoder models')
36 | parser.add_argument('--DepthNet_path', type=str, default="", help='the path of DepthNet models')
37 | parser.add_argument('--DSAModle_path', type=str, default='', help='the path of DSAModle')
38 |
39 |
40 | def save_test(handle, result1_log):
41 | '''
42 | This function saves the test metrics to a given file.
43 | ------
44 | handle: text file handle
45 | result1_log: the metrics results, a 2D list
46 | '''
47 | abs_rel_1 = np.array(result1_log[0]).mean()
48 | sq_rel_1 = np.array(result1_log[1]).mean()
49 | rmse_1 = np.array(result1_log[2]).mean()
50 | rmse_log_1 = np.array(result1_log[3]).mean()
51 | a1_1 = np.array(result1_log[4]).mean()
52 | a2_1 = np.array(result1_log[5]).mean()
53 | a3_1 = np.array(result1_log[6]).mean()
54 |
55 |
56 | # write test result to test file by using handle
57 | handle.write("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}\n" \
58 | .format('abs_rel', 'sq_rel', 'rmse', 'rmse_log', 'a1', 'a2', 'a3'))
59 |
60 | handle.write("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}\n"\
61 | .format(abs_rel_1, sq_rel_1, rmse_1, rmse_log_1, a1_1, a2_1, a3_1))
62 |
63 |
64 |
65 | def kitti_metrics_preprocess(pred, gt):
66 | '''
67 | This function does some preprocessing before the metrics calculation:
68 | - clamps predictions to [min_depth, max_depth] to avoid numerical problems;
69 | - masks invalid ground truth, applies the evaluation crop and median scaling.
70 | Note that the inputs 'pred' and 'gt' are both 4D nparrays.
71 | Returns the corresponding masked value pairs.
72 | '''
73 | # squeeze the batch and channel dims (both are 1 at test time)
74 |
75 | pred = np.squeeze(pred)
76 | gt = np.squeeze(gt)
77 |
78 | min_depth = 1e-3
79 | max_depth = 80
80 | pred[pred < min_depth] = min_depth
81 | pred[pred > max_depth] = max_depth
82 |
83 | mask = np.logical_and(gt > min_depth, gt < max_depth)
84 | gt_height, gt_width = gt.shape
85 | crop = np.array([0.40810811 * gt_height, 0.99189189 * gt_height,
86 | 0.03594771 * gt_width, 0.96405229 * gt_width]).astype(np.int32)
87 | crop_mask = np.zeros(mask.shape)
88 | crop_mask[crop[0]:crop[1],crop[2]:crop[3]] = 1
89 | mask = np.logical_and(mask, crop_mask)
90 | scalar = np.median(gt[mask])/np.median(pred[mask])
91 | pred[mask] *= scalar
92 |
93 | return pred[mask], gt[mask]
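# Note: the crop above is the standard Garg/Eigen evaluation crop for KITTI, and the median
# ratio aligns the prediction's scale with the ground truth before computing metrics.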
94 |
95 |
96 |
97 | def kitti_compute_metrics(pred, gt):
98 | """
99 | This function computes the metrics value on a pair of (pred, gt).
100 | Note that the input 'pred' and 'gt' are both nparrays
101 | Returns a list of floats: abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3.
102 | """
103 | # test image pre-processing
104 | pred, gt = kitti_metrics_preprocess(pred, gt)
105 |
106 | ## compute MSE and RMSE
107 | mse = ((gt - pred) ** 2).mean()
108 | rmse = np.sqrt(mse)
109 |
110 | rmse_log = (np.log(gt) - np.log(pred)) ** 2
111 | rmse_log = np.sqrt(rmse_log.mean())
112 |
113 | # compute threshold accuracies a1, a2, a3
114 | thresh = np.maximum((gt/pred), (pred/gt))
115 | a1 = (thresh < 1.25).mean()
116 | a2 = (thresh < 1.25 ** 2).mean()
117 | a3 = (thresh < 1.25 ** 3).mean()
118 |
119 | abs_rel = np.mean((np.abs(gt - pred) / gt))
120 | sq_rel = np.mean(((gt - pred) ** 2) / gt)
121 | #print("sq_rel:", sq_rel)
122 |
123 | return [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]
124 |
125 | def nyu_compute_metrics(pred, gt):
126 | """
127 | This function computes the metrics value on a pair of (pred, gt).
128 | Note that the input 'pred' and 'gt' are both nparrays
129 | Returns a list of floats: abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3.
130 | """
131 | # test image pre-processing
132 | pred, gt = nyu_metrics_preprocess(pred, gt)
133 | #print("pred:", pred)
134 | #print("gt:", gt)
135 | #print("++++++++++++++++++++++++++++++++==")
136 |
137 | ## compute MSE and RMSE
138 | mse = ((gt - pred) ** 2).mean()
139 | rmse = np.sqrt(mse)
140 |
141 | #print("rmse:", rmse)
142 |
143 | rmse_log = (np.log(gt) - np.log(pred)) ** 2
144 | rmse_log = np.sqrt(rmse_log.mean())
145 |
146 | # compute threshold accuracies a1, a2, a3
147 | thresh = np.maximum((gt/pred), (pred/gt))
148 | a1 = (thresh < 1.25).mean()
149 | a2 = (thresh < 1.25 ** 2).mean()
150 | a3 = (thresh < 1.25 ** 3).mean()
151 |
152 | abs_rel = np.mean((np.abs(gt - pred) / gt))
153 | sq_rel = np.mean(((gt - pred) ** 2) / gt)
154 | #print("sq_rel:", sq_rel)
155 | print(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3)
156 |
157 | return [abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3]
158 |
159 |
160 | def nyu_metrics_preprocess(pred, gt):
161 | '''
162 | This function does some preprocessing before the metrics calculation:
163 | - clamps predictions to [min_depth, max_depth] to avoid numerical problems;
164 | - masks invalid ground truth and applies median scaling.
165 | Note that the inputs 'pred' and 'gt' are both 4D nparrays.
166 | Returns the corresponding masked value pairs.
167 | '''
168 | # squeeze the first and last idx(which is one in test processing)
169 |
170 | pred = np.squeeze(pred)
171 | gt = np.squeeze(gt)
172 | #print("gt1:", gt)
173 |
174 |
175 | min_depth = 1e-3
176 | max_depth = 8
177 | pred[pred < min_depth] = min_depth
178 | pred[pred > max_depth] = max_depth
179 |
180 | mask = np.logical_and(gt > min_depth, gt < max_depth)
181 |
182 | scalar = np.median(gt[mask])/np.median(pred[mask])
183 | pred[mask] *= scalar
184 | # gtiheight, gt_width = gt.shape
185 |
186 | #print("gt2:", gt[mask])
187 | return pred[mask], gt[mask]
188 |
189 |
190 | def main():
191 | global args
192 | args = parser.parse_args()
193 | # =========================== DataLoader ===============================
194 | # syn_dataset "VKITTI"
195 | # syn_root path
196 | print("Loading the dataset ...")
197 |
198 |
199 | real_loader = loaddata.create_test_dataloader(dataset=args.dataset,
200 | root=args.root,
201 | data_file=args.test_datafile,
202 | batchsize=args.batchSize,
203 | nThreads=args.nThreads,
204 | loadSize=args.loadSize)
205 |
206 | print("Loading data set is complete!")
207 | print("=======================================================================================")
208 | print("Building models ...")
209 |
210 | # Define Shared Structure Encoder
211 | Shared_Struct_Encoder = modules.Struct_Encoder(n_downsample=2, n_res=4,
212 | input_dim=3, dim=64,
213 | norm='in', activ='lrelu',
214 | pad_type='reflect')
215 |
216 |
217 | # Define Structure Decoder
218 | Struct_Decoder = modules.Struct_Decoder()
219 |
220 | # Define Depth-specific Attention (DSA) module
221 |
222 | Attention_Model = DSAModules.drn_d_22(pretrained=True)
223 | DSAModle = DSAModules.AutoED(Attention_Model)
224 |
225 |
226 | # Define DepthNet
227 | DepthNet = modules.Depth_Net()
228 | init_weights(DepthNet, init_type='normal')
229 |
230 |
231 | Shared_Struct_Encoder = Shared_Struct_Encoder.cuda()
232 | Struct_Decoder = torch.nn.DataParallel(Struct_Decoder).cuda()
233 | DSAModle = torch.nn.DataParallel(DSAModle).cuda()
234 | DepthNet = torch.nn.DataParallel(DepthNet).cuda()
235 |
236 | # Load models
237 | Shared_Struct_Encoder.load_state_dict(torch.load(args.Shared_Struct_Encoder_path))
238 | Struct_Decoder.load_state_dict(torch.load(args.Struct_Decoder_path))
239 | DSAModle.load_state_dict(torch.load(args.DSAModle_path))
240 | DepthNet.load_state_dict(torch.load(args.DepthNet_path))
241 |
242 | if not os.path.exists(args.out_dir):
243 | os.mkdir(args.out_dir)
244 |
245 |
246 | if args.dataset == "KITTI":
247 | Shared_Struct_Encoder.eval()
248 | Struct_Decoder.eval()
249 | DSAModle.eval()
250 | DepthNet.eval()
251 |
252 | result_log = [[] for i in range(7)]
253 |
254 | step = 0
255 |
256 | for i, real_batched in enumerate(real_loader):
257 | print("step:", step+1)
258 | image, depth_, depth_interp_ = real_batched['left_img'], real_batched['depth'], real_batched['depth_interp']
259 |
260 | image = torch.autograd.Variable(image).cuda()
261 | depth_ = torch.autograd.Variable(depth_).cuda()
262 |
263 | # predict
264 | struct_code = Shared_Struct_Encoder(image)
265 | structure_map = Struct_Decoder(struct_code)
266 |
267 | attention_map = DSAModle(image)
268 |
269 | depth_specific_structure = attention_map * structure_map
270 |
271 | pred_depth = DepthNet(depth_specific_structure)
272 | pred_depth = torch.nn.functional.interpolate(pred_depth[-1], size=[depth_.size(1),depth_.size(2)], mode='bilinear',align_corners=True)
273 |
274 | pred_depth_np = np.squeeze(pred_depth.cpu().detach().numpy())
275 | gt_np = np.squeeze(depth_.cpu().detach().numpy())
276 |
277 | depth_interp_np = np.squeeze(depth_interp_.cpu().detach().numpy())
278 |
279 | pred_depth_np += 1.0
280 | pred_depth_np /= 2.0
281 | pred_depth_np *= 80.0
282 |
283 | test_result = kitti_compute_metrics(pred_depth_np, gt_np) # list1
284 |
285 | for it, item in enumerate(test_result):
286 | result_log[it].append(item)
287 |
288 | step = step + 1
289 |
290 |
291 | f = open(args.out_dir + "/evalog.txt", 'w')
292 | f.write('Done testing -- epoch limit reached')
293 | f.write("after %d iteration \n\n" % (step))
294 | save_test(f, result_log)
295 | f.close()
296 |
297 |
298 | if args.dataset == "NYUD_V2":
299 | Shared_Struct_Encoder.eval()
300 | Struct_Decoder.eval()
301 | DSAModle.eval()
302 | DepthNet.eval()
303 |
304 | result_log = [[] for i in range(7)]
305 |
306 | step = 0
307 | for i, real_batched in enumerate(real_loader):
308 | print("step:", step+1)
309 | image, depth_ = real_batched['img'], real_batched['depth']
310 |
311 | image = torch.autograd.Variable(image).cuda()
312 | depth_ = torch.autograd.Variable(depth_).cuda()
313 |
314 | struct_code = Shared_Struct_Encoder(image)
315 | structure_map = Struct_Decoder(struct_code)
316 | attention_map = DSAModle(image)
317 | depth_specific_structure = attention_map * structure_map
318 | pred_depth = DepthNet(depth_specific_structure)
319 | pred_depth = torch.nn.functional.interpolate(pred_depth[-1], size=[depth_.size(2),depth_.size(3)], mode='bilinear',align_corners=True)
320 |
321 |
322 | pred_depth_np = np.squeeze(pred_depth.cpu().detach().numpy())
323 | gt_np = np.squeeze(depth_.cpu().detach().numpy())
324 |
325 | pred_depth_np += 1.0
326 | pred_depth_np /= 2.0
327 | pred_depth_np *= 8.0
328 | gt_np /= 1000.0
329 |
330 |
331 | test_result = nyu_compute_metrics(pred_depth_np, gt_np) # list1
332 |
333 | for it, item in enumerate(test_result):
334 |
335 | result_log[it].append(item)
336 |
337 | step = step + 1
338 |
339 |
340 | f = open(args.out_dir + "/evalog.txt", 'w')
341 | f.write('Done testing -- epoch limit reached')
342 | f.write("after %d iteration \n\n" % (step))
343 | save_test(f, result_log)
344 | f.close()
345 |
346 |
347 |
348 | if __name__ == '__main__':
349 | main()
350 |
351 |
352 |
353 |
354 |
355 |
356 |
357 |
358 |
359 |
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 |
368 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import os
3 | import time
4 | import torch
5 | import argparse
6 | import loaddata
7 | import DSAModules
8 | import itertools
9 | import train_loss
10 | import numpy as np
11 | from utils import *
12 | import torch.nn as nn
13 | import torch.nn.parallel
14 | from models import modules
15 | from itertools import chain
16 | import torchvision.utils as vutils
17 | import torch.backends.cudnn as cudnn
18 | from tensorboardX import SummaryWriter
19 |
20 | ## =========================== Parameters =================
21 | parser = argparse.ArgumentParser(description="Domain transfer on depth estimation.")
22 | parser.add_argument('--start-epoch', default=0, type=int, help='manual epoch number (useful on restarts)')
23 | parser.add_argument('--epochs', default=120, type=int, help='number of total epochs to run')
24 | parser.add_argument('--syn_dataset', type=str, default='VKITTI', help='synthetic domain') # *******
25 | parser.add_argument('--syn_root', type=str, default='', help='path to source dataset.') # *******
26 | parser.add_argument('--syn_train_datafile', type=str, default='', help='stores data list, in syn_root') # *******
27 | parser.add_argument('--batchSize', type=int, default=1, help='input batch size') # *******
28 | parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data') # *******
29 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') # *******
30 | parser.add_argument('--no_rotation', action='store_true', help='if specified, do not rotate the images for data augmentation') # *******
31 | parser.add_argument('--no_augment', action='store_true', help='if specified, do not use data augmentation, e.g., randomly shifting gamma') # *******
32 | parser.add_argument('--loadSize', nargs='+', type=int, default=286, help='scale images to this size') # *******
33 | parser.add_argument('--checkpoint_dir', type=str, default="./checkpoints/", help='the path of saving models')
34 | parser.add_argument('--log_dir', type=str, default="./log", help="the path of log")
35 | parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate of network') # *******
36 | parser.add_argument('--Shared_Struct_Encoder_path', type=str, default='', help='the path of shared geo encoder')
37 | parser.add_argument('--Struct_Decoder_path', type=str, default='', help="the path of Struct_Decoder")
38 | parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy: lambda|step|plateau')
39 | parser.add_argument('--lr_decay_iters', type=int, default=10, help='multiply by a gamma every lr_decay_iters iterations')
40 | parser.add_argument('--lambda_w', type=float, default=1.0, help='the weight parameters of structure map.')
41 | parser.add_argument('--hyper_w', type=float, default=0.001, help='the weight parameters.')
42 | parser.add_argument('--train_stage', type=str, default='TrainStructDecoder', help='train stage(TrainStructDecoder/TrainDSAandDPModule).')
43 |
44 |
45 | def main():
46 | global args
47 | args = parser.parse_args()
48 | # make log_dir and checkpoint_dir
49 | makedir(args.log_dir)
50 | makedir(args.checkpoint_dir)
51 | # =========================== DataLoader ===============================
52 | # syn_dataset "VKITTI"
53 | # syn_root path
54 | print("Loading the dataset ...")
55 | synthetic_loader = loaddata.creat_train_dataloader(dataset=args.syn_dataset,
56 | root=args.syn_root,
57 | data_file=args.syn_train_datafile,
58 | batchsize=args.batchSize,
59 | nThreads=args.nThreads,
60 | no_flip=args.no_flip,
61 | no_rotation=args.no_rotation,
62 | no_augment=args.no_augment,
63 | loadSize=args.loadSize)
64 |
65 | print("Loading datasets is complete!")
66 | print("=======================================================================================")
67 | print("Building models ...")
68 |
69 | ### ================================= STE Module ==================================================
70 |
71 | # Define Shared Structure Encoder
72 | Shared_Struct_Encoder = modules.Struct_Encoder(n_downsample=2, n_res=4,
73 | input_dim=3, dim=64,
74 | norm='in', activ='lrelu',
75 | pad_type='reflect')
76 | # Define Structure Decoder
77 | Struct_Decoder = modules.Struct_Decoder()
78 |
79 | ### ================================ DSA Module ===================================================
80 |
81 | # Define Depth-specific Attention (DSA) module
82 | Attention_Model = DSAModules.drn_d_22(pretrained=True)
83 | DSAModle = DSAModules.AutoED(Attention_Model)
84 |
85 | # Define DepthNet
86 | DepthNet = modules.Depth_Net()
87 | init_weights(DepthNet, init_type='normal')
88 |
89 |
90 | cudnn.enabled = True
91 | cudnn.benchmark = True
92 |
93 | if args.train_stage == 'TrainStructDecoder':
94 | ## Load pretrained shared_geo_encoder
95 | Shared_Struct_Encoder.load_state_dict(torch.load(args.Shared_Struct_Encoder_path))
96 |
97 | # =============================== Multi-GPU ======================
98 | print("GPU num:", torch.cuda.device_count())
99 | if torch.cuda.device_count() == 8:
100 | Shared_Struct_Encoder = torch.nn.DataParallel(Shared_Struct_Encoder, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
101 | Struct_Decoder = torch.nn.DataParallel(Struct_Decoder, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
102 | DepthNet = torch.nn.DataParallel(DepthNet, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
103 |
104 | elif torch.cuda.device_count() == 4:
105 | Shared_Struct_Encoder = torch.nn.DataParallel(Shared_Struct_Encoder, device_ids=[0, 1, 2, 3]).cuda()
106 | Struct_Decoder = torch.nn.DataParallel(Struct_Decoder, device_ids=[0, 1, 2, 3]).cuda()
107 | DepthNet = torch.nn.DataParallel(DepthNet, device_ids=[0, 1, 2, 3]).cuda()
108 |
109 | else:
110 | Shared_Struct_Encoder = Shared_Struct_Encoder.cuda()
111 | Struct_Decoder = Struct_Decoder.cuda()
112 | DepthNet = DepthNet.cuda()
113 |
114 | elif args.train_stage == 'TrainDSAandDPModule':
115 | ## Load pretrained shared_geo_encoder
116 | Shared_Struct_Encoder.load_state_dict(torch.load(args.Shared_Struct_Encoder_path))
117 |
118 | # =============================== Multi-GPU ======================
119 | print("GPU num:", torch.cuda.device_count())
120 | if torch.cuda.device_count() == 8:
121 | Shared_Struct_Encoder = torch.nn.DataParallel(Shared_Struct_Encoder, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
122 | Struct_Decoder = torch.nn.DataParallel(Struct_Decoder, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
123 | DSAModle = torch.nn.DataParallel(DSAModle, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
124 | DepthNet = torch.nn.DataParallel(DepthNet, device_ids=[0, 1, 2, 3, 4, 5, 6, 7]).cuda()
125 |
126 | elif torch.cuda.device_count() == 4:
127 | Shared_Struct_Encoder = torch.nn.DataParallel(Shared_Struct_Encoder, device_ids=[0, 1, 2, 3]).cuda()
128 | Struct_Decoder = torch.nn.DataParallel(Struct_Decoder, device_ids=[0, 1, 2, 3]).cuda()
129 | DSAModle = torch.nn.DataParallel(DSAModle, device_ids=[0, 1, 2, 3]).cuda()
130 | DepthNet = torch.nn.DataParallel(DepthNet, device_ids=[0, 1, 2, 3]).cuda()
131 |
132 | else:
133 | Shared_Struct_Encoder = Shared_Struct_Encoder.cuda()
134 | Struct_Decoder = Struct_Decoder.cuda()
135 | DSAModle = DSAModle.cuda()
136 | DepthNet = DepthNet.cuda()
137 |
138 | ## Load Struct_Decoder
139 | Struct_Decoder.load_state_dict(torch.load(args.Struct_Decoder_path))
140 |
141 | if args.train_stage == 'TrainStructDecoder':
142 | # =============================== Optim ============================================
143 | optimizer = torch.optim.Adam(itertools.chain(Struct_Decoder.parameters(), DepthNet.parameters()), lr=args.lr, betas=(0.9, 0.999))
144 | elif args.train_stage == 'TrainDSAandDPModule':
145 | # =============================== Optim ============================================
146 | optimizer = torch.optim.Adam(itertools.chain(DSAModle.parameters(), DepthNet.parameters()), lr=args.lr, betas=(0.9, 0.999))
147 |
148 | # Set logger
149 | writer = SummaryWriter(log_dir=args.log_dir)
150 |
151 | # Set scheduler
152 | scheduler = get_scheduler(optimizer, args)
153 |
154 | lr = args.lr
155 |
156 | # train process
157 | for epoch in range(args.start_epoch, args.epochs):
158 |
159 | batch_time = AverageMeter()
160 | losses = AverageMeter()
161 |
162 | if args.train_stage == 'TrainStructDecoder':
163 | Shared_Struct_Encoder.eval()
164 | Struct_Decoder.train()
165 | DepthNet.train()
166 |
167 | elif args.train_stage == 'TrainDSAandDPModule':
168 | Shared_Struct_Encoder.eval()
169 | Struct_Decoder.eval()
170 | DSAModle.train()
171 | DepthNet.train()
172 |
173 | end = time.time()
174 |
175 | for i, sample_batched in enumerate(synthetic_loader):
176 | image, depth = sample_batched['src']['img'], sample_batched['src']['depth']
177 | train_iteration = epoch * len(synthetic_loader) + i
178 |
179 | image = torch.autograd.Variable(image).cuda() # image
180 | depth = torch.autograd.Variable(depth).cuda() # depth
181 | if args.train_stage == 'TrainStructDecoder':
182 | # Fix Shared Structure Encoder
183 | struct_code = Shared_Struct_Encoder(image).detach()
184 | elif args.train_stage == 'TrainDSAandDPModule':
185 | # Fix Shared Structure Encoder
186 | struct_code = Shared_Struct_Encoder(image).detach()
187 | structure_map = Struct_Decoder(struct_code).detach()
188 |
189 | optimizer.zero_grad()
190 |
191 | if args.train_stage == 'TrainStructDecoder':
192 | structure_map = Struct_Decoder(struct_code)
193 | pred_depth = DepthNet(structure_map)
194 |
195 | elif args.train_stage == 'TrainDSAandDPModule':
196 | attention_map = DSAModle(image)
197 | depth_specific_structure = attention_map * structure_map
198 | pred_depth = DepthNet(depth_specific_structure)
199 |
200 |
201 | gt_depth = adjust_gt(depth, pred_depth)
202 |
203 |
204 | depth_loss = train_loss.depth_loss(pred_depth, gt_depth)
205 | if args.train_stage == 'TrainStructDecoder':
206 | struct_weighted_loss = train_loss.struct_weighted_loss(structure_map, depth, train_iteration, args.hyper_w)
207 | total_loss = depth_loss + args.lambda_w * struct_weighted_loss
208 | elif args.train_stage == 'TrainDSAandDPModule':
209 | total_loss = depth_loss
210 |
211 | losses.update(total_loss.item(), image.size(0))
212 | total_loss.backward()
213 | optimizer.step()
214 |
215 | batch_time.update(time.time() - end)
216 | end = time.time()
217 |
218 | batchSize = depth.size(0)
219 | if train_iteration % 30 == 0:
220 | writer.add_scalar('train/total_loss', total_loss, train_iteration)
221 | writer.add_scalar('train/batches_loss_avg', losses.avg, train_iteration)
222 | writer.add_scalar('train/depth_loss', depth_loss, train_iteration)
223 | if args.train_stage == 'TrainStructDecoder':
224 | writer.add_scalar('train/struct_weighted_loss', struct_weighted_loss, train_iteration)
225 |
226 | writer.add_image('train/image', vutils.make_grid(image*0.5+0.5), train_iteration)
227 | writer.add_image('train/pred_depth', vutils.make_grid(colormap(pred_depth[-1])), train_iteration)
228 | writer.add_image('train/depth_gt', vutils.make_grid(colormap(depth)), train_iteration)
229 | writer.add_image('train/structure_map', vutils.make_grid(colormap(structure_map, 'viridis')), train_iteration)
230 | if args.train_stage == 'TrainDSAandDPModule':
231 | writer.add_image('train/attention_map', vutils.make_grid(colormap(attention_map, 'viridis')), train_iteration)
232 | writer.add_image('train/depth_specific_structure', vutils.make_grid(colormap(depth_specific_structure, 'viridis')), train_iteration)
233 |
234 |
235 | print('Epoch: [{0}][{1}/{2}]\t'
236 | 'Time {batch_time.val:.3f} ({batch_time.sum:.3f})\t'
237 | 'Loss {loss.val:.4f} ({loss.avg:.4f})'
238 | .format(epoch, i, len(synthetic_loader), batch_time=batch_time, loss=losses))
239 |
240 | lr = update_learning_rate(optimizer, scheduler)
241 | if (epoch+1) % 1 == 0:
242 | if args.train_stage == 'TrainStructDecoder':
243 | torch.save(Struct_Decoder.state_dict(), args.checkpoint_dir + 'struct_decoder_'+str(epoch+1) + ".pth")
244 | torch.save(DepthNet.state_dict(), args.checkpoint_dir + 'depth_net_'+str(epoch+1) + ".pth")
245 | if args.train_stage == 'TrainDSAandDPModule':
246 | torch.save(DSAModle.state_dict(), args.checkpoint_dir + 'dsa_modle_'+str(epoch+1) + ".pth")
247 | torch.save(DepthNet.state_dict(), args.checkpoint_dir + 'depth_net_'+str(epoch+1) + ".pth")
248 |
249 |
250 |
251 |
252 | if __name__ == '__main__':
253 | main()
254 |
--------------------------------------------------------------------------------
/train_loss.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import os
3 | import torch
4 | import torch.nn as nn
5 | import torch.nn.functional as F
6 |
7 |
8 |
9 |
10 |
11 | def depth_loss(output, depth_gt):
12 |
13 | losses=[]
14 |
15 | depth_loss = torch.nn.L1Loss()
16 |
17 | for depth_index in range(len(output)):
18 |
19 | loss = depth_loss(output[depth_index], depth_gt[depth_index])
20 |
21 | losses.append(loss)
22 |
23 |
24 | total_loss = sum(losses)
25 |
26 | return total_loss
27 |
28 |
29 | def gradient_x(img):
30 | # Pad input to keep output size consistent
31 | img = F.pad(img, (0, 1, 0, 0), mode="replicate")
32 | gx = img[:, :, :, :-1] - img[:, :, :, 1:] # NCHW
33 | return gx
34 |
35 | def gradient_y(img):
36 | # Pad input to keep output size consistent
37 | img = F.pad(img, (0, 0, 0, 1), mode="replicate")
38 | gy = img[:, :, :-1, :] - img[:, :, 1:, :] # NCHW
39 | return gy
40 |
41 |
42 |
43 |
44 | def struct_weighted_loss(struct_map, depth, train_iteration, hyper_w):
45 |
46 | depth_grad_dx = gradient_x(depth)
47 |
48 | depth_grad_dy = gradient_y(depth)
49 |
50 | depth_grad = torch.abs(depth_grad_dx) + torch.abs(depth_grad_dy)
51 |
52 | weight = torch.exp(-torch.mean(depth_grad, 1, keepdim=True) * hyper_w)
53 |
54 | weighted_struct = struct_map * weight
55 |
56 | return torch.mean(torch.abs(weighted_struct))
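# Note: weight = exp(-|depth gradient| * hyper_w) is close to 1 where the depth is smooth and
# small at depth discontinuities, so minimizing |struct_map * weight| suppresses structure
# responses in smooth regions while leaving responses at depth edges essentially unpenalized.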
--------------------------------------------------------------------------------
/transform.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import math
3 | import random
4 | import numpy as np
5 | from PIL import Image
6 | import torchvision.transforms as transforms
7 | import torchvision.transforms.functional as F
8 |
9 |
10 | class RandomHorizontalFlip(object):
11 | def __init__(self, prob=None):
12 | self.prob = prob
13 | def __call__(self, img):
14 | if (self.prob is None and random.random() < 0.5) or (self.prob is not None and self.prob < 0.5):
15 | return img.transpose(Image.FLIP_LEFT_RIGHT)
16 | return img
17 |
18 | class RandomVerticalFlip(object):
19 | def __init__(self, prob=None):
20 | self.prob = prob
21 | def __call__(self, img):
22 | return img.transpose(Image.FLIP_TOP_BOTTOM) if ((self.prob is None and random.random() < 0.5) or (self.prob is not None and self.prob < 0.5)) else img
23 |
24 |
25 | class RandomImgAugment(object):
26 |
27 | def __init__(self, no_flip, no_rotation, no_augment, size=None, dataset=None):
28 | self.flip = not no_flip ## default all run
29 | self.augment = not no_augment
30 | self.rotation = not no_rotation
31 | self.size = size
32 | self.dataset = dataset
33 |
34 | def __call__(self, inputs):
35 | img1 = inputs[0] # Image
36 | img2 = inputs[1] # None
37 | depth = inputs[2] # Depth
38 | phase = inputs[3] # train/test
39 | fb = inputs[4] # focallength*baseline
40 |
41 | h = img1.height # height
42 | w = img1.width # width
43 | w0 = w # w0
44 |
45 | if self.size == [-1]:
46 | divisor = 32.0 # divisor
47 | h = int(math.ceil(h/divisor) * divisor)
48 | w = int(math.ceil(w/divisor) * divisor)
49 | self.size = (h, w)
50 |
51 |
52 | ## resize to 256 1024
53 | scale_transform = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)])
54 | img1 = scale_transform(img1) ## RGB image
55 | if img2 is not None:
56 | img2 = scale_transform(img2)
57 | if fb is not None: ## fb is None
58 | scale = float(self.size[1]) / float(w0) ##
59 | fb = fb * scale
60 | if phase == 'test': ## phase train
61 | return img1, img2, depth, fb
62 | if depth is not None:
63 | scale_transform_d = transforms.Compose([transforms.Resize(self.size, Image.BICUBIC)])
64 | depth = scale_transform_d(depth)
65 | if not self.size == 0:
66 | if depth is not None:
67 | if self.dataset.upper() == 'KITTI' or self.dataset.upper() == 'VKITTI':
68 | #print("Using outdoor scene transform.")
69 | arr_depth = np.array(depth, dtype=np.float32)
70 | arr_depth[arr_depth>8000.0]=8000.0
71 | arr_depth /= 8000.0 # depth is stored in cm; clipped at 80 m above, then normalized to (0, 1]
72 | arr_depth[arr_depth<0.0] = 0.0
73 | if self.dataset.upper() == 'NYUD_V2' or self.dataset.upper() == 'SUNCG':
74 | #print("Using indoor scene transform.")
75 | arr_depth = np.array(depth, dtype=np.float32)
76 | arr_depth /= 255.0 ## normalize to (0, 1)
77 | arr_depth[arr_depth<0.0] = 0.0
78 |
79 | depth = Image.fromarray(arr_depth, 'F')
80 |         ## random horizontal flip (skipped when both a stereo pair and depth are present)
81 |         if self.flip and not (img2 is not None and depth is not None):
82 | flip_prob = random.random()
83 | flip_transform = transforms.Compose([RandomHorizontalFlip(flip_prob)])
84 | if img2 is None:
85 | img1 = flip_transform(img1)
86 |             else:
87 |                 if flip_prob < 0.5:    # flipping a stereo pair also swaps the two views
88 |                     img1_ = img1
89 |                     img2_ = img2
90 |                     img1 = flip_transform(img2_)
91 |                     img2 = flip_transform(img1_)
92 |
93 | if depth is not None:
94 | depth = flip_transform(depth)
95 |
96 |
97 |         ### random rotation (skipped when both a stereo pair and depth are present)
98 |         if self.rotation and not (img2 is not None and depth is not None):
99 | if random.random() < 0.5:
100 |                 degree = random.randrange(-500, 500) / 100    # angle in [-5, 5) degrees
101 | img1 = F.rotate(img1, degree, Image.BICUBIC)
102 | if depth is not None:
103 | depth = F.rotate(depth, degree, Image.BICUBIC)
104 | if img2 is not None:
105 | img2 = F.rotate(img2, degree, Image.BICUBIC)
106 | # convert depth to range [-1, 1]
107 | if depth is not None:
108 | depth = np.array(depth, dtype=np.float32)
109 | depth = depth * 2.0
110 | depth -= 1.0
111 |
112 | if self.augment:
113 | if random.random() < 0.5:
114 |
115 | brightness = random.uniform(0.8, 1.0)
116 | contrast = random.uniform(0.8, 1.0)
117 | saturation = random.uniform(0.8, 1.0)
118 |
119 | img1 = F.adjust_brightness(img1, brightness)
120 | img1 = F.adjust_contrast(img1, contrast)
121 | img1 = F.adjust_saturation(img1, saturation)
122 |
123 | if img2 is not None:
124 | img2 = F.adjust_brightness(img2, brightness)
125 | img2 = F.adjust_contrast(img2, contrast)
126 | img2 = F.adjust_saturation(img2, saturation)
127 |
128 | return img1, img2, depth, fb
129 |
--------------------------------------------------------------------------------
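For context, a hedged sketch (not from the repository) of how RandomImgAugment might be applied when loading a sample; the file names and the fb value are hypothetical, and the 5-tuple input order follows the __call__ above.

from PIL import Image
from transform import RandomImgAugment

# target size follows the "256 x 1024" convention noted in the comments above
augment = RandomImgAugment(no_flip=False, no_rotation=False, no_augment=False,
                           size=[256, 1024], dataset='kitti')

img = Image.open('left.png').convert('RGB')   # hypothetical RGB frame
depth = Image.open('depth.png')               # hypothetical depth map
fb = 1.0                                      # hypothetical focal_length * baseline

# inputs are (img1, img2, depth, phase, fb); img2 is None for monocular training
img, _, depth, fb = augment((img, None, depth, 'train', fb))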
/utils.py:
--------------------------------------------------------------------------------
1 | # https://docs.opensource.microsoft.com/content/releasing/copyright-headers.html
2 | import os
3 | import torch
4 | import matplotlib
5 | import numpy as np
6 | import matplotlib.cm
7 | from torch.nn import init
8 | import torch.nn.functional as F
9 | from torch.optim import lr_scheduler
10 |
11 | def makedir(directory):
12 | if not os.path.exists(directory):
13 | os.makedirs(directory)
14 |
15 |
16 | def colormap(image, cmap="jet"):
17 | image_min = torch.min(image)
18 | image_max = torch.max(image)
19 | image = (image - image_min) / (image_max - image_min)
20 | image = torch.squeeze(image)
21 |
22 | if len(image.shape) == 2:
23 | image = image.unsqueeze(0)
24 |
25 | # quantize
26 | indices = torch.round(image * 255).long()
27 | # gather
28 | cm = matplotlib.cm.get_cmap(cmap if cmap is not None else 'gray')
29 |
30 | colors = cm(np.arange(256))[:, :3]
31 |     colors = torch.from_numpy(colors).float().to(indices.device)   # keep the LUT on the same device as the input
32 | color_map = colors[indices].transpose(2, 3).transpose(1, 2)
33 |
34 | return color_map
35 |
36 | def update_learning_rate(optimizer, scheduler):
37 |     scheduler.step()
38 |     lr = optimizer.param_groups[0]['lr']
39 |     print('learning rate = %.7f' % lr)
40 | 
41 |     return lr
42 |
43 | def get_scheduler(optimizer, opt):
44 | if opt.lr_policy == 'lambda':
45 | def lambda_rule(epoch):
46 | lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
47 | return lr_l
48 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
49 | elif opt.lr_policy == 'step':
50 | scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.5)
51 | elif opt.lr_policy == 'plateau':
52 | scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
53 |     else:
54 |         raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
55 | return scheduler
56 |
57 | def adjust_gt(gt_depth, pred_depth):
58 | adjusted_gt = []
59 | for each_depth in pred_depth:
60 | adjusted_gt.append(F.interpolate(gt_depth, size=[each_depth.size(2), each_depth.size(3)],
61 | mode='bilinear', align_corners=True))
62 | return adjusted_gt
63 |
64 |
65 | def init_weights(net, init_type='normal', gain=0.02):
66 | def init_func(m):
67 | classname = m.__class__.__name__
68 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
69 | if init_type == 'normal':
70 | init.normal_(m.weight.data, 0.0, gain)
71 | elif init_type == 'xavier':
72 | init.xavier_normal_(m.weight.data, gain=gain)
73 | elif init_type == 'kaiming':
74 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
75 | elif init_type == 'orthogonal':
76 | init.orthogonal_(m.weight.data, gain=gain)
77 | else:
78 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
79 | if hasattr(m, 'bias') and m.bias is not None:
80 | init.constant_(m.bias.data, 0.0)
81 | elif classname.find('BatchNorm2d') != -1:
82 | init.normal_(m.weight.data, 1.0, gain)
83 | init.constant_(m.bias.data, 0.0)
84 |
85 | print('initialize network with %s' % init_type)
86 | net.apply(init_func)
87 |
88 | class AverageMeter(object):
89 | def __init__(self):
90 | self.reset()
91 |
92 | def reset(self):
93 | self.val = 0
94 | self.avg = 0
95 | self.sum = 0
96 | self.count = 0
97 |
98 | def update(self, val, n=1):
99 | self.val = val
100 | self.sum += val * n
101 | self.count += n
102 | self.avg = self.sum / self.count
--------------------------------------------------------------------------------
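A short illustrative sketch (assumptions noted in comments) of how get_scheduler, update_learning_rate and AverageMeter from utils.py might fit together; the opt fields mirror the attributes read by get_scheduler, but the model and values are placeholders.

import argparse
import torch
from utils import get_scheduler, update_learning_rate, AverageMeter

# hypothetical options namespace exposing the attributes the 'lambda' policy reads
opt = argparse.Namespace(lr_policy='lambda', epoch_count=1, niter=50, niter_decay=50)

model = torch.nn.Linear(4, 1)                              # stand-in network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
scheduler = get_scheduler(optimizer, opt)

losses = AverageMeter()
for epoch in range(3):
    loss = model(torch.randn(8, 4)).abs().mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.update(loss.item(), n=8)
    update_learning_rate(optimizer, scheduler)             # steps the scheduler and prints the new lr
print('running average loss: %.4f' % losses.avg)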