├── .gitignore
├── LICENSE
├── README.md
├── data
│   ├── data_prep.sh
│   └── imus
│       ├── 00.mat
│       ├── 01.mat
│       ├── 02.mat
│       ├── 04.mat
│       ├── 05.mat
│       ├── 06.mat
│       ├── 07.mat
│       ├── 08.mat
│       ├── 09.mat
│       └── 10.mat
├── data_helper.py
├── evaluation.py
├── flops_target.zip
├── helper.py
├── latency_target.zip
├── layers.py
├── model.py
├── params.py
├── test.py
└── tools
    ├── __init__.py
    ├── pose_evaluation_utils.py
    └── transformations.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # NASVIO
2 |
3 | This repository contains the evaluation code and the searched models for *Search for efficient deep visual-inertial odometry through neural architecture search*.
4 |
5 | ## Data Preparation
6 |
7 | The test dataset is the KITTI Odometry dataset. The pre-processed IMU data is provided under `data/imus`. To download the images and ground-truth poses, run:
8 |
9 |     $ cd data
10 |     $ source data_prep.sh
11 |
12 | ## Pretrained checkpoints on searched best models
13 |
14 | Two checkpoints are provided: one searched with a low-FLOPs target (`flops_target.zip`) and one with a low-latency target (`latency_target.zip`). Simply unzip them to retrieve the checkpoints.
15 |
16 | ## Test the pretrained models
17 |
18 | Select which model to run by setting the `self.target` parameter (`flops` or `latency`) in `params.py`, then run:
19 |
20 | python3 test.py
21 |
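22 | For reference, the switch in `params.py` looks roughly like the sketch below (the class name here is an assumption; `par` is the module-level instance that the other scripts import):
23 |
24 |     class Parameters:
25 |         def __init__(self):
26 |             self.target = 'flops'   # set to 'latency' for the low-latency model
27 |
28 |     par = Parameters()
29 |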
--------------------------------------------------------------------------------
/data/data_prep.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # bash (not POSIX sh) is required for the {11..21} brace expansion below
3 | wget -c https://s3.eu-central-1.amazonaws.com/avg-kitti/data_odometry_color.zip # download the KITTI ODOMETRY dataset
4 | unzip data_odometry_color.zip
5 | mkdir sequences
6 | mv dataset/sequences/* sequences/
7 | rm -r dataset
8 |
9 | for i in {11..21}
10 | do
11 | rm -r 'sequences/'$i
12 | done
13 |
14 | wget -c https://s3.eu-central-1.amazonaws.com/avg-kitti/data_odometry_poses.zip # download the KITTI ODOMETRY groundtruth poses
15 | unzip data_odometry_poses.zip
16 | mkdir poses
17 | mv dataset/poses/* poses/
18 | rm -r dataset
19 |
20 | rm data_odometry_color.zip
21 | rm data_odometry_poses.zip
22 |
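23 | # Expected layout after the script finishes (a sketch of the kept data;
24 | # sequences 11-21 are removed above, and the pre-processed IMU data
25 | # already ships in imus/):
26 | #   sequences/<seq>/image_2/*.png   left color images
27 | #   poses/<seq>.txt                 ground-truth poses
28 | #   imus/<seq>.mat                  interpolated IMU measurements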
--------------------------------------------------------------------------------
/data/imus/00.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/00.mat
--------------------------------------------------------------------------------
/data/imus/01.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/01.mat
--------------------------------------------------------------------------------
/data/imus/02.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/02.mat
--------------------------------------------------------------------------------
/data/imus/04.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/04.mat
--------------------------------------------------------------------------------
/data/imus/05.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/05.mat
--------------------------------------------------------------------------------
/data/imus/06.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/06.mat
--------------------------------------------------------------------------------
/data/imus/07.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/07.mat
--------------------------------------------------------------------------------
/data/imus/08.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/08.mat
--------------------------------------------------------------------------------
/data/imus/09.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/09.mat
--------------------------------------------------------------------------------
/data/imus/10.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/data/imus/10.mat
--------------------------------------------------------------------------------
/data_helper.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import glob
4 | import pandas as pd
5 | import numpy as np
6 | from PIL import Image
7 | import random
8 | import scipy.io as sio
9 | from scipy import interpolate
10 |
11 | import torch
12 | from torch.utils.data import Dataset, DataLoader
13 | from torch.utils.data.sampler import Sampler
14 | from torchvision import transforms
15 | import torchvision.transforms.functional as TF
16 |
17 | from params import par
18 |
19 |
20 | def get_data_info_test(folder, seq_len):
21 | X_path, Y, I = [], [], []
22 |
23 | start_t = time.time()
24 |
25 | # Load & sort the raw data
26 | poses = np.load('{}{}.npy'.format(par.pose_dir, folder)) # (n_images, 6)
27 | fpaths = glob.glob('{}{}/image_2/*.png'.format(par.image_dir, folder))
28 | imus = sio.loadmat('{}{}.mat'.format(par.imu_dir, folder))['imu_data_interp']
29 | # imus = sio.loadmat('{}{}/imu.mat'.format(par.image_dir, folder))['imu_data_interp']
30 | fpaths.sort()
31 |
32 | n_frames = len(fpaths)
33 | start = 0
34 | while start + seq_len < n_frames:
35 | x_seg = fpaths[start:start+seq_len]
36 | X_path.append(x_seg)
37 | Y.append(poses[start:start+seq_len-1])
38 |
39 | start_tmp = start*par.imu_per_image-par.imu_prev
40 | stop_tmp = (start+seq_len-1)*par.imu_per_image+1
41 |         if start_tmp < 0:  # the window starts before the first IMU sample; left-pad with zeros
42 | pad_zero = np.zeros((-start_tmp, 6))
43 | padded = np.concatenate((pad_zero, imus[:stop_tmp]))
44 | I.append(padded.tolist())
45 | else:
46 | I.append(imus[start_tmp:stop_tmp])
47 | start += seq_len - 1
48 |
49 | X_path.append(fpaths[start:])
50 | Y.append(poses[start:])
51 | I.append(imus[start*par.imu_per_image-par.imu_prev:])
52 |
53 | print('Folder {} finish in {} sec'.format(folder, time.time() - start_t))
54 | # Convert to pandas dataframes
55 | data = {'image_path': X_path, 'pose': Y, 'imu': I}
56 | df = pd.DataFrame(data, columns=['image_path', 'pose', 'imu'])
57 | return df
58 |
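59 |
60 | if __name__ == '__main__':
61 |     # Minimal usage sketch (an illustration, not part of the original pipeline):
62 |     # par.image_dir, par.pose_dir and par.imu_dir in params.py must point at
63 |     # valid image/pose/IMU data; seq_len=8 is an arbitrary window size.
64 |     df = get_data_info_test('07', seq_len=8)  # overlapping windows over sequence 07
65 |     print(df.head())                          # columns: image_path, pose, imu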
--------------------------------------------------------------------------------
/evaluation.py:
--------------------------------------------------------------------------------
1 | #
2 | # Copyright Qing Li (hello.qingli@gmail.com) 2018. All Rights Reserved.
3 | #
4 | # References: 1. KITTI odometry development kit: http://www.cvlibs.net/datasets/kitti/eval_odometry.php
5 | # 2. A Geiger, P Lenz, R Urtasun. Are we ready for Autonomous Driving? The KITTI Vision Benchmark Suite. CVPR 2012.
6 | #
7 |
8 | import glob
9 | import argparse
10 | import os, os.path
11 | import numpy as np
12 | import matplotlib.pyplot as plt
13 | # use a non-GUI backend (Agg, Cairo, PS, PDF or SVG) so matplotlib can run headless
14 | plt.switch_backend('agg')
15 | import matplotlib.backends.backend_pdf
16 | import tools.transformations as tr
17 | from tools.pose_evaluation_utils import quat_pose_to_mat
18 | import scipy.io as sio
19 |
20 | from params import par
21 |
22 | class kittiOdomEval():
23 | def __init__(self, config):
24 |         assert os.path.exists(config.gt_dir), "Ground-truth pose directory does not exist: {}".format(config.gt_dir)
25 | gt_files = glob.glob(config.gt_dir + '/*.txt')
26 | gt_files = [os.path.split(f)[1] for f in gt_files]
27 | self.seqs_with_gt = [os.path.splitext(f)[0] for f in gt_files]
28 |
29 | self.lengths = [100,200,300,400,500,600,700,800]
30 | self.num_lengths = len(self.lengths)
31 | self.gt_dir = config.gt_dir
32 | self.result_dir = config.result_dir
33 | self.eval_seqs = []
34 |
35 |         # evaluate all trajectory files in the result folder
36 | if config.eva_seqs == '*':
37 | if not os.path.exists(self.result_dir):
38 |                 print('Result directory does not exist: {}'.format(self.result_dir))
39 | exit()
40 | if os.path.exists(self.result_dir + '/all_stats.txt'):
41 | os.remove(self.result_dir + '/all_stats.txt')
42 | files = glob.glob(self.result_dir + '/*.txt')
43 |             assert files, "There are no trajectory files in: {}".format(self.result_dir)
44 | for f in files:
45 | dirname, basename = os.path.split(f)
46 | file_name = os.path.splitext(basename)[0]
47 | self.eval_seqs.append(str(file_name))
48 | else:
49 | seqs = config.eva_seqs.split(',')
50 | self.eval_seqs = [str(s) for s in seqs]
51 |
52 | self.eval_seqs = [s[:-5] for s in self.eval_seqs] # xxxx_pred => xxxx
53 |
54 | # # Ref: https://github.com/MichaelGrupp/evo/wiki/Plotting
55 | # os.system("evo_config set plot_seaborn_style whitegrid \
56 | # plot_linewidth 1.0 \
57 | # plot_fontfamily sans-serif \
58 | # plot_fontscale 1.0 \
59 | # plot_figsize 10 10 \
60 | # plot_export_format pdf")
61 |
62 | def toCameraCoord(self, pose_mat):
63 | '''
64 |         Convert a pose from the LiDAR coordinate frame to the camera coordinate frame
65 | '''
66 | R_C2L = np.array([[0, 0, 1, 0],
67 | [-1, 0, 0, 0],
68 | [0, -1, 0, 0],
69 | [0, 0, 0, 1]])
70 | inv_R_C2L = np.linalg.inv(R_C2L)
71 | R = np.dot(inv_R_C2L, pose_mat)
72 | rot = np.dot(R, R_C2L)
73 | return rot
74 |
75 | def loadPoses(self, file_name, toCameraCoord):
76 | '''
77 |         Each line in the file should follow one of these structures:
78 |         (1) idx pose (3x4 matrix flattened to 12 numbers, row-major)
79 |         (2) pose (3x4 matrix flattened to 12 numbers, row-major)
80 | '''
81 | f = open(file_name, 'r')
82 | s = f.readlines()
83 | f.close()
84 | file_len = len(s)
85 | poses = {}
86 | frame_idx = 0
87 | for cnt, line in enumerate(s):
88 | P = np.eye(4)
89 | line_split = [float(i) for i in line.split()]
90 | withIdx = int(len(line_split)==13)
91 | for row in range(3):
92 | for col in range(4):
93 | P[row, col] = line_split[row*4 + col + withIdx]
94 | if withIdx:
95 | frame_idx = line_split[0]
96 | else:
97 | frame_idx = cnt
98 | if toCameraCoord:
99 | poses[frame_idx] = self.toCameraCoord(P)
100 | else:
101 | poses[frame_idx] = P
102 | return poses
103 |
104 | def trajectoryDistances(self, poses):
105 | '''
106 | Compute the length of the trajectory
107 | poses dictionary: [frame_idx: pose]
108 | '''
109 | dist = [0]
110 | sort_frame_idx = sorted(poses.keys())
111 | for i in range(len(sort_frame_idx)-1):
112 | cur_frame_idx = sort_frame_idx[i]
113 | next_frame_idx = sort_frame_idx[i+1]
114 | P1 = poses[cur_frame_idx]
115 | P2 = poses[next_frame_idx]
116 | dx = P1[0,3] - P2[0,3]
117 | dy = P1[1,3] - P2[1,3]
118 | dz = P1[2,3] - P2[2,3]
119 | dist.append(dist[i]+np.sqrt(dx**2+dy**2+dz**2))
120 | self.distance = dist[-1]
121 | return dist
122 |
123 | def rotationError(self, pose_error):
124 | a = pose_error[0,0]
125 | b = pose_error[1,1]
126 | c = pose_error[2,2]
127 | d = 0.5*(a+b+c-1.0)
128 | return np.arccos(max(min(d,1.0),-1.0))
129 |
130 | def translationError(self, pose_error):
131 | dx = pose_error[0,3]
132 | dy = pose_error[1,3]
133 | dz = pose_error[2,3]
134 | return np.sqrt(dx**2+dy**2+dz**2)
135 |
136 | def lastFrameFromSegmentLength(self, dist, first_frame, len_):
137 | for i in range(first_frame, len(dist), 1):
138 | if dist[i] > (dist[first_frame] + len_):
139 | return i
140 | return -1
141 |
142 | def calcSequenceErrors(self, poses_gt, poses_result):
143 | err = []
144 | self.max_speed = 0
145 | # pre-compute distances (from ground truth as reference)
146 | dist = self.trajectoryDistances(poses_gt)
147 | # every second, kitti data 10Hz
148 | self.step_size = 10
149 | # for all start positions do
150 | # for first_frame in range(9, len(poses_gt), self.step_size):
151 | for first_frame in range(0, len(poses_gt), self.step_size):
152 | # for all segment lengths do
153 | for i in range(self.num_lengths):
154 | # current length
155 | len_ = self.lengths[i]
156 | # compute last frame of the segment
157 | last_frame = self.lastFrameFromSegmentLength(dist, first_frame, len_)
158 |
159 | # Continue if sequence not long enough
160 | if last_frame == -1 or not(last_frame in poses_result.keys()) or not(first_frame in poses_result.keys()):
161 | continue
162 |
163 | # compute rotational and translational errors, relative pose error (RPE)
164 | pose_delta_gt = np.dot(np.linalg.inv(poses_gt[first_frame]), poses_gt[last_frame])
165 | pose_delta_result = np.dot(np.linalg.inv(poses_result[first_frame]), poses_result[last_frame])
166 | pose_error = np.dot(np.linalg.inv(pose_delta_result), pose_delta_gt)
167 |
168 | r_err = self.rotationError(pose_error)
169 | t_err = self.translationError(pose_error)
170 |
171 | # compute speed
172 | num_frames = last_frame - first_frame + 1.0
173 | speed = len_ / (0.1*num_frames) # 10Hz
174 | if speed > self.max_speed:
175 | self.max_speed = speed
176 | err.append([first_frame, r_err/len_, t_err/len_, len_, speed])
177 | return err
178 |
179 | def calcRTEErrors(self, poses_gt, poses_result):
180 | err = []
181 | # for all start positions do
182 | for first_frame in range(len(poses_gt)-1):
183 |
184 | # compute rotational and translational errors, relative pose error (RPE)
185 | pose_delta_gt = np.dot(np.linalg.inv(poses_gt[first_frame]), poses_gt[first_frame+1])
186 | pose_delta_result = np.dot(np.linalg.inv(poses_result[first_frame]), poses_result[first_frame+1])
187 | pose_error = np.dot(np.linalg.inv(pose_delta_result), pose_delta_gt)
188 |
189 | r_err = self.rotationError(pose_error)
190 | t_err = self.translationError(pose_error)
191 |
192 | err.append([None, r_err, t_err])
193 | return err
194 |
195 | def calcATEErrors(self, poses_gt, poses_result):
196 | err = []
197 | # for all start positions do
198 | for first_frame in range(1, len(poses_gt)):
199 |             # compute the absolute translational error (ATE) at each frame
200 | pose_error = np.dot(np.linalg.inv(poses_result[first_frame]), poses_gt[first_frame])
201 | t_err = self.translationError(pose_error)
202 | err.append([None, t_err, t_err])
203 | return err
204 |
205 | def calcSpeed(self, poses_gt, poses_result):
206 | speed_err_gt_list = []
207 | speed_err_est_list = []
208 | # for all start positions do
209 | for first_frame in range(len(poses_gt)-1):
210 | # compute rotational and translational errors, relative pose error (RPE)
211 | pose_error_gt = np.dot(np.linalg.inv(poses_gt[first_frame]), poses_gt[first_frame+1])
212 | pose_error_est = np.dot(np.linalg.inv(poses_result[first_frame]), poses_result[first_frame+1])
213 |             speed_err_gt = self.translationError(pose_error_gt)    # per-frame translation magnitude (linear speed)
214 |             speed_err_est = self.translationError(pose_error_est)
215 | speed_err_gt_list.append(speed_err_gt)
216 | speed_err_est_list.append(speed_err_est)
217 | return speed_err_gt_list, speed_err_est_list
218 |
219 | def calcAngleSpeed(self, poses_gt, poses_result):
220 | speed_err_gt_list = []
221 | speed_err_est_list = []
222 | # for all start positions do
223 | for first_frame in range(len(poses_gt)-1):
224 | # compute rotational and translational errors, relative pose error (RPE)
225 | pose_error_gt = np.dot(np.linalg.inv(poses_gt[first_frame]), poses_gt[first_frame+1])
226 | pose_error_est = np.dot(np.linalg.inv(poses_result[first_frame]), poses_result[first_frame+1])
227 |             speed_err_gt = self.rotationError(pose_error_gt)       # per-frame rotation magnitude (angular speed)
228 |             speed_err_est = self.rotationError(pose_error_est)
229 | speed_err_gt_list.append(speed_err_gt)
230 | speed_err_est_list.append(speed_err_est)
231 | return speed_err_gt_list, speed_err_est_list
232 |
233 | def saveSequenceErrors(self, err, file_name):
234 | fp = open(file_name,'w')
235 | for i in err:
236 | line_to_write = " ".join([str(j) for j in i])
237 | fp.writelines(line_to_write+"\n")
238 | fp.close()
239 |
240 | def computeOverallErr(self, seq_err):
241 | t_err = 0
242 | r_err = 0
243 | seq_len = len(seq_err)
244 |
245 | for item in seq_err:
246 | r_err += item[1]
247 | t_err += item[2]
248 | ave_t_err = t_err / seq_len
249 | ave_r_err = r_err / seq_len
250 | return ave_t_err, ave_r_err
251 |
252 | def plot_xyz(self, seq, poses_ref, poses_pred, plot_path_dir):
253 |
254 | def traj_xyz(axarr, positions_xyz, style='-', color='black', title="", label="", alpha=1.0):
255 | """
256 | plot a path/trajectory based on xyz coordinates into an axis
257 | :param axarr: an axis array (for x, y & z) e.g. from 'fig, axarr = plt.subplots(3)'
258 |             :param positions_xyz: array of xyz positions, one row per frame
259 | :param style: matplotlib line style
260 | :param color: matplotlib color
261 | :param label: label (for legend)
262 | :param alpha: alpha value for transparency
263 | """
264 | x = range(0, len(positions_xyz))
265 | xlabel = "index"
266 | ylabels = ["$x$ (m)", "$y$ (m)", "$z$ (m)"]
267 | # plt.title('PRY')
268 | for i in range(0, 3):
269 | axarr[i].plot(x, positions_xyz[:, i], style, color=color, label=label, alpha=alpha)
270 | axarr[i].set_ylabel(ylabels[i])
271 | axarr[i].legend(loc="upper right", frameon=True)
272 | axarr[2].set_xlabel(xlabel)
273 | if title:
274 | axarr[0].set_title('XYZ')
275 |
276 | fig, axarr = plt.subplots(3, sharex="col", figsize=tuple([20, 10]))
277 |
278 | pred_xyz = np.array([p[:3, 3] for _,p in poses_pred.items()])
279 | traj_xyz(axarr, pred_xyz, '-', 'b', title='XYZ', label='Ours', alpha=1.0)
280 | if poses_ref:
281 | ref_xyz = np.array([p[:3, 3] for _,p in poses_ref.items()])
282 | traj_xyz(axarr, ref_xyz, '-', 'r', label='GT', alpha=1.0)
283 |
284 | name = "{}_xyz".format(seq)
285 | plt.savefig(plot_path_dir + "/" + name + ".png", bbox_inches='tight', pad_inches=0.1)
286 | pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path_dir + "/" + name + ".pdf")
287 | fig.tight_layout()
288 | pdf.savefig(fig)
289 | # plt.show()
290 | pdf.close()
291 |
292 | def plot_rpy(self, seq, poses_ref, poses_pred, plot_path_dir, axes='szxy'):
293 |
294 | def traj_rpy(axarr, orientations_euler, style='-', color='black', title="", label="", alpha=1.0):
295 | """
296 | plot a path/trajectory's Euler RPY angles into an axis
297 | :param axarr: an axis array (for R, P & Y) e.g. from 'fig, axarr = plt.subplots(3)'
298 |             :param orientations_euler: array of Euler angles, one row per frame
299 | :param style: matplotlib line style
300 | :param color: matplotlib color
301 | :param label: label (for legend)
302 | :param alpha: alpha value for transparency
303 | """
304 | x = range(0, len(orientations_euler))
305 | xlabel = "index"
306 | ylabels = ["$roll$ (deg)", "$pitch$ (deg)", "$yaw$ (deg)"]
307 | # plt.title('PRY')
308 | for i in range(0, 3):
309 | axarr[i].plot(x, np.rad2deg(orientations_euler[:, i]), style,
310 | color=color, label=label, alpha=alpha)
311 | axarr[i].set_ylabel(ylabels[i])
312 | axarr[i].legend(loc="upper right", frameon=True)
313 | axarr[2].set_xlabel(xlabel)
314 | if title:
315 |                 axarr[0].set_title('RPY')
316 |
317 | fig_rpy, axarr_rpy = plt.subplots(3, sharex="col", figsize=tuple([20, 10]))
318 |
319 | pred_rpy = np.array([tr.euler_from_matrix(p, axes=axes) for _,p in poses_pred.items()])
320 | traj_rpy(axarr_rpy, pred_rpy, '-', 'b', title='RPY', label='Ours', alpha=1.0)
321 | if poses_ref:
322 | ref_rpy = np.array([tr.euler_from_matrix(p, axes=axes) for _,p in poses_ref.items()])
323 | traj_rpy(axarr_rpy, ref_rpy, '-', 'r', label='GT', alpha=1.0)
324 |
325 | name = "{}_rpy".format(seq)
326 | plt.savefig(plot_path_dir + "/" + name + ".png", bbox_inches='tight', pad_inches=0.1)
327 | pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path_dir + "/" + name + ".pdf")
328 | fig_rpy.tight_layout()
329 | pdf.savefig(fig_rpy)
330 | # plt.show()
331 | pdf.close()
332 |
333 | def plotPath_2D_3(self, seq, poses_gt, poses_result, plot_path_dir):
334 | '''
335 | plot path in XY, XZ and YZ plane
336 | '''
337 | fontsize_ = 10
338 | plot_keys = ["Ground Truth", "Ours"]
339 | start_point = [0, 0]
340 | style_pred = 'b-'
341 | style_gt = 'r-'
342 | style_O = 'ko'
343 |
344 | ### get the value
345 | if poses_gt:
346 | poses_gt = [(k,poses_gt[k]) for k in sorted(poses_gt.keys())]
347 | x_gt = np.asarray([pose[0,3] for _,pose in poses_gt])
348 | y_gt = np.asarray([pose[1,3] for _,pose in poses_gt])
349 | z_gt = np.asarray([pose[2,3] for _,pose in poses_gt])
350 | poses_result = [(k,poses_result[k]) for k in sorted(poses_result.keys())]
351 | x_pred = np.asarray([pose[0,3] for _,pose in poses_result])
352 | y_pred = np.asarray([pose[1,3] for _,pose in poses_result])
353 | z_pred = np.asarray([pose[2,3] for _,pose in poses_result])
354 |
355 | fig = plt.figure(figsize=(20,6), dpi=100)
356 | ### plot the figure
357 | plt.subplot(1,3,1)
358 | ax = plt.gca()
359 | if poses_gt: plt.plot(x_gt, z_gt, style_gt, label=plot_keys[0])
360 | plt.plot(x_pred, z_pred, style_pred, label=plot_keys[1])
361 | plt.plot(start_point[0], start_point[1], style_O, label='Start Point')
362 | plt.legend(loc="upper right", prop={'size':fontsize_})
363 | plt.xlabel('x (m)', fontsize=fontsize_)
364 | plt.ylabel('z (m)', fontsize=fontsize_)
365 | ### set the range of x and y
366 | xlim = ax.get_xlim()
367 | ylim = ax.get_ylim()
368 | xmean = np.mean(xlim)
369 | ymean = np.mean(ylim)
370 | plot_radius = max([abs(lim - mean_)
371 | for lims, mean_ in ((xlim, xmean),
372 | (ylim, ymean))
373 | for lim in lims])
374 | ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
375 | ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
376 |
377 | plt.subplot(1,3,2)
378 | ax = plt.gca()
379 | if poses_gt: plt.plot(x_gt, y_gt, style_gt, label=plot_keys[0])
380 | plt.plot(x_pred, y_pred, style_pred, label=plot_keys[1])
381 | plt.plot(start_point[0], start_point[1], style_O, label='Start Point')
382 | plt.legend(loc="upper right", prop={'size':fontsize_})
383 | plt.xlabel('x (m)', fontsize=fontsize_)
384 | plt.ylabel('y (m)', fontsize=fontsize_)
385 | xlim = ax.get_xlim()
386 | ylim = ax.get_ylim()
387 | xmean = np.mean(xlim)
388 | ymean = np.mean(ylim)
389 | ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
390 | ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
391 |
392 | plt.subplot(1,3,3)
393 | ax = plt.gca()
394 | if poses_gt: plt.plot(y_gt, z_gt, style_gt, label=plot_keys[0])
395 | plt.plot(y_pred, z_pred, style_pred, label=plot_keys[1])
396 | plt.plot(start_point[0], start_point[1], style_O, label='Start Point')
397 | plt.legend(loc="upper right", prop={'size':fontsize_})
398 | plt.xlabel('y (m)', fontsize=fontsize_)
399 | plt.ylabel('z (m)', fontsize=fontsize_)
400 | xlim = ax.get_xlim()
401 | ylim = ax.get_ylim()
402 | xmean = np.mean(xlim)
403 | ymean = np.mean(ylim)
404 | ax.set_xlim([xmean - plot_radius, xmean + plot_radius])
405 | ax.set_ylim([ymean - plot_radius, ymean + plot_radius])
406 |
407 | png_title = "{}_path".format(seq)
408 | plt.savefig(plot_path_dir + "/" + png_title + ".png", bbox_inches='tight', pad_inches=0.1)
409 | pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path_dir + "/" + png_title + ".pdf")
410 | fig.tight_layout()
411 | pdf.savefig(fig)
412 | # plt.show()
413 | plt.close()
414 |
415 |
416 | def plotPath_3D(self, seq, poses_gt, poses_result, plot_path_dir):
417 | """
418 | plot the path in 3D space
419 | """
420 | from mpl_toolkits.mplot3d import Axes3D
421 |
422 | start_point = [[0], [0], [0]]
423 | fontsize_ = 8
424 | style_pred = 'b-'
425 | style_gt = 'r-'
426 | style_O = 'ko'
427 |
428 | poses_dict = {}
429 | poses_dict["Ours"] = poses_result
430 | if poses_gt:
431 | poses_dict["Ground Truth"] = poses_gt
432 |
433 | fig = plt.figure(figsize=(8,8), dpi=110)
434 | # ax = fig.gca(projection='3d')
435 | ax = fig.add_subplot(projection='3d')
436 |
437 | for key,_ in poses_dict.items():
438 | plane_point = []
439 | for frame_idx in sorted(poses_dict[key].keys()):
440 | pose = poses_dict[key][frame_idx]
441 | plane_point.append([pose[0,3], pose[2,3], pose[1,3]])
442 | plane_point = np.asarray(plane_point)
443 | style = style_pred if key == 'Ours' else style_gt
444 | plt.plot(plane_point[:,0], plane_point[:,1], plane_point[:,2], style, label=key)
445 | plt.plot(start_point[0], start_point[1], start_point[2], style_O, label='Start Point')
446 |
447 | xlim = ax.get_xlim3d()
448 | ylim = ax.get_ylim3d()
449 | zlim = ax.get_zlim3d()
450 | xmean = np.mean(xlim)
451 | ymean = np.mean(ylim)
452 | zmean = np.mean(zlim)
453 | plot_radius = max([abs(lim - mean_)
454 | for lims, mean_ in ((xlim, xmean),
455 | (ylim, ymean),
456 | (zlim, zmean))
457 | for lim in lims])
458 | ax.set_xlim3d([xmean - plot_radius, xmean + plot_radius])
459 | ax.set_ylim3d([ymean - plot_radius, ymean + plot_radius])
460 | ax.set_zlim3d([zmean - plot_radius, zmean + plot_radius])
461 |
462 | ax.legend()
463 | # plt.legend(loc="upper right", prop={'size':fontsize_})
464 | ax.set_xlabel('x (m)', fontsize=fontsize_)
465 | ax.set_ylabel('z (m)', fontsize=fontsize_)
466 | ax.set_zlabel('y (m)', fontsize=fontsize_)
467 | ax.view_init(elev=20., azim=-35)
468 |
469 | png_title = "{}_path_3D".format(seq)
470 | plt.savefig(plot_path_dir+"/"+png_title+".png", bbox_inches='tight', pad_inches=0.1)
471 | pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path_dir + "/" + png_title + ".pdf")
472 | fig.tight_layout()
473 | pdf.savefig(fig)
474 | # plt.show()
475 | plt.close()
476 |
477 | def plotError_segment(self, seq, avg_segment_errs, plot_error_dir):
478 | '''
479 | avg_segment_errs: dict [100: err, 200: err...]
480 | '''
481 | fontsize_ = 15
482 | plot_y_t = []
483 | plot_y_r = []
484 | plot_x = []
485 | for idx, value in avg_segment_errs.items():
486 | if value == []:
487 | continue
488 | plot_x.append(idx)
489 | plot_y_t.append(value[0] * 100)
490 | plot_y_r.append(value[1]/np.pi * 180)
491 |
492 | fig = plt.figure(figsize=(15,6), dpi=100)
493 | plt.subplot(1,2,1)
494 | plt.plot(plot_x, plot_y_t, 'ks-')
495 | plt.axis([100, np.max(plot_x), 0, np.max(plot_y_t)*(1+0.1)])
496 | plt.xlabel('Path Length (m)',fontsize=fontsize_)
497 | plt.ylabel('Translation Error (%)',fontsize=fontsize_)
498 |
499 | plt.subplot(1,2,2)
500 | plt.plot(plot_x, plot_y_r, 'ks-')
501 | plt.axis([100, np.max(plot_x), 0, np.max(plot_y_r)*(1+0.1)])
502 | plt.xlabel('Path Length (m)',fontsize=fontsize_)
503 | plt.ylabel('Rotation Error (deg/m)',fontsize=fontsize_)
504 | png_title = "{}_error_seg".format(seq)
505 | plt.savefig(plot_error_dir + "/" + png_title + ".png", bbox_inches='tight', pad_inches=0.1)
506 | # plt.show()
507 |
508 | def plotError_speed(self, seq, avg_speed_errs, plot_error_dir):
509 | '''
510 | avg_speed_errs: dict [s1: err, s2: err...]
511 | '''
512 | fontsize_ = 15
513 | plot_y_t = []
514 | plot_y_r = []
515 | plot_x = []
516 | for idx, value in avg_speed_errs.items():
517 | if value == []:
518 | continue
519 | plot_x.append(idx * 3.6)
520 | plot_y_t.append(value[0] * 100)
521 | plot_y_r.append(value[1]/np.pi * 180)
522 |
523 | fig = plt.figure(figsize=(15,6), dpi=100)
524 | plt.subplot(1,2,1)
525 | plt.plot(plot_x, plot_y_t, 'ks-')
526 | plt.axis([np.min(plot_x), np.max(plot_x), 0, np.max(plot_y_t)*(1+0.1)])
527 | plt.xlabel('Speed (km/h)',fontsize = fontsize_)
528 | plt.ylabel('Translation Error (%)',fontsize = fontsize_)
529 |
530 | plt.subplot(1,2,2)
531 | plt.plot(plot_x, plot_y_r, 'ks-')
532 | plt.axis([np.min(plot_x), np.max(plot_x), 0, np.max(plot_y_r)*(1+0.1)])
533 | plt.xlabel('Speed (km/h)',fontsize = fontsize_)
534 | plt.ylabel('Rotation Error (deg/m)',fontsize = fontsize_)
535 | png_title = "{}_error_speed".format(seq)
536 | plt.savefig(plot_error_dir + "/" + png_title + ".png", bbox_inches='tight', pad_inches=0.1)
537 | # plt.show()
538 |
539 | def computeSegmentErr(self, seq_errs):
540 | '''
541 |         This function calculates average errors for different segment lengths.
542 | '''
543 | segment_errs = {}
544 | avg_segment_errs = {}
545 | for len_ in self.lengths:
546 | segment_errs[len_] = []
547 |
548 | # Get errors
549 | for err in seq_errs:
550 | len_ = err[3]
551 | t_err = err[2]
552 | r_err = err[1]
553 | segment_errs[len_].append([t_err, r_err])
554 |
555 | # Compute average
556 | for len_ in self.lengths:
557 | if segment_errs[len_] != []:
558 | avg_t_err = np.mean(np.asarray(segment_errs[len_])[:,0])
559 | avg_r_err = np.mean(np.asarray(segment_errs[len_])[:,1])
560 | avg_segment_errs[len_] = [avg_t_err, avg_r_err]
561 | else:
562 | avg_segment_errs[len_] = []
563 | return avg_segment_errs
564 |
565 | def computeSpeedErr(self, seq_errs):
566 | '''
567 |         This function calculates average errors for different speeds.
568 | '''
569 | segment_errs = {}
570 | avg_segment_errs = {}
571 | for s in range(2, 25, 2):
572 | segment_errs[s] = []
573 |
574 | # Get errors
575 | for err in seq_errs:
576 | speed = err[4]
577 | t_err = err[2]
578 | r_err = err[1]
579 | for key in segment_errs.keys():
580 | if np.abs(speed - key) < 2.0:
581 | segment_errs[key].append([t_err, r_err])
582 |
583 | # Compute average
584 | for key in segment_errs.keys():
585 | if segment_errs[key] != []:
586 | avg_t_err = np.mean(np.asarray(segment_errs[key])[:,0])
587 | avg_r_err = np.mean(np.asarray(segment_errs[key])[:,1])
588 | avg_segment_errs[key] = [avg_t_err, avg_r_err]
589 | else:
590 | avg_segment_errs[key] = []
591 | return avg_segment_errs
592 |
593 | def call_evo_traj(self, pred_file, save_file, gt_file=None, plot_plane='xy'):
594 | command = ''
595 | if os.path.exists(save_file): os.remove(save_file)
596 |
597 |         if gt_file is not None:
598 | command = ("evo_traj kitti %s --ref=%s --plot_mode=%s --save_plot=%s") \
599 | % (pred_file, gt_file, plot_plane, save_file)
600 | else:
601 | command = ("evo_traj kitti %s --plot_mode=%s --save_plot=%s") \
602 | % (pred_file, plot_plane, save_file)
603 | os.system(command)
604 |
605 | def eval(self, toCameraCoord):
606 | '''
607 |         toCameraCoord: whether the predicted poses need to be converted to the camera coordinate frame
608 | '''
609 | eval_dir = self.result_dir
610 | if not os.path.exists(eval_dir): os.makedirs(eval_dir)
611 | total_err = []
612 | ave_errs = {}
613 | for seq in self.eval_seqs:
614 | eva_seq_dir = os.path.join(eval_dir, '{}_eval'.format(seq))
615 | pred_file_name = self.result_dir + '/{}_pred.txt'.format(seq)
616 | # pred_file_name = self.result_dir + '/{}.txt'.format(seq)
617 | gt_file_name = self.gt_dir + '/{}.txt'.format(seq)
618 | save_file_name = eva_seq_dir + '/{}.pdf'.format(seq)
619 | assert os.path.exists(pred_file_name), "File path error: {}".format(pred_file_name)
620 |
621 | # ----------------------------------------------------------------------
622 | # load pose
623 | # if seq in self.seqs_with_gt:
624 | # self.call_evo_traj(pred_file_name, save_file_name, gt_file=gt_file_name)
625 | # else:
626 | # self.call_evo_traj(pred_file_name, save_file_name, gt_file=None)
627 | # continue
628 |
629 | poses_result = self.loadPoses(pred_file_name, toCameraCoord=toCameraCoord)
630 |
631 | if not os.path.exists(eva_seq_dir): os.makedirs(eva_seq_dir)
632 |
633 | if seq not in self.seqs_with_gt:
634 | self.calcSequenceErrors(poses_result, poses_result)
635 | print ("\nSequence: " + str(seq))
636 | print ('Distance (m): %d' % self.distance)
637 | print ('Max speed (km/h): %d' % (self.max_speed*3.6))
638 | self.plot_rpy(seq, None, poses_result, eva_seq_dir)
639 | self.plot_xyz(seq, None, poses_result, eva_seq_dir)
640 | self.plotPath_3D(seq, None, poses_result, eva_seq_dir)
641 | self.plotPath_2D_3(seq, None, poses_result, eva_seq_dir)
642 | continue
643 |
644 | poses_gt = self.loadPoses(gt_file_name, toCameraCoord=False)
645 |
646 | # ----------------------------------------------------------------------
647 | # compute sequence errors
648 | seq_err = self.calcSequenceErrors(poses_gt, poses_result)
649 | self.saveSequenceErrors(seq_err, eva_seq_dir + '/{}_error.txt'.format(seq))
650 |
651 | ATE_err = self.calcATEErrors(poses_gt, poses_result)
652 | RTE_err = self.calcRTEErrors(poses_gt, poses_result)
653 | total_err += seq_err
654 | speed_gt, speed_est = self.calcSpeed(poses_gt, poses_result)
655 | sio.savemat('speed_'+str(seq)+'.mat', {'gt': np.array(speed_gt), 'est': np.array(speed_est)})
656 | speed_gt, speed_est = self.calcAngleSpeed(poses_gt, poses_result)
657 | sio.savemat('Angle_speed_'+str(seq)+'.mat', {'gt': np.array(speed_gt), 'est': np.array(speed_est)})
658 | # ----------------------------------------------------------------------
659 | # Compute segment errors
660 | avg_segment_errs = self.computeSegmentErr(seq_err)
661 | avg_speed_errs = self.computeSpeedErr(seq_err)
662 |
663 | # ----------------------------------------------------------------------
664 | # compute overall error
665 | ave_t_err, ave_r_err = self.computeOverallErr(seq_err)
666 | rte_t_err, rte_r_err = self.computeOverallErr(RTE_err)
667 | ate_t_err, _ = self.computeOverallErr(ATE_err)
668 |
669 |             vector = np.vectorize(float)  # np.float was removed in NumPy 1.24+
670 | x = vector(np.array(RTE_err)[:, 1:])
671 |
672 | sio.savemat(seq + '_RTE.mat', {'RTE': x})
673 | print ("\nSequence: " + str(seq))
674 | print ('Distance (m): %d' % self.distance)
675 | print ('Max speed (km/h): %d' % (self.max_speed*3.6))
676 | print ("Average sequence translational RMSE (%): {0:.4f}".format(ave_t_err * 100))
677 | print ("Average sequence rotational error (deg/m): {0:.4f}\n".format(ave_r_err/np.pi * 180))
678 | print ("Relative translation error (m): {0:.4f}\n".format(rte_t_err))
679 | print ("Relative rotational error (deg): {0:.4f}\n".format(rte_r_err/np.pi * 180))
680 | print ("Absolute RMSE (m): {0:.4f}\n".format(ate_t_err))
681 | with open(eva_seq_dir + '/%s_stats.txt' % seq, 'w') as f:
682 | f.writelines('Average sequence translation RMSE (%): {0:.4f}\n'.format(ave_t_err * 100))
683 | f.writelines('Average sequence rotation error (deg/m): {0:.4f}'.format(ave_r_err/np.pi * 180))
684 |
685 |
686 | ave_errs[seq] = [ave_t_err, ave_r_err]
687 |
688 | self.ave_t_err = ave_t_err
689 | self.ave_r_err = ave_r_err/np.pi * 180
690 | self.ate_t_err = ate_t_err
691 |
692 |
693 | # ----------------------------------------------------------------------
694 | # Ploting
695 | self.plot_rpy(seq, poses_gt, poses_result, eva_seq_dir)
696 | self.plot_xyz(seq, poses_gt, poses_result, eva_seq_dir)
697 | self.plotPath_3D(seq, poses_gt, poses_result, eva_seq_dir)
698 | self.plotPath_2D_3(seq, poses_gt, poses_result, eva_seq_dir)
699 | self.plotError_segment(seq, avg_segment_errs, eva_seq_dir)
700 | self.plotError_speed(seq, avg_speed_errs, eva_seq_dir)
701 | plt.close('all')
702 |
703 | # total_avg_segment_errs = self.computeSegmentErr(total_err)
704 | # total_avg_speed_errs = self.computeSpeedErr(total_err)
705 | # self.plotError_segment('total_error_seg', total_avg_segment_errs, eval_dir)
706 | # self.plotError_speed('total_error_speed', total_avg_speed_errs, eval_dir)
707 |
708 |
709 | # if ave_errs:
710 | # with open(eval_dir + '/all_stats.txt', 'w') as f:
711 | # for seq, ave_err in ave_errs.items():
712 | # f.writelines('%s:\n' % seq)
713 | # f.writelines('Average sequence translation RMSE (%): {0:.4f}\n'.format(ave_err[0] * 100))
714 | # f.writelines('Average sequence rotation error (deg/m): {0:.4f}\n\n'.format(ave_err[1]/np.pi * 180))
715 |
716 | # parent_path, model_step = os.path.split(os.path.normpath(eval_dir))
717 | # with open(os.path.join(parent_path, 'test_statistics.txt'), 'a') as f:
718 | # f.writelines('------------------ %s -----------------\n' % model_step)
719 | # for seq, ave_err in ave_errs.items():
720 | # f.writelines('%s:\n' % seq)
721 | # f.writelines('Average sequence translation RMSE (%): {0:.4f}\n'.format(ave_err[0] * 100))
722 | # f.writelines('Average sequence rotation error (deg/m): {0:.4f}\n\n'.format(ave_err[1]/np.pi * 180))
723 |
724 | # print ("-------------------------------------------------")
725 | # for seq in range(len(ave_t_errs)):
726 | # print ("{0:.2f}".format(ave_t_errs[seq]*100))
727 | # print ("{0:.2f}".format(ave_r_errs[seq]/np.pi*180*100))
728 | # print ("-------------------------------------------------")
729 |
730 |
731 | if __name__ == '__main__':
732 | parser = argparse.ArgumentParser(description='KITTI Evaluation toolkit')
733 | parser.add_argument('--gt_dir', type=str, default='./dataset/poses', help='Directory path of the ground truth odometry')
734 | parser.add_argument('--result_dir', type=str, default=f'./result/{par.name}/02', help='Directory path of storing the odometry results')
735 | #parser.add_argument('--result_dir', type=str, default=f'.', help='Directory path of storing the odometry results')
736 | #parser.add_argument('--eva_seqs', type=str, default='08_pred', help='The sequences to be evaluated')
737 | parser.add_argument('--eva_seqs', type=str, default='05_pred,07_pred,10_pred', help='The sequences to be evaluated')
738 | parser.add_argument('--toCameraCoord', type=lambda x: (str(x).lower() == 'true'), default=False, help='Whether to convert the pose to camera coordinate')
739 |
740 | args = parser.parse_args()
741 | pose_eval = kittiOdomEval(args)
742 | pose_eval.eval(toCameraCoord=args.toCameraCoord) # set the value according to the predicted results
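743 |
744 | # Example invocation (a sketch mirroring the argparse defaults above; the paths
745 | # may need adjusting to wherever data_prep.sh placed the ground-truth poses):
746 | #   python3 evaluation.py --gt_dir ./dataset/poses \
747 | #       --result_dir ./result/<name>/02 --eva_seqs 05_pred,07_pred,10_pred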
--------------------------------------------------------------------------------
/flops_target.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/flops_target.zip
--------------------------------------------------------------------------------
/helper.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 |
4 | # epsilon for testing whether a number is close to zero
5 | _EPS = np.finfo(float).eps * 4.0
6 |
7 |
8 | # Determine whether the matrix R is a valid rotation matrix
9 | def isRotationMatrix(R) :
10 | Rt = np.transpose(R)
11 | shouldBeIdentity = np.dot(Rt, R)
12 | I = np.identity(3, dtype = R.dtype)
13 | n = np.linalg.norm(I - shouldBeIdentity)
14 | return n < 1e-6
15 |
16 | # Calculate the Euler angles from a pose matrix (3x4)
17 | def R_to_angle(Rt):
18 |
19 | Rt = np.reshape(np.array(Rt), (3,4))
20 | t = Rt[:,-1]
21 | R = Rt[:,:3]
22 |
23 | assert(isRotationMatrix(R))
24 |
25 | x, y, z = euler_from_matrix_new(R)
26 | theta = [x, y, z]
27 | pose_15 = np.concatenate((theta, t, R.flatten()))
28 | assert(pose_15.shape == (15,))
29 | return pose_15
30 |
31 | # Calculate the relative rotation (Euler angles) and translation from two pose matrices
32 | # Rt1: a list of 12 floats
33 | # Rt2: a list of 12 floats
34 | def cal_rel_pose(Rt1, Rt2):
35 | Rt1 = np.reshape(np.array(Rt1), (3,4))
36 | Rt1 = np.concatenate((Rt1, np.array([[0,0,0,1]])), 0)
37 | Rt2 = np.reshape(np.array(Rt2), (3,4))
38 | Rt2 = np.concatenate((Rt2, np.array([[0,0,0,1]])), 0)
39 |
40 | # Calculate the relative transformation Rt_rel
41 | Rt1_inv = np.linalg.inv(Rt1)
42 | Rt_rel = Rt1_inv @ Rt2
43 |
44 | R_rel = Rt_rel[:3, :3]
45 | t_rel = Rt_rel[:3, 3]
46 | assert(isRotationMatrix(R_rel))
47 |
48 |     # Extract the Euler angles from the relative rotation matrix
49 | x, y, z = euler_from_matrix_new(R_rel)
50 | theta = [x, y, z]
51 |
52 | rod = SO3_to_so3(R_rel)
53 |
54 | pose_rel_9 = np.concatenate((theta, t_rel, rod))
55 |
56 | assert(pose_rel_9.shape == (9,))
57 | return pose_rel_9
58 |
59 | # Calculate the 3x4 transformation matrix from Euler angles and a translation vector
60 | # pose: (3 angles, 3 translations)
61 | def angle_to_R(pose):
62 | R = eulerAnglesToRotationMatrix(pose[:3])
63 | t = pose[3:].reshape(3, 1)
64 | R = np.concatenate((R, t), 1)
65 | return R
66 |
67 |
68 | # Calculate the rotation matrix from Euler angles (roll, pitch, yaw)
69 | def eulerAnglesToRotationMatrix(theta) :
70 | R_x = np.array([[1, 0, 0 ],
71 | [0, np.cos(theta[0]), -np.sin(theta[0]) ],
72 | [0, np.sin(theta[0]), np.cos(theta[0]) ]
73 | ])
74 | R_y = np.array([[np.cos(theta[1]), 0, np.sin(theta[1]) ],
75 | [0, 1, 0 ],
76 | [-np.sin(theta[1]), 0, np.cos(theta[1]) ]
77 | ])
78 | R_z = np.array([[np.cos(theta[2]), -np.sin(theta[2]), 0],
79 | [np.sin(theta[2]), np.cos(theta[2]), 0],
80 | [0, 0, 1]
81 | ])
82 | R = np.dot(R_z, np.dot( R_y, R_x ))
83 | return R
84 |
85 | # Calculate the Euler angles (roll, pitch, yaw) from a rotation matrix
86 | def euler_from_matrix_new(matrix):
87 |
88 | M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3]
89 |
90 | cy = math.sqrt(M[0, 0]*M[0, 0] + M[1, 0]*M[1, 0])
91 | ay = math.atan2(-M[2, 0], cy)
92 |
93 | if ay < -math.pi/2 + _EPS and ay > -math.pi/2 - _EPS: # pitch = -90 deg
94 | ax = 0
95 | az = math.atan2( -M[1, 2], -M[0, 2])
96 | elif ay < math.pi/2 + _EPS and ay > math.pi/2 - _EPS:
97 | ax = 0
98 | az = math.atan2( M[1, 2], M[0, 2])
99 | else:
100 | ax = math.atan2( M[2, 1], M[2, 2])
101 | az = math.atan2( M[1, 0], M[0, 0])
102 |
103 | return np.array([ax, ay, az])
104 |
105 | # Normalize an angle so that it lies between -pi and pi
106 | def normalize_angle_delta(angle):
107 | if(angle > np.pi):
108 | angle = angle - 2 * np.pi
109 | elif(angle < -np.pi):
110 | angle = 2 * np.pi + angle
111 | return angle
112 |
113 | # Transformation from SO3 to so3
114 | def SO3_to_so3(R):
115 | assert(isRotationMatrix(R))
116 | cos = ((R[0,0]+R[1,1]+R[2,2])-1)/2
117 | theta = np.arccos(max(min(cos,1.0),-1.0))
118 |     logR = (theta/np.sin(theta)/2)*(R-R.T) if theta > _EPS else np.zeros((3,3))  # guard the theta -> 0 limit (log of ~identity is ~0)
119 | w = np.array([logR[2,1], logR[0,2], logR[1,0]])
120 | return w
121 |
122 | # Transformation from so3 to SO3
123 | def so3_to_SO3(w):
124 | theta = np.sqrt(np.sum(w**2))
125 | w_hat = np.array([ [ 0, -w[2], w[1] ],
126 | [ w[2], 0, -w[0] ],
127 | [-w[1], w[0], 0 ]
128 | ])
129 | w_hat_2 = w_hat @ w_hat
130 |     R = np.eye(3) if theta < _EPS else np.eye(3) + (np.sin(theta)/theta)*w_hat + ((1-np.cos(theta))/theta**2)*w_hat_2  # guard the theta -> 0 limit
131 | return R
132 |
133 | # Transformation from SE3 to se3
134 | def SE3_to_se3(Rt):
135 |
136 | Rt = np.reshape(np.array(Rt), (3,4))
137 | t = Rt[:,-1]
138 | R = Rt[:,:3]
139 | assert(isRotationMatrix(R))
140 |
141 | cos = ((R[0,0]+R[1,1]+R[2,2])-1)/2
142 | theta = np.arccos(max(min(cos,1.0),-1.0))
143 | logR = (theta/np.sin(theta)/2)*(R-R.T)
144 |
145 | w = [logR[2,1], logR[0,2], logR[1,0]]
146 |
147 | alpha = 1 - (theta*np.cos(theta/2))/(2*np.sin(theta/2))
148 | V_inv = np.eye(3) - 0.5*logR + (alpha/theta**2)*(logR @ logR)
149 | t_ = V_inv.dot(t)
150 |
151 | v = np.concatenate((t_, w))
152 | return v
153 |
154 | # Transformation from se3 to SE3
155 | def se3_to_SE3(v):
156 |
157 | t_ = v[:3]
158 | w = v[3:]
159 |
160 | theta = np.sqrt(np.sum(w**2))
161 | w_hat = np.array([ [ 0, -w[2], w[1] ],
162 | [ w[2], 0, -w[0] ],
163 | [-w[1], w[0], 0 ]
164 | ])
165 |
166 | w_hat_2 = w_hat @ w_hat
167 | R = np.eye(3) + (np.sin(theta)/theta)*w_hat + ((1-np.cos(theta))/theta**2)*w_hat_2
168 | V = np.eye(3) + ((1-np.cos(theta))/theta**2)*w_hat + ((theta-np.sin(theta))/theta**3)*w_hat_2
169 | t = V.dot(t_).reshape(3,1)
170 |
171 | R = np.concatenate((R, t), 1)
172 | return R
173 |
174 |
175 |
176 |
177 | if __name__ == '__main__':
178 | #clean_unused_images()
179 | pose1 = np.array([0.3, 0.4, 0.5, 0, 2, 3])
180 | pose2 = np.array([0.5, 0.1, 0.5, 1, 2, 2])
181 |
182 | Rt1 = angle_to_R(pose1)
183 | pose1_ = R_to_angle(Rt1.flatten().tolist())
184 |
185 | Rt2 = angle_to_R(pose2)
186 |
187 | R1, t1 = Rt1[:,:3], Rt1[:, 3]
188 | R2, t2 = Rt2[:,:3], Rt2[:, 3]
189 |
190 | R3 = R1 @ R2
191 | t3 = R1.dot(t2) + t1
192 | Rt3 = np.concatenate((R3, t3.reshape(3,1)), 1)
193 |
194 | v1 = SE3_to_se3(Rt1.flatten().tolist())
195 | Rt1_ = se3_to_SE3(v1)
196 |
197 | v2 = SE3_to_se3(Rt2.flatten().tolist())
198 | Rt2_ = se3_to_SE3(v2)
199 |
200 |     v3 = v1+v2  # note: se(3) vectors do not compose additively in general
201 | v3_ = SE3_to_se3(Rt3.flatten().tolist())
202 | Rt3_ = se3_to_SE3(v3)
203 |
204 | pose3 = R_to_angle(Rt3.flatten().tolist())
205 | pose3_ = R_to_angle(Rt3_.flatten().tolist())
206 |
207 |
208 | pose = cal_rel_pose(Rt1.flatten().tolist(), Rt3.flatten().tolist())
209 |     print('relative pose (Rt1 -> Rt3):', pose)
--------------------------------------------------------------------------------
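
A quick numerical check of the helpers above: SO3_to_so3 and so3_to_SO3 are the log and exp maps on the rotation group, so away from theta = 0 (where both divide by sin(theta)) they should invert each other. A minimal sketch, assuming helper.py is importable from the repository root:

import numpy as np
from helper import eulerAnglesToRotationMatrix, SO3_to_so3, so3_to_SO3

# Build a valid rotation matrix from arbitrary, non-degenerate Euler angles.
R = eulerAnglesToRotationMatrix([0.3, -0.2, 0.5])

w = SO3_to_so3(R)        # log map: axis-angle (Rodrigues) vector, shape (3,)
R_back = so3_to_SO3(w)   # exp map back to a rotation matrix, shape (3, 3)

assert np.allclose(R, R_back)
print("rotation angle (rad):", np.linalg.norm(w))
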
/latency_target.zip:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/latency_target.zip
--------------------------------------------------------------------------------
/layers.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.nn as nn
3 |
4 | from collections import OrderedDict
5 |
6 |
7 | def get_same_padding(kernel_size):
8 | if isinstance(kernel_size, tuple):
9 | assert len(kernel_size) == 2, "invalid kernel size: %s" % kernel_size
10 | p1 = get_same_padding(kernel_size[0])
11 | p2 = get_same_padding(kernel_size[1])
12 | return p1, p2
13 | assert isinstance(kernel_size, int), "kernel size should be either `int` or `tuple`"
14 |     assert kernel_size % 2 > 0, "kernel size should be an odd number"
15 | return kernel_size // 2
16 |
17 |
18 | def build_activation(act_func, inplace=True):
19 | if act_func == "relu":
20 | return nn.ReLU(inplace=inplace)
21 | elif act_func == "relu6":
22 | return nn.ReLU6(inplace=inplace)
23 | elif act_func == "lrelu":
24 | return nn.LeakyReLU(0.1, inplace=inplace)
25 | elif act_func == "gelu":
26 | return nn.GELU()
27 | elif act_func == "tanh":
28 | return nn.Tanh()
29 | elif act_func == "sigmoid":
30 | return nn.Sigmoid()
31 | elif act_func is None or act_func == "none":
32 | return None
33 | else:
34 | raise ValueError("do not support: %s" % act_func)
35 |
36 |
37 | class ConvLayer(nn.Module):
38 | def __init__(
39 | self,
40 | in_channels,
41 | out_channels,
42 | kernel_size=3,
43 | stride=1,
44 | dilation=1,
45 | groups=1,
46 | bias=False,
47 | use_norm=True,
48 | norm_func='LN',
49 | norm_chan_per_group=None,
50 | use_act=True,
51 | act_func="relu",
52 | dropout_rate=0,
53 | ):
54 | super().__init__()
55 |
56 | self.in_channels = in_channels
57 | self.out_channels = out_channels
58 | self.use_norm = use_norm
59 | self.norm_func = norm_func
60 | self.use_act = use_act
61 | self.act_func = act_func
62 | self.dropout_rate = dropout_rate
63 |
64 |         # default: a standard 3x3 conv followed by norm and activation
65 | self.kernel_size = kernel_size
66 | self.stride = stride
67 | self.dilation = dilation
68 | self.groups = groups
69 | self.bias = bias
70 |
71 | """ modules """
72 | modules = {}
73 | # norm layer
74 |         if self.use_norm:
75 |             if norm_func == "BN":
76 |                 modules["norm"] = nn.BatchNorm2d(out_channels)
77 |             elif norm_func == "LN":
78 |                 modules["norm"] = nn.GroupNorm(1, out_channels)
79 |             elif norm_func == "GN":
80 |                 modules["norm"] = nn.GroupNorm(out_channels//norm_chan_per_group, out_channels)
81 |         else:
82 |             modules["norm"] = None
83 | # activation
84 | if use_act:
85 | modules["act"] = build_activation(
86 |                 self.act_func, self.use_norm  # inplace activation only when a norm layer precedes it
87 | )
88 | else:
89 | modules["act"] = None
90 | # dropout
91 | if self.dropout_rate > 0:
92 | modules["dropout"] = nn.Dropout2d(self.dropout_rate, inplace=True)
93 | else:
94 | modules["dropout"] = None
95 | # weight
96 | modules["weight"] = self.weight_op()
97 |
98 | # add modules
99 | for op in ["weight", "norm", "act"]:
100 | if modules[op] is None:
101 | continue
102 | elif op == "weight":
103 | # dropout before weight operation
104 | if modules["dropout"] is not None:
105 | self.add_module("dropout", modules["dropout"])
106 | for key in modules["weight"]:
107 | self.add_module(key, modules["weight"][key])
108 | else:
109 | self.add_module(op, modules[op])
110 |
111 | def weight_op(self):
112 |         padding = get_same_padding(self.kernel_size)
113 |         if isinstance(padding, int):
114 |             padding *= self.dilation
115 |         else:
116 |             # padding is an immutable tuple here, so rebuild it
117 |             padding = (padding[0] * self.dilation, padding[1] * self.dilation)
118 |
119 | weight_dict = OrderedDict(
120 | {
121 | "conv": nn.Conv2d(
122 | self.in_channels,
123 | self.out_channels,
124 | kernel_size=self.kernel_size,
125 | stride=self.stride,
126 | padding=padding,
127 | dilation=self.dilation,
128 | bias=self.bias,
129 | )
130 | }
131 | )
132 | return weight_dict
133 |
134 | def forward(self, x):
135 | # similar to nn.Sequential
136 | for module in self._modules.values():
137 | x = module(x)
138 | return x
139 |
--------------------------------------------------------------------------------
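
ConvLayer registers its submodules in the fixed order dropout -> conv -> norm -> activation (see the add_module loop above), and forward simply chains whatever was registered, like nn.Sequential. A minimal usage sketch with the default LN/ReLU configuration; the tensor sizes are illustrative:

import torch
from layers import ConvLayer

layer = ConvLayer(in_channels=6, out_channels=16, kernel_size=3, stride=2)

x = torch.randn(2, 6, 64, 64)   # (N, C, H, W)
y = layer(x)                    # conv -> GroupNorm(1, 16) -> ReLU
print(y.shape)                  # torch.Size([2, 16, 32, 32]): same-padding conv with stride 2
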
/model.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | from torch.autograd import Variable
6 | from torch.nn.init import kaiming_normal_, orthogonal_
7 |
8 | from params import par
9 |
10 | from layers import ConvLayer
11 |
12 |
13 | # The inertial encoder for raw imu data
14 | class InertialEncoder(nn.Module):
15 | def __init__(self, par):
16 | super(InertialEncoder, self).__init__()
17 | self.method = par.imu_method
18 | if self.method == 'bi-LSTM':
19 | self.rnn_imu_head = nn.Linear(6, 128)
20 | self.encoder = nn.LSTM(
21 | input_size=128,
22 | hidden_size=par.imu_hidden_size,
23 | num_layers=2,
24 | dropout=par.rnn_dropout_between,
25 | batch_first=True,
26 | bidirectional=True)
27 | elif self.method == 'conv':
28 | self.encoder_conv = nn.Sequential(
29 | nn.Conv1d(6, 64, kernel_size=3, padding=1),
30 | nn.BatchNorm1d(64),
31 | nn.LeakyReLU(0.1, inplace=True),
32 | nn.Dropout(par.dropout),
33 | nn.Conv1d(64, 128, kernel_size=3, padding=1),
34 | nn.BatchNorm1d(128),
35 | nn.LeakyReLU(0.1, inplace=True),
36 | nn.Dropout(par.dropout),
37 | nn.Conv1d(128, 256, kernel_size=3, padding=1),
38 | nn.BatchNorm1d(256),
39 | nn.LeakyReLU(0.1, inplace=True),
40 | nn.Dropout(par.dropout))
41 | len_f = par.imu_per_image + 1 + par.imu_prev
42 | #len_f = (len_f - 1) // 2 // 2 + 1
43 | self.proj = nn.Linear(256 * 1 * len_f, par.imu_f_len)
44 |
45 | def forward(self, x):
46 | # x: (N, seq_len, 11, 6)
47 | batch_size = x.shape[0]
48 | seq_len = x.shape[1]
49 | x = x.view(batch_size * seq_len, x.size(2), x.size(3)) # x: (N x seq_len, 11, 6)
50 | if self.method == 'bi-LSTM':
51 | x = self.rnn_imu_head(x) # x: (N x seq_len, 11, 128)
52 |             x, hc = self.encoder(x)    # x: (N x seq_len, 11, 2*128)
53 | x = x.view(x.shape[0], x.shape[1], 2, -1)
54 | out = torch.cat((x[:, 0, 0, :], x[:, -1, 1, :]), -1) # out: (N x seq_len, 256)
55 | return out.view(batch_size, seq_len, 256)
56 | elif self.method == 'conv':
57 |             x = self.encoder_conv(x.permute(0, 2, 1))    # x: (N x seq_len, 256, 11)
58 | out = self.proj(x.view(x.shape[0], -1)) # out: (N x seq_len, 256)
59 | return out.view(batch_size, seq_len, 256)
60 |
61 |
62 | # The fusion module
63 | class FusionModule(nn.Module):
64 | def __init__(self, par, temp=None):
65 | super(FusionModule, self).__init__()
66 | self.fuse_method = par.fuse_method
67 | self.f_len = par.imu_f_len + par.visual_f_len
68 | if self.fuse_method == 'soft':
69 | self.net = nn.Sequential(
70 | nn.Linear(self.f_len, self.f_len))
71 | elif self.fuse_method == 'hard':
72 | self.net = nn.Sequential(
73 | nn.Linear(self.f_len, 2 * self.f_len))
74 | if temp is None:
75 | self.temp = 1
76 | else:
77 | self.temp = temp
78 |
79 | def forward(self, v, i):
80 | if self.fuse_method == 'cat':
81 | return torch.cat((v, i), -1)
82 | elif self.fuse_method == 'soft':
83 | feat_cat = torch.cat((v, i), -1)
84 | weights = self.net(feat_cat)
85 | return feat_cat * weights
86 | elif self.fuse_method == 'hard':
87 | feat_cat = torch.cat((v, i), -1)
88 | weights = self.net(feat_cat)
89 | weights = weights.view(v.shape[0], v.shape[1], self.f_len, 2)
90 |             mask = F.gumbel_softmax(weights, tau=self.temp, hard=True, dim=-1)  # use the configured temperature
91 | return feat_cat * mask[:, :, :, 0]
92 |
93 |
94 | # The pose estimation network
95 | class PoseRNN(nn.Module):
96 | def __init__(self, par):
97 | super(PoseRNN, self).__init__()
98 |
99 | # The main RNN network
100 | f_len = par.visual_f_len + par.imu_f_len
101 | self.rnn = nn.LSTM(
102 | input_size=f_len,
103 | hidden_size=par.rnn_hidden_size,
104 | num_layers=2,
105 | dropout=par.rnn_dropout_between,
106 | batch_first=True)
107 |
108 | self.fuse = FusionModule(par)
109 |
110 | # The output networks
111 | self.rnn_drop_out = nn.Dropout(par.rnn_dropout_out)
112 | # self.regressor = nn.Sequential(
113 | # nn.Linear(par.rnn_hidden_size, 6))
114 | self.regressor = nn.Sequential(
115 | nn.Linear(par.rnn_hidden_size, 128),
116 | nn.LeakyReLU(0.1, inplace=True),
117 | nn.Linear(128, 6))
118 |
119 | def forward(self, visual_f, imu_f, prev=None):
120 |
121 | if prev is not None:
122 | prev = (prev[0].transpose(1, 0).contiguous(), prev[1].transpose(1, 0).contiguous())
123 |
124 | batch_size = visual_f.shape[0]
125 | seq_len = visual_f.shape[1]
126 |
127 | fused = self.fuse(visual_f, imu_f)
128 | #self.rnn.flatten_parameters()
129 | out, hc = self.rnn(fused) if prev is None else self.rnn(fused, prev)
130 | out = self.rnn_drop_out(out)
131 | pose = self.regressor(out)
132 | angle = pose[:, :, :3]
133 | trans = pose[:, :, 3:]
134 |
135 | hc = (hc[0].transpose(1, 0).contiguous(), hc[1].transpose(1, 0).contiguous())
136 | return angle, trans, hc, out
137 |
138 |
139 | class VisualEncoderLatency(nn.Module):
140 | def __init__(self, par):
141 | super(VisualEncoderLatency, self).__init__()
142 | # CNN
143 | self.par = par
144 | self.batchNorm = par.batch_norm
145 | # searched net with low latency
146 | self.conv1 = ConvLayer(6, 8, kernel_size=5, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
147 | self.conv2 = ConvLayer(8, 16, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
148 | self.conv3 = ConvLayer(16, 64, kernel_size=5, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
149 | self.conv4 = ConvLayer(64, 64, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
150 | self.conv4_1 = ConvLayer(64, 64, kernel_size=3, stride=1, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
151 | self.conv5 = ConvLayer(64, 192, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
152 | self.conv5_1 = ConvLayer(192, 64, kernel_size=3, stride=1, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
153 | self.conv6 = ConvLayer(64, 1024, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
154 |         # Compute the flattened feature size for the configured image size
155 | __tmp = Variable(torch.zeros(1, 6, par.img_w, par.img_h))
156 | __tmp = self.encode_image(__tmp)
157 |
158 | self.visual_head = nn.Linear(int(np.prod(__tmp.size())), par.visual_f_len)
159 |
160 | def forward(self, x, batch_size, seq_len):
161 | x = self.encode_image(x)
162 | x = x.view(batch_size, seq_len, -1) # (batch, seq_len, fv)
163 |         v_f = self.visual_head(x)  # (batch, seq_len, visual_f_len)
164 | return v_f
165 |
166 | def encode_image(self, x):
167 | out_conv2 = self.conv2(self.conv1(x))
168 | out_conv3 = self.conv3(out_conv2)
169 | out_conv4 = self.conv4_1(self.conv4(out_conv3))
170 | out_conv5 = self.conv5_1(self.conv5(out_conv4))
171 | out_conv6 = self.conv6(out_conv5)
172 | return out_conv6
173 |
174 |
175 | class VisualEncoderFlops(nn.Module):
176 | def __init__(self, par):
177 | super(VisualEncoderFlops, self).__init__()
178 | # CNN
179 | self.par = par
180 | self.batchNorm = par.batch_norm
181 |         # searched net with low FLOPs
182 | self.conv1 = ConvLayer(6, 8, kernel_size=5, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
183 | self.conv2 = ConvLayer(8, 16, kernel_size=5, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
184 | self.conv3 = ConvLayer(16, 32, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
185 | self.conv3_1 = ConvLayer(32, 64, kernel_size=3, stride=1, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
186 | self.conv4 = ConvLayer(64, 64, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
187 | self.conv4_1 = ConvLayer(64, 64, kernel_size=3, stride=1, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
188 | self.conv5 = ConvLayer(64, 64, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
189 | self.conv5_1 = ConvLayer(64, 64, kernel_size=3, stride=1, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
190 | self.conv6 = ConvLayer(64, 1024, kernel_size=3, stride=2, use_norm=self.batchNorm, norm_func='BN', act_func='lrelu')
191 |         # Compute the flattened feature size for the configured image size
192 | __tmp = Variable(torch.zeros(1, 6, par.img_w, par.img_h))
193 | __tmp = self.encode_image(__tmp)
194 |
195 | self.visual_head = nn.Linear(int(np.prod(__tmp.size())), par.visual_f_len)
196 |
197 | def forward(self, x, batch_size, seq_len):
198 | x = self.encode_image(x)
199 | x = x.view(batch_size, seq_len, -1) # (batch, seq_len, fv)
200 |         v_f = self.visual_head(x)  # (batch, seq_len, visual_f_len)
201 | return v_f
202 |
203 | def encode_image(self, x):
204 | out_conv2 = self.conv2(self.conv1(x))
205 | out_conv3 = self.conv3_1(self.conv3(out_conv2))
206 | out_conv4 = self.conv4_1(self.conv4(out_conv3))
207 | out_conv5 = self.conv5_1(self.conv5(out_conv4))
208 | out_conv6 = self.conv6(out_conv5)
209 | return out_conv6
210 |
211 |
212 | class DeepVIO(nn.Module):
213 | def __init__(self, par):
214 | super(DeepVIO, self).__init__()
215 |
216 | if par.target == 'flops':
217 | self.visual_encoder = VisualEncoderFlops(par)
218 | else:
219 | self.visual_encoder = VisualEncoderLatency(par)
220 | self.inertial_encoder = InertialEncoder(par)
221 | self.pose_net = PoseRNN(par)
222 |
223 | self.par = par
224 |
225 | def forward(self, t_x, t_i, prev=None):
226 | v_f, imu = self.encoder_forward(t_x, t_i)
227 | batch_size = v_f.shape[0]
228 | seq_len = v_f.shape[1]
229 |
230 | angle_list, trans_list = [], []
231 | hidden = torch.zeros(batch_size, par.rnn_hidden_size).to(t_x.device) if prev is None else prev[0].contiguous()[:, -1, :]
232 |
233 | for i in range(seq_len):
234 | angle, trans, prev, _ = self.pose_net(v_f[:, i, :].unsqueeze(1), imu[:, i, :].unsqueeze(1), prev)
235 | angle_list.append(angle)
236 | trans_list.append(trans)
237 | hidden = prev[0].contiguous()[:, -1, :]
238 | angles = torch.cat(angle_list, dim=1)
239 | trans = torch.cat(trans_list, dim=1)
240 |
241 | return angles, trans, prev
242 |
243 | def encoder_forward(self, v, imu):
244 |         # v: (batch, seq_len+1, channel, height, width)
245 |         # stack consecutive frames channel-wise
246 |
247 | v = torch.cat((v[:, :-1], v[:, 1:]), dim=2)
248 | batch_size = v.size(0)
249 | seq_len = v.size(1)
250 |
251 | # CNN
252 | v = v.view(batch_size * seq_len, v.size(2), v.size(3), v.size(4))
253 | v_f = self.visual_encoder(v, batch_size, seq_len)
254 |
255 |         ll = 11 + self.par.imu_prev  # imu_per_image + 1 samples span each frame pair
256 | imu = torch.cat([imu[:, i * 10:i * 10 + ll, :].unsqueeze(1) for i in range(seq_len)], dim=1)
257 | imu = self.inertial_encoder(imu)
258 |
259 | return v_f, imu
--------------------------------------------------------------------------------
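
Shape-wise, DeepVIO consumes seq_len + 1 frames and seq_len * imu_per_image + 1 IMU samples per clip: encoder_forward stacks consecutive frames channel-wise (hence 6 input channels) and slices an (imu_per_image + 1)-sample IMU window per frame pair. A sketch of one forward pass with randomly initialized weights, assuming the defaults in params.py:

import torch
from params import par
from model import DeepVIO

model = DeepVIO(par).eval()

batch, seq_len = 1, 4
imgs = torch.randn(batch, seq_len + 1, 3, par.img_h, par.img_w)
imus = torch.randn(batch, seq_len * par.imu_per_image + 1, 6)

with torch.no_grad():
    angles, trans, hc = model(imgs, imus)

print(angles.shape, trans.shape)   # (1, 4, 3) each: one relative pose per frame pair
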
/params.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
4 | class Parameters():
5 | def __init__(self):
6 | self.devices = [0]
7 | self.n_processors = 4
8 | # Path
9 | self.data_dir = './data'
10 | self.image_dir = self.data_dir + '/sequences/'
11 | self.pose_dir = self.data_dir + '/poses/'
12 | self.imu_dir = self.data_dir + '/imus/'
13 |
14 | self.train_video = ['00', '01', '02', '04', '06', '08', '09']
15 | self.valid_video = ['05', '07', '10']
16 |
17 | self.imu_per_image = 10
18 | self.imu_int_prev = 0
19 |
20 | self.experiment_name = 'low_latency'
21 |
22 | # Data Preprocessing
23 | self.img_w = 512 # original size is about 1226
24 | self.img_h = 256 # original size is about 370
25 |
26 | # Data Augmentation
27 | self.is_hflip = True
28 | self.is_color = False
29 | self.flag_imu_aug = False
30 | self.is_crop = False
31 |
32 | self.rnn_hidden_size = 1024
33 | self.conv_dropout = (0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.5)
34 | self.rnn_dropout_out = 0.2
35 | self.rnn_dropout_between = 0.2 # 0: no dropout
36 | self.clip = None
37 | self.batch_norm = True
38 |
39 | self.imu_method = 'conv' # ['bi-LSTM', 'conv']
40 | self.imu_hidden_size = 128
41 |         self.fuse_method = 'cat' # ['cat', 'soft', 'hard']
42 | self.visual_f_len = 512
43 | self.imu_f_len = 256
44 |
45 | self.dropout = 0
46 | self.imu_prev = 0
47 |
48 | # Training
49 | self.decay = 5e-6
50 | self.batch_size = 16
51 | self.pin_mem = True
52 |
53 | # Select searched model
54 | self.target = 'latency'
55 | if self.target == 'flops':
56 | self.load_ckpt = './flops_target.ckpt'
57 | else:
58 | self.load_ckpt = './latency_target.ckpt'
59 |
60 |
61 | par = Parameters()
62 |
--------------------------------------------------------------------------------
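
Note that Parameters resolves load_ckpt from target once, inside __init__, so selecting the FLOPs-optimized search result at runtime means updating both fields before the model is built. A small sketch:

from params import par
from model import DeepVIO

par.target = 'flops'
par.load_ckpt = './flops_target.ckpt'   # mirrors the branch in Parameters.__init__

model = DeepVIO(par)                    # now builds VisualEncoderFlops
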
/test.py:
--------------------------------------------------------------------------------
1 | # predicted as a batch
2 | import os
3 | import glob
4 | import numpy as np
5 | from PIL import Image
6 | import scipy.io as sio
7 | import argparse
8 |
9 | import torch
10 | import torch.nn as nn
11 | from torch.utils.data import DataLoader
12 | import torchvision.transforms.functional as TF
13 |
14 | from params import par
15 | from model import DeepVIO
16 | from data_helper import get_data_info_test
17 | from helper import eulerAnglesToRotationMatrix, angle_to_R, normalize_angle_delta
18 | from evaluation import kittiOdomEval
19 |
20 |
21 | torch.manual_seed(0)
22 | np.random.seed(0)
23 |
24 | device = torch.device('cuda:0')
25 |
26 | def test(path_list):
27 | # Path
28 | save_dir = f'result/{par.experiment_name}/' # directory to save prediction answer
29 | if not os.path.exists(save_dir):
30 | os.makedirs(save_dir)
31 |
32 | test_len = 41
33 |
34 | # Load model
35 | VIONet = DeepVIO(par)
36 |
37 | VIONet.load_state_dict(torch.load(par.load_ckpt, map_location='cpu'))
38 | VIONet = VIONet.cuda(device)
39 |
40 | print('Load model from: ', par.load_ckpt)
41 |
42 | x_scaling, y_scaling = 1.1, 1.1
43 | scaled_h, scaled_w = int(par.img_h * y_scaling), int(par.img_w * x_scaling)
44 |
45 | VIONet.eval()
46 | for test_video in path_list:
47 | df = get_data_info_test(folder=test_video, seq_len=test_len)
48 | image_arr = np.asarray(df.image_path) # image paths
49 | groundtruth_arr = np.asarray(df.pose)
50 | imu_arr = np.asarray(df.imu)
51 |
52 | prev = None
53 |         answer = [[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0], ]  # identity pose for the first frame (flattened 3x4)
54 | pose_est_list = []
55 | ang_err_list = []
56 | trans_err_list = []
57 | pose_gt_list = []
58 | yaw_list = []
59 |
60 | for i in range(len(df)):
61 |
62 | print('{} / {}'.format(i, len(df)), end='\r', flush=True)
63 | # Load the test images
64 | image_path_sequence = image_arr[i]
65 | image_sequence = []
66 | image_sequence_LR = []
67 | for img_path in image_path_sequence:
68 | img_as_img = Image.open(img_path)
69 |
70 | if par.is_crop:
71 | img_as_img = TF.resize(img_as_img, size=(scaled_h, scaled_w))
72 | img_as_img = TF.center_crop(img_as_img, (par.img_h, par.img_w))
73 | else:
74 | img_as_img = TF.resize(img_as_img, size=(par.img_h, par.img_w))
75 |
76 | img_as_img_lr = TF.resize(img_as_img, size=(par.img_h//4, par.img_w//4))
77 |
78 | img_as_tensor = TF.to_tensor(img_as_img)-0.5
79 | img_as_tensor_lr = TF.to_tensor(img_as_img_lr)-0.5
80 | img_as_tensor = img_as_tensor.unsqueeze(0)
81 | img_as_tensor_lr = img_as_tensor_lr.unsqueeze(0)
82 |
83 | image_sequence.append(img_as_tensor)
84 | image_sequence_LR.append(img_as_tensor_lr)
85 |
86 | image_sequence = torch.cat(image_sequence, 0).to(device)
87 | image_sequence_LR = torch.cat(image_sequence_LR, 0).to(device)
88 |
89 | imu_sequence = torch.FloatTensor(imu_arr[i])
90 |
91 | gt_sequence = groundtruth_arr[i][:, :6]
92 |
93 | with torch.no_grad():
94 | x_in1 = image_sequence.unsqueeze(0)
95 | i_in = imu_sequence.to(device).unsqueeze(0)
96 | angle, trans, hc = VIONet(x_in1, i_in, prev=prev)
97 | prev = hc
98 |
99 | angle = angle.squeeze().detach().cpu().numpy()
100 | trans = trans.squeeze().detach().cpu().numpy()
101 |
102 | pose_pred = np.hstack((angle, trans))
103 |
104 | # Record the estimation error
105 | ang_err_list.append(np.mean((gt_sequence[:, :3] - angle)**2))
106 | trans_err_list.append(np.mean((gt_sequence[:, 3:] - trans)**2))
107 | yaw_list.append(np.mean((gt_sequence[:, 1] - angle[:, 1])**2))
108 |
109 | pose_est_list.append(pose_pred)
110 | pose_gt_list.append(gt_sequence)
111 |
112 | # Accumulate the relative poses
113 | for index in range(angle.shape[0]):
114 | poses_pre = answer[-1]
115 | poses_pre = np.array(poses_pre).reshape(3, 4)
116 | R_pre = poses_pre[:, :3]
117 | t_pre = poses_pre[:, 3]
118 |
119 | pose_rel = pose_pred[index, :]
120 | Rt_rel = angle_to_R(pose_rel)
121 | R_rel = Rt_rel[:, :3]
122 | t_rel = Rt_rel[:, 3]
123 |
124 | R = R_pre @ R_rel
125 | t = R_pre.dot(t_rel) + t_pre
126 |
127 | pose = np.concatenate((R, t.reshape(3, 1)), 1).flatten().tolist()
128 | answer.append(pose)
129 |
130 | ang_err_m = np.mean(ang_err_list)
131 | trans_err_m = np.mean(trans_err_list)
132 | yaw_err_m = np.mean(yaw_list)
133 |
134 | poses_rel_est = np.vstack(pose_est_list)
135 | poses_rel_gt = np.vstack(pose_gt_list)
136 |
137 | sio.savemat(test_video + '.mat', {'poses_est': poses_rel_est, 'poses_gt': poses_rel_gt})
138 |
139 | # Save answer
140 | with open('{}/{}_pred.txt'.format(save_dir, test_video), 'w') as f:
141 | for pose in answer:
142 |                 if isinstance(pose, list):
143 | f.write(' '.join([str(r) for r in pose]))
144 | else:
145 | f.write(str(pose))
146 | f.write('\n')
147 |
148 |
149 | if __name__ == '__main__':
150 |
151 | test(['05', '07', '10'])
152 |
153 | parser = argparse.ArgumentParser(description='KITTI Evaluation toolkit')
154 | parser.add_argument('--gt_dir', type=str, default=par.pose_dir, help='Directory path of the ground truth odometry')
155 | parser.add_argument('--result_dir', type=str, default=f'./result/{par.experiment_name}/', help='Directory path of storing the odometry results')
156 | parser.add_argument('--eva_seqs', type=str, default='05_pred,07_pred,10_pred', help='The sequences to be evaluated')
157 | parser.add_argument('--toCameraCoord', type=lambda x: (str(x).lower() == 'true'), default=False, help='Whether to convert the pose to camera coordinate')
158 |
159 | args = parser.parse_args()
160 | pose_eval = kittiOdomEval(args)
161 | pose_eval.eval(toCameraCoord=args.toCameraCoord) # set the value according to the predicted results
--------------------------------------------------------------------------------
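
The accumulation loop in test() chains each predicted relative pose onto the last absolute pose (R = R_pre @ R_rel, t = R_pre @ t_rel + t_pre), which is ordinary 4x4 pose composition. A small sketch of the same rule on two hand-made relative poses, reusing angle_to_R from helper.py:

import numpy as np
from helper import angle_to_R

rel_a = np.array([0.1, 0.0, 0.0, 1.0, 0.0, 0.0])   # (3 Euler angles, 3 translations)
rel_b = np.array([0.0, 0.2, 0.0, 0.0, 1.0, 0.0])

T = np.eye(4)                                          # absolute pose, starting at the origin
for rel in (rel_a, rel_b):
    step = np.vstack([angle_to_R(rel), [0, 0, 0, 1]])  # lift the 3x4 [R | t] to 4x4
    T = T @ step

print(T[:3, :].flatten())                              # one row of the KITTI-style answer file
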
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/unchenyu/NASVIO/7b92ce82b79605152c265b57bef332b8c5cf0384/tools/__init__.py
--------------------------------------------------------------------------------
/tools/pose_evaluation_utils.py:
--------------------------------------------------------------------------------
1 | # Some of the code are from the TUM evaluation toolkit:
2 | # https://vision.in.tum.de/data/datasets/rgbd-dataset/tools#absolute_trajectory_error_ate
3 |
4 | import math
5 | import numpy as np
6 |
7 | def compute_ate(gtruth_file, pred_file):
8 | gtruth_list = read_file_list(gtruth_file)
9 | pred_list = read_file_list(pred_file)
10 | matches = associate(gtruth_list, pred_list, 0, 0.01)
11 | if len(matches) < 2:
12 | return False
13 |
14 | gtruth_xyz = np.array([[float(value) for value in gtruth_list[a][0:3]] for a,b in matches])
15 | pred_xyz = np.array([[float(value) for value in pred_list[b][0:3]] for a,b in matches])
16 |
17 | # Make sure that the first matched frames align (no need for rotational alignment as
18 | # all the predicted/ground-truth snippets have been converted to use the same coordinate
19 | # system with the first frame of the snippet being the origin).
20 | offset = gtruth_xyz[0] - pred_xyz[0]
21 | pred_xyz += offset[None,:]
22 |
23 | # Optimize the scaling factor
24 | scale = np.sum(gtruth_xyz * pred_xyz)/np.sum(pred_xyz ** 2)
25 | alignment_error = pred_xyz * scale - gtruth_xyz
26 |     rmse = np.sqrt(np.sum(alignment_error ** 2))/len(matches)  # note: this is sqrt(SSE)/N, not sqrt(SSE/N)
27 | return rmse
28 |
29 | def read_file_list(filename):
30 | """
31 | Reads a trajectory from a text file.
32 |
33 | File format:
34 | The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
35 | and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.
36 |
37 | Input:
38 | filename -- File name
39 |
40 | Output:
41 | dict -- dictionary of (stamp,data) tuples
42 |
43 | """
44 | file = open(filename)
45 | data = file.read()
46 | lines = data.replace(","," ").replace("\t"," ").split("\n")
47 |     parsed = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
48 |     parsed = [(float(l[0]),l[1:]) for l in parsed if len(l)>1]
49 |     return dict(parsed)
50 |
51 | def associate(first_list, second_list,offset,max_difference):
52 | """
53 | Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
54 | to find the closest match for every input tuple.
55 |
56 | Input:
57 | first_list -- first dictionary of (stamp,data) tuples
58 | second_list -- second dictionary of (stamp,data) tuples
59 | offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
60 | max_difference -- search radius for candidate generation
61 |
62 | Output:
63 | matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
64 |
65 | """
66 | first_keys = list(first_list.keys())
67 | second_keys = list(second_list.keys())
68 | potential_matches = [(abs(a - (b + offset)), a, b)
69 | for a in first_keys
70 | for b in second_keys
71 | if abs(a - (b + offset)) < max_difference]
72 | potential_matches.sort()
73 | matches = []
74 | for diff, a, b in potential_matches:
75 | if a in first_keys and b in second_keys:
76 | first_keys.remove(a)
77 | second_keys.remove(b)
78 | matches.append((a, b))
79 |
80 | matches.sort()
81 | return matches
82 |
83 |
84 | def rot2quat(R):
85 | ''' Calculate quaternion corresponding to rotation matrix'''
86 | rz, ry, rx = mat2euler(R)
87 | qw, qx, qy, qz = euler2quat(rz, ry, rx)
88 | return qw, qx, qy, qz
89 |
90 | def quat2mat(q):
91 | ''' Calculate rotation matrix corresponding to quaternion
92 | https://afni.nimh.nih.gov/pub/dist/src/pkundu/meica.libs/nibabel/quaternions.py
93 | Parameters
94 | ----------
95 | q : 4 element array-like
96 |
97 | Returns
98 | -------
99 | M : (3,3) array
100 | Rotation matrix corresponding to input quaternion *q*
101 |
102 | Notes
103 | -----
104 | Rotation matrix applies to column vectors, and is applied to the
105 | left of coordinate vectors. The algorithm here allows non-unit
106 | quaternions.
107 |
108 | References
109 | ----------
110 | Algorithm from
111 | http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion
112 |
113 | Examples
114 | --------
115 | >>> import numpy as np
116 | >>> M = quat2mat([1, 0, 0, 0]) # Identity quaternion
117 | >>> np.allclose(M, np.eye(3))
118 | True
119 | >>> M = quat2mat([0, 1, 0, 0]) # 180 degree rotn around axis 0
120 | >>> np.allclose(M, np.diag([1, -1, -1]))
121 | True
122 | '''
123 | w, x, y, z = q
124 | Nq = w*w + x*x + y*y + z*z
125 | if Nq < 1e-8:
126 | return np.eye(3)
127 | s = 2.0/Nq
128 | X = x*s
129 | Y = y*s
130 | Z = z*s
131 | wX = w*X; wY = w*Y; wZ = w*Z
132 | xX = x*X; xY = x*Y; xZ = x*Z
133 | yY = y*Y; yZ = y*Z; zZ = z*Z
134 | return np.array(
135 | [[ 1.0-(yY+zZ), xY-wZ, xZ+wY ],
136 | [ xY+wZ, 1.0-(xX+zZ), yZ-wX ],
137 | [ xZ-wY, yZ+wX, 1.0-(xX+yY) ]])
138 |
139 | def mat2euler(M, cy_thresh=None, seq='zyx'):
140 | '''
141 | Taken From: http://afni.nimh.nih.gov/pub/dist/src/pkundu/meica.libs/nibabel/eulerangles.py
142 | Discover Euler angle vector from 3x3 matrix
143 | Uses the conventions above.
144 | Parameters
145 | ----------
146 | M : array-like, shape (3,3)
147 | cy_thresh : None or scalar, optional
148 | threshold below which to give up on straightforward arctan for
149 | estimating x rotation. If None (default), estimate from
150 | precision of input.
151 | Returns
152 | -------
153 | z : scalar
154 | y : scalar
155 | x : scalar
156 | Rotations in radians around z, y, x axes, respectively
157 | Notes
158 | -----
159 | If there was no numerical error, the routine could be derived using
160 | Sympy expression for z then y then x rotation matrix, which is::
161 | [ cos(y)*cos(z), -cos(y)*sin(z), sin(y)],
162 | [cos(x)*sin(z) + cos(z)*sin(x)*sin(y), cos(x)*cos(z) - sin(x)*sin(y)*sin(z), -cos(y)*sin(x)],
163 | [sin(x)*sin(z) - cos(x)*cos(z)*sin(y), cos(z)*sin(x) + cos(x)*sin(y)*sin(z), cos(x)*cos(y)]
164 | with the obvious derivations for z, y, and x
165 | z = atan2(-r12, r11)
166 | y = asin(r13)
167 | x = atan2(-r23, r33)
168 | for x,y,z order
169 | y = asin(-r31)
170 | x = atan2(r32, r33)
171 | z = atan2(r21, r11)
172 | Problems arise when cos(y) is close to zero, because both of::
173 | z = atan2(cos(y)*sin(z), cos(y)*cos(z))
174 | x = atan2(cos(y)*sin(x), cos(x)*cos(y))
175 | will be close to atan2(0, 0), and highly unstable.
176 | The ``cy`` fix for numerical instability below is from: *Graphics
177 | Gems IV*, Paul Heckbert (editor), Academic Press, 1994, ISBN:
178 | 0123361559. Specifically it comes from EulerAngles.c by Ken
179 | Shoemake, and deals with the case where cos(y) is close to zero:
180 | See: http://www.graphicsgems.org/
181 | The code appears to be licensed (from the website) as "can be used
182 | without restrictions".
183 | '''
184 | M = np.asarray(M)
185 | if cy_thresh is None:
186 | try:
187 | cy_thresh = np.finfo(M.dtype).eps * 4
188 | except ValueError:
189 |             cy_thresh = np.finfo(np.float64).eps * 4
190 | r11, r12, r13, r21, r22, r23, r31, r32, r33 = M.flat
191 | # cy: sqrt((cos(y)*cos(z))**2 + (cos(x)*cos(y))**2)
192 | cy = math.sqrt(r33*r33 + r23*r23)
193 | if seq=='zyx':
194 | if cy > cy_thresh: # cos(y) not close to zero, standard form
195 | z = math.atan2(-r12, r11) # atan2(cos(y)*sin(z), cos(y)*cos(z))
196 | y = math.atan2(r13, cy) # atan2(sin(y), cy)
197 | x = math.atan2(-r23, r33) # atan2(cos(y)*sin(x), cos(x)*cos(y))
198 | else: # cos(y) (close to) zero, so x -> 0.0 (see above)
199 | # so r21 -> sin(z), r22 -> cos(z) and
200 | z = math.atan2(r21, r22)
201 | y = math.atan2(r13, cy) # atan2(sin(y), cy)
202 | x = 0.0
203 | elif seq=='xyz':
204 | if cy > cy_thresh:
205 | y = math.atan2(-r31, cy)
206 | x = math.atan2(r32, r33)
207 | z = math.atan2(r21, r11)
208 | else:
209 | z = 0.0
210 | if r31 < 0:
211 | y = np.pi/2
212 |                 x = math.atan2(r12, r13)
213 | else:
214 | y = -np.pi/2
215 | else:
216 | raise Exception('Sequence not recognized')
217 | return z, y, x
218 |
219 | import functools
220 | def euler2mat(z=0, y=0, x=0, isRadian=True):
221 | ''' Return matrix for rotations around z, y and x axes
222 | Uses the z, then y, then x convention above
223 | Parameters
224 | ----------
225 | z : scalar
226 | Rotation angle in radians around z-axis (performed first)
227 | y : scalar
228 | Rotation angle in radians around y-axis
229 | x : scalar
230 | Rotation angle in radians around x-axis (performed last)
231 | Returns
232 | -------
233 | M : array shape (3,3)
234 | Rotation matrix giving same rotation as for given angles
235 | Examples
236 | --------
237 | >>> zrot = 1.3 # radians
238 | >>> yrot = -0.1
239 | >>> xrot = 0.2
240 | >>> M = euler2mat(zrot, yrot, xrot)
241 | >>> M.shape == (3, 3)
242 | True
243 | The output rotation matrix is equal to the composition of the
244 | individual rotations
245 | >>> M1 = euler2mat(zrot)
246 | >>> M2 = euler2mat(0, yrot)
247 | >>> M3 = euler2mat(0, 0, xrot)
248 | >>> composed_M = np.dot(M3, np.dot(M2, M1))
249 | >>> np.allclose(M, composed_M)
250 | True
251 | You can specify rotations by named arguments
252 | >>> np.all(M3 == euler2mat(x=xrot))
253 | True
254 | When applying M to a vector, the vector should column vector to the
255 | right of M. If the right hand side is a 2D array rather than a
256 | vector, then each column of the 2D array represents a vector.
257 | >>> vec = np.array([1, 0, 0]).reshape((3,1))
258 | >>> v2 = np.dot(M, vec)
259 | >>> vecs = np.array([[1, 0, 0],[0, 1, 0]]).T # giving 3x2 array
260 | >>> vecs2 = np.dot(M, vecs)
261 | Rotations are counter-clockwise.
262 | >>> zred = np.dot(euler2mat(z=np.pi/2), np.eye(3))
263 | >>> np.allclose(zred, [[0, -1, 0],[1, 0, 0], [0, 0, 1]])
264 | True
265 | >>> yred = np.dot(euler2mat(y=np.pi/2), np.eye(3))
266 | >>> np.allclose(yred, [[0, 0, 1],[0, 1, 0], [-1, 0, 0]])
267 | True
268 | >>> xred = np.dot(euler2mat(x=np.pi/2), np.eye(3))
269 | >>> np.allclose(xred, [[1, 0, 0],[0, 0, -1], [0, 1, 0]])
270 | True
271 | Notes
272 | -----
273 | The direction of rotation is given by the right-hand rule (orient
274 | the thumb of the right hand along the axis around which the rotation
275 | occurs, with the end of the thumb at the positive end of the axis;
276 | curl your fingers; the direction your fingers curl is the direction
277 | of rotation). Therefore, the rotations are counterclockwise if
278 | looking along the axis of rotation from positive to negative.
279 | '''
280 |
281 | if not isRadian:
282 | z = ((np.pi)/180.) * z
283 | y = ((np.pi)/180.) * y
284 | x = ((np.pi)/180.) * x
285 |     assert z>=(-np.pi) and z < np.pi, 'Inappropriate z: %f' % z
286 |     assert y>=(-np.pi) and y < np.pi, 'Inappropriate y: %f' % y
287 |     assert x>=(-np.pi) and x < np.pi, 'Inappropriate x: %f' % x
288 |
289 | Ms = []
290 | if z:
291 | cosz = math.cos(z)
292 | sinz = math.sin(z)
293 | Ms.append(np.array(
294 | [[cosz, -sinz, 0],
295 | [sinz, cosz, 0],
296 | [0, 0, 1]]))
297 | if y:
298 | cosy = math.cos(y)
299 | siny = math.sin(y)
300 | Ms.append(np.array(
301 | [[cosy, 0, siny],
302 | [0, 1, 0],
303 | [-siny, 0, cosy]]))
304 | if x:
305 | cosx = math.cos(x)
306 | sinx = math.sin(x)
307 | Ms.append(np.array(
308 | [[1, 0, 0],
309 | [0, cosx, -sinx],
310 | [0, sinx, cosx]]))
311 | if Ms:
312 | return functools.reduce(np.dot, Ms[::-1])
313 | return np.eye(3)
314 |
315 | def euler2quat(z=0, y=0, x=0, isRadian=True):
316 | ''' Return quaternion corresponding to these Euler angles
317 | Uses the z, then y, then x convention above
318 | Parameters
319 | ----------
320 | z : scalar
321 | Rotation angle in radians around z-axis (performed first)
322 | y : scalar
323 | Rotation angle in radians around y-axis
324 | x : scalar
325 | Rotation angle in radians around x-axis (performed last)
326 | Returns
327 | -------
328 | quat : array shape (4,)
329 | Quaternion in w, x, y z (real, then vector) format
330 | Notes
331 | -----
332 | We can derive this formula in Sympy using:
333 | 1. Formula giving quaternion corresponding to rotation of theta radians
334 | about arbitrary axis:
335 | http://mathworld.wolfram.com/EulerParameters.html
336 | 2. Generated formulae from 1.) for quaternions corresponding to
337 | theta radians rotations about ``x, y, z`` axes
338 | 3. Apply quaternion multiplication formula -
339 | http://en.wikipedia.org/wiki/Quaternions#Hamilton_product - to
340 | formulae from 2.) to give formula for combined rotations.
341 | '''
342 |
343 | if not isRadian:
344 | z = ((np.pi)/180.) * z
345 | y = ((np.pi)/180.) * y
346 | x = ((np.pi)/180.) * x
347 | z = z/2.0
348 | y = y/2.0
349 | x = x/2.0
350 | cz = math.cos(z)
351 | sz = math.sin(z)
352 | cy = math.cos(y)
353 | sy = math.sin(y)
354 | cx = math.cos(x)
355 | sx = math.sin(x)
356 | return np.array([
357 | cx*cy*cz - sx*sy*sz,
358 | cx*sy*sz + cy*cz*sx,
359 | cx*cz*sy - sx*cy*sz,
360 | cx*cy*sz + sx*cz*sy])
361 |
362 | def pose_vec_to_mat(vec):
363 | ''' Calculate transformation matrix corresponding to euler pose vector'''
364 | tx = vec[0]
365 | ty = vec[1]
366 | tz = vec[2]
367 | trans = np.array([tx, ty, tz]).reshape((3,1))
368 | rot = euler2mat(vec[5], vec[4], vec[3]) # order: z, y, x
369 | Tmat = np.concatenate((rot, trans), axis=1)
370 | hfiller = np.array([0, 0, 0, 1]).reshape((1,4))
371 | Tmat = np.concatenate((Tmat, hfiller), axis=0)
372 | return Tmat
373 |
374 | def quat_pose_to_mat(vec):
375 | ''' Calculate transformation matrix corresponding to quaternion pose vector'''
376 | tx = vec[0]
377 | ty = vec[1]
378 | tz = vec[2]
379 | trans = np.array([tx, ty, tz]).reshape((3,1))
380 | rot = quat2mat([vec[6], vec[3], vec[4], vec[5]]) # order: w, x, y, z
381 | Tmat = np.concatenate((rot, trans), axis=1)
382 | hfiller = np.array([0, 0, 0, 1]).reshape((1,4))
383 | Tmat = np.concatenate((Tmat, hfiller), axis=0)
384 | return Tmat
385 |
386 | def dump_pose_seq_TUM(out_file, poses, times):
387 | # First frame as the origin
388 | first_pose = pose_vec_to_mat(poses[0])
389 | with open(out_file, 'w') as f:
390 | for p in range(len(times)):
391 | this_pose = pose_vec_to_mat(poses[p])
392 | # this_pose = np.dot(this_pose, np.linalg.inv(first_pose))
393 | this_pose = np.dot(first_pose, np.linalg.inv(this_pose))
394 | tx = this_pose[0, 3]
395 | ty = this_pose[1, 3]
396 | tz = this_pose[2, 3]
397 | rot = this_pose[:3, :3]
398 | qw, qx, qy, qz = rot2quat(rot)
399 | f.write('%f %f %f %f %f %f %f %f\n' % (times[p], tx, ty, tz, qx, qy, qz, qw))
400 |
401 |
402 | # from minieigen import Quaternion, Vector3
403 | # def compute_motion_errors(predicted_motion, gt_motion, normalize_translations):
404 | # """
405 | # Computes the error of the motion.
406 |
407 | # predicted_motion: 1d numpy array with 6 elements
408 | # the motions as [aa1, aa2, aa3, tx, ty, tz]
409 | # aa1,aa2,aa3 is an angle axis representation.
410 | # The angle is the norm of the axis
411 |
412 | # gt_motion: 1d numpy array with 6 elements
413 | # ground truth motion in the same format as the predicted motion
414 |
415 | # normalize_translations: bool
416 | # If True then translations will be normalized before computing the error
417 |
418 | # Returns
419 | # rotation angular distance in radian
420 | #     translation distance of the normalized translations
421 | #     translation angular distance in radian
422 | # """
423 | # def _numpy_to_Vector3(arr):
424 | # tmp = arr.astype(np.float64)
425 | # return Vector3(tmp[0],tmp[1],tmp[2])
426 |
427 | # gt_axis = _numpy_to_Vector3(gt_motion[0:3])
428 | # gt_angle = gt_axis.norm()
429 | # if gt_angle < 1e-6:
430 | # gt_angle = 0
431 | # gt_axis = Vector3(1,0,0)
432 | # else:
433 | # gt_axis.normalize()
434 | # gt_q = Quaternion(gt_angle,gt_axis)
435 |
436 | # predicted_axis = _numpy_to_Vector3(predicted_motion[0:3])
437 | # predicted_angle = predicted_axis.norm()
438 | # if predicted_angle < 1e-6:
439 | # predicted_angle = 0
440 | # predicted_axis = Vector3(1,0,0)
441 | # else:
442 | # predicted_axis.normalize()
443 | # predicted_axis.normalize()
444 | # predicted_q = Quaternion(predicted_angle,predicted_axis)
445 |
446 | # rotation_angle_dist = gt_q.angularDistance(predicted_q)
447 |
448 | # gt_trans = _numpy_to_Vector3(gt_motion[3:6])
449 | # if normalize_translations:
450 | # gt_trans.normalize()
451 | # predicted_trans = _numpy_to_Vector3(predicted_motion[3:6])
452 | # if normalize_translations and predicted_trans.norm() > 1e-6:
453 | # predicted_trans.normalize()
454 | # translation_dist = (gt_trans-predicted_trans).norm()
455 |
456 | # translation_angle_diff = math.acos(np.clip(gt_trans.dot(predicted_trans),-1,1))
457 |
458 | # return np.rad2deg(rotation_angle_dist), translation_dist, np.rad2deg(translation_angle_diff)
459 |
--------------------------------------------------------------------------------
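
The scale in compute_ate is the closed-form least-squares fit: setting the derivative of sum ||s * pred - gt||^2 with respect to s to zero gives s* = sum(gt * pred) / sum(pred ** 2). A numerical sanity check of that closed form against a brute-force grid search (synthetic data, illustrative only):

import numpy as np

rng = np.random.default_rng(0)
gt = rng.normal(size=(100, 3))
pred = 0.7 * gt + 0.01 * rng.normal(size=(100, 3))   # roughly gt scaled down, plus noise

s_star = np.sum(gt * pred) / np.sum(pred ** 2)       # closed form, as in compute_ate

cands = np.linspace(0.5, 3.0, 2501)
errs = [np.sum((s * pred - gt) ** 2) for s in cands]
assert abs(cands[int(np.argmin(errs))] - s_star) < 1e-2
print("optimal scale:", s_star)
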
/tools/transformations.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # transformations.py
3 |
4 | # Copyright (c) 2006-2015, Christoph Gohlke
5 | # Copyright (c) 2006-2015, The Regents of the University of California
6 | # Produced at the Laboratory for Fluorescence Dynamics
7 | # All rights reserved.
8 | #
9 | # Redistribution and use in source and binary forms, with or without
10 | # modification, are permitted provided that the following conditions are met:
11 | #
12 | # * Redistributions of source code must retain the above copyright
13 | # notice, this list of conditions and the following disclaimer.
14 | # * Redistributions in binary form must reproduce the above copyright
15 | # notice, this list of conditions and the following disclaimer in the
16 | # documentation and/or other materials provided with the distribution.
17 | # * Neither the name of the copyright holders nor the names of any
18 | # contributors may be used to endorse or promote products derived
19 | # from this software without specific prior written permission.
20 | #
21 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 | # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 | # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 | # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 | # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 | # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 | # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 | # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 | # POSSIBILITY OF SUCH DAMAGE.
32 |
33 | """Homogeneous Transformation Matrices and Quaternions.
34 |
35 | A library for calculating 4x4 matrices for translating, rotating, reflecting,
36 | scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
37 | 3D homogeneous coordinates as well as for converting between rotation matrices,
38 | Euler angles, and quaternions. Also includes an Arcball control object and
39 | functions to decompose transformation matrices.
40 |
41 | :Author:
42 |   `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`_
43 |
44 | :Organization:
45 | Laboratory for Fluorescence Dynamics, University of California, Irvine
46 |
47 | :Version: 2015.07.18
48 |
49 | Requirements
50 | ------------
51 | * `CPython 2.7 or 3.4 <http://www.python.org>`_
52 | * `Numpy 1.9 <http://www.numpy.org>`_
53 | * `Transformations.c 2015.07.18 <http://www.lfd.uci.edu/~gohlke/>`_
54 | (recommended for speedup of some functions)
55 |
56 | Notes
57 | -----
58 | The API is not stable yet and is expected to change between revisions.
59 |
60 | This Python code is not optimized for speed. Refer to the transformations.c
61 | module for a faster implementation of some functions.
62 |
63 | Documentation in HTML format can be generated with epydoc.
64 |
65 | Matrices (M) can be inverted using numpy.linalg.inv(M), be concatenated using
66 | numpy.dot(M0, M1), or transform homogeneous coordinate arrays (v) using
67 | numpy.dot(M, v) for shape (4, \*) column vectors, respectively
68 | numpy.dot(v, M.T) for shape (\*, 4) row vectors ("array of points").
69 |
70 | This module follows the "column vectors on the right" and "row major storage"
71 | (C contiguous) conventions. The translation components are in the right column
72 | of the transformation matrix, i.e. M[:3, 3].
73 | The transpose of the transformation matrices may have to be used to interface
74 | with other graphics systems, e.g. with OpenGL's glMultMatrixd(). See also [16].
75 |
76 | Calculations are carried out with numpy.float64 precision.
77 |
78 | Vector, point, quaternion, and matrix function arguments are expected to be
79 | "array like", i.e. tuple, list, or numpy arrays.
80 |
81 | Return types are numpy arrays unless specified otherwise.
82 |
83 | Angles are in radians unless specified otherwise.
84 |
85 | Quaternions w+ix+jy+kz are represented as [w, x, y, z].
86 |
87 | A triple of Euler angles can be applied/interpreted in 24 ways, which can
88 | be specified using a 4 character string or encoded 4-tuple:
89 |
90 | *Axes 4-string*: e.g. 'sxyz' or 'ryxy'
91 |
92 | - first character : rotations are applied to 's'tatic or 'r'otating frame
93 | - remaining characters : successive rotation axis 'x', 'y', or 'z'
94 |
95 | *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
96 |
97 | - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
98 | - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
99 | by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
100 | - repetition : first and last axis are same (1) or different (0).
101 | - frame : rotations are applied to static (0) or rotating (1) frame.
102 |
103 | Other Python packages and modules for 3D transformations and quaternions:
104 |
105 | * `Transforms3d <https://pypi.python.org/pypi/transforms3d>`_
106 |   includes most code of this module.
107 | * `Blender.mathutils <http://www.blender.org/api/blender_python_api>`_
108 | * `numpy-dtypes <https://github.com/numpy/numpy-dtypes>`_
109 |
110 | References
111 | ----------
112 | (1) Matrices and transformations. Ronald Goldman.
113 | In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
114 | (2) More matrices and transformations: shear and pseudo-perspective.
115 | Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
116 | (3) Decomposing a matrix into simple transformations. Spencer Thomas.
117 | In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
118 | (4) Recovering the data from the transformation matrix. Ronald Goldman.
119 | In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
120 | (5) Euler angle conversion. Ken Shoemake.
121 | In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
122 | (6) Arcball rotation control. Ken Shoemake.
123 | In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
124 | (7) Representing attitude: Euler angles, unit quaternions, and rotation
125 | vectors. James Diebel. 2006.
126 | (8) A discussion of the solution for the best rotation to relate two sets
127 | of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
128 | (9) Closed-form solution of absolute orientation using unit quaternions.
129 | BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642.
130 | (10) Quaternions. Ken Shoemake.
131 | http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
132 | (11) From quaternion to matrix and back. JMP van Waveren. 2005.
133 | http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
134 | (12) Uniform random rotations. Ken Shoemake.
135 | In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
136 | (13) Quaternion in molecular modeling. CFF Karney.
137 | J Mol Graph Mod, 25(5):595-604
138 | (14) New method for extracting the quaternion from a rotation matrix.
139 | Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087.
140 | (15) Multiple View Geometry in Computer Vision. Hartley and Zissermann.
141 | Cambridge University Press; 2nd Ed. 2004. Chapter 4, Algorithm 4.7, p 130.
142 | (16) Column Vectors vs. Row Vectors.
143 | http://steve.hollasch.net/cgindex/math/matrix/column-vec.html
144 |
145 | Examples
146 | --------
147 | >>> alpha, beta, gamma = 0.123, -1.234, 2.345
148 | >>> origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
149 | >>> I = identity_matrix()
150 | >>> Rx = rotation_matrix(alpha, xaxis)
151 | >>> Ry = rotation_matrix(beta, yaxis)
152 | >>> Rz = rotation_matrix(gamma, zaxis)
153 | >>> R = concatenate_matrices(Rx, Ry, Rz)
154 | >>> euler = euler_from_matrix(R, 'rxyz')
155 | >>> numpy.allclose([alpha, beta, gamma], euler)
156 | True
157 | >>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
158 | >>> is_same_transform(R, Re)
159 | True
160 | >>> al, be, ga = euler_from_matrix(Re, 'rxyz')
161 | >>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
162 | True
163 | >>> qx = quaternion_about_axis(alpha, xaxis)
164 | >>> qy = quaternion_about_axis(beta, yaxis)
165 | >>> qz = quaternion_about_axis(gamma, zaxis)
166 | >>> q = quaternion_multiply(qx, qy)
167 | >>> q = quaternion_multiply(q, qz)
168 | >>> Rq = quaternion_matrix(q)
169 | >>> is_same_transform(R, Rq)
170 | True
171 | >>> S = scale_matrix(1.23, origin)
172 | >>> T = translation_matrix([1, 2, 3])
173 | >>> Z = shear_matrix(beta, xaxis, origin, zaxis)
174 | >>> R = random_rotation_matrix(numpy.random.rand(3))
175 | >>> M = concatenate_matrices(T, R, Z, S)
176 | >>> scale, shear, angles, trans, persp = decompose_matrix(M)
177 | >>> numpy.allclose(scale, 1.23)
178 | True
179 | >>> numpy.allclose(trans, [1, 2, 3])
180 | True
181 | >>> numpy.allclose(shear, [0, math.tan(beta), 0])
182 | True
183 | >>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
184 | True
185 | >>> M1 = compose_matrix(scale, shear, angles, trans, persp)
186 | >>> is_same_transform(M, M1)
187 | True
188 | >>> v0, v1 = random_vector(3), random_vector(3)
189 | >>> M = rotation_matrix(angle_between_vectors(v0, v1), vector_product(v0, v1))
190 | >>> v2 = numpy.dot(v0, M[:3,:3].T)
191 | >>> numpy.allclose(unit_vector(v1), unit_vector(v2))
192 | True
193 |
194 | """
195 |
196 | from __future__ import division, print_function
197 |
198 | import math
199 |
200 | import numpy
201 |
202 | __version__ = '2015.07.18'
203 | __docformat__ = 'restructuredtext en'
204 | __all__ = ()
205 |
206 | # TODO evo: added to suppress annoying warnings, see README.md
207 | import warnings
208 | warnings.filterwarnings("ignore", message="failed to import module _transformations")
209 |
210 |
211 | def identity_matrix():
212 | """Return 4x4 identity/unit matrix.
213 |
214 | >>> I = identity_matrix()
215 | >>> numpy.allclose(I, numpy.dot(I, I))
216 | True
217 | >>> numpy.sum(I), numpy.trace(I)
218 | (4.0, 4.0)
219 | >>> numpy.allclose(I, numpy.identity(4))
220 | True
221 |
222 | """
223 | return numpy.identity(4)
224 |
225 |
226 | def translation_matrix(direction):
227 | """Return matrix to translate by direction vector.
228 |
229 | >>> v = numpy.random.random(3) - 0.5
230 | >>> numpy.allclose(v, translation_matrix(v)[:3, 3])
231 | True
232 |
233 | """
234 | M = numpy.identity(4)
235 | M[:3, 3] = direction[:3]
236 | return M
237 |
238 |
239 | def translation_from_matrix(matrix):
240 | """Return translation vector from translation matrix.
241 |
242 | >>> v0 = numpy.random.random(3) - 0.5
243 | >>> v1 = translation_from_matrix(translation_matrix(v0))
244 | >>> numpy.allclose(v0, v1)
245 | True
246 |
247 | """
248 | return numpy.array(matrix, copy=False)[:3, 3].copy()
249 |
250 |
251 | def reflection_matrix(point, normal):
252 | """Return matrix to mirror at plane defined by point and normal vector.
253 |
254 | >>> v0 = numpy.random.random(4) - 0.5
255 | >>> v0[3] = 1.
256 | >>> v1 = numpy.random.random(3) - 0.5
257 | >>> R = reflection_matrix(v0, v1)
258 | >>> numpy.allclose(2, numpy.trace(R))
259 | True
260 | >>> numpy.allclose(v0, numpy.dot(R, v0))
261 | True
262 | >>> v2 = v0.copy()
263 | >>> v2[:3] += v1
264 | >>> v3 = v0.copy()
265 | >>> v2[:3] -= v1
266 | >>> numpy.allclose(v2, numpy.dot(R, v3))
267 | True
268 |
269 | """
270 | normal = unit_vector(normal[:3])
271 | M = numpy.identity(4)
272 | M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
273 | M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
274 | return M
275 |
276 |
277 | def reflection_from_matrix(matrix):
278 | """Return mirror plane point and normal vector from reflection matrix.
279 |
280 | >>> v0 = numpy.random.random(3) - 0.5
281 | >>> v1 = numpy.random.random(3) - 0.5
282 | >>> M0 = reflection_matrix(v0, v1)
283 | >>> point, normal = reflection_from_matrix(M0)
284 | >>> M1 = reflection_matrix(point, normal)
285 | >>> is_same_transform(M0, M1)
286 | True
287 |
288 | """
289 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)
290 | # normal: unit eigenvector corresponding to eigenvalue -1
291 | w, V = numpy.linalg.eig(M[:3, :3])
292 | i = numpy.where(abs(numpy.real(w) + 1.0) < 1e-8)[0]
293 | if not len(i):
294 | raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
295 | normal = numpy.real(V[:, i[0]]).squeeze()
296 | # point: any unit eigenvector corresponding to eigenvalue 1
297 | w, V = numpy.linalg.eig(M)
298 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
299 | if not len(i):
300 | raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
301 | point = numpy.real(V[:, i[-1]]).squeeze()
302 | point /= point[3]
303 | return point, normal
304 |
305 |
306 | def rotation_matrix(angle, direction, point=None):
307 | """Return matrix to rotate about axis defined by point and direction.
308 |
309 | >>> R = rotation_matrix(math.pi/2, [0, 0, 1], [1, 0, 0])
310 | >>> numpy.allclose(numpy.dot(R, [0, 0, 0, 1]), [1, -1, 0, 1])
311 | True
312 | >>> angle = (random.random() - 0.5) * (2*math.pi)
313 | >>> direc = numpy.random.random(3) - 0.5
314 | >>> point = numpy.random.random(3) - 0.5
315 | >>> R0 = rotation_matrix(angle, direc, point)
316 | >>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
317 | >>> is_same_transform(R0, R1)
318 | True
319 | >>> R0 = rotation_matrix(angle, direc, point)
320 | >>> R1 = rotation_matrix(-angle, -direc, point)
321 | >>> is_same_transform(R0, R1)
322 | True
323 | >>> I = numpy.identity(4, numpy.float64)
324 | >>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
325 | True
326 | >>> numpy.allclose(2, numpy.trace(rotation_matrix(math.pi/2,
327 | ... direc, point)))
328 | True
329 |
330 | """
331 | sina = math.sin(angle)
332 | cosa = math.cos(angle)
333 | direction = unit_vector(direction[:3])
334 | # rotation matrix around unit vector
335 | R = numpy.diag([cosa, cosa, cosa])
336 | R += numpy.outer(direction, direction) * (1.0 - cosa)
337 | direction *= sina
338 | R += numpy.array([[ 0.0, -direction[2], direction[1]],
339 | [ direction[2], 0.0, -direction[0]],
340 | [-direction[1], direction[0], 0.0]])
341 | M = numpy.identity(4)
342 | M[:3, :3] = R
343 | if point is not None:
344 | # rotation not around origin
345 | point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
346 | M[:3, 3] = point - numpy.dot(R, point)
347 | return M
348 |
349 |
350 | def rotation_from_matrix(matrix):
351 | """Return rotation angle and axis from rotation matrix.
352 |
353 | >>> angle = (random.random() - 0.5) * (2*math.pi)
354 | >>> direc = numpy.random.random(3) - 0.5
355 | >>> point = numpy.random.random(3) - 0.5
356 | >>> R0 = rotation_matrix(angle, direc, point)
357 | >>> angle, direc, point = rotation_from_matrix(R0)
358 | >>> R1 = rotation_matrix(angle, direc, point)
359 | >>> is_same_transform(R0, R1)
360 | True
361 |
362 | """
363 | R = numpy.array(matrix, dtype=numpy.float64, copy=False)
364 | R33 = R[:3, :3]
365 | # direction: unit eigenvector of R33 corresponding to eigenvalue of 1
366 | w, W = numpy.linalg.eig(R33.T)
367 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
368 | if not len(i):
369 | raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
370 | direction = numpy.real(W[:, i[-1]]).squeeze()
371 | # point: unit eigenvector of R33 corresponding to eigenvalue of 1
372 | w, Q = numpy.linalg.eig(R)
373 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
374 | if not len(i):
375 | raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
376 | point = numpy.real(Q[:, i[-1]]).squeeze()
377 | point /= point[3]
378 | # rotation angle depending on direction
379 | cosa = (numpy.trace(R33) - 1.0) / 2.0
380 | if abs(direction[2]) > 1e-8:
381 | sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
382 | elif abs(direction[1]) > 1e-8:
383 | sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
384 | else:
385 | sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
386 | angle = math.atan2(sina, cosa)
387 | return angle, direction, point
388 |
389 |
390 | def scale_matrix(factor, origin=None, direction=None):
391 | """Return matrix to scale by factor around origin in direction.
392 |
393 | Use factor -1 for point symmetry.
394 |
395 | >>> v = (numpy.random.rand(4, 5) - 0.5) * 20
396 | >>> v[3] = 1
397 | >>> S = scale_matrix(-1.234)
398 | >>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
399 | True
400 | >>> factor = random.random() * 10 - 5
401 | >>> origin = numpy.random.random(3) - 0.5
402 | >>> direct = numpy.random.random(3) - 0.5
403 | >>> S = scale_matrix(factor, origin)
404 | >>> S = scale_matrix(factor, origin, direct)
405 |
406 | """
407 | if direction is None:
408 | # uniform scaling
409 | M = numpy.diag([factor, factor, factor, 1.0])
410 | if origin is not None:
411 | M[:3, 3] = origin[:3]
412 | M[:3, 3] *= 1.0 - factor
413 | else:
414 | # nonuniform scaling
415 | direction = unit_vector(direction[:3])
416 | factor = 1.0 - factor
417 | M = numpy.identity(4)
418 | M[:3, :3] -= factor * numpy.outer(direction, direction)
419 | if origin is not None:
420 | M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
421 | return M
422 |
423 |
424 | def scale_from_matrix(matrix):
425 | """Return scaling factor, origin and direction from scaling matrix.
426 |
427 | >>> factor = random.random() * 10 - 5
428 | >>> origin = numpy.random.random(3) - 0.5
429 | >>> direct = numpy.random.random(3) - 0.5
430 | >>> S0 = scale_matrix(factor, origin)
431 | >>> factor, origin, direction = scale_from_matrix(S0)
432 | >>> S1 = scale_matrix(factor, origin, direction)
433 | >>> is_same_transform(S0, S1)
434 | True
435 | >>> S0 = scale_matrix(factor, origin, direct)
436 | >>> factor, origin, direction = scale_from_matrix(S0)
437 | >>> S1 = scale_matrix(factor, origin, direction)
438 | >>> is_same_transform(S0, S1)
439 | True
440 |
441 | """
442 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)
443 | M33 = M[:3, :3]
444 | factor = numpy.trace(M33) - 2.0
445 | try:
446 | # direction: unit eigenvector corresponding to eigenvalue factor
447 | w, V = numpy.linalg.eig(M33)
448 | i = numpy.where(abs(numpy.real(w) - factor) < 1e-8)[0][0]
449 | direction = numpy.real(V[:, i]).squeeze()
450 | direction /= vector_norm(direction)
451 | except IndexError:
452 | # uniform scaling
453 | factor = (factor + 2.0) / 3.0
454 | direction = None
455 | # origin: any eigenvector corresponding to eigenvalue 1
456 | w, V = numpy.linalg.eig(M)
457 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
458 | if not len(i):
459 | raise ValueError("no eigenvector corresponding to eigenvalue 1")
460 | origin = numpy.real(V[:, i[-1]]).squeeze()
461 | origin /= origin[3]
462 | return factor, origin, direction
463 |
464 |
465 | def projection_matrix(point, normal, direction=None,
466 | perspective=None, pseudo=False):
467 | """Return matrix to project onto plane defined by point and normal.
468 |
469 |     Using either a perspective point, a projection direction, or neither.
470 |
471 | If pseudo is True, perspective projections will preserve relative depth
472 | such that Perspective = dot(Orthogonal, PseudoPerspective).
473 |
474 | >>> P = projection_matrix([0, 0, 0], [1, 0, 0])
475 | >>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
476 | True
477 | >>> point = numpy.random.random(3) - 0.5
478 | >>> normal = numpy.random.random(3) - 0.5
479 | >>> direct = numpy.random.random(3) - 0.5
480 | >>> persp = numpy.random.random(3) - 0.5
481 | >>> P0 = projection_matrix(point, normal)
482 | >>> P1 = projection_matrix(point, normal, direction=direct)
483 | >>> P2 = projection_matrix(point, normal, perspective=persp)
484 | >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
485 | >>> is_same_transform(P2, numpy.dot(P0, P3))
486 | True
487 | >>> P = projection_matrix([3, 0, 0], [1, 1, 0], [1, 0, 0])
488 | >>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20
489 | >>> v0[3] = 1
490 | >>> v1 = numpy.dot(P, v0)
491 | >>> numpy.allclose(v1[1], v0[1])
492 | True
493 | >>> numpy.allclose(v1[0], 3-v1[1])
494 | True
495 |
496 | """
497 | M = numpy.identity(4)
498 | point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
499 | normal = unit_vector(normal[:3])
500 | if perspective is not None:
501 | # perspective projection
502 | perspective = numpy.array(perspective[:3], dtype=numpy.float64,
503 | copy=False)
504 | M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
505 | M[:3, :3] -= numpy.outer(perspective, normal)
506 | if pseudo:
507 | # preserve relative depth
508 | M[:3, :3] -= numpy.outer(normal, normal)
509 | M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
510 | else:
511 | M[:3, 3] = numpy.dot(point, normal) * perspective
512 | M[3, :3] = -normal
513 | M[3, 3] = numpy.dot(perspective, normal)
514 | elif direction is not None:
515 | # parallel projection
516 | direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
517 | scale = numpy.dot(direction, normal)
518 | M[:3, :3] -= numpy.outer(direction, normal) / scale
519 | M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
520 | else:
521 | # orthogonal projection
522 | M[:3, :3] -= numpy.outer(normal, normal)
523 | M[:3, 3] = numpy.dot(point, normal) * normal
524 | return M
525 |
526 |
527 | def projection_from_matrix(matrix, pseudo=False):
528 | """Return projection plane and perspective point from projection matrix.
529 |
530 |     Return values are the same as the arguments of the projection_matrix
531 |     function: point, normal, direction, perspective, and pseudo.
532 |
533 | >>> point = numpy.random.random(3) - 0.5
534 | >>> normal = numpy.random.random(3) - 0.5
535 | >>> direct = numpy.random.random(3) - 0.5
536 | >>> persp = numpy.random.random(3) - 0.5
537 | >>> P0 = projection_matrix(point, normal)
538 | >>> result = projection_from_matrix(P0)
539 | >>> P1 = projection_matrix(*result)
540 | >>> is_same_transform(P0, P1)
541 | True
542 | >>> P0 = projection_matrix(point, normal, direct)
543 | >>> result = projection_from_matrix(P0)
544 | >>> P1 = projection_matrix(*result)
545 | >>> is_same_transform(P0, P1)
546 | True
547 | >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
548 | >>> result = projection_from_matrix(P0, pseudo=False)
549 | >>> P1 = projection_matrix(*result)
550 | >>> is_same_transform(P0, P1)
551 | True
552 | >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
553 | >>> result = projection_from_matrix(P0, pseudo=True)
554 | >>> P1 = projection_matrix(*result)
555 | >>> is_same_transform(P0, P1)
556 | True
557 |
558 | """
559 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)
560 | M33 = M[:3, :3]
561 | w, V = numpy.linalg.eig(M)
562 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
563 | if not pseudo and len(i):
564 | # point: any eigenvector corresponding to eigenvalue 1
565 | point = numpy.real(V[:, i[-1]]).squeeze()
566 | point /= point[3]
567 | # direction: unit eigenvector corresponding to eigenvalue 0
568 | w, V = numpy.linalg.eig(M33)
569 | i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
570 | if not len(i):
571 | raise ValueError("no eigenvector corresponding to eigenvalue 0")
572 | direction = numpy.real(V[:, i[0]]).squeeze()
573 | direction /= vector_norm(direction)
574 | # normal: unit eigenvector of M33.T corresponding to eigenvalue 0
575 | w, V = numpy.linalg.eig(M33.T)
576 | i = numpy.where(abs(numpy.real(w)) < 1e-8)[0]
577 | if len(i):
578 | # parallel projection
579 | normal = numpy.real(V[:, i[0]]).squeeze()
580 | normal /= vector_norm(normal)
581 | return point, normal, direction, None, False
582 | else:
583 | # orthogonal projection, where normal equals direction vector
584 | return point, direction, None, None, False
585 | else:
586 | # perspective projection
587 | i = numpy.where(abs(numpy.real(w)) > 1e-8)[0]
588 | if not len(i):
589 |             raise ValueError(
590 |                 "no eigenvector corresponding to a nonzero eigenvalue")
591 | point = numpy.real(V[:, i[-1]]).squeeze()
592 | point /= point[3]
593 | normal = - M[3, :3]
594 | perspective = M[:3, 3] / numpy.dot(point[:3], normal)
595 | if pseudo:
596 | perspective -= normal
597 | return point, normal, None, perspective, pseudo
598 |
599 |
600 | def clip_matrix(left, right, bottom, top, near, far, perspective=False):
601 | """Return matrix to obtain normalized device coordinates from frustum.
602 |
603 | The frustum bounds are axis-aligned along x (left, right),
604 | y (bottom, top) and z (near, far).
605 |
606 | Normalized device coordinates are in range [-1, 1] if coordinates are
607 | inside the frustum.
608 |
609 | If perspective is True the frustum is a truncated pyramid with the
610 | perspective point at origin and direction along z axis, otherwise an
611 | orthographic canonical view volume (a box).
612 |
613 | Homogeneous coordinates transformed by the perspective clip matrix
614 | need to be dehomogenized (divided by w coordinate).
615 |
616 | >>> frustum = numpy.random.rand(6)
617 | >>> frustum[1] += frustum[0]
618 | >>> frustum[3] += frustum[2]
619 | >>> frustum[5] += frustum[4]
620 | >>> M = clip_matrix(perspective=False, *frustum)
621 | >>> numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
622 | array([-1., -1., -1., 1.])
623 | >>> numpy.dot(M, [frustum[1], frustum[3], frustum[5], 1])
624 | array([ 1., 1., 1., 1.])
625 | >>> M = clip_matrix(perspective=True, *frustum)
626 | >>> v = numpy.dot(M, [frustum[0], frustum[2], frustum[4], 1])
627 | >>> v / v[3]
628 | array([-1., -1., -1., 1.])
629 | >>> v = numpy.dot(M, [frustum[1], frustum[3], frustum[4], 1])
630 | >>> v / v[3]
631 | array([ 1., 1., -1., 1.])
632 |
633 | """
634 | if left >= right or bottom >= top or near >= far:
635 | raise ValueError("invalid frustum")
636 | if perspective:
637 | if near <= _EPS:
638 | raise ValueError("invalid frustum: near <= 0")
639 | t = 2.0 * near
640 | M = [[t/(left-right), 0.0, (right+left)/(right-left), 0.0],
641 | [0.0, t/(bottom-top), (top+bottom)/(top-bottom), 0.0],
642 | [0.0, 0.0, (far+near)/(near-far), t*far/(far-near)],
643 | [0.0, 0.0, -1.0, 0.0]]
644 | else:
645 | M = [[2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)],
646 | [0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)],
647 | [0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)],
648 | [0.0, 0.0, 0.0, 1.0]]
649 | return numpy.array(M)
650 |
651 |
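# ---- Editor's illustrative sketch (not part of the original module). ----
# A perspective clip matrix maps view-space points inside the frustum to
# normalized device coordinates after division by the w coordinate.  The
# hypothetical helper below dehomogenizes one frustum corner by hand.

def _demo_clip_matrix_ndc():
    """Map the near/bottom/left frustum corner to NDC (-1, -1, -1)."""
    left, right, bottom, top, near, far = -1.0, 1.0, -1.0, 1.0, 0.1, 10.0
    M = clip_matrix(left, right, bottom, top, near, far, perspective=True)
    v = numpy.dot(M, [left, bottom, near, 1.0])
    v /= v[3]  # perspective divide
    assert numpy.allclose(v, [-1.0, -1.0, -1.0, 1.0])

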
652 | def shear_matrix(angle, direction, point, normal):
653 | """Return matrix to shear by angle along direction vector on shear plane.
654 |
655 | The shear plane is defined by a point and normal vector. The direction
656 | vector must be orthogonal to the plane's normal vector.
657 |
658 | A point P is transformed by the shear matrix into P" such that
659 | the vector P-P" is parallel to the direction vector and its extent is
660 | given by the angle of P-P'-P", where P' is the orthogonal projection
661 | of P onto the shear plane.
662 |
663 | >>> angle = (random.random() - 0.5) * 4*math.pi
664 | >>> direct = numpy.random.random(3) - 0.5
665 | >>> point = numpy.random.random(3) - 0.5
666 | >>> normal = numpy.cross(direct, numpy.random.random(3))
667 | >>> S = shear_matrix(angle, direct, point, normal)
668 | >>> numpy.allclose(1, numpy.linalg.det(S))
669 | True
670 |
671 | """
672 | normal = unit_vector(normal[:3])
673 | direction = unit_vector(direction[:3])
674 | if abs(numpy.dot(normal, direction)) > 1e-6:
675 | raise ValueError("direction and normal vectors are not orthogonal")
676 | angle = math.tan(angle)
677 | M = numpy.identity(4)
678 | M[:3, :3] += angle * numpy.outer(direction, normal)
679 | M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
680 | return M
681 |
682 |
683 | def shear_from_matrix(matrix):
684 | """Return shear angle, direction and plane from shear matrix.
685 |
686 | >>> angle = (random.random() - 0.5) * 4*math.pi
687 | >>> direct = numpy.random.random(3) - 0.5
688 | >>> point = numpy.random.random(3) - 0.5
689 | >>> normal = numpy.cross(direct, numpy.random.random(3))
690 | >>> S0 = shear_matrix(angle, direct, point, normal)
691 | >>> angle, direct, point, normal = shear_from_matrix(S0)
692 | >>> S1 = shear_matrix(angle, direct, point, normal)
693 | >>> is_same_transform(S0, S1)
694 | True
695 |
696 | """
697 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)
698 | M33 = M[:3, :3]
699 |     # normal: cross product of independent eigenvectors for eigenvalue 1
700 | w, V = numpy.linalg.eig(M33)
701 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-4)[0]
702 | if len(i) < 2:
703 |         raise ValueError("no two linearly independent eigenvectors found %s" % w)
704 | V = numpy.real(V[:, i]).squeeze().T
705 | lenorm = -1.0
706 | for i0, i1 in ((0, 1), (0, 2), (1, 2)):
707 | n = numpy.cross(V[i0], V[i1])
708 | w = vector_norm(n)
709 | if w > lenorm:
710 | lenorm = w
711 | normal = n
712 | normal /= lenorm
713 | # direction and angle
714 | direction = numpy.dot(M33 - numpy.identity(3), normal)
715 | angle = vector_norm(direction)
716 | direction /= angle
717 | angle = math.atan(angle)
718 | # point: eigenvector corresponding to eigenvalue 1
719 | w, V = numpy.linalg.eig(M)
720 | i = numpy.where(abs(numpy.real(w) - 1.0) < 1e-8)[0]
721 | if not len(i):
722 | raise ValueError("no eigenvector corresponding to eigenvalue 1")
723 | point = numpy.real(V[:, i[-1]]).squeeze()
724 | point /= point[3]
725 | return angle, direction, point, normal
726 |
727 |
728 | def decompose_matrix(matrix):
729 | """Return sequence of transformations from transformation matrix.
730 |
731 | matrix : array_like
732 |         Non-degenerate homogeneous transformation matrix
733 |
734 | Return tuple of:
735 | scale : vector of 3 scaling factors
736 | shear : list of shear factors for x-y, x-z, y-z axes
737 | angles : list of Euler angles about static x, y, z axes
738 | translate : translation vector along x, y, z axes
739 | perspective : perspective partition of matrix
740 |
741 |     Raise ValueError if matrix is of wrong type or degenerate.
742 |
743 | >>> T0 = translation_matrix([1, 2, 3])
744 | >>> scale, shear, angles, trans, persp = decompose_matrix(T0)
745 | >>> T1 = translation_matrix(trans)
746 | >>> numpy.allclose(T0, T1)
747 | True
748 | >>> S = scale_matrix(0.123)
749 | >>> scale, shear, angles, trans, persp = decompose_matrix(S)
750 | >>> scale[0]
751 | 0.123
752 | >>> R0 = euler_matrix(1, 2, 3)
753 | >>> scale, shear, angles, trans, persp = decompose_matrix(R0)
754 | >>> R1 = euler_matrix(*angles)
755 | >>> numpy.allclose(R0, R1)
756 | True
757 |
758 | """
759 | M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
760 | if abs(M[3, 3]) < _EPS:
761 | raise ValueError("M[3, 3] is zero")
762 | M /= M[3, 3]
763 | P = M.copy()
764 | P[:, 3] = 0.0, 0.0, 0.0, 1.0
765 | if not numpy.linalg.det(P):
766 | raise ValueError("matrix is singular")
767 |
768 | scale = numpy.zeros((3, ))
769 | shear = [0.0, 0.0, 0.0]
770 | angles = [0.0, 0.0, 0.0]
771 |
772 | if any(abs(M[:3, 3]) > _EPS):
773 | perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
774 | M[:, 3] = 0.0, 0.0, 0.0, 1.0
775 | else:
776 | perspective = numpy.array([0.0, 0.0, 0.0, 1.0])
777 |
778 | translate = M[3, :3].copy()
779 | M[3, :3] = 0.0
780 |
781 | row = M[:3, :3].copy()
782 | scale[0] = vector_norm(row[0])
783 | row[0] /= scale[0]
784 | shear[0] = numpy.dot(row[0], row[1])
785 | row[1] -= row[0] * shear[0]
786 | scale[1] = vector_norm(row[1])
787 | row[1] /= scale[1]
788 | shear[0] /= scale[1]
789 | shear[1] = numpy.dot(row[0], row[2])
790 | row[2] -= row[0] * shear[1]
791 | shear[2] = numpy.dot(row[1], row[2])
792 | row[2] -= row[1] * shear[2]
793 | scale[2] = vector_norm(row[2])
794 | row[2] /= scale[2]
795 | shear[1:] /= scale[2]
796 |
797 | if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
798 | numpy.negative(scale, scale)
799 | numpy.negative(row, row)
800 |
801 | angles[1] = math.asin(-row[0, 2])
802 | if math.cos(angles[1]):
803 | angles[0] = math.atan2(row[1, 2], row[2, 2])
804 | angles[2] = math.atan2(row[0, 1], row[0, 0])
805 | else:
806 |         # angles[0] = math.atan2(row[1, 0], row[1, 1])
807 | angles[0] = math.atan2(-row[2, 1], row[1, 1])
808 | angles[2] = 0.0
809 |
810 | return scale, shear, angles, translate, perspective
811 |
812 |
813 | def compose_matrix(scale=None, shear=None, angles=None, translate=None,
814 | perspective=None):
815 | """Return transformation matrix from sequence of transformations.
816 |
817 | This is the inverse of the decompose_matrix function.
818 |
819 | Sequence of transformations:
820 | scale : vector of 3 scaling factors
821 | shear : list of shear factors for x-y, x-z, y-z axes
822 | angles : list of Euler angles about static x, y, z axes
823 | translate : translation vector along x, y, z axes
824 | perspective : perspective partition of matrix
825 |
826 | >>> scale = numpy.random.random(3) - 0.5
827 | >>> shear = numpy.random.random(3) - 0.5
828 | >>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
829 | >>> trans = numpy.random.random(3) - 0.5
830 | >>> persp = numpy.random.random(4) - 0.5
831 | >>> M0 = compose_matrix(scale, shear, angles, trans, persp)
832 | >>> result = decompose_matrix(M0)
833 | >>> M1 = compose_matrix(*result)
834 | >>> is_same_transform(M0, M1)
835 | True
836 |
837 | """
838 | M = numpy.identity(4)
839 | if perspective is not None:
840 | P = numpy.identity(4)
841 | P[3, :] = perspective[:4]
842 | M = numpy.dot(M, P)
843 | if translate is not None:
844 | T = numpy.identity(4)
845 | T[:3, 3] = translate[:3]
846 | M = numpy.dot(M, T)
847 | if angles is not None:
848 | R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
849 | M = numpy.dot(M, R)
850 | if shear is not None:
851 | Z = numpy.identity(4)
852 | Z[1, 2] = shear[2]
853 | Z[0, 2] = shear[1]
854 | Z[0, 1] = shear[0]
855 | M = numpy.dot(M, Z)
856 | if scale is not None:
857 | S = numpy.identity(4)
858 | S[0, 0] = scale[0]
859 | S[1, 1] = scale[1]
860 | S[2, 2] = scale[2]
861 | M = numpy.dot(M, S)
862 | M /= M[3, 3]
863 | return M
864 |
865 |
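# ---- Editor's illustrative sketch (not part of the original module). ----
# compose_matrix() is the inverse of decompose_matrix(): a matrix built from
# scale, shear, Euler angles, translation and perspective can be taken apart
# and reassembled.  The hypothetical helper below round-trips a pose.

def _demo_compose_decompose_roundtrip():
    """Rebuild a transformation matrix from its decomposed parts."""
    M0 = compose_matrix(scale=[1.0, 2.0, 3.0],
                        angles=[0.1, -0.2, 0.3],
                        translate=[4.0, 5.0, 6.0])
    scale, shear, angles, translate, perspective = decompose_matrix(M0)
    M1 = compose_matrix(scale, shear, angles, translate, perspective)
    assert is_same_transform(M0, M1)

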
866 | def orthogonalization_matrix(lengths, angles):
867 | """Return orthogonalization matrix for crystallographic cell coordinates.
868 |
869 | Angles are expected in degrees.
870 |
871 | The de-orthogonalization matrix is the inverse.
872 |
873 | >>> O = orthogonalization_matrix([10, 10, 10], [90, 90, 90])
874 | >>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
875 | True
876 | >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
877 | >>> numpy.allclose(numpy.sum(O), 43.063229)
878 | True
879 |
880 | """
881 | a, b, c = lengths
882 | angles = numpy.radians(angles)
883 | sina, sinb, _ = numpy.sin(angles)
884 | cosa, cosb, cosg = numpy.cos(angles)
885 | co = (cosa * cosb - cosg) / (sina * sinb)
886 | return numpy.array([
887 | [ a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0],
888 | [-a*sinb*co, b*sina, 0.0, 0.0],
889 | [ a*cosb, b*cosa, c, 0.0],
890 | [ 0.0, 0.0, 0.0, 1.0]])
891 |
892 |
893 | def affine_matrix_from_points(v0, v1, shear=True, scale=True, usesvd=True):
894 | """Return affine transform matrix to register two point sets.
895 |
896 |     v0 and v1 are shape (ndims, *) arrays of at least ndims non-homogeneous
897 | coordinates, where ndims is the dimensionality of the coordinate space.
898 |
899 | If shear is False, a similarity transformation matrix is returned.
900 | If also scale is False, a rigid/Euclidean transformation matrix
901 | is returned.
902 |
903 |     By default, the algorithm by Hartley and Zisserman [15] is used.
904 | If usesvd is True, similarity and Euclidean transformation matrices
905 | are calculated by minimizing the weighted sum of squared deviations
906 | (RMSD) according to the algorithm by Kabsch [8].
907 |     Otherwise, and if ndims is 3, the quaternion-based algorithm by Horn [9]
908 |     is used, which is slower in this pure-Python implementation.
909 |
910 | The returned matrix performs rotation, translation and uniform scaling
911 | (if specified).
912 |
913 | >>> v0 = [[0, 1031, 1031, 0], [0, 0, 1600, 1600]]
914 | >>> v1 = [[675, 826, 826, 677], [55, 52, 281, 277]]
915 | >>> affine_matrix_from_points(v0, v1)
916 | array([[ 0.14549, 0.00062, 675.50008],
917 | [ 0.00048, 0.14094, 53.24971],
918 | [ 0. , 0. , 1. ]])
919 | >>> T = translation_matrix(numpy.random.random(3)-0.5)
920 | >>> R = random_rotation_matrix(numpy.random.random(3))
921 | >>> S = scale_matrix(random.random())
922 | >>> M = concatenate_matrices(T, R, S)
923 | >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
924 | >>> v0[3] = 1
925 | >>> v1 = numpy.dot(M, v0)
926 | >>> v0[:3] += numpy.random.normal(0, 1e-8, 300).reshape(3, -1)
927 | >>> M = affine_matrix_from_points(v0[:3], v1[:3])
928 | >>> numpy.allclose(v1, numpy.dot(M, v0))
929 | True
930 |
931 | More examples in superimposition_matrix()
932 |
933 | """
934 | v0 = numpy.array(v0, dtype=numpy.float64, copy=True)
935 | v1 = numpy.array(v1, dtype=numpy.float64, copy=True)
936 |
937 | ndims = v0.shape[0]
938 | if ndims < 2 or v0.shape[1] < ndims or v0.shape != v1.shape:
939 | raise ValueError("input arrays are of wrong shape or type")
940 |
941 | # move centroids to origin
942 | t0 = -numpy.mean(v0, axis=1)
943 | M0 = numpy.identity(ndims+1)
944 | M0[:ndims, ndims] = t0
945 | v0 += t0.reshape(ndims, 1)
946 | t1 = -numpy.mean(v1, axis=1)
947 | M1 = numpy.identity(ndims+1)
948 | M1[:ndims, ndims] = t1
949 | v1 += t1.reshape(ndims, 1)
950 |
951 | if shear:
952 | # Affine transformation
953 | A = numpy.concatenate((v0, v1), axis=0)
954 | u, s, vh = numpy.linalg.svd(A.T)
955 | vh = vh[:ndims].T
956 | B = vh[:ndims]
957 | C = vh[ndims:2*ndims]
958 | t = numpy.dot(C, numpy.linalg.pinv(B))
959 | t = numpy.concatenate((t, numpy.zeros((ndims, 1))), axis=1)
960 | M = numpy.vstack((t, ((0.0,)*ndims) + (1.0,)))
961 | elif usesvd or ndims != 3:
962 | # Rigid transformation via SVD of covariance matrix
963 | u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
964 | # rotation matrix from SVD orthonormal bases
965 | R = numpy.dot(u, vh)
966 | if numpy.linalg.det(R) < 0.0:
967 | # R does not constitute right handed system
968 | R -= numpy.outer(u[:, ndims-1], vh[ndims-1, :]*2.0)
969 | s[-1] *= -1.0
970 | # homogeneous transformation matrix
971 | M = numpy.identity(ndims+1)
972 | M[:ndims, :ndims] = R
973 | else:
974 | # Rigid transformation matrix via quaternion
975 | # compute symmetric matrix N
976 | xx, yy, zz = numpy.sum(v0 * v1, axis=1)
977 | xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
978 | xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
979 | N = [[xx+yy+zz, 0.0, 0.0, 0.0],
980 | [yz-zy, xx-yy-zz, 0.0, 0.0],
981 | [zx-xz, xy+yx, yy-xx-zz, 0.0],
982 | [xy-yx, zx+xz, yz+zy, zz-xx-yy]]
983 | # quaternion: eigenvector corresponding to most positive eigenvalue
984 | w, V = numpy.linalg.eigh(N)
985 | q = V[:, numpy.argmax(w)]
986 | q /= vector_norm(q) # unit quaternion
987 | # homogeneous transformation matrix
988 | M = quaternion_matrix(q)
989 |
990 | if scale and not shear:
991 |         # similarity transform; scale is ratio of RMS deviations from centroid
992 | v0 *= v0
993 | v1 *= v1
994 | M[:ndims, :ndims] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
995 |
996 | # move centroids back
997 | M = numpy.dot(numpy.linalg.inv(M1), numpy.dot(M, M0))
998 | M /= M[ndims, ndims]
999 | return M
1000 |
1001 |
1002 | def superimposition_matrix(v0, v1, scale=False, usesvd=True):
1003 | """Return matrix to transform given 3D point set into second point set.
1004 |
1005 |     v0 and v1 are shape (3, *) or (4, *) arrays of at least 3 points.
1006 |
1007 | The parameters scale and usesvd are explained in the more general
1008 | affine_matrix_from_points function.
1009 |
1010 | The returned matrix is a similarity or Euclidean transformation matrix.
1011 | This function has a fast C implementation in transformations.c.
1012 |
1013 | >>> v0 = numpy.random.rand(3, 10)
1014 | >>> M = superimposition_matrix(v0, v0)
1015 | >>> numpy.allclose(M, numpy.identity(4))
1016 | True
1017 | >>> R = random_rotation_matrix(numpy.random.random(3))
1018 | >>> v0 = [[1,0,0], [0,1,0], [0,0,1], [1,1,1]]
1019 | >>> v1 = numpy.dot(R, v0)
1020 | >>> M = superimposition_matrix(v0, v1)
1021 | >>> numpy.allclose(v1, numpy.dot(M, v0))
1022 | True
1023 | >>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20
1024 | >>> v0[3] = 1
1025 | >>> v1 = numpy.dot(R, v0)
1026 | >>> M = superimposition_matrix(v0, v1)
1027 | >>> numpy.allclose(v1, numpy.dot(M, v0))
1028 | True
1029 | >>> S = scale_matrix(random.random())
1030 | >>> T = translation_matrix(numpy.random.random(3)-0.5)
1031 | >>> M = concatenate_matrices(T, R, S)
1032 | >>> v1 = numpy.dot(M, v0)
1033 | >>> v0[:3] += numpy.random.normal(0, 1e-9, 300).reshape(3, -1)
1034 | >>> M = superimposition_matrix(v0, v1, scale=True)
1035 | >>> numpy.allclose(v1, numpy.dot(M, v0))
1036 | True
1037 | >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
1038 | >>> numpy.allclose(v1, numpy.dot(M, v0))
1039 | True
1040 | >>> v = numpy.empty((4, 100, 3))
1041 | >>> v[:, :, 0] = v0
1042 | >>> M = superimposition_matrix(v0, v1, scale=True, usesvd=False)
1043 | >>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
1044 | True
1045 |
1046 | """
1047 | v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
1048 | v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
1049 | return affine_matrix_from_points(v0, v1, shear=False,
1050 | scale=scale, usesvd=usesvd)
1051 |
1052 |
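# ---- Editor's illustrative sketch (not part of the original module). ----
# superimposition_matrix() solves the absolute orientation problem: given
# matched 3D point sets, it finds the rigid transform mapping one onto the
# other.  The hypothetical helper below recovers a known pose exactly from
# noise-free correspondences.

def _demo_superimposition():
    """Recover a rigid transform from corresponding point sets."""
    T = concatenate_matrices(translation_matrix([1.0, 2.0, 3.0]),
                             rotation_matrix(0.5, [0.0, 0.0, 1.0]))
    v0 = numpy.random.rand(3, 20)                # source points, shape (3, n)
    v1 = numpy.dot(T[:3, :3], v0) + T[:3, 3:4]   # rotated and translated copy
    M = superimposition_matrix(v0, v1)
    assert numpy.allclose(M, T)

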
1053 | def euler_matrix(ai, aj, ak, axes='sxyz'):
1054 | """Return homogeneous rotation matrix from Euler angles and axis sequence.
1055 |
1056 | ai, aj, ak : Euler's roll, pitch and yaw angles
1057 | axes : One of 24 axis sequences as string or encoded tuple
1058 |
1059 | >>> R = euler_matrix(1, 2, 3, 'syxz')
1060 | >>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
1061 | True
1062 | >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
1063 | >>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
1064 | True
1065 | >>> ai, aj, ak = (4*math.pi) * (numpy.random.random(3) - 0.5)
1066 | >>> for axes in _AXES2TUPLE.keys():
1067 | ... R = euler_matrix(ai, aj, ak, axes)
1068 | >>> for axes in _TUPLE2AXES.keys():
1069 | ... R = euler_matrix(ai, aj, ak, axes)
1070 |
1071 | """
1072 | try:
1073 | firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
1074 | except (AttributeError, KeyError):
1075 | _TUPLE2AXES[axes] # validation
1076 | firstaxis, parity, repetition, frame = axes
1077 |
1078 | i = firstaxis
1079 | j = _NEXT_AXIS[i+parity]
1080 | k = _NEXT_AXIS[i-parity+1]
1081 |
1082 | if frame:
1083 | ai, ak = ak, ai
1084 | if parity:
1085 | ai, aj, ak = -ai, -aj, -ak
1086 |
1087 | si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
1088 | ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
1089 | cc, cs = ci*ck, ci*sk
1090 | sc, ss = si*ck, si*sk
1091 |
1092 | M = numpy.identity(4)
1093 | if repetition:
1094 | M[i, i] = cj
1095 | M[i, j] = sj*si
1096 | M[i, k] = sj*ci
1097 | M[j, i] = sj*sk
1098 | M[j, j] = -cj*ss+cc
1099 | M[j, k] = -cj*cs-sc
1100 | M[k, i] = -sj*ck
1101 | M[k, j] = cj*sc+cs
1102 | M[k, k] = cj*cc-ss
1103 | else:
1104 | M[i, i] = cj*ck
1105 | M[i, j] = sj*sc-cs
1106 | M[i, k] = sj*cc+ss
1107 | M[j, i] = cj*sk
1108 | M[j, j] = sj*ss+cc
1109 | M[j, k] = sj*cs-sc
1110 | M[k, i] = -sj
1111 | M[k, j] = cj*si
1112 | M[k, k] = cj*ci
1113 | return M
1114 |
1115 |
1116 | def euler_from_matrix(matrix, axes='sxyz'):
1117 | """Return Euler angles from rotation matrix for specified axis sequence.
1118 |
1119 | axes : One of 24 axis sequences as string or encoded tuple
1120 |
1121 | Note that many Euler angle triplets can describe one matrix.
1122 |
1123 | >>> R0 = euler_matrix(1, 2, 3, 'syxz')
1124 | >>> al, be, ga = euler_from_matrix(R0, 'syxz')
1125 | >>> R1 = euler_matrix(al, be, ga, 'syxz')
1126 | >>> numpy.allclose(R0, R1)
1127 | True
1128 | >>> angles = (4*math.pi) * (numpy.random.random(3) - 0.5)
1129 | >>> for axes in _AXES2TUPLE.keys():
1130 | ... R0 = euler_matrix(axes=axes, *angles)
1131 | ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
1132 | ... if not numpy.allclose(R0, R1): print(axes, "failed")
1133 |
1134 | """
1135 | try:
1136 | firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
1137 | except (AttributeError, KeyError):
1138 | _TUPLE2AXES[axes] # validation
1139 | firstaxis, parity, repetition, frame = axes
1140 |
1141 | i = firstaxis
1142 | j = _NEXT_AXIS[i+parity]
1143 | k = _NEXT_AXIS[i-parity+1]
1144 |
1145 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
1146 | if repetition:
1147 | sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
1148 | if sy > _EPS:
1149 | ax = math.atan2( M[i, j], M[i, k])
1150 | ay = math.atan2( sy, M[i, i])
1151 | az = math.atan2( M[j, i], -M[k, i])
1152 | else:
1153 | ax = math.atan2(-M[j, k], M[j, j])
1154 | ay = math.atan2( sy, M[i, i])
1155 | az = 0.0
1156 | else:
1157 | cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
1158 | if cy > _EPS:
1159 | ax = math.atan2( M[k, j], M[k, k])
1160 | ay = math.atan2(-M[k, i], cy)
1161 | az = math.atan2( M[j, i], M[i, i])
1162 | else:
1163 | ax = math.atan2(-M[j, k], M[j, j])
1164 | ay = math.atan2(-M[k, i], cy)
1165 | az = 0.0
1166 |
1167 | if parity:
1168 | ax, ay, az = -ax, -ay, -az
1169 | if frame:
1170 | ax, az = az, ax
1171 | return ax, ay, az
1172 |
1173 |
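# ---- Editor's illustrative sketch (not part of the original module). ----
# Euler angle triplets are not unique, but angles recovered with
# euler_from_matrix() always reproduce the original matrix for the same axis
# sequence.  A hypothetical round-trip for the rotating-frame 'rxyz' order:

def _demo_euler_roundtrip():
    """Round-trip Euler angles through a rotation matrix for 'rxyz'."""
    R0 = euler_matrix(0.1, -0.2, 0.3, 'rxyz')
    ai, aj, ak = euler_from_matrix(R0, 'rxyz')
    R1 = euler_matrix(ai, aj, ak, 'rxyz')
    assert numpy.allclose(R0, R1)

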
1174 | def euler_from_quaternion(quaternion, axes='sxyz'):
1175 | """Return Euler angles from quaternion for specified axis sequence.
1176 |
1177 | >>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
1178 | >>> numpy.allclose(angles, [0.123, 0, 0])
1179 | True
1180 |
1181 | """
1182 | return euler_from_matrix(quaternion_matrix(quaternion), axes)
1183 |
1184 |
1185 | def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
1186 | """Return quaternion from Euler angles and axis sequence.
1187 |
1188 | ai, aj, ak : Euler's roll, pitch and yaw angles
1189 | axes : One of 24 axis sequences as string or encoded tuple
1190 |
1191 | >>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
1192 | >>> numpy.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435])
1193 | True
1194 |
1195 | """
1196 | try:
1197 | firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
1198 | except (AttributeError, KeyError):
1199 | _TUPLE2AXES[axes] # validation
1200 | firstaxis, parity, repetition, frame = axes
1201 |
1202 | i = firstaxis + 1
1203 | j = _NEXT_AXIS[i+parity-1] + 1
1204 | k = _NEXT_AXIS[i-parity] + 1
1205 |
1206 | if frame:
1207 | ai, ak = ak, ai
1208 | if parity:
1209 | aj = -aj
1210 |
1211 | ai /= 2.0
1212 | aj /= 2.0
1213 | ak /= 2.0
1214 | ci = math.cos(ai)
1215 | si = math.sin(ai)
1216 | cj = math.cos(aj)
1217 | sj = math.sin(aj)
1218 | ck = math.cos(ak)
1219 | sk = math.sin(ak)
1220 | cc = ci*ck
1221 | cs = ci*sk
1222 | sc = si*ck
1223 | ss = si*sk
1224 |
1225 | q = numpy.empty((4, ))
1226 | if repetition:
1227 | q[0] = cj*(cc - ss)
1228 | q[i] = cj*(cs + sc)
1229 | q[j] = sj*(cc + ss)
1230 | q[k] = sj*(cs - sc)
1231 | else:
1232 | q[0] = cj*cc + sj*ss
1233 | q[i] = cj*sc - sj*cs
1234 | q[j] = cj*ss + sj*cc
1235 | q[k] = cj*cs - sj*sc
1236 | if parity:
1237 | q[j] *= -1.0
1238 |
1239 | return q
1240 |
1241 |
1242 | def quaternion_about_axis(angle, axis):
1243 | """Return quaternion for rotation about axis.
1244 |
1245 | >>> q = quaternion_about_axis(0.123, [1, 0, 0])
1246 | >>> numpy.allclose(q, [0.99810947, 0.06146124, 0, 0])
1247 | True
1248 |
1249 | """
1250 | q = numpy.array([0.0, axis[0], axis[1], axis[2]])
1251 | qlen = vector_norm(q)
1252 | if qlen > _EPS:
1253 | q *= math.sin(angle/2.0) / qlen
1254 | q[0] = math.cos(angle/2.0)
1255 | return q
1256 |
1257 |
1258 | def quaternion_matrix(quaternion):
1259 | """Return homogeneous rotation matrix from quaternion.
1260 |
1261 | >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
1262 | >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
1263 | True
1264 | >>> M = quaternion_matrix([1, 0, 0, 0])
1265 | >>> numpy.allclose(M, numpy.identity(4))
1266 | True
1267 | >>> M = quaternion_matrix([0, 1, 0, 0])
1268 | >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
1269 | True
1270 |
1271 | """
1272 | q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
1273 | n = numpy.dot(q, q)
1274 | if n < _EPS:
1275 | return numpy.identity(4)
1276 | q *= math.sqrt(2.0 / n)
1277 | q = numpy.outer(q, q)
1278 | return numpy.array([
1279 | [1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0], 0.0],
1280 | [ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0], 0.0],
1281 | [ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
1282 | [ 0.0, 0.0, 0.0, 1.0]])
1283 |
1284 |
1285 | def quaternion_from_matrix(matrix, isprecise=False):
1286 | """Return quaternion from rotation matrix.
1287 |
1288 | If isprecise is True, the input matrix is assumed to be a precise rotation
1289 | matrix and a faster algorithm is used.
1290 |
1291 | >>> q = quaternion_from_matrix(numpy.identity(4), True)
1292 | >>> numpy.allclose(q, [1, 0, 0, 0])
1293 | True
1294 | >>> q = quaternion_from_matrix(numpy.diag([1, -1, -1, 1]))
1295 | >>> numpy.allclose(q, [0, 1, 0, 0]) or numpy.allclose(q, [0, -1, 0, 0])
1296 | True
1297 | >>> R = rotation_matrix(0.123, (1, 2, 3))
1298 | >>> q = quaternion_from_matrix(R, True)
1299 | >>> numpy.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786])
1300 | True
1301 | >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
1302 | ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
1303 | >>> q = quaternion_from_matrix(R)
1304 | >>> numpy.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611])
1305 | True
1306 | >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
1307 | ... [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
1308 | >>> q = quaternion_from_matrix(R)
1309 | >>> numpy.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603])
1310 | True
1311 | >>> R = random_rotation_matrix()
1312 | >>> q = quaternion_from_matrix(R)
1313 | >>> is_same_transform(R, quaternion_matrix(q))
1314 | True
1315 | >>> R = euler_matrix(0.0, 0.0, numpy.pi/2.0)
1316 | >>> numpy.allclose(quaternion_from_matrix(R, isprecise=False),
1317 | ... quaternion_from_matrix(R, isprecise=True))
1318 | True
1319 |
1320 | """
1321 | M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
1322 | if isprecise:
1323 | q = numpy.empty((4, ))
1324 | t = numpy.trace(M)
1325 | if t > M[3, 3]:
1326 | q[0] = t
1327 | q[3] = M[1, 0] - M[0, 1]
1328 | q[2] = M[0, 2] - M[2, 0]
1329 | q[1] = M[2, 1] - M[1, 2]
1330 | else:
1331 | i, j, k = 1, 2, 3
1332 | if M[1, 1] > M[0, 0]:
1333 | i, j, k = 2, 3, 1
1334 | if M[2, 2] > M[i, i]:
1335 | i, j, k = 3, 1, 2
1336 | t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
1337 | q[i] = t
1338 | q[j] = M[i, j] + M[j, i]
1339 | q[k] = M[k, i] + M[i, k]
1340 | q[3] = M[k, j] - M[j, k]
1341 | q *= 0.5 / math.sqrt(t * M[3, 3])
1342 | else:
1343 | m00 = M[0, 0]
1344 | m01 = M[0, 1]
1345 | m02 = M[0, 2]
1346 | m10 = M[1, 0]
1347 | m11 = M[1, 1]
1348 | m12 = M[1, 2]
1349 | m20 = M[2, 0]
1350 | m21 = M[2, 1]
1351 | m22 = M[2, 2]
1352 | # symmetric matrix K
1353 | K = numpy.array([[m00-m11-m22, 0.0, 0.0, 0.0],
1354 | [m01+m10, m11-m00-m22, 0.0, 0.0],
1355 | [m02+m20, m12+m21, m22-m00-m11, 0.0],
1356 | [m21-m12, m02-m20, m10-m01, m00+m11+m22]])
1357 | K /= 3.0
1358 | # quaternion is eigenvector of K that corresponds to largest eigenvalue
1359 | w, V = numpy.linalg.eigh(K)
1360 | q = V[[3, 0, 1, 2], numpy.argmax(w)]
1361 | if q[0] < 0.0:
1362 | numpy.negative(q, q)
1363 | return q
1364 |
1365 |
1366 | def quaternion_multiply(quaternion1, quaternion0):
1367 | """Return multiplication of two quaternions.
1368 |
1369 | >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7])
1370 | >>> numpy.allclose(q, [28, -44, -14, 48])
1371 | True
1372 |
1373 | """
1374 | w0, x0, y0, z0 = quaternion0
1375 | w1, x1, y1, z1 = quaternion1
1376 | return numpy.array([-x1*x0 - y1*y0 - z1*z0 + w1*w0,
1377 | x1*w0 + y1*z0 - z1*y0 + w1*x0,
1378 | -x1*z0 + y1*w0 + z1*x0 + w1*y0,
1379 | x1*y0 - y1*x0 + z1*w0 + w1*z0], dtype=numpy.float64)
1380 |
1381 |
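# ---- Editor's illustrative sketch (not part of the original module). ----
# Quaternions in this module are stored scalar-first, [w, x, y, z], and
# quaternion_multiply(q1, q0) composes rotations in the Hamilton convention,
# so the matrix of a product equals the product of the matrices.

def _demo_quaternion_composition():
    """Composing quaternions matches composing their rotation matrices."""
    q0 = quaternion_about_axis(0.4, [1.0, 0.0, 0.0])
    q1 = quaternion_about_axis(-0.9, [0.0, 1.0, 0.0])
    Mq = quaternion_matrix(quaternion_multiply(q1, q0))
    MM = numpy.dot(quaternion_matrix(q1), quaternion_matrix(q0))
    assert numpy.allclose(Mq, MM)

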
1382 | def quaternion_conjugate(quaternion):
1383 | """Return conjugate of quaternion.
1384 |
1385 | >>> q0 = random_quaternion()
1386 | >>> q1 = quaternion_conjugate(q0)
1387 | >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:])
1388 | True
1389 |
1390 | """
1391 | q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
1392 | numpy.negative(q[1:], q[1:])
1393 | return q
1394 |
1395 |
1396 | def quaternion_inverse(quaternion):
1397 | """Return inverse of quaternion.
1398 |
1399 | >>> q0 = random_quaternion()
1400 | >>> q1 = quaternion_inverse(q0)
1401 | >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
1402 | True
1403 |
1404 | """
1405 | q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
1406 | numpy.negative(q[1:], q[1:])
1407 | return q / numpy.dot(q, q)
1408 |
1409 |
1410 | def quaternion_real(quaternion):
1411 | """Return real part of quaternion.
1412 |
1413 | >>> quaternion_real([3, 0, 1, 2])
1414 | 3.0
1415 |
1416 | """
1417 | return float(quaternion[0])
1418 |
1419 |
1420 | def quaternion_imag(quaternion):
1421 | """Return imaginary part of quaternion.
1422 |
1423 | >>> quaternion_imag([3, 0, 1, 2])
1424 | array([ 0., 1., 2.])
1425 |
1426 | """
1427 | return numpy.array(quaternion[1:4], dtype=numpy.float64, copy=True)
1428 |
1429 |
1430 | def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
1431 | """Return spherical linear interpolation between two quaternions.
1432 |
1433 | >>> q0 = random_quaternion()
1434 | >>> q1 = random_quaternion()
1435 | >>> q = quaternion_slerp(q0, q1, 0)
1436 | >>> numpy.allclose(q, q0)
1437 | True
1438 | >>> q = quaternion_slerp(q0, q1, 1, 1)
1439 | >>> numpy.allclose(q, q1)
1440 | True
1441 | >>> q = quaternion_slerp(q0, q1, 0.5)
1442 | >>> angle = math.acos(numpy.dot(q0, q))
1443 | >>> numpy.allclose(2, math.acos(numpy.dot(q0, q1)) / angle) or \
1444 | numpy.allclose(2, math.acos(-numpy.dot(q0, q1)) / angle)
1445 | True
1446 |
1447 | """
1448 | q0 = unit_vector(quat0[:4])
1449 | q1 = unit_vector(quat1[:4])
1450 | if fraction == 0.0:
1451 | return q0
1452 | elif fraction == 1.0:
1453 | return q1
1454 | d = numpy.dot(q0, q1)
1455 | if abs(abs(d) - 1.0) < _EPS:
1456 | return q0
1457 | if shortestpath and d < 0.0:
1458 | # invert rotation
1459 | d = -d
1460 | numpy.negative(q1, q1)
1461 | angle = math.acos(d) + spin * math.pi
1462 | if abs(angle) < _EPS:
1463 | return q0
1464 | isin = 1.0 / math.sin(angle)
1465 | q0 *= math.sin((1.0 - fraction) * angle) * isin
1466 | q1 *= math.sin(fraction * angle) * isin
1467 | q0 += q1
1468 | return q0
1469 |
1470 |
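# ---- Editor's illustrative sketch (not part of the original module). ----
# quaternion_slerp() interpolates at constant angular velocity along the
# great arc between two unit quaternions, so the half-way quaternion is
# equidistant in angle from both endpoints.  A hypothetical check:

def _demo_slerp_midpoint():
    """The slerp midpoint forms equal angles with both endpoints."""
    q0 = quaternion_about_axis(0.0, [1.0, 0.0, 0.0])
    q1 = quaternion_about_axis(1.0, [1.0, 0.0, 0.0])
    qm = quaternion_slerp(q0, q1, 0.5)
    a0 = math.acos(min(1.0, abs(numpy.dot(q0, qm))))
    a1 = math.acos(min(1.0, abs(numpy.dot(q1, qm))))
    assert numpy.allclose(a0, a1)

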
1471 | def random_quaternion(rand=None):
1472 | """Return uniform random unit quaternion.
1473 |
1474 |     rand: array_like or None
1475 | Three independent random variables that are uniformly distributed
1476 | between 0 and 1.
1477 |
1478 | >>> q = random_quaternion()
1479 | >>> numpy.allclose(1, vector_norm(q))
1480 | True
1481 | >>> q = random_quaternion(numpy.random.random(3))
1482 | >>> len(q.shape), q.shape[0]==4
1483 | (1, True)
1484 |
1485 | """
1486 | if rand is None:
1487 | rand = numpy.random.rand(3)
1488 | else:
1489 | assert len(rand) == 3
1490 | r1 = numpy.sqrt(1.0 - rand[0])
1491 | r2 = numpy.sqrt(rand[0])
1492 | pi2 = math.pi * 2.0
1493 | t1 = pi2 * rand[1]
1494 | t2 = pi2 * rand[2]
1495 | return numpy.array([numpy.cos(t2)*r2, numpy.sin(t1)*r1,
1496 | numpy.cos(t1)*r1, numpy.sin(t2)*r2])
1497 |
1498 |
1499 | def random_rotation_matrix(rand=None):
1500 | """Return uniform random rotation matrix.
1501 |
1502 |     rand: array_like
1503 | Three independent random variables that are uniformly distributed
1504 | between 0 and 1 for each returned quaternion.
1505 |
1506 | >>> R = random_rotation_matrix()
1507 | >>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
1508 | True
1509 |
1510 | """
1511 | return quaternion_matrix(random_quaternion(rand))
1512 |
1513 |
1514 | class Arcball(object):
1515 | """Virtual Trackball Control.
1516 |
1517 | >>> ball = Arcball()
1518 | >>> ball = Arcball(initial=numpy.identity(4))
1519 | >>> ball.place([320, 320], 320)
1520 | >>> ball.down([500, 250])
1521 | >>> ball.drag([475, 275])
1522 | >>> R = ball.matrix()
1523 | >>> numpy.allclose(numpy.sum(R), 3.90583455)
1524 | True
1525 | >>> ball = Arcball(initial=[1, 0, 0, 0])
1526 | >>> ball.place([320, 320], 320)
1527 | >>> ball.setaxes([1, 1, 0], [-1, 1, 0])
1528 | >>> ball.constrain = True
1529 | >>> ball.down([400, 200])
1530 | >>> ball.drag([200, 400])
1531 | >>> R = ball.matrix()
1532 | >>> numpy.allclose(numpy.sum(R), 0.2055924)
1533 | True
1534 | >>> ball.next()
1535 |
1536 | """
1537 | def __init__(self, initial=None):
1538 | """Initialize virtual trackball control.
1539 |
1540 | initial : quaternion or rotation matrix
1541 |
1542 | """
1543 | self._axis = None
1544 | self._axes = None
1545 | self._radius = 1.0
1546 | self._center = [0.0, 0.0]
1547 | self._vdown = numpy.array([0.0, 0.0, 1.0])
1548 | self._constrain = False
1549 | if initial is None:
1550 | self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
1551 | else:
1552 | initial = numpy.array(initial, dtype=numpy.float64)
1553 | if initial.shape == (4, 4):
1554 | self._qdown = quaternion_from_matrix(initial)
1555 | elif initial.shape == (4, ):
1556 | initial /= vector_norm(initial)
1557 | self._qdown = initial
1558 | else:
1559 | raise ValueError("initial not a quaternion or matrix")
1560 | self._qnow = self._qpre = self._qdown
1561 |
1562 | def place(self, center, radius):
1563 | """Place Arcball, e.g. when window size changes.
1564 |
1565 | center : sequence[2]
1566 | Window coordinates of trackball center.
1567 | radius : float
1568 | Radius of trackball in window coordinates.
1569 |
1570 | """
1571 | self._radius = float(radius)
1572 | self._center[0] = center[0]
1573 | self._center[1] = center[1]
1574 |
1575 | def setaxes(self, *axes):
1576 | """Set axes to constrain rotations."""
1577 | if axes is None:
1578 | self._axes = None
1579 | else:
1580 | self._axes = [unit_vector(axis) for axis in axes]
1581 |
1582 | @property
1583 | def constrain(self):
1584 | """Return state of constrain to axis mode."""
1585 | return self._constrain
1586 |
1587 | @constrain.setter
1588 | def constrain(self, value):
1589 | """Set state of constrain to axis mode."""
1590 | self._constrain = bool(value)
1591 |
1592 | def down(self, point):
1593 | """Set initial cursor window coordinates and pick constrain-axis."""
1594 | self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
1595 | self._qdown = self._qpre = self._qnow
1596 | if self._constrain and self._axes is not None:
1597 | self._axis = arcball_nearest_axis(self._vdown, self._axes)
1598 | self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
1599 | else:
1600 | self._axis = None
1601 |
1602 | def drag(self, point):
1603 | """Update current cursor window coordinates."""
1604 | vnow = arcball_map_to_sphere(point, self._center, self._radius)
1605 | if self._axis is not None:
1606 | vnow = arcball_constrain_to_axis(vnow, self._axis)
1607 | self._qpre = self._qnow
1608 | t = numpy.cross(self._vdown, vnow)
1609 | if numpy.dot(t, t) < _EPS:
1610 | self._qnow = self._qdown
1611 | else:
1612 | q = [numpy.dot(self._vdown, vnow), t[0], t[1], t[2]]
1613 | self._qnow = quaternion_multiply(q, self._qdown)
1614 |
1615 | def next(self, acceleration=0.0):
1616 | """Continue rotation in direction of last drag."""
1617 | q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
1618 | self._qpre, self._qnow = self._qnow, q
1619 |
1620 | def matrix(self):
1621 | """Return homogeneous rotation matrix."""
1622 | return quaternion_matrix(self._qnow)
1623 |
1624 |
1625 | def arcball_map_to_sphere(point, center, radius):
1626 | """Return unit sphere coordinates from window coordinates."""
1627 | v0 = (point[0] - center[0]) / radius
1628 | v1 = (center[1] - point[1]) / radius
1629 | n = v0*v0 + v1*v1
1630 | if n > 1.0:
1631 | # position outside of sphere
1632 | n = math.sqrt(n)
1633 | return numpy.array([v0/n, v1/n, 0.0])
1634 | else:
1635 | return numpy.array([v0, v1, math.sqrt(1.0 - n)])
1636 |
1637 |
1638 | def arcball_constrain_to_axis(point, axis):
1639 | """Return sphere point perpendicular to axis."""
1640 | v = numpy.array(point, dtype=numpy.float64, copy=True)
1641 | a = numpy.array(axis, dtype=numpy.float64, copy=True)
1642 | v -= a * numpy.dot(a, v) # on plane
1643 | n = vector_norm(v)
1644 | if n > _EPS:
1645 | if v[2] < 0.0:
1646 | numpy.negative(v, v)
1647 | v /= n
1648 | return v
1649 | if a[2] == 1.0:
1650 | return numpy.array([1.0, 0.0, 0.0])
1651 | return unit_vector([-a[1], a[0], 0.0])
1652 |
1653 |
1654 | def arcball_nearest_axis(point, axes):
1655 | """Return axis, which arc is nearest to point."""
1656 | point = numpy.array(point, dtype=numpy.float64, copy=False)
1657 | nearest = None
1658 | mx = -1.0
1659 | for axis in axes:
1660 | t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
1661 | if t > mx:
1662 | nearest = axis
1663 | mx = t
1664 | return nearest
1665 |
1666 |
1667 | # epsilon for testing whether a number is close to zero
1668 | _EPS = numpy.finfo(float).eps * 4.0
1669 |
1670 | # axis sequences for Euler angles
1671 | _NEXT_AXIS = [1, 2, 0, 1]
1672 |
1673 | # map axes strings to/from tuples of inner axis, parity, repetition, frame
1674 | _AXES2TUPLE = {
1675 | 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
1676 | 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
1677 | 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
1678 | 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
1679 | 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
1680 | 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
1681 | 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
1682 | 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
1683 |
1684 | _TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
1685 |
1686 |
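# ---- Editor's illustrative sketch (not part of the original module). ----
# The 4-tuples above encode (firstaxis, parity, repetition, frame): e.g.
# 'sxyz' -> (0, 0, 0, 0) means the first elemental rotation is about the x
# axis (0), with even parity, no repeated axis, and a static frame.  String
# and tuple forms are interchangeable wherever an `axes` argument appears.

def _demo_axes_encoding():
    """String and tuple axis specifications are interchangeable."""
    R0 = euler_matrix(1.0, 2.0, 3.0, 'sxyz')
    R1 = euler_matrix(1.0, 2.0, 3.0, (0, 0, 0, 0))
    assert numpy.allclose(R0, R1)

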
1687 | def vector_norm(data, axis=None, out=None):
1688 | """Return length, i.e. Euclidean norm, of ndarray along axis.
1689 |
1690 | >>> v = numpy.random.random(3)
1691 | >>> n = vector_norm(v)
1692 | >>> numpy.allclose(n, numpy.linalg.norm(v))
1693 | True
1694 | >>> v = numpy.random.rand(6, 5, 3)
1695 | >>> n = vector_norm(v, axis=-1)
1696 | >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
1697 | True
1698 | >>> n = vector_norm(v, axis=1)
1699 | >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
1700 | True
1701 | >>> v = numpy.random.rand(5, 4, 3)
1702 | >>> n = numpy.empty((5, 3))
1703 | >>> vector_norm(v, axis=1, out=n)
1704 | >>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
1705 | True
1706 | >>> vector_norm([])
1707 | 0.0
1708 | >>> vector_norm([1])
1709 | 1.0
1710 |
1711 | """
1712 | data = numpy.array(data, dtype=numpy.float64, copy=True)
1713 | if out is None:
1714 | if data.ndim == 1:
1715 | return math.sqrt(numpy.dot(data, data))
1716 | data *= data
1717 | out = numpy.atleast_1d(numpy.sum(data, axis=axis))
1718 | numpy.sqrt(out, out)
1719 | return out
1720 | else:
1721 | data *= data
1722 | numpy.sum(data, axis=axis, out=out)
1723 | numpy.sqrt(out, out)
1724 |
1725 |
1726 | def unit_vector(data, axis=None, out=None):
1727 | """Return ndarray normalized by length, i.e. Euclidean norm, along axis.
1728 |
1729 | >>> v0 = numpy.random.random(3)
1730 | >>> v1 = unit_vector(v0)
1731 | >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
1732 | True
1733 | >>> v0 = numpy.random.rand(5, 4, 3)
1734 | >>> v1 = unit_vector(v0, axis=-1)
1735 | >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
1736 | >>> numpy.allclose(v1, v2)
1737 | True
1738 | >>> v1 = unit_vector(v0, axis=1)
1739 | >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
1740 | >>> numpy.allclose(v1, v2)
1741 | True
1742 | >>> v1 = numpy.empty((5, 4, 3))
1743 | >>> unit_vector(v0, axis=1, out=v1)
1744 | >>> numpy.allclose(v1, v2)
1745 | True
1746 | >>> list(unit_vector([]))
1747 | []
1748 | >>> list(unit_vector([1]))
1749 | [1.0]
1750 |
1751 | """
1752 | if out is None:
1753 | data = numpy.array(data, dtype=numpy.float64, copy=True)
1754 | if data.ndim == 1:
1755 | data /= math.sqrt(numpy.dot(data, data))
1756 | return data
1757 | else:
1758 | if out is not data:
1759 | out[:] = numpy.array(data, copy=False)
1760 | data = out
1761 | length = numpy.atleast_1d(numpy.sum(data*data, axis))
1762 | numpy.sqrt(length, length)
1763 | if axis is not None:
1764 | length = numpy.expand_dims(length, axis)
1765 | data /= length
1766 | if out is None:
1767 | return data
1768 |
1769 |
1770 | def random_vector(size):
1771 | """Return array of random doubles in the half-open interval [0.0, 1.0).
1772 |
1773 | >>> v = random_vector(10000)
1774 | >>> numpy.all(v >= 0) and numpy.all(v < 1)
1775 | True
1776 | >>> v0 = random_vector(10)
1777 | >>> v1 = random_vector(10)
1778 | >>> numpy.any(v0 == v1)
1779 | False
1780 |
1781 | """
1782 | return numpy.random.random(size)
1783 |
1784 |
1785 | def vector_product(v0, v1, axis=0):
1786 | """Return vector perpendicular to vectors.
1787 |
1788 | >>> v = vector_product([2, 0, 0], [0, 3, 0])
1789 | >>> numpy.allclose(v, [0, 0, 6])
1790 | True
1791 | >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
1792 | >>> v1 = [[3], [0], [0]]
1793 | >>> v = vector_product(v0, v1)
1794 | >>> numpy.allclose(v, [[0, 0, 0, 0], [0, 0, 6, 6], [0, -6, 0, -6]])
1795 | True
1796 | >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
1797 | >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
1798 | >>> v = vector_product(v0, v1, axis=1)
1799 | >>> numpy.allclose(v, [[0, 0, 6], [0, -6, 0], [6, 0, 0], [0, -6, 6]])
1800 | True
1801 |
1802 | """
1803 | return numpy.cross(v0, v1, axis=axis)
1804 |
1805 |
1806 | def angle_between_vectors(v0, v1, directed=True, axis=0):
1807 | """Return angle between vectors.
1808 |
1809 | If directed is False, the input vectors are interpreted as undirected axes,
1810 | i.e. the maximum angle is pi/2.
1811 |
1812 | >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3])
1813 | >>> numpy.allclose(a, math.pi)
1814 | True
1815 | >>> a = angle_between_vectors([1, -2, 3], [-1, 2, -3], directed=False)
1816 | >>> numpy.allclose(a, 0)
1817 | True
1818 | >>> v0 = [[2, 0, 0, 2], [0, 2, 0, 2], [0, 0, 2, 2]]
1819 | >>> v1 = [[3], [0], [0]]
1820 | >>> a = angle_between_vectors(v0, v1)
1821 | >>> numpy.allclose(a, [0, 1.5708, 1.5708, 0.95532])
1822 | True
1823 | >>> v0 = [[2, 0, 0], [2, 0, 0], [0, 2, 0], [2, 0, 0]]
1824 | >>> v1 = [[0, 3, 0], [0, 0, 3], [0, 0, 3], [3, 3, 3]]
1825 | >>> a = angle_between_vectors(v0, v1, axis=1)
1826 | >>> numpy.allclose(a, [1.5708, 1.5708, 1.5708, 0.95532])
1827 | True
1828 |
1829 | """
1830 | v0 = numpy.array(v0, dtype=numpy.float64, copy=False)
1831 | v1 = numpy.array(v1, dtype=numpy.float64, copy=False)
1832 | dot = numpy.sum(v0 * v1, axis=axis)
1833 | dot /= vector_norm(v0, axis=axis) * vector_norm(v1, axis=axis)
1834 | return numpy.arccos(dot if directed else numpy.fabs(dot))
1835 |
1836 |
1837 | def inverse_matrix(matrix):
1838 | """Return inverse of square transformation matrix.
1839 |
1840 | >>> M0 = random_rotation_matrix()
1841 | >>> M1 = inverse_matrix(M0.T)
1842 | >>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
1843 | True
1844 | >>> for size in range(1, 7):
1845 | ... M0 = numpy.random.rand(size, size)
1846 | ... M1 = inverse_matrix(M0)
1847 | ... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print(size)
1848 |
1849 | """
1850 | return numpy.linalg.inv(matrix)
1851 |
1852 |
1853 | def concatenate_matrices(*matrices):
1854 | """Return concatenation of series of transformation matrices.
1855 |
1856 | >>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
1857 | >>> numpy.allclose(M, concatenate_matrices(M))
1858 | True
1859 | >>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
1860 | True
1861 |
1862 | """
1863 | M = numpy.identity(4)
1864 | for i in matrices:
1865 | M = numpy.dot(M, i)
1866 | return M
1867 |
1868 |
1869 | def is_same_transform(matrix0, matrix1):
1870 | """Return True if two matrices perform same transformation.
1871 |
1872 | >>> is_same_transform(numpy.identity(4), numpy.identity(4))
1873 | True
1874 | >>> is_same_transform(numpy.identity(4), random_rotation_matrix())
1875 | False
1876 |
1877 | """
1878 | matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
1879 | matrix0 /= matrix0[3, 3]
1880 | matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
1881 | matrix1 /= matrix1[3, 3]
1882 | return numpy.allclose(matrix0, matrix1)
1883 |
1884 |
1885 | def _import_module(name, package=None, warn=True, prefix='_py_', ignore='_'):
1886 | """Try import all public attributes from module into global namespace.
1887 |
1888 | Existing attributes with name clashes are renamed with prefix.
1889 | Attributes starting with underscore are ignored by default.
1890 |
1891 | Return True on successful import.
1892 |
1893 | """
1894 | import warnings
1895 | from importlib import import_module
1896 | try:
1897 | if not package:
1898 | module = import_module(name)
1899 | else:
1900 | module = import_module('.' + name, package=package)
1901 | except ImportError:
1902 | if warn:
1903 | warnings.warn("failed to import module %s" % name)
1904 | else:
1905 | for attr in dir(module):
1906 | if ignore and attr.startswith(ignore):
1907 | continue
1908 | if prefix:
1909 | if attr in globals():
1910 | globals()[prefix + attr] = globals()[attr]
1911 | elif warn:
1912 | warnings.warn("no Python implementation of " + attr)
1913 | globals()[attr] = getattr(module, attr)
1914 | return True
1915 |
1916 |
1917 | _import_module('_transformations')
1918 |
1919 | if __name__ == "__main__":
1920 | import doctest
1921 | import random # used in doctests
1922 | numpy.set_printoptions(suppress=True, precision=5)
1923 | doctest.testmod()
1924 |
1925 |
--------------------------------------------------------------------------------