├── .gitignore
├── MAVSlam
├── .DS_Store
├── .gitignore
├── .project
├── .settings
│ └── org.eclipse.jdt.core.prefs
├── LICENSE.md
├── RealSense.png
├── build.xml
├── dis
│ └── .gitignore
├── heading.png
├── lib
│ ├── .gitignore
│ ├── BoofCV-calibration-0.23.jar
│ ├── BoofCV-feature-0.23.jar
│ ├── BoofCV-geo-0.23.jar
│ ├── BoofCV-io-0.23.jar
│ ├── BoofCV-ip-0.23.jar
│ ├── BoofCV-jcodec-0.23.jar
│ ├── BoofCV-learning-0.23.jar
│ ├── BoofCV-recognition-0.23.jar
│ ├── BoofCV-sfm-0.23.jar
│ ├── BoofCV-visualize-0.23.jar
│ ├── core-0.29.jar
│ ├── ddogleg-0.9.jar
│ ├── dense64-0.29.jar
│ ├── equation-0.29.jar
│ ├── fastcast-3.0.jar
│ ├── fst-2.42-onejar.jar
│ ├── georegression-0.10.jar
│ ├── gson-2.8.5.jar
│ ├── http.jar
│ ├── jna-4.2.2.jar
│ ├── jna-platform-4.2.2.jar
│ ├── jnaerator-runtime-0.13.jar
│ ├── mavcomm.jar
│ ├── mavmq.jar
│ ├── simple-0.29.jar
│ ├── xmlpull-1.1.3.1.jar
│ ├── xpp3_min-1.1.4c.jar
│ └── xstream-1.4.7.jar
├── microslam.png
├── msp.properties
├── native
│ ├── librealsense.dylib
│ └── librealsense.so
├── obstacle.png
├── screenshot6.png
└── src
│ ├── .DS_Store
│ └── com
│ ├── .gitignore
│ └── comino
│ ├── .gitignore
│ ├── dev
│ ├── CoVarianceEjML.java
│ └── RotationMatrixTest.java
│ ├── librealsense
│ ├── .gitignore
│ └── wrapper
│ │ ├── LibRealSenseIntrinsics.java
│ │ ├── LibRealSenseUtils.java
│ │ └── LibRealSenseWrapper.java
│ ├── realsense
│ ├── .gitignore
│ └── boofcv
│ │ ├── RealSenseInfo.java
│ │ ├── StreamRealSenseTest.java
│ │ ├── StreamRealSenseTestVIO.java
│ │ └── StreamRealSenseVisDepth.java
│ ├── server
│ └── mjpeg
│ │ ├── IMJPEGOverlayListener.java
│ │ ├── IVisualStreamHandler.java
│ │ └── impl
│ │ └── HttpMJPEGHandler.java
│ └── slam
│ ├── boofcv
│ ├── MAVDepthVisualOdometry.java
│ ├── sfm
│ │ ├── DepthSparse3D.java
│ │ └── DepthSparse3D_to_PixelTo3D.java
│ ├── vio
│ │ ├── FactoryMAVOdometryVIO.java
│ │ ├── odometry
│ │ │ ├── MAVOdomPixelDepthPnPVIO.java
│ │ │ └── MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO.java
│ │ └── tracker
│ │ │ └── FactoryMAVPointTrackerTwoPassVIO.java
│ └── vo
│ │ ├── FactoryMAVOdometry.java
│ │ ├── odometry
│ │ ├── MAVOdomPixelDepthPnP.java
│ │ └── MAVOdomPixelDepthPnP_to_DepthVisualOdometry.java
│ │ └── tracker
│ │ └── FactoryMAVPointTrackerTwoPass.java
│ ├── detectors
│ ├── ISLAMDetector.java
│ └── impl
│ │ ├── DirectDepthDetector.java
│ │ ├── FwDirectDepthDetector.java
│ │ ├── VfhDepthDetector.java
│ │ ├── VfhDynamicDirectDepthDetector.java
│ │ └── VfhFeatureDetector.java
│ ├── estimators
│ ├── IPositionEstimator.java
│ ├── vio
│ │ └── MAVVisualPositionEstimatorVIO.java
│ └── vo
│ │ └── MAVVisualPositionEstimatorVO.java
│ └── main
│ └── StartUp.java
└── README.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | MAVSlam/.classpath
3 | MAVSlam/dis
4 | MAVSlam/bin
5 | MAVSlam/build_number
--------------------------------------------------------------------------------
/MAVSlam/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/.DS_Store
--------------------------------------------------------------------------------
/MAVSlam/.gitignore:
--------------------------------------------------------------------------------
1 | /bin/
2 | /dis/
3 | /build.number
4 |
--------------------------------------------------------------------------------
/MAVSlam/.project:
--------------------------------------------------------------------------------
1 |
2 |
3 | MAVSlam
4 |
5 |
6 |
7 |
8 |
9 | org.eclipse.jdt.core.javabuilder
10 |
11 |
12 |
13 |
14 |
15 | org.eclipse.jdt.core.javanature
16 |
17 |
18 |
--------------------------------------------------------------------------------
/MAVSlam/.settings/org.eclipse.jdt.core.prefs:
--------------------------------------------------------------------------------
1 | eclipse.preferences.version=1
2 | org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
3 | org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
4 | org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
5 | org.eclipse.jdt.core.compiler.compliance=1.8
6 | org.eclipse.jdt.core.compiler.debug.lineNumber=generate
7 | org.eclipse.jdt.core.compiler.debug.localVariable=generate
8 | org.eclipse.jdt.core.compiler.debug.sourceFile=generate
9 | org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
10 | org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
11 | org.eclipse.jdt.core.compiler.source=1.8
12 |
--------------------------------------------------------------------------------
/MAVSlam/LICENSE.md:
--------------------------------------------------------------------------------
1 | The MAVGCL is licensed generally under a permissive 3-clause BSD license. Contributions are required to be made under the same license.
2 |
3 | ```
4 | /****************************************************************************
5 | *
6 | * Copyright (c) 2016 Eike Mansfeld ecm@gmx.de. All rights reserved.
7 | *
8 | * Redistribution and use in source and binary forms, with or without
9 | * modification, are permitted provided that the following conditions
10 | * are met:
11 | *
12 | * 1. Redistributions of source code must retain the above copyright
13 | * notice, this list of conditions and the following disclaimer.
14 | * 2. Redistributions in binary form must reproduce the above copyright
15 | * notice, this list of conditions and the following disclaimer in
16 | * the documentation and/or other materials provided with the
17 | * distribution.
18 | * 3. Neither the name of the copyright holder nor the names of its
19 | contributors may be used to endorse or promote products derived
20 | from this software without specific prior written permission.
21 | *
22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
29 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 | * POSSIBILITY OF SUCH DAMAGE.
34 | *
35 | ****************************************************************************/
36 | ```
--------------------------------------------------------------------------------
/MAVSlam/RealSense.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/RealSense.png
--------------------------------------------------------------------------------
/MAVSlam/build.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
28 |
29 |
30 |
31 |
32 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 | Current build number:${build.number}
76 |
77 |
78 |
79 |
80 |
81 |
82 |
--------------------------------------------------------------------------------
/MAVSlam/dis/.gitignore:
--------------------------------------------------------------------------------
1 | /.DS_Store
2 |
--------------------------------------------------------------------------------
/MAVSlam/heading.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/heading.png
--------------------------------------------------------------------------------
/MAVSlam/lib/.gitignore:
--------------------------------------------------------------------------------
1 | /.DS_Store
2 |
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-calibration-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-calibration-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-feature-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-feature-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-geo-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-geo-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-io-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-io-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-ip-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-ip-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-jcodec-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-jcodec-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-learning-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-learning-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-recognition-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-recognition-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-sfm-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-sfm-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/BoofCV-visualize-0.23.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/BoofCV-visualize-0.23.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/core-0.29.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/core-0.29.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/ddogleg-0.9.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/ddogleg-0.9.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/dense64-0.29.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/dense64-0.29.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/equation-0.29.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/equation-0.29.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/fastcast-3.0.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/fastcast-3.0.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/fst-2.42-onejar.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/fst-2.42-onejar.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/georegression-0.10.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/georegression-0.10.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/gson-2.8.5.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/gson-2.8.5.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/http.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/http.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/jna-4.2.2.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/jna-4.2.2.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/jna-platform-4.2.2.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/jna-platform-4.2.2.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/jnaerator-runtime-0.13.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/jnaerator-runtime-0.13.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/mavcomm.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/mavcomm.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/mavmq.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/mavmq.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/simple-0.29.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/simple-0.29.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/xmlpull-1.1.3.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/xmlpull-1.1.3.1.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/xpp3_min-1.1.4c.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/xpp3_min-1.1.4c.jar
--------------------------------------------------------------------------------
/MAVSlam/lib/xstream-1.4.7.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/lib/xstream-1.4.7.jar
--------------------------------------------------------------------------------
/MAVSlam/microslam.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/microslam.png
--------------------------------------------------------------------------------
/MAVSlam/msp.properties:
--------------------------------------------------------------------------------
1 | #Sat, 17 Feb 2018 08:39:55 +0100
2 | #Build
3 | build=638
4 |
5 | # Vision general settings
6 | vision_enabled=true
7 | vision_debug=true
8 |
9 | vision_heading_init=false
10 | vision_highres=false
11 |
12 | vision_min_quality=30
13 |
14 | vision_pub_pos_xy=true
15 | vision_pub_pos_z=true
16 |
17 | vision_pub_speed_xy=true
18 | vision_pub_speed_z=true
19 |
20 | vision_detector_cycle=100
21 |
22 |
23 | # DirectDepthDetector
24 |
25 |
26 |
27 | # FeatureDetector
28 |
29 | feature_max_distance=2.50
30 | feature_min_altitude=0.30
31 |
32 | #SLAM Grid
33 |
34 | slam_publish_microslam=true
35 |
36 | #Autopilot
37 | autopilot_forget_map=true
38 |
--------------------------------------------------------------------------------
/MAVSlam/native/librealsense.dylib:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/native/librealsense.dylib
--------------------------------------------------------------------------------
/MAVSlam/native/librealsense.so:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/native/librealsense.so
--------------------------------------------------------------------------------
/MAVSlam/obstacle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/obstacle.png
--------------------------------------------------------------------------------
/MAVSlam/screenshot6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/screenshot6.png
--------------------------------------------------------------------------------
/MAVSlam/src/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ecmnet/MAVSlam/3049d30bc7d0fba3ec6130f172e72bcb64475142/MAVSlam/src/.DS_Store
--------------------------------------------------------------------------------
/MAVSlam/src/com/.gitignore:
--------------------------------------------------------------------------------
1 | /.DS_Store
2 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/.gitignore:
--------------------------------------------------------------------------------
1 | /.DS_Store
2 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/dev/CoVarianceEjML.java:
--------------------------------------------------------------------------------
1 | package com.comino.dev;
2 |
3 | import org.ejml.simple.SimpleMatrix;
4 |
5 | public class CoVarianceEjML {
6 |
7 | public static void main(String[] args){
8 |
9 | double data[][] = new double[][]{
10 | { 90, 60, 90 },
11 | { 90, 90, 30 },
12 | { 60, 60, 60 },
13 | { 60, 60, 90 },
14 | { 30, 30, 30 }
15 | };
16 |
17 | SimpleMatrix X = new SimpleMatrix(data);
18 | int n = X.numRows();
19 | SimpleMatrix Xt = X.transpose();
20 | int m = Xt.numRows();
21 |
22 | // Means:
23 | SimpleMatrix x = new SimpleMatrix(m, 1);
24 | for(int r=0; r c){
34 | S.set(r, c, S.get(c, r));
35 | } else {
36 | double cov = Xt.extractVector(true, r).minus( x.get((r), 0) ).dot(Xt.extractVector(true, c).minus( x.get((c), 0) ).transpose());
37 | S.set(r, c, (cov / n));
38 | }
39 | }
40 | }
41 | // System.out.println(S);
42 |
43 | // Plotting:
44 | for(int r=0; r {
96 | MouseEvent ev = event;
97 | mouse_x = (int)ev.getX();
98 | mouse_y = (int)ev.getY();
99 | });
100 |
101 |
102 | // RealSenseInfo info = new RealSenseInfo(320,240, RealSenseInfo.MODE_RGB);
103 | RealSenseInfo info = new RealSenseInfo(640,480, RealSenseInfo.MODE_RGB);
104 |
105 | try {
106 |
107 | realsense = new StreamRealSenseVisDepth(0,info);
108 |
109 | } catch(Exception e) {
110 | System.out.println("REALSENSE:"+e.getMessage());
111 | return;
112 | }
113 |
114 | mouse_x = info.width/2;
115 | mouse_y = info.height/2;
116 |
117 | primaryStage.setScene(new Scene(root, info.width,info.height));
118 | primaryStage.show();
119 |
120 |
121 | PkltConfig configKlt = new PkltConfig();
122 | configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
123 | configKlt.templateRadius = 3;
124 |
125 | PointTrackerTwoPass tracker =
126 | FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(900, 2, 1),
127 | GrayU8.class, GrayS16.class);
128 |
129 | DepthSparse3D sparseDepth = new DepthSparse3D.I(1e-3);
130 |
131 | // declares the algorithm
132 | MAVDepthVisualOdometry visualOdometry =
133 | FactoryMAVOdometry.depthPnP(1.2, 120, 2, 200, 50, true,
134 | sparseDepth, tracker, GrayU8.class, GrayU16.class);
135 |
136 | visualOdometry.setCalibration(realsense.getIntrinsics(),new DoNothingPixelTransform_F32());
137 |
138 |
139 | output = new BufferedImage(info.width, info.height, BufferedImage.TYPE_3BYTE_BGR);
140 | wirgb = new WritableImage(info.width, info.height);
141 | ivrgb.setImage(wirgb);
142 |
143 |
144 | realsense.registerListener(new Listener() {
145 |
146 | int fps; float mouse_depth; float md; int mc; int mf=0; int fpm;
147 |
148 | @Override
149 | public void process(Planar rgb, GrayU16 depth, long timeRgb, long timeDepth) {
150 |
151 |
152 | if((System.currentTimeMillis() - tms) > 250) {
153 | tms = System.currentTimeMillis();
154 | if(mf>0)
155 | fps = fpm/mf;
156 | if(mc>0)
157 | mouse_depth = md / mc;
158 | mc = 0; md = 0; mf=0; fpm=0;
159 | }
160 | mf++;
161 | fpm += (int)(1f/((timeRgb - oldTimeDepth)/1000f)+0.5f);
162 | oldTimeDepth = timeRgb;
163 |
164 | if( !visualOdometry.process(rgb.getBand(0),depth) ) {
165 | bus1.writeObject(position);
166 | System.out.println("VO Failed!");
167 | visualOdometry.reset();
168 | return;
169 | }
170 |
171 |
172 |
173 | Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
174 | Vector3D_F64 T = leftToWorld.getT();
175 |
176 |
177 | AccessPointTracks3D points = (AccessPointTracks3D)visualOdometry;
178 | ConvertBufferedImage.convertTo(rgb, output, false);
179 |
180 | Graphics c = output.getGraphics();
181 |
182 | int count = 0; float total = 0; int dx=0, dy=0; int dist=999;
183 | int x, y; int index = -1;
184 |
185 | for( int i = 0; i < points.getAllTracks().size(); i++ ) {
186 | if(points.isInlier(i)) {
187 |
188 |
189 | c.setColor(Color.BLUE);
190 |
191 | x = (int)points.getAllTracks().get(i).x;
192 | y = (int)points.getAllTracks().get(i).y;
193 |
194 |
195 |
196 | int d = depth.get(x,y);
197 | if(d > 0) {
198 |
199 |
200 | int di = (int)Math.sqrt((x-mouse_x)*(x-mouse_x) + (y-mouse_y)*(y-mouse_y));
201 | if(di < dist) {
202 | index = i;
203 | dx = x;
204 | dy = y;
205 | dist = di;
206 | }
207 | total++;
208 | if(d<500) {
209 | c.setColor(Color.RED); count++;
210 | }
211 | c.drawRect(x,y, 1, 1);
212 | }
213 | }
214 | }
215 |
216 | if(depth!=null) {
217 |
218 | System.out.println(visualOdometry.getPoint3DFromPixel(160, 120));
219 | // if(index > -1)
220 | // System.out.println(visualOdometry.getTrackLocation(index));
221 |
222 | mc++;
223 | md = md + depth.get(dx,dy) / 1000f;
224 | c.setColor(Color.GREEN);
225 | c.drawOval(dx-3,dy-3, 6, 6);
226 | }
227 |
228 |
229 | c.setColor(Color.CYAN);
230 | c.drawString("Fps:"+fps, 10, 20);
231 | c.drawString(String.format("Loc: %4.2f %4.2f %4.2f", T.x, T.y, T.z), 10, info.height-10);
232 | c.drawString(String.format("Depth: %3.2f", mouse_depth), info.width-85, info.height-10);
233 |
234 | position.x = T.x;
235 | position.y = T.y;
236 | position.z = T.z;
237 |
238 | position.tms = timeRgb;
239 |
240 | bus1.writeObject(position);
241 |
242 | if((count / total)>0.6f) {
243 | c.setColor(Color.RED);
244 | c.drawString("WARNING!", info.width-70, 20);
245 | }
246 | c.dispose();
247 |
248 | Platform.runLater(() -> {
249 | SwingFXUtils.toFXImage(output, wirgb);
250 | });
251 | }
252 |
253 | }).start();
254 |
255 | }
256 |
257 | @Override
258 | public void stop() throws Exception {
259 | realsense.stop();
260 | bus1.release();
261 | super.stop();
262 | }
263 |
264 | public static void main(String[] args) {
265 | launch(args);
266 | }
267 |
268 | }
269 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/realsense/boofcv/StreamRealSenseTestVIO.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.realsense.boofcv;
35 |
36 | import java.awt.Color;
37 | import java.awt.Graphics;
38 | import java.awt.image.BufferedImage;
39 |
40 | import com.comino.mq.tests.OptPos;
41 | import com.comino.msp.utils.MSPMathUtils;
42 | import com.comino.realsense.boofcv.StreamRealSenseVisDepth.Listener;
43 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
44 | import com.comino.slam.boofcv.sfm.DepthSparse3D;
45 | import com.comino.slam.boofcv.vio.FactoryMAVOdometryVIO;
46 | import com.comino.slam.boofcv.vio.tracker.FactoryMAVPointTrackerTwoPassVIO;
47 |
48 | import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
49 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
50 | import boofcv.abst.sfm.AccessPointTracks3D;
51 | import boofcv.alg.distort.DoNothingPixelTransform_F32;
52 | import boofcv.alg.tracker.klt.PkltConfig;
53 | import boofcv.core.image.ConvertImage;
54 | import boofcv.io.image.ConvertBufferedImage;
55 | import boofcv.struct.image.GrayS16;
56 | import boofcv.struct.image.GrayU16;
57 | import boofcv.struct.image.GrayU8;
58 | import boofcv.struct.image.Planar;
59 | import georegression.geometry.ConvertRotation3D_F64;
60 | import georegression.struct.EulerType;
61 | import georegression.struct.point.Point3D_F64;
62 | import georegression.struct.point.Vector3D_F64;
63 | import georegression.struct.se.Se3_F64;
64 | import javafx.application.Application;
65 | import javafx.application.Platform;
66 | import javafx.embed.swing.SwingFXUtils;
67 | import javafx.scene.Scene;
68 | import javafx.scene.image.ImageView;
69 | import javafx.scene.image.WritableImage;
70 | import javafx.scene.input.MouseEvent;
71 | import javafx.scene.layout.FlowPane;
72 | import javafx.stage.Stage;
73 |
74 | public class StreamRealSenseTestVIO extends Application {
75 |
76 | private static final float INLIER_PIXEL_TOL = 1.5f;
77 | private static final int MAXTRACKS = 100;
78 | private static final int KLT_RADIUS = 3;
79 | private static final float KLT_THRESHOLD = 1f;
80 | private static final int RANSAC_ITERATIONS = 150;
81 | private static final int RETIRE_THRESHOLD = 2;
82 | private static final int ADD_THRESHOLD = 70;
83 | private static final int REFINE_ITERATIONS = 80;
84 |
85 | private BufferedImage output;
86 | private final ImageView ivrgb = new ImageView();
87 | private WritableImage wirgb;
88 |
89 | private StreamRealSenseVisDepth realsense;
90 |
91 | private GrayU8 gray = null;
92 | private Se3_F64 pose = new Se3_F64();
93 | private double[] visAttitude = new double[3];
94 |
95 | private long oldTimeDepth=0;
96 | private long tms = 0;
97 |
98 | private int mouse_x;
99 | private int mouse_y;
100 |
101 | private OptPos position = new OptPos();
102 |
	/**
	 * Builds the UI, opens the RealSense camera, configures the KLT-based
	 * depth visual odometry and registers a frame listener that runs the
	 * odometry and renders a HUD (FPS, quality, position, attitude, depth
	 * at the cursor) on every frame.
	 */
	@Override
	public void start(Stage primaryStage) {
		primaryStage.setTitle("BoofCV RealSense Demo");

		FlowPane root = new FlowPane();

		root.getChildren().add(ivrgb);

		// Remember the cursor position; the inlier feature closest to it is
		// highlighted and its depth shown in the HUD.
		ivrgb.setOnMouseMoved(event -> {
			MouseEvent ev = event;
			mouse_x = (int)ev.getX();
			mouse_y = (int)ev.getY();
		});

		// Capture geometry; 320x240 keeps processing fast enough for live use
		RealSenseInfo info = new RealSenseInfo(320,240, RealSenseInfo.MODE_RGB);
		// RealSenseInfo info = new RealSenseInfo(640,480, RealSenseInfo.MODE_RGB);

		gray = new GrayU8(info.width,info.height);

		try {

			realsense = new StreamRealSenseVisDepth(0,info);

		} catch(Exception e) {
			// No camera attached: report and leave the UI empty
			System.out.println("REALSENSE:"+e.getMessage());
			return;
		}

		// Start the depth read-out at the image center
		mouse_x = info.width/2;
		mouse_y = info.height/2;

		primaryStage.setScene(new Scene(root, info.width,info.height));
		primaryStage.show();

		// KLT feature tracker configuration (4-level image pyramid)
		PkltConfig configKlt = new PkltConfig();
		configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
		configKlt.templateRadius = 3;

		PointTrackerTwoPass tracker =
				FactoryMAVPointTrackerTwoPassVIO.klt(configKlt, new ConfigGeneralDetector(MAXTRACKS, KLT_RADIUS, KLT_THRESHOLD),
						GrayU8.class, GrayS16.class);

		// Scale factor 1e-3 converts raw depth units to meters
		// (presumably the sensor reports millimetres — TODO confirm)
		DepthSparse3D sparseDepth = new DepthSparse3D.I(1e-3);

		// declares the algorithm
		MAVDepthVisualOdometry visualOdometry =
				FactoryMAVOdometryVIO.depthPnP(INLIER_PIXEL_TOL, ADD_THRESHOLD, RETIRE_THRESHOLD, RANSAC_ITERATIONS, REFINE_ITERATIONS, true,
						new Point3D_F64(),
						sparseDepth, tracker, GrayU8.class, GrayU16.class);

		visualOdometry.setCalibration(realsense.getIntrinsics(),new DoNothingPixelTransform_F32());

		// Make sure the camera is stopped when the JVM exits
		Runtime.getRuntime().addShutdownHook(new Thread() {
			public void run() {
				realsense.stop();
			}
		});

		output = new BufferedImage(info.width, info.height, BufferedImage.TYPE_INT_RGB);
		wirgb = new WritableImage(info.width, info.height);
		ivrgb.setImage(wirgb);

		visualOdometry.reset();

		// Per-frame processing: run the odometry and render the HUD
		realsense.registerListener(new Listener() {

			// Averaging accumulators, reset every 250ms:
			// fps/fpm/mf -> frame rate; mouse_depth/md/mc -> depth at cursor
			int fps; float mouse_depth; float md; int mc; int mf=0; int fpm;

			@Override
			public void process(Planar rgb, GrayU16 depth, long timeRgb, long timeDepth) {

				// Publish averaged FPS and depth read-outs four times a second
				if((System.currentTimeMillis() - tms) > 250) {
					tms = System.currentTimeMillis();
					if(mf>0)
						fps = fpm/mf;
					if(mc>0)
						mouse_depth = md / mc;
					mc = 0; md = 0; mf=0; fpm=0;
				}
				mf++;
				// Instantaneous rate from the RGB timestamp delta, rounded
				fpm += (int)(1f/((timeRgb - oldTimeDepth)/1000f)+0.5f);
				oldTimeDepth = timeRgb;

				// The odometry works on a grayscale image
				ConvertImage.average(rgb, gray);

				if( !visualOdometry.process(gray, depth, pose) ) {
					// Tracking lost: restart the odometry from scratch
					visualOdometry.reset();
					System.err.println("No motion estimate");
					return;
				}

				pose.set(visualOdometry.getCameraToWorld());
				Vector3D_F64 T = pose.getT();

				ConvertRotation3D_F64.matrixToEuler(pose.getRotation(), EulerType.ZXY, visAttitude);

				AccessPointTracks3D points = (AccessPointTracks3D)visualOdometry;
				ConvertBufferedImage.convertTo_U8(rgb, output, false);

				Graphics c = output.getGraphics();

				// (dx,dy): inlier feature closest to the mouse cursor
				int count = 0; float total = 0; int dx=0, dy=0; int dist=999;
				int x, y; int index = -1;

				for( int i = 0; i < points.getAllTracks().size(); i++ ) {
					if(points.isInlier(i)) {

						c.setColor(Color.WHITE);

						x = (int)points.getAllTracks().get(i).x;
						y = (int)points.getAllTracks().get(i).y;

						int d = depth.get(x,y);
						if(d > 0) {

							// Pixel distance of this feature to the cursor
							int di = (int)Math.sqrt((x-mouse_x)*(x-mouse_x) + (y-mouse_y)*(y-mouse_y));
							if(di < dist) {
								index = i;
								dx = x;
								dy = y;
								dist = di;
							}
							total++;
							// Close features (raw depth < 500) are drawn red
							if(d<500) {
								c.setColor(Color.RED); count++;
							}
							c.drawRect(x,y, 1, 1);
						}
					}
				}

				if(depth!=null) {
					// if(index > -1)
					//	 System.out.println(visualOdometry.getTrackLocation(index));

					// NOTE(review): when no inlier was found, dx/dy are still (0,0),
					// so the depth at pixel (0,0) is averaged in — confirm intended.
					mc++;
					md = md + depth.get(dx,dy) / 1000f;
					c.setColor(Color.WHITE);
					c.drawOval(dx-3,dy-3, 6, 6);
				}

				// HUD: frame rate, quality, position, depth and attitude
				c.setColor(Color.CYAN);
				c.drawString("Fps:"+fps, 10, 20);
				c.drawString(String.format("Quality: %3.0f",visualOdometry.getQuality()), info.width-85, 20);
				c.drawString(String.format("Loc: %4.2f %4.2f %4.2f", T.x, T.y, T.z), 10, info.height-10);
				c.drawString(String.format("Depth: %3.2f", mouse_depth), info.width-85, info.height-10);
				c.drawString(String.format("ROLL: %3.0f°", MSPMathUtils.fromRad(visAttitude[0])), info.width-85, info.height-55);
				c.drawString(String.format("PITCH: %3.0f°", MSPMathUtils.fromRad(visAttitude[1])), info.width-85, info.height-40);
				c.drawString(String.format("YAW: %3.0f°", MSPMathUtils.fromRad(visAttitude[2])), info.width-85, info.height-25);

				// Publish the current position estimate
				position.x = T.x;
				position.y = T.y;
				position.z = T.z;

				position.tms = timeRgb;

				// if((count / total)>0.6f) {
				//	 c.setColor(Color.RED);
				//	 c.drawString("WARNING!", info.width-70, 20);
				// }
				c.dispose();

				// Image updates must happen on the JavaFX application thread
				Platform.runLater(() -> {
					SwingFXUtils.toFXImage(output, wirgb);
				});
			}

		}).start();

	}
286 |
287 | @Override
288 | public void stop() throws Exception {
289 | realsense.stop();
290 | super.stop();
291 | }
292 |
293 | public static void main(String[] args) {
294 |
295 |
296 | launch(args);
297 | }
298 |
299 | }
300 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/realsense/boofcv/StreamRealSenseVisDepth.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 |
35 | package com.comino.realsense.boofcv;
36 |
37 | import java.util.ArrayList;
38 | import java.util.List;
39 |
40 | import com.comino.librealsense.wrapper.LibRealSenseIntrinsics;
41 | import com.comino.librealsense.wrapper.LibRealSenseUtils;
42 | import com.comino.librealsense.wrapper.LibRealSenseWrapper;
43 | import com.comino.librealsense.wrapper.LibRealSenseWrapper.rs_format;
44 | import com.comino.librealsense.wrapper.LibRealSenseWrapper.rs_intrinsics;
45 | import com.comino.librealsense.wrapper.LibRealSenseWrapper.rs_option;
46 | import com.comino.librealsense.wrapper.LibRealSenseWrapper.rs_stream;
47 | import com.sun.jna.Pointer;
48 | import com.sun.jna.ptr.PointerByReference;
49 |
50 | import boofcv.struct.calib.IntrinsicParameters;
51 | import boofcv.struct.image.GrayU16;
52 | import boofcv.struct.image.GrayU8;
53 | import boofcv.struct.image.Planar;
54 |
55 | public class StreamRealSenseVisDepth {
56 |
57 | private static final long MAX_RATE = 20;
58 |
59 | // time out used in some places
60 | private long timeout=10000;
61 |
62 |
63 | private List listeners;
64 |
65 | // image with depth information
66 | private GrayU16 depth = new GrayU16(1,1);
67 |
68 | private Planar rgb = new Planar(GrayU8.class,1,1,3);
69 |
70 |
71 |
72 | private CombineThread thread;
73 |
74 | private PointerByReference error= new PointerByReference();
75 | private PointerByReference ctx;
76 |
77 | private PointerByReference dev;
78 |
79 | private RealSenseInfo info;
80 | private float scale;
81 |
82 | private LibRealSenseIntrinsics intrinsics;
83 |
84 | public StreamRealSenseVisDepth(int devno , RealSenseInfo info)
85 | {
86 |
87 | ctx = LibRealSenseWrapper.INSTANCE.rs_create_context(11201, error);
88 | if(LibRealSenseWrapper.INSTANCE.rs_get_device_count(ctx, error)<1)
89 | ctx = LibRealSenseWrapper.INSTANCE.rs_create_context(4, error);
90 |
91 | if(LibRealSenseWrapper.INSTANCE.rs_get_device_count(ctx, error)<1) {
92 | LibRealSenseWrapper.INSTANCE.rs_delete_context(ctx, error);
93 | throw new IllegalArgumentException("No device found");
94 | }
95 | this.listeners = new ArrayList();
96 |
97 | this.info = info;
98 |
99 | dev = LibRealSenseWrapper.INSTANCE.rs_get_device(ctx, devno, error);
100 | LibRealSenseWrapper.INSTANCE.rs_wait_for_frames(dev, error);
101 |
102 |
103 | Pointer ch = LibRealSenseWrapper.INSTANCE.rs_get_device_firmware_version(dev, error);
104 | System.out.println("Firmware version: "+ch.getString(0));
105 |
106 | LibRealSenseUtils.rs_apply_depth_control_preset(dev, LibRealSenseUtils.PRESET_DEPTH_HIGH);
107 | LibRealSenseWrapper.INSTANCE.rs_set_device_option(dev, rs_option.RS_OPTION_COLOR_ENABLE_AUTO_WHITE_BALANCE, 0, error);
108 | LibRealSenseWrapper.INSTANCE.rs_set_device_option(dev, rs_option.RS_OPTION_R200_EMITTER_ENABLED, 1, error);
109 | LibRealSenseWrapper.INSTANCE.rs_set_device_option(dev, rs_option.RS_OPTION_COLOR_ENABLE_AUTO_EXPOSURE, 1, error);
110 | LibRealSenseWrapper.INSTANCE.rs_set_device_option(dev, rs_option.RS_OPTION_R200_LR_AUTO_EXPOSURE_ENABLED,1 , error);
111 | LibRealSenseWrapper.INSTANCE.rs_set_device_option(dev, rs_option.RS_OPTION_COLOR_BACKLIGHT_COMPENSATION, 0, error);
112 |
113 |
114 | LibRealSenseWrapper.INSTANCE.rs_enable_stream(dev, rs_stream.RS_STREAM_COLOR,
115 | info.width,info.height,rs_format.RS_FORMAT_RGB8, info.framerate, error);
116 |
117 | if(info.mode==RealSenseInfo.MODE_INFRARED) {
118 | LibRealSenseWrapper.INSTANCE.rs_enable_stream(dev, rs_stream.RS_STREAM_INFRARED2_ALIGNED_TO_DEPTH,
119 | info.width,info.height,rs_format.RS_FORMAT_ANY, info.framerate, error);
120 | }
121 |
122 | LibRealSenseWrapper.INSTANCE.rs_enable_stream(dev, rs_stream.RS_STREAM_DEPTH,
123 | info.width,info.height,rs_format.RS_FORMAT_Z16, info.framerate, error);
124 |
125 |
126 | scale = LibRealSenseWrapper.INSTANCE.rs_get_device_depth_scale(dev, error);
127 |
128 | rs_intrinsics rs_int= new rs_intrinsics();
129 | LibRealSenseWrapper.INSTANCE.rs_get_stream_intrinsics(dev, rs_stream.RS_STREAM_RECTIFIED_COLOR, rs_int, error);
130 | intrinsics = new LibRealSenseIntrinsics(rs_int);
131 |
132 | System.out.println("Depth scale: "+scale+" Intrinsics: "+intrinsics.toString());
133 |
134 | depth.reshape(info.width,info.height);
135 | rgb.reshape(info.width,info.height);
136 |
137 | }
138 |
139 | public StreamRealSenseVisDepth registerListener(Listener listener) {
140 | listeners.add(listener);
141 | return this;
142 | }
143 |
144 |
145 | public void start() {
146 | LibRealSenseWrapper.INSTANCE.rs_start_device(dev, error);
147 |
148 | thread = new CombineThread();
149 | thread.start();
150 | thread.setName("VIO");
151 | thread.setPriority(Thread.NORM_PRIORITY);
152 |
153 | while(!thread.running)
154 | Thread.yield();
155 | }
156 |
157 | public void stop() {
158 | thread.requestStop = true;
159 | long start = System.currentTimeMillis()+timeout;
160 | while( start > System.currentTimeMillis() && thread.running )
161 | Thread.yield();
162 | LibRealSenseWrapper.INSTANCE.rs_stop_device(dev, error);
163 | }
164 |
165 |
166 |
167 | public IntrinsicParameters getIntrinsics() {
168 | return intrinsics;
169 | }
170 |
171 |
172 | private class CombineThread extends Thread {
173 |
174 | public volatile boolean running = false;
175 | public volatile boolean requestStop = false;
176 |
177 | public volatile Pointer depthData;
178 | public volatile Pointer rgbData;
179 |
180 | private long timeDepth;
181 | private long timeRgb;
182 |
183 | private long tms_offset_depth = 0;
184 | private long tms_offset_rgb = 0;
185 |
186 | @Override
187 | public void run() {
188 | running = true; long tms; long wait;
189 |
190 | while( !requestStop ) {
191 |
192 | try {
193 |
194 | tms = System.currentTimeMillis();
195 |
196 | LibRealSenseWrapper.INSTANCE.rs_wait_for_frames(dev, error);
197 |
198 | depthData = LibRealSenseWrapper.INSTANCE.rs_get_frame_data(dev,
199 | rs_stream.RS_STREAM_DEPTH_ALIGNED_TO_RECTIFIED_COLOR, error);
200 | timeDepth = (long)LibRealSenseWrapper.INSTANCE.rs_get_frame_timestamp(dev,
201 | rs_stream.RS_STREAM_DEPTH_ALIGNED_TO_RECTIFIED_COLOR, error);
202 | if(tms_offset_depth==0)
203 | tms_offset_depth = System.currentTimeMillis() - timeDepth;
204 |
205 | if(depthData!=null)
206 | bufferDepthToU16(depthData,depth);
207 |
208 | switch(info.mode) {
209 | case RealSenseInfo.MODE_RGB:
210 | synchronized ( this ) {
211 | timeRgb = (long)LibRealSenseWrapper.INSTANCE.rs_get_frame_timestamp(dev,
212 | rs_stream.RS_STREAM_RECTIFIED_COLOR, error);
213 | if(tms_offset_rgb==0)
214 | tms_offset_rgb = System.currentTimeMillis() - timeRgb;
215 | rgbData = LibRealSenseWrapper.INSTANCE.rs_get_frame_data(dev,
216 | rs_stream.RS_STREAM_RECTIFIED_COLOR, error);
217 | if(rgbData!=null)
218 | bufferRgbToMsU8(rgbData,rgb);
219 | }
220 | break;
221 | case RealSenseInfo.MODE_INFRARED:
222 | synchronized ( this ) {
223 | timeRgb = (long)LibRealSenseWrapper.INSTANCE.rs_get_frame_timestamp(dev,
224 | rs_stream.RS_STREAM_INFRARED2_ALIGNED_TO_DEPTH, error);
225 | if(tms_offset_rgb==0)
226 | tms_offset_rgb = System.currentTimeMillis() - timeRgb;
227 | rgbData = LibRealSenseWrapper.INSTANCE.rs_get_frame_data(dev,
228 | rs_stream.RS_STREAM_INFRARED2_ALIGNED_TO_DEPTH, error);
229 | if(rgbData!=null)
230 | bufferGrayToMsU8(rgbData,rgb);
231 | }
232 | break;
233 | }
234 |
235 | if(listeners.size()>0) {
236 | for(Listener listener : listeners)
237 | listener.process(rgb, depth, timeRgb+tms_offset_rgb, timeDepth+tms_offset_depth);
238 | }
239 |
240 | // Limit maximum frame rate to MAX_RATE
241 | wait = MAX_RATE - ( System.currentTimeMillis() - tms + 1 );
242 | if(wait>0)
243 | Thread.sleep(wait);
244 |
245 | } catch(Exception e) {
246 | e.printStackTrace();
247 | }
248 | }
249 |
250 | running = false;
251 | }
252 | }
253 |
254 | public void bufferGrayToU8(Pointer input , GrayU8 output ) {
255 | byte[] inp = input.getByteArray(0, output.width * output.height );
256 | System.arraycopy(inp, 0, output.data, 0, output.width * output.height);
257 | }
258 |
259 |
260 | public void bufferDepthToU16(Pointer input , GrayU16 output ) {
261 | short[] inp = input.getShortArray(0, output.width * output.height);
262 | // System.arraycopy(inp, 0, output.data, 0, output.width * output.height);
263 |
264 | // Upside down mounting
265 | indexIn = output.width * output.height -1;
266 | for( y = 0; y < output.height; y++ ) {
267 | indexOut = output.startIndex + y*output.stride;
268 | for( x = 0; x < output.width; x++ , indexOut++ ) {
269 | output.data[indexOut] = inp[indexIn--];
270 | }
271 | }
272 | }
273 |
274 |
275 | private int x,y,indexOut, indexIn;
276 | private byte[] input;
277 |
278 | public void bufferRgbToMsU8( Pointer inp , Planar output ) {
279 |
280 | input = inp.getByteArray(0, output.width * output.height * 3);
281 |
282 | // Upside down mounting
283 | indexIn = output.width * output.height * 3 -1;
284 | for( y = 0; y < output.height; y++ ) {
285 | indexOut = output.startIndex + y*output.stride;
286 | for( x = 0; x < output.width; x++ , indexOut++ ) {
287 | output.getBand(0).data[indexOut] = input[indexIn--];
288 | output.getBand(1).data[indexOut] = input[indexIn--];
289 | output.getBand(2).data[indexOut] = input[indexIn--];
290 | }
291 | }
292 | }
293 |
294 | public void bufferGrayToMsU8( Pointer inp , Planar output ) {
295 |
296 | input = inp.getByteArray(0, output.width * output.height);
297 |
298 | // Upside down mounting
299 | indexIn = output.width * output.height -1;
300 | for(y = 0; y < output.height; y++ ) {
301 | int indexOut = output.startIndex + y*output.stride;
302 | for(x = 0; x < output.width; x++ , indexOut++ ) {
303 | output.getBand(0).data[indexOut] = input[indexIn];
304 | output.getBand(1).data[indexOut] = input[indexIn];
305 | output.getBand(2).data[indexOut] = input[indexIn--];
306 | }
307 | }
308 | }
309 |
310 | public interface Listener {
311 | public void process(Planar rgb, GrayU16 depth, long timeRgb, long timeDepth);
312 | }
313 | }
314 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/server/mjpeg/IMJPEGOverlayListener.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 |
35 | package com.comino.server.mjpeg;
36 |
37 | import java.awt.Graphics;
38 |
/**
 * Callback for painting additional information (text, markers, ...) onto a
 * video frame before it is streamed out.
 */
public interface IMJPEGOverlayListener {

	/**
	 * Draws the overlay into the graphics context of the current frame.
	 *
	 * @param ctx graphics context of the frame being streamed
	 */
	public void processOverlay(Graphics ctx);

}
44 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/server/mjpeg/IVisualStreamHandler.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.server.mjpeg;
35 |
36 | import com.comino.msp.model.DataModel;
37 |
38 | public interface IVisualStreamHandler {
39 |
40 | public final static int HTTPVIDEO = 0;
41 | public final static int FILE = 1;
42 |
43 | public void addToStream(T image, DataModel model, long tms_us);
44 | public void registerOverlayListener(IMJPEGOverlayListener listener);
45 |
46 | }
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/server/mjpeg/impl/HttpMJPEGHandler.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 |
35 | package com.comino.server.mjpeg.impl;
36 |
37 | import java.awt.Graphics2D;
38 | import java.awt.image.BufferedImage;
39 | import java.io.BufferedOutputStream;
40 | import java.io.IOException;
41 | import java.io.OutputStream;
42 | import java.util.ArrayList;
43 | import java.util.List;
44 |
45 | import javax.imageio.ImageIO;
46 |
47 | import com.comino.msp.model.DataModel;
48 | import com.comino.realsense.boofcv.RealSenseInfo;
49 | import com.comino.server.mjpeg.IMJPEGOverlayListener;
50 | import com.comino.server.mjpeg.IVisualStreamHandler;
51 | import com.sun.net.httpserver.HttpExchange;
52 | import com.sun.net.httpserver.HttpHandler;
53 |
54 | import boofcv.io.image.ConvertBufferedImage;
55 | import boofcv.struct.image.GrayU8;
56 | import boofcv.struct.image.Planar;
57 |
58 | public class HttpMJPEGHandler implements HttpHandler, IVisualStreamHandler {
59 |
60 | private static final int MAX_VIDEO_RATE_MS = 40;
61 |
62 | private List listeners = null;
63 | private BufferedImage image = null;
64 | private DataModel model = null;
65 | private Graphics2D ctx;
66 |
67 | private T input_image;
68 |
69 | private long last_image_tms = 0;
70 |
	/**
	 * Creates the handler with a reusable frame buffer matching the camera
	 * geometry.
	 *
	 * @param info  stream geometry (width/height)
	 * @param model vehicle data model (stored for later use — presumably read
	 *              when frames are added; the consumer is outside this view)
	 */
	public HttpMJPEGHandler(RealSenseInfo info, DataModel model) {
		this.model = model;
		this.listeners = new ArrayList();
		this.image = new BufferedImage(info.width, info.height, BufferedImage.TYPE_3BYTE_BGR);
		this.ctx = image.createGraphics();

		// Encode JPEG frames in memory, not via ImageIO's disk cache
		ImageIO.setUseCache(false);
	}
80 |
81 | @Override @SuppressWarnings("unchecked")
82 | public void handle(HttpExchange he) throws IOException {
83 |
84 | he.getResponseHeaders().add("content-type","multipart/x-mixed-replace; boundary=--BoundaryString");
85 | he.sendResponseHeaders(200, 0);
86 | OutputStream os = new BufferedOutputStream(he.getResponseBody());
87 | while(true) {
88 | os.write(("--BoundaryString\r\nContent-type:image/jpeg content-length:1\r\n\r\n").getBytes());
89 |
90 | try {
91 |
92 | synchronized(this) {
93 |
94 | if(input_image==null)
95 | wait();
96 | }
97 |
98 | if(input_image instanceof Planar) {
99 | ConvertBufferedImage.convertTo_U8((Planar)input_image, image, true);
100 | }
101 | else if(input_image instanceof GrayU8)
102 | ConvertBufferedImage.convertTo((GrayU8)input_image, image, true);
103 |
104 | if(listeners.size()>0) {
105 | for(IMJPEGOverlayListener listener : listeners)
106 | listener.processOverlay(ctx);
107 | }
108 |
109 | ImageIO.write(image, "jpg", os );
110 | os.write("\r\n\r\n".getBytes());
111 |
112 |
113 |
114 | input_image = null;
115 |
116 | } catch (Exception e) { }
117 | }
118 | }
119 |
	/** Adds an overlay painter that will be invoked for every streamed frame. */
	@Override
	public void registerOverlayListener(IMJPEGOverlayListener listener) {
		this.listeners.add(listener);
	}
124 |
125 | @Override
126 | public void addToStream(T input, DataModel model, long tms_us) {
127 |
128 | if((System.currentTimeMillis()-last_image_tms)
10 | extends DepthVisualOdometry {
11 |
12 | public int getInlierCount();
13 |
14 | public Point3D_F64 getTrackLocation(int index);
15 |
16 | public double getQuality();
17 |
18 | public void reset(Se3_F64 initialState);
19 |
20 | public boolean process(Vis visual, Depth depth, Se3_F64 currentAttitude);
21 |
22 | public Point3D_F64 getPoint3DFromPixel(int pixelx, int pixely);
23 |
24 | }
25 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/sfm/DepthSparse3D.java:
--------------------------------------------------------------------------------
1 | package com.comino.slam.boofcv.sfm;
2 |
3 | /*
4 | * Copyright (c) 2011-2016, Peter Abeles. All Rights Reserved.
5 | *
6 | * This file is part of BoofCV (http://boofcv.org).
7 | *
8 | * Licensed under the Apache License, Version 2.0 (the "License");
9 | * you may not use this file except in compliance with the License.
10 | * You may obtain a copy of the License at
11 | *
12 | * http://www.apache.org/licenses/LICENSE-2.0
13 | *
14 | * Unless required by applicable law or agreed to in writing, software
15 | * distributed under the License is distributed on an "AS IS" BASIS,
16 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | * See the License for the specific language governing permissions and
18 | * limitations under the License.
19 | */
20 |
21 | import boofcv.alg.distort.radtan.RemoveRadialPtoN_F64;
22 | import boofcv.struct.calib.IntrinsicParameters;
23 | import boofcv.struct.distort.PixelTransform_F32;
24 | import boofcv.struct.image.GrayF32;
25 | import boofcv.struct.image.GrayI;
26 | import boofcv.struct.image.ImageGray;
27 | import georegression.struct.point.Point2D_F64;
28 | import georegression.struct.point.Point3D_F64;
29 |
30 | /**
31 | * Computes the 3D coordinate a point in a visual camera given a depth image. The visual camera is a standard camera
32 | * while the depth camera contains the depth (value along z-axis) of objects inside its field of view. The
33 | * Kinect (structured light) and flash ladar (time of flight) are examples of sensors which could use this class.
34 | * The z-axis is defined to be pointing straight out of the visual camera and both depth and visual cameras are
35 | * assumed to be parallel with identical pointing vectors for the z-axis.
36 | *
37 | * A mapping is provided for converting between pixels in the visual camera and the depth camera. This mapping
38 | * is assumed to be fixed with time.
39 | *
40 | * @author Peter Abeles
41 | */
42 | public abstract class DepthSparse3D {
43 |
44 | // Storage for the depth image
45 | protected T depthImage;
46 |
47 | // transform from visual camera pixels to normalized image coordinates
48 | private RemoveRadialPtoN_F64 p2n = new RemoveRadialPtoN_F64();
49 |
50 | // location of point in visual camera coordinate system
51 | private Point3D_F64 worldPt = new Point3D_F64();
52 |
53 | // pixel in normalized image coordinates
54 | private Point2D_F64 norm = new Point2D_F64();
55 |
56 | // transform from visual image coordinate system to depth image coordinate system
57 | private PixelTransform_F32 visualToDepth;
58 |
59 | // scales the values from the depth image
60 | private double depthScale;
61 |
62 | /**
63 | * Configures parameters
64 | *
65 | * @param depthScale Used to change units found in the depth image.
66 | */
67 | public DepthSparse3D(double depthScale) {
68 | this.depthScale = depthScale;
69 | }
70 |
71 | /**
72 | * Configures intrinsic camera parameters
73 | *
74 | * @param paramVisual Intrinsic parameters of visual camera.
75 | * @param visualToDepth Transform from visual to depth camera pixel coordinate systems.
76 | */
77 | public void configure(IntrinsicParameters paramVisual , PixelTransform_F32 visualToDepth ) {
78 | this.visualToDepth = visualToDepth;
79 | p2n.setK(paramVisual.fx,paramVisual.fy,paramVisual.skew,paramVisual.cx,paramVisual.cy).
80 | setDistortion(paramVisual.radial, paramVisual.t1,paramVisual.t2);
81 | }
82 |
83 |
84 | /**
85 | * Sets the depth image. A reference is saved internally.
86 | *
87 | * @param depthImage Image containing depth information.
88 | */
89 | public void setDepthImage(T depthImage) {
90 | this.depthImage = depthImage;
91 | }
92 |
93 | /**
94 | * Given a pixel coordinate in the visual camera, compute the 3D coordinate of that point.
95 | *
96 | * @param x x-coordinate of point in visual camera
97 | * @param y y-coordinate of point in visual camera
98 | * @return true if a 3D point could be computed and false if not
99 | */
100 | public boolean process( int x , int y ) {
101 | visualToDepth.compute(x, y);
102 |
103 | int depthX = (int)visualToDepth.distX;
104 | int depthY = (int)visualToDepth.distY;
105 |
106 | if( depthImage.isInBounds(depthX,depthY) ) {
107 | // get the depth at the specified location
108 | double value = lookupDepth(depthX, depthY);
109 |
110 | // see if its an invalid value
111 | // ecmnet modif: use last value instead
112 | if( value == 0 ) {
113 | return false;
114 |
115 | }
116 |
117 | // convert visual pixel into normalized image coordinate
118 | p2n.compute(x,y,norm);
119 |
120 | // project into 3D space
121 | worldPt.z = value*depthScale;
122 | worldPt.x = worldPt.z*norm.x;
123 | worldPt.y = worldPt.z*norm.y;
124 |
125 | return true;
126 | } else {
127 |
128 | return false;
129 | }
130 | }
131 |
132 | /**
133 | * The found 3D coordinate of the point in the visual camera coordinate system. Is only valid when
134 | * {@link #process(int, int)} returns true.
135 | *
136 | * @return 3D coordinate of point in visual camera coordinate system
137 | */
138 | public Point3D_F64 getWorldPt() {
139 | return worldPt;
140 | }
141 |
	/**
	 * Internal function which looks up the pixel's depth. Depth is defined as the value of the z-coordinate which
	 * is pointing out of the camera. If there is no depth measurement at this location return 0; zero is
	 * treated as the invalid-value marker by {@link #process(int, int)}.
	 *
	 * @param depthX x-coordinate of pixel in depth camera, guaranteed in bounds by the caller
	 * @param depthY y-coordinate of pixel in depth camera, guaranteed in bounds by the caller
	 * @return depth at the specified coordinate
	 */
	protected abstract double lookupDepth(int depthX, int depthY);
151 |
	/**
	 * Implementation for {@link GrayI} (integer depth images).
	 */
	public static class I extends DepthSparse3D {

		public I(double depthScale) {
			super(depthScale);
		}

		// plain get(); the caller has already verified the coordinate is in bounds
		@Override
		protected double lookupDepth(int depthX, int depthY) {
			return depthImage.get(depthX,depthY);
		}
	}
166 |
	/**
	 * Implementation for {@link GrayF32} (floating point depth images).
	 */
	public static class F32 extends DepthSparse3D {

		public F32(double depthScale) {
			super(depthScale);
		}

		// unsafe_get skips bounds checking, which is safe here because
		// process() only calls lookupDepth() after an isInBounds() check
		@Override
		protected double lookupDepth(int depthX, int depthY) {
			return depthImage.unsafe_get(depthX,depthY);
		}
	}
181 | }
182 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/sfm/DepthSparse3D_to_PixelTo3D.java:
--------------------------------------------------------------------------------
1 | package com.comino.slam.boofcv.sfm;
2 |
3 | import boofcv.abst.sfm.ImagePixelTo3D;
4 | import boofcv.struct.image.ImageGray;
5 |
6 | /**
7 | * Wrapper around {@link DepthSparse3D} for {@link ImagePixelTo3D}.
8 | *
9 | * @author Peter Abeles
10 | */
11 | public class DepthSparse3D_to_PixelTo3D
12 | implements ImagePixelTo3D
13 | {
14 | DepthSparse3D alg;
15 |
16 | public DepthSparse3D_to_PixelTo3D(DepthSparse3D alg) {
17 | this.alg = alg;
18 | }
19 |
20 | @Override
21 | public boolean process(double x, double y) {
22 | return alg.process((int)x,(int)y);
23 | }
24 |
25 | @Override
26 | public double getX() {
27 | return alg.getWorldPt().x;
28 | }
29 |
30 | @Override
31 | public double getY() {
32 | return alg.getWorldPt().y;
33 | }
34 |
35 | @Override
36 | public double getZ() {
37 | return alg.getWorldPt().z;
38 | }
39 |
40 | @Override
41 | public double getW() {
42 | return 1;
43 | }
44 | }
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vio/FactoryMAVOdometryVIO.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles, Eike Mansfeld. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vio;
20 |
21 | import org.ddogleg.fitting.modelset.ModelMatcher;
22 | import org.ddogleg.fitting.modelset.ransac.Ransac;
23 |
24 | import com.comino.slam.boofcv.sfm.DepthSparse3D;
25 | import com.comino.slam.boofcv.sfm.DepthSparse3D_to_PixelTo3D;
26 | import com.comino.slam.boofcv.vio.odometry.MAVOdomPixelDepthPnPVIO;
27 | import com.comino.slam.boofcv.vio.odometry.MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO;
28 | import com.comino.slam.boofcv.vo.odometry.MAVOdomPixelDepthPnP;
29 |
30 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
31 | import boofcv.abst.geo.Estimate1ofPnP;
32 | import boofcv.abst.geo.RefinePnP;
33 |
34 | import boofcv.abst.sfm.ImagePixelTo3D;
35 | import boofcv.alg.geo.DistanceModelMonoPixels;
36 | import boofcv.alg.geo.pose.PnPDistanceReprojectionSq;
37 | import boofcv.factory.geo.EnumPNP;
38 | import boofcv.factory.geo.EstimatorToGenerator;
39 | import boofcv.factory.geo.FactoryMultiView;
40 | import boofcv.struct.geo.Point2D3D;
41 | import boofcv.struct.image.ImageGray;
42 | import boofcv.struct.image.ImageType;
43 | import georegression.fitting.se.ModelManagerSe3_F64;
44 | import georegression.struct.point.Point3D_F64;
45 | import georegression.struct.se.Se3_F64;
46 |
47 | /**
48 | * Factory for creating visual odometry algorithms.
49 | *
50 | * @author Peter Abeles
51 | */
public class FactoryMAVOdometryVIO {



	/**
	 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
	 * estimates the range of tracks once when first detected using the depth sensor.
	 *
	 * @see MAVOdomPixelDepthPnP
	 *
	 * @param inlierPixelTol Pixel reprojection tolerance for classifying RANSAC inliers; the squared value is
	 *                       used internally as the error threshold.
	 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
	 *                     a value &le; 0 to add features every frame.
	 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2
	 * @param ransacIterations Maximum number of iterations RANSAC performs.
	 * @param refineIterations Iterations for non-linear pose refinement; a value &le; 0 disables refinement.
	 * @param doublePass If true the tracker is run a second time using the first motion estimate as a hint.
	 * @param offset Constant offset added to track locations while estimating motion
	 *               (see MAVOdomPixelDepthPnPVIO.estimateMotion()).
	 * @param sparseDepth Extracts depth of pixels from a depth sensor.
	 * @param tracker Two-pass point feature tracker.
	 * @param visualType Type of visual image being processed.
	 * @param depthType Type of depth image being processed.
	 * @return Depth visual odometry wrapper around {@link MAVOdomPixelDepthPnPVIO}.
	 */
	public static
	MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO depthPnP(double inlierPixelTol,
															int thresholdAdd,
															int thresholdRetire ,
															int ransacIterations ,
															int refineIterations ,
															boolean doublePass ,
															Point3D_F64 offset,
															DepthSparse3D sparseDepth,
															PointTrackerTwoPass tracker ,
															Class visualType , Class depthType
															) {

		// Range from sparse disparity
		ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D(sparseDepth);

		// P3P minimal-case pose estimator
		Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER,-1,2);
		final DistanceModelMonoPixels distance = new PnPDistanceReprojectionSq();

		ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
		EstimatorToGenerator generator = new EstimatorToGenerator(estimator);

		// RANSAC compares the squared reprojection error against the squared pixel tolerance
		double ransacTOL = inlierPixelTol * inlierPixelTol;

		// fixed seed (2323) makes the RANSAC sampling deterministic across runs
		ModelMatcher motion =
				new Ransac(2323, manager, generator, distance, ransacIterations, ransacTOL);

		RefinePnP refine = null;

		if( refineIterations > 0 ) {
			refine = FactoryMultiView.refinePnP(1e-12,refineIterations);
		}

		// pixelToNorm/normToPixel are null here; they are injected later via setCalibration()
		MAVOdomPixelDepthPnPVIO alg = new MAVOdomPixelDepthPnPVIO
				(thresholdAdd,thresholdRetire ,doublePass,offset, motion,pixelTo3D,refine,tracker,null,null);

		return new MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO
				(sparseDepth,alg,distance, ImageType.single(visualType),depthType);
	}



}
114 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vio/odometry/MAVOdomPixelDepthPnPVIO.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2015, Peter Abeles, Eike Mansfeld. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vio.odometry;
20 |
21 | import java.util.ArrayList;
22 | import java.util.List;
23 |
24 | import org.ddogleg.fitting.modelset.ModelMatcher;
25 |
26 | import boofcv.abst.feature.tracker.PointTrack;
27 | import boofcv.abst.feature.tracker.PointTracker;
28 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
29 | import boofcv.abst.geo.RefinePnP;
30 | import boofcv.abst.sfm.ImagePixelTo3D;
31 | import boofcv.struct.distort.PointTransform_F64;
32 | import boofcv.struct.geo.Point2D3D;
33 | import boofcv.struct.image.ImageBase;
34 | import boofcv.struct.sfm.Point2D3DTrack;
35 | import georegression.struct.point.Point2D_F64;
36 | import georegression.struct.point.Point3D_F64;
37 | import georegression.struct.se.Se3_F64;
38 | import georegression.transform.se.SePointOps_F64;
39 |
40 | /**
41 | * Full 6-DOF visual odometry where a ranging device is assumed for pixels in
42 | * the primary view and the motion is estimated using a
43 | * {@link boofcv.abst.geo.Estimate1ofPnP}. Range is usually estimated using
44 | * stereo cameras, structured light or time of flight sensors. New features are
45 | * added and removed as needed. Features are removed if they are not part of the
46 | * inlier feature set for some number of consecutive frames. New features are
47 | * detected and added if the inlier set falls below a threshold or every turn.
48 | *
49 | * Non-linear refinement is optional and appears to provide a very modest
50 | * improvement in performance. It is recommended that motion is estimated using
51 | * a P3P algorithm, which is the minimal case. Adding features every frame can
52 | * be computationally expensive, but having too few features being tracked will
53 | * degrade accuracy. The algorithm was designed to minimize magic numbers and to
54 | * be insensitive to small changes in their values.
55 | *
56 | * Due to the level of abstraction, it can't take full advantage of the sensors
57 | * used to estimate 3D feature locations. For example if a stereo camera is used
58 | * then 3-view geometry can't be used to improve performance.
59 | *
60 | * @author Peter Abeles, modified by Eike Mansfeld
61 | */
62 | public class MAVOdomPixelDepthPnPVIO {
63 |
64 | // when the inlier set is less than this number new features are detected
65 | private int thresholdAdd;
66 |
67 | // discard tracks after they have not been in the inlier set for this many
68 | // updates in a row
69 | private int thresholdRetire;
70 |
71 | // run the tracker once or twice?
72 | private boolean doublePass;
73 |
74 | // tracks features in the image
75 | private PointTrackerTwoPass tracker;
76 | // used to estimate a feature's 3D position from image range data
77 | private ImagePixelTo3D pixelTo3D;
78 | // converts from pixel to normalized image coordinates
79 | private PointTransform_F64 pixelToNorm;
80 | // convert from normalized image coordinates to pixel
81 | private PointTransform_F64 normToPixel;
82 |
83 | // non-linear refinement of pose estimate
84 | private RefinePnP refine;
85 |
86 | // estimate the camera motion up to a scale factor from two sets of point
87 | // correspondences
88 | private ModelMatcher motionEstimator;
89 |
90 | // location of tracks in the image that are included in the inlier set
91 | private List inlierTracks = new ArrayList();
92 |
93 | // transform from key frame to world frame
94 | private Se3_F64 keyToWorld = new Se3_F64();
95 | // transform from the current camera view to the key frame
96 | private Se3_F64 currToKey = new Se3_F64();
97 | // transform from the current camera view to the world frame
98 | private Se3_F64 currToWorld = new Se3_F64();
99 |
100 | // is this the first camera view being processed?
101 | private boolean first = true;
102 | // number of frames processed.
103 | private long tick;
104 |
105 | // used when concating motion
106 | private Se3_F64 temp = new Se3_F64();
107 |
108 | private Point3D_F64 lastTrackAdded = new Point3D_F64();
109 | private Point3D_F64 offset = null;
110 |
111 | private double quality = 0;
112 |
113 |
114 | /**
115 | * Configures magic numbers and estimation algorithms.
116 | *
117 | * @param thresholdAdd
118 | * Add new tracks when less than this number are in the inlier
119 | * set. Tracker dependent. Set to a value ≤ 0 to add features
120 | * every frame.
121 | * @param thresholdRetire
122 | * Discard a track if it is not in the inlier set after this many
123 | * updates. Try 2
124 | * @param doublePass
125 | * Associate image features a second time using the estimated
126 | * model from the first try to improve results
127 | * @param motionEstimator
128 | * PnP motion estimator. P3P algorithm is recommended/
129 | * @param pixelTo3D
130 | * Computes the 3D location of pixels.
131 | * @param refine
132 | * Optional algorithm for refining the pose estimate. Can be
133 | * null.
134 | * @param tracker
135 | * Point feature tracker.
136 | * @param pixelToNorm
137 | * Converts from raw image pixels into normalized image
138 | * coordinates.
139 | * @param normToPixel
140 | * Converts from normalized image coordinates into raw pixels
141 | */
142 | public MAVOdomPixelDepthPnPVIO(int thresholdAdd, int thresholdRetire, boolean doublePass, Point3D_F64 offset,
143 | ModelMatcher motionEstimator, ImagePixelTo3D pixelTo3D, RefinePnP refine,
144 | PointTrackerTwoPass tracker, PointTransform_F64 pixelToNorm, PointTransform_F64 normToPixel) {
145 | this.thresholdAdd = thresholdAdd;
146 | this.thresholdRetire = thresholdRetire;
147 | this.doublePass = doublePass;
148 | this.motionEstimator = motionEstimator;
149 | this.pixelTo3D = pixelTo3D;
150 | this.refine = refine;
151 | this.tracker = tracker;
152 | this.pixelToNorm = pixelToNorm;
153 | this.normToPixel = normToPixel;
154 | this.offset = offset;
155 | }
156 |
157 |
158 |
159 | /**
160 | * Estimates the motion given the left camera image. The latest information
161 | * required by ImagePixelTo3D should be passed to the class before invoking
162 | * this function.
163 | *
164 | * @param image
165 | * Camera image.
166 | * @return true if successful or false if it failed
167 | */
168 |
169 | public boolean process(T image, Se3_F64 state) {
170 |
171 | tracker.process(image);
172 |
173 | tick++;
174 | inlierTracks.clear();
175 |
176 | if (first) {
177 | addNewTracks();
178 | first = false;
179 | } else {
180 | if (!estimateMotion()) {
181 | // System.out.println("No motion estimate...");
182 | return false;
183 | }
184 |
185 | dropUnusedTracks();
186 | int N = motionEstimator.getMatchSet().size();
187 |
188 | if (thresholdAdd <= 0 || N < thresholdAdd) {
189 | changePoseToReference();
190 | addNewTracks();
191 | if(state!=null)
192 | keyToWorld.set(state);
193 | // System.out.println("New KeyFrame..."+keyToWorld.getRotation());
194 | }
195 | }
196 |
197 | return true;
198 | }
199 |
200 | /**
201 | * Updates the relative position of all points so that the current frame is
202 | * the reference frame. Mathematically this is not needed, but should help
203 | * keep numbers from getting too large.
204 | */
205 | private void changePoseToReference() {
206 | Se3_F64 keyToCurr = currToKey.invert(null);
207 |
208 | List all = tracker.getAllTracks(null);
209 |
210 | for (PointTrack t : all) {
211 | Point2D3DTrack p = t.getCookie();
212 | SePointOps_F64.transform(keyToCurr, p.location, p.location);
213 | }
214 |
215 | concatMotion();
216 | }
217 |
218 | /**
219 | * Removes tracks which have not been included in the inlier set recently
220 | *
221 | * @return Number of dropped tracks
222 | */
223 | private int dropUnusedTracks() {
224 |
225 | List all = tracker.getAllTracks(null);
226 | int num = 0;
227 |
228 | for (PointTrack t : all) {
229 | Point2D3DTrack p = t.getCookie();
230 | if (tick - p.lastInlier > thresholdRetire) {
231 | tracker.dropTrack(t);
232 | num++;
233 | }
234 | }
235 |
236 | return num;
237 | }
238 |
239 | /**
240 | * Detects new features and computes their 3D coordinates
241 | */
242 | private void addNewTracks() {
243 | // System.out.println("----------- Adding new tracks ---------------");
244 | Point2D3DTrack p;
245 |
246 | tracker.spawnTracks();
247 | try {
248 | List spawned = tracker.getNewTracks(null);
249 |
250 | // estimate 3D coordinate using stereo vision
251 | for (PointTrack t : spawned) {
252 | p = t.getCookie();
253 | if (p == null) {
254 | t.cookie = p = new Point2D3DTrack();
255 | }
256 |
257 | // discard point if it can't localized
258 | if (!pixelTo3D.process(t.x, t.y) || pixelTo3D.getW() == 0) {
259 |
260 | tracker.dropTrack(t);
261 | } else {
262 | Point3D_F64 X = p.getLocation();
263 |
264 | X.set(pixelTo3D.getX(), pixelTo3D.getY(), pixelTo3D.getZ());
265 |
266 | lastTrackAdded.set(X);
267 |
268 | // translate the point into the key frame
269 | SePointOps_F64.transform(currToKey, X, X);
270 | // not needed since the current frame was just set to be the key
271 | // frame
272 |
273 | p.lastInlier = tick;
274 | pixelToNorm.compute(t.x, t.y, p.observation);
275 | }
276 | }
277 | } catch(Exception e) {
278 | e.printStackTrace();
279 | // System.err.println("Add new tracks: "+e.getMessage());
280 | }
281 | }
282 |
283 | /**
284 | * Estimates motion from the set of tracks and their 3D location
285 | *
286 | * @return true if successful.
287 | */
288 | private boolean estimateMotion() {
289 | Point2D3D p = null;
290 | List active = tracker.getActiveTracks(null);
291 | List obs = new ArrayList();
292 |
293 |
294 | for (PointTrack t : active) {
295 | p = t.getCookie();
296 | pixelToNorm.compute(t.x, t.y, p.observation);
297 |
298 | // Add offset TODO: do this in modelMatcher
299 | p.location.z += offset.z;
300 | p.location.y += offset.y;
301 | p.location.x += offset.x;
302 |
303 | obs.add(p);
304 | }
305 |
306 | // estimate the motion up to a scale factor in translation
307 | if (!motionEstimator.process(obs))
308 | return false;
309 |
310 | if (doublePass) {
311 | if (!performSecondPass(active, obs))
312 | return false;
313 | }
314 | tracker.finishTracking();
315 |
316 |
317 | // undo offset; this is obsolete if in ModelMatcher
318 | for (PointTrack t : active) {
319 | p = t.getCookie();
320 | p.location.z -= offset.z;
321 | p.location.y -= offset.y;
322 | p.location.x -= offset.x;
323 | }
324 |
325 |
326 | Se3_F64 keyToCurr;
327 |
328 | if (refine != null) {
329 | keyToCurr = new Se3_F64();
330 | refine.fitModel(motionEstimator.getMatchSet(), motionEstimator.getModelParameters(), keyToCurr);
331 | } else {
332 | keyToCurr = motionEstimator.getModelParameters();
333 | }
334 |
335 | keyToCurr.invert(currToKey);
336 |
337 | // mark tracks as being inliers and add to inlier list
338 | int N = motionEstimator.getMatchSet().size();
339 | for (int i = 0; i < N; i++) {
340 | int index = motionEstimator.getInputIndex(i);
341 | Point2D3DTrack t = active.get(index).getCookie();
342 | t.lastInlier = tick;
343 | inlierTracks.add(t);
344 | }
345 |
346 | if(active.size()> 0)
347 | this.quality = motionEstimator.getFitQuality();
348 | else
349 | this.quality = 0;
350 |
351 | return true;
352 | }
353 |
354 | private boolean performSecondPass(List active, List obs) {
355 | Se3_F64 keyToCurr = motionEstimator.getModelParameters();
356 |
357 | Point3D_F64 cameraPt = new Point3D_F64();
358 | Point2D_F64 predicted = new Point2D_F64();
359 |
360 | // predict where each track should be given the just estimated motion
361 | List all = tracker.getAllTracks(null);
362 | for (PointTrack t : all) {
363 | Point2D3D p = t.getCookie();
364 |
365 | SePointOps_F64.transform(keyToCurr, p.location, cameraPt);
366 | normToPixel.compute(cameraPt.x / cameraPt.z, cameraPt.y / cameraPt.z, predicted);
367 | tracker.setHint(predicted.x, predicted.y, t);
368 | }
369 |
370 | // redo tracking with the additional information
371 | tracker.performSecondPass();
372 |
373 | active.clear();
374 | obs.clear();
375 | tracker.getActiveTracks(active);
376 |
377 | for (PointTrack t : active) {
378 | Point2D3D p = t.getCookie();
379 | pixelToNorm.compute(t.x, t.y, p.observation);
380 | obs.add(p);
381 | }
382 |
383 | return motionEstimator.process(obs);
384 | }
385 |
386 | //MSP
387 |
388 | public Point2D3D getObservation(int index) {
389 | List active = tracker.getActiveTracks(null);
390 | PointTrack t = active.get(index);
391 | Point2D3D p = t.getCookie();
392 | pixelToNorm.compute(t.x, t.y, p.observation);
393 | return p;
394 | }
395 |
396 | // MSP
397 |
398 | public double getQuality() {
399 | return this.quality;
400 | }
401 |
402 | private void concatMotion() {
403 | currToKey.concat(keyToWorld, temp);
404 | keyToWorld.set(temp);
405 | currToKey.reset();
406 | }
407 |
408 |
409 | public Se3_F64 getCurrToWorld() {
410 | currToKey.concat(keyToWorld, currToWorld);
411 | return currToWorld;
412 | }
413 |
414 | /**
415 | * Resets the algorithm into its original state
416 | */
417 |
418 | public void reset() {
419 | tracker.reset();
420 | keyToWorld.reset();
421 | currToKey.reset();
422 | first = true;
423 | tick = 0;
424 | quality = 0;
425 | }
426 |
427 | public void reset(Se3_F64 initialState) {
428 | reset();
429 | keyToWorld.set(initialState);
430 | }
431 |
432 |
433 | public PointTracker getTracker() {
434 | return tracker;
435 | }
436 |
437 |
438 | public ModelMatcher getMotionEstimator() {
439 | return motionEstimator;
440 | }
441 |
442 |
443 | public List getInlierTracks() {
444 | return inlierTracks;
445 | }
446 |
447 |
448 | public void setPixelToNorm(PointTransform_F64 pixelToNorm) {
449 | this.pixelToNorm = pixelToNorm;
450 | }
451 |
452 |
453 | public void setNormToPixel(PointTransform_F64 normToPixel) {
454 | this.normToPixel = normToPixel;
455 | }
456 |
457 |
458 | public long getTick() {
459 | return tick;
460 | }
461 | }
462 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vio/odometry/MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vio.odometry;
20 |
21 | import static boofcv.alg.distort.LensDistortionOps.transformPoint;
22 |
23 | import java.util.ArrayList;
24 | import java.util.List;
25 |
26 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
27 | import com.comino.slam.boofcv.sfm.DepthSparse3D;
28 |
29 | import boofcv.abst.feature.tracker.PointTrack;
30 | import boofcv.abst.sfm.AccessPointTracks3D;
31 | import boofcv.abst.sfm.d3.DepthVisualOdometry;
32 | import boofcv.alg.geo.DistanceModelMonoPixels;
33 |
34 | import boofcv.alg.sfm.d3.VisOdomPixelDepthPnP;
35 | import boofcv.struct.calib.IntrinsicParameters;
36 | import boofcv.struct.distort.PixelTransform_F32;
37 | import boofcv.struct.distort.PointTransform_F64;
38 | import boofcv.struct.geo.Point2D3D;
39 | import boofcv.struct.image.ImageBase;
40 | import boofcv.struct.image.ImageGray;
41 | import boofcv.struct.image.ImageType;
42 | import boofcv.struct.sfm.Point2D3DTrack;
43 | import georegression.struct.point.Point2D_F64;
44 | import georegression.struct.point.Point3D_F64;
45 | import georegression.struct.se.Se3_F64;
46 |
47 | /**
48 | * Wrapper around {@link VisOdomPixelDepthPnP} for {@link DepthVisualOdometry}.
49 | *
50 | * @author Peter Abeles
51 | */
52 | // TODO WARNING! active list has been modified by dropping and adding tracks
53 | // this is probably true of other SFM algorithms
54 | public class MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO
55 | implements MAVDepthVisualOdometry , AccessPointTracks3D
56 | {
57 | // low level algorithm
58 | DepthSparse3D sparse3D;
59 | MAVOdomPixelDepthPnPVIO alg;
60 | DistanceModelMonoPixels distance;
61 | ImageType visualType;
62 | Class depthType;
63 | boolean success;
64 |
65 | PointTransform_F64 leftPixelToNorm = null;
66 | PointTransform_F64 leftNormToPixel = null;
67 |
68 | List active = new ArrayList();
69 |
70 | public MAVOdomPixelDepthPnP_to_DepthVisualOdometryVIO(DepthSparse3D sparse3D, MAVOdomPixelDepthPnPVIO alg,
71 | DistanceModelMonoPixels distance,
72 | ImageType visualType, Class depthType) {
73 | this.sparse3D = sparse3D;
74 | this.alg = alg;
75 | this.distance = distance;
76 | this.visualType = visualType;
77 | this.depthType = depthType;
78 | }
79 |
80 | @Override
81 | public Point3D_F64 getTrackLocation(int index) {
82 | return alg.getObservation(index).location;
83 | }
84 |
85 | @Override
86 | public double getQuality() {
87 | return alg.getQuality();
88 | }
89 |
90 | @Override
91 | public long getTrackId(int index) {
92 | return active.get(index).featureId;
93 | }
94 |
95 | @Override
96 | public List getAllTracks() {
97 | return (List)active;
98 | }
99 |
100 | @Override
101 | public boolean isInlier(int index) {
102 | Point2D3DTrack t = active.get(index).getCookie();
103 | return t.lastInlier == alg.getTick();
104 | }
105 |
106 | @Override
107 | public boolean isNew(int index) {
108 | PointTrack t = alg.getTracker().getActiveTracks(null).get(index);
109 | return alg.getTracker().getNewTracks(null).contains(t);
110 | }
111 |
112 | @Override
113 | public void setCalibration(IntrinsicParameters paramVisual, PixelTransform_F32 visToDepth) {
114 | sparse3D.configure(paramVisual,visToDepth);
115 |
116 | leftPixelToNorm = transformPoint(paramVisual).undistort_F64(true,false);
117 | leftNormToPixel = transformPoint(paramVisual).distort_F64(false,true);
118 |
119 | alg.setPixelToNorm(leftPixelToNorm);
120 | alg.setNormToPixel(leftNormToPixel);
121 |
122 |
123 | distance.setIntrinsic(paramVisual.fx,paramVisual.fy,paramVisual.skew);
124 | }
125 |
126 |
127 |
128 | @Override
129 | public boolean process(Vis visual, Depth depth) {
130 | sparse3D.setDepthImage(depth);
131 | success = alg.process(visual, null);
132 |
133 | active.clear();
134 | alg.getTracker().getActiveTracks(active);
135 |
136 | return success;
137 | }
138 |
139 | @Override
140 | public boolean process(Vis visual, Depth depth, Se3_F64 state) {
141 | sparse3D.setDepthImage(depth);
142 | success = alg.process(visual, state);
143 |
144 | active.clear();
145 | alg.getTracker().getActiveTracks(active);
146 |
147 | return success;
148 | }
149 |
150 | private int dpx = 15;
151 | public Point3D_F64 getPoint3DFromPixel(int pixelx, int pixely) {
152 |
153 | if(sparse3D.process(pixelx, pixely))
154 | return sparse3D.getWorldPt();
155 |
156 | if(sparse3D.process(pixelx+dpx, pixely+dpx))
157 | return sparse3D.getWorldPt();
158 |
159 | if(sparse3D.process(pixelx-dpx, pixely-dpx))
160 | return sparse3D.getWorldPt();
161 |
162 | if(sparse3D.process(pixelx+dpx, pixely-dpx))
163 | return sparse3D.getWorldPt();
164 |
165 | if(sparse3D.process(pixelx-dpx, pixely+dpx))
166 | return sparse3D.getWorldPt();
167 |
168 |
169 | return null;
170 | }
171 |
172 | @Override
173 | public void reset() {
174 | alg.reset();
175 | }
176 |
177 | @Override
178 | public boolean isFault() {
179 | return !success;
180 | }
181 |
182 | @Override
183 | public Se3_F64 getCameraToWorld() {
184 | return alg.getCurrToWorld();
185 | }
186 |
187 | @Override
188 | public ImageType getVisualType() {
189 | return visualType;
190 | }
191 |
192 | @Override
193 | public Class getDepthType() {
194 | return depthType;
195 | }
196 |
197 | @Override
198 | public int getInlierCount() {
199 | return alg.getInlierTracks().size();
200 | }
201 |
202 | @Override
203 | public void reset(Se3_F64 initialState) {
204 | alg.reset(initialState);
205 | }
206 |
207 | }
208 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vio/tracker/FactoryMAVPointTrackerTwoPassVIO.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vio.tracker;
20 |
21 | import static boofcv.factory.feature.tracker.FactoryPointTracker.createShiTomasi;
22 |
23 | import boofcv.abst.feature.associate.AssociateDescription2D;
24 | import boofcv.abst.feature.describe.DescribeRegionPoint;
25 | import boofcv.abst.feature.detdesc.DetectDescribePoint;
26 | import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
27 | import boofcv.abst.feature.tracker.DdaManagerDetectDescribePoint;
28 | import boofcv.abst.feature.tracker.DdaManagerGeneralPoint;
29 | import boofcv.abst.feature.tracker.DetectDescribeAssociateTwoPass;
30 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
31 | import boofcv.abst.feature.tracker.PointTrackerTwoPassKltPyramid;
32 | import boofcv.abst.filter.derivative.ImageGradient;
33 | import boofcv.alg.feature.detect.interest.EasyGeneralFeatureDetector;
34 | import boofcv.alg.feature.detect.interest.GeneralFeatureDetector;
35 | import boofcv.alg.interpolate.InterpolateRectangle;
36 | import boofcv.alg.tracker.klt.PkltConfig;
37 | import boofcv.factory.filter.derivative.FactoryDerivative;
38 | import boofcv.factory.interpolate.FactoryInterpolation;
39 | import boofcv.factory.transform.pyramid.FactoryPyramid;
40 | import boofcv.struct.feature.TupleDesc;
41 | import boofcv.struct.image.ImageGray;
42 | import boofcv.struct.pyramid.PyramidDiscrete;
43 |
44 | /**
45 | * @author Peter Abeles
46 | */
47 | public class FactoryMAVPointTrackerTwoPassVIO {
48 |
49 | /**
50 | * Pyramid KLT feature tracker.
51 | *
52 | * @param config Config for the tracker. Try PkltConfig.createDefault().
53 | * @param configExtract Configuration for extracting features
54 | * @return KLT based tracker.
55 | */
56 | public static
57 | PointTrackerTwoPass klt(PkltConfig config, ConfigGeneralDetector configExtract,
58 | Class imageType, Class derivType) {
59 |
60 | GeneralFeatureDetector detector = createShiTomasi(configExtract, derivType);
61 |
62 | InterpolateRectangle interpInput = FactoryInterpolation.bilinearRectangle(imageType);
63 | InterpolateRectangle interpDeriv = FactoryInterpolation.bilinearRectangle(derivType);
64 |
65 | ImageGradient gradient = FactoryDerivative.sobel(imageType, derivType);
66 |
67 | PyramidDiscrete pyramid = FactoryPyramid.discreteGaussian(config.pyramidScaling,-1,2,true,imageType);
68 |
69 | return new PointTrackerTwoPassKltPyramid<>(config.config, config.templateRadius, pyramid, detector,
70 | gradient, interpInput, interpDeriv);
71 | }
72 |
73 | public static
74 | PointTrackerTwoPass dda(GeneralFeatureDetector detector,
75 | DescribeRegionPoint describe,
76 | AssociateDescription2D associate1,
77 | AssociateDescription2D associate2,
78 | double scale,
79 | Class imageType) {
80 |
81 | EasyGeneralFeatureDetector easy = new EasyGeneralFeatureDetector<>(detector, imageType, null);
82 |
83 | DdaManagerGeneralPoint manager = new DdaManagerGeneralPoint<>(easy, describe, scale);
84 |
85 | if( associate2 == null )
86 | associate2 = associate1;
87 |
88 | return new DetectDescribeAssociateTwoPass<>(manager, associate1, associate2, false);
89 | }
90 |
91 | public static
92 | PointTrackerTwoPass dda(DetectDescribePoint detectDescribe,
93 | AssociateDescription2D associate1 ,
94 | AssociateDescription2D associate2 ,
95 | boolean updateDescription ) {
96 |
97 |
98 | DdaManagerDetectDescribePoint manager = new DdaManagerDetectDescribePoint<>(detectDescribe);
99 |
100 | if( associate2 == null )
101 | associate2 = associate1;
102 |
103 | return new DetectDescribeAssociateTwoPass<>(manager, associate1, associate2, updateDescription);
104 | }
105 | }
106 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vo/FactoryMAVOdometry.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles, Eike Mansfeld. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vo;
20 |
21 | import org.ddogleg.fitting.modelset.ModelMatcher;
22 | import org.ddogleg.fitting.modelset.ransac.Ransac;
23 |
24 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
25 | import com.comino.slam.boofcv.vo.odometry.MAVOdomPixelDepthPnP;
26 | import com.comino.slam.boofcv.vo.odometry.MAVOdomPixelDepthPnP_to_DepthVisualOdometry;
27 |
28 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
29 | import boofcv.abst.geo.Estimate1ofPnP;
30 | import boofcv.abst.geo.RefinePnP;
31 | import boofcv.abst.sfm.DepthSparse3D_to_PixelTo3D;
32 | import boofcv.abst.sfm.ImagePixelTo3D;
33 | import boofcv.alg.geo.DistanceModelMonoPixels;
34 | import boofcv.alg.geo.pose.PnPDistanceReprojectionSq;
35 | import boofcv.alg.sfm.DepthSparse3D;
36 | import boofcv.factory.geo.EnumPNP;
37 | import boofcv.factory.geo.EstimatorToGenerator;
38 | import boofcv.factory.geo.FactoryMultiView;
39 | import boofcv.struct.geo.Point2D3D;
40 | import boofcv.struct.image.ImageGray;
41 | import boofcv.struct.image.ImageType;
42 | import georegression.fitting.se.ModelManagerSe3_F64;
43 | import georegression.struct.se.Se3_F64;
44 |
45 | /**
46 | * Factory for creating visual odometry algorithms.
47 | *
48 | * @author Peter Abeles
49 | */
/**
 * Factory for creating visual odometry algorithms.
 *
 * @author Peter Abeles
 */
public class FactoryMAVOdometry {



	/**
	 * Depth sensor based visual odometry algorithm which runs a sparse feature tracker in the visual camera and
	 * estimates the range of tracks once when first detected using the depth sensor.
	 *
	 * @see MAVOdomPixelDepthPnP
	 *
	 * @param inlierPixelTol Reprojection tolerance in pixels for a RANSAC inlier; the squared value is used
	 * as the actual threshold.
	 * @param thresholdAdd Add new tracks when less than this number are in the inlier set. Tracker dependent. Set to
	 * a value &le; 0 to add features every frame.
	 * @param thresholdRetire Discard a track if it is not in the inlier set after this many updates. Try 2
	 * @param ransacIterations Maximum number of RANSAC iterations when estimating the PnP motion.
	 * @param refineIterations Number of non-linear refinement iterations; &le; 0 disables refinement.
	 * @param doublePass Associate image features a second time using the first-pass motion estimate.
	 * @param sparseDepth Extracts depth of pixels from a depth sensor.
	 * @param tracker Two-pass point feature tracker.
	 * @param visualType Type of visual image being processed.
	 * @param depthType Type of depth image being processed.
	 * @return MAVDepthVisualOdometry wrapping a {@link MAVOdomPixelDepthPnP} estimator.
	 */
	public static
	MAVDepthVisualOdometry depthPnP(double inlierPixelTol,
			int thresholdAdd,
			int thresholdRetire ,
			int ransacIterations ,
			int refineIterations ,
			boolean doublePass ,
			DepthSparse3D sparseDepth,
			PointTrackerTwoPass tracker ,
			Class visualType , Class depthType ) {

		// Range from sparse disparity
		ImagePixelTo3D pixelTo3D = new DepthSparse3D_to_PixelTo3D(sparseDepth);

		// minimal-case P3P solver for the pose estimate
		Estimate1ofPnP estimator = FactoryMultiView.computePnP_1(EnumPNP.P3P_FINSTERWALDER,-1,2);
		final DistanceModelMonoPixels distance = new PnPDistanceReprojectionSq();

		ModelManagerSe3_F64 manager = new ModelManagerSe3_F64();
		EstimatorToGenerator generator = new EstimatorToGenerator(estimator);

		// RANSAC threshold operates on squared reprojection error, hence the squared tolerance
		double ransacTOL = inlierPixelTol * inlierPixelTol;

		// fixed seed (2323) keeps RANSAC results repeatable between runs
		ModelMatcher motion =
				new Ransac(2323, manager, generator, distance, ransacIterations, ransacTOL);

		RefinePnP refine = null;

		if( refineIterations > 0 ) {
			refine = FactoryMultiView.refinePnP(1e-12,refineIterations);
		}

		// pixelToNorm/normToPixel are supplied later via setCalibration() on the wrapper
		MAVOdomPixelDepthPnP alg = new MAVOdomPixelDepthPnP
				(thresholdAdd,thresholdRetire ,doublePass,motion,pixelTo3D,refine,tracker,null,null);

		return new MAVOdomPixelDepthPnP_to_DepthVisualOdometry
				(sparseDepth,alg,distance, ImageType.single(visualType),depthType);
	}



}
110 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vo/odometry/MAVOdomPixelDepthPnP.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2015, Peter Abeles, Eike Mansfeld. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vo.odometry;
20 |
21 | import java.util.ArrayList;
22 | import java.util.List;
23 |
24 | import org.ddogleg.fitting.modelset.ModelMatcher;
25 |
26 | import boofcv.abst.feature.tracker.PointTrack;
27 | import boofcv.abst.feature.tracker.PointTracker;
28 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
29 | import boofcv.abst.geo.RefinePnP;
30 | import boofcv.abst.sfm.ImagePixelTo3D;
31 | import boofcv.struct.distort.PointTransform_F64;
32 | import boofcv.struct.geo.Point2D3D;
33 | import boofcv.struct.image.ImageBase;
34 | import boofcv.struct.sfm.Point2D3DTrack;
35 | import georegression.struct.point.Point2D_F64;
36 | import georegression.struct.point.Point3D_F64;
37 | import georegression.struct.se.Se3_F64;
38 | import georegression.transform.se.SePointOps_F64;
39 |
40 | /**
41 | * Full 6-DOF visual odometry where a ranging device is assumed for pixels in
42 | * the primary view and the motion is estimated using a
43 | * {@link boofcv.abst.geo.Estimate1ofPnP}. Range is usually estimated using
44 | * stereo cameras, structured light or time of flight sensors. New features are
45 | * added and removed as needed. Features are removed if they are not part of the
46 | * inlier feature set for some number of consecutive frames. New features are
47 | * detected and added if the inlier set falls below a threshold or every turn.
48 | *
49 | * Non-linear refinement is optional and appears to provide a very modest
50 | * improvement in performance. It is recommended that motion is estimated using
51 | * a P3P algorithm, which is the minimal case. Adding features every frame can
52 | * be computationally expensive, but having too few features being tracked will
53 | * degrade accuracy. The algorithm was designed to minimize magic numbers and to
54 | * be insensitive to small changes in their values.
55 | *
56 | * Due to the level of abstraction, it can't take full advantage of the sensors
57 | * used to estimate 3D feature locations. For example if a stereo camera is used
58 | * then 3-view geometry can't be used to improve performance.
59 | *
60 | * @author Peter Abeles, modified by Eike Mansfeld
61 | */
/**
 * Full 6-DOF visual odometry where a ranging device is assumed for pixels in
 * the primary view and the motion is estimated using a
 * {@link boofcv.abst.geo.Estimate1ofPnP}. Range is usually estimated using
 * stereo cameras, structured light or time of flight sensors. New features are
 * added and removed as needed. Features are removed if they are not part of the
 * inlier feature set for some number of consecutive frames. New features are
 * detected and added if the inlier set falls below a threshold or every turn.
 *
 * Non-linear refinement is optional and appears to provide a very modest
 * improvement in performance. It is recommended that motion is estimated using
 * a P3P algorithm, which is the minimal case. Adding features every frame can
 * be computationally expensive, but having too few features being tracked will
 * degrade accuracy. The algorithm was designed to minimize magic numbers and to
 * be insensitive to small changes in their values.
 *
 * Due to the level of abstraction, it can't take full advantage of the sensors
 * used to estimate 3D feature locations. For example if a stereo camera is used
 * then 3-view geometry can't be used to improve performance.
 *
 * NOTE(review): the class's generic parameter appears to have been lost in this
 * copy — {@code process(T image, ...)} references an undeclared type {@code T};
 * presumably the declaration was {@code <T extends ImageBase>} as in BoofCV's
 * VisOdomPixelDepthPnP. Confirm against upstream.
 *
 * @author Peter Abeles, modified by Eike Mansfeld
 */
public class MAVOdomPixelDepthPnP {

	// when the inlier set is less than this number new features are detected
	private int thresholdAdd;

	// discard tracks after they have not been in the inlier set for this many
	// updates in a row
	private int thresholdRetire;

	// run the tracker once or twice?
	private boolean doublePass;

	// tracks features in the image
	private PointTrackerTwoPass tracker;
	// used to estimate a feature's 3D position from image range data
	private ImagePixelTo3D pixelTo3D;
	// converts from pixel to normalized image coordinates
	private PointTransform_F64 pixelToNorm;
	// convert from normalized image coordinates to pixel
	private PointTransform_F64 normToPixel;

	// non-linear refinement of pose estimate
	private RefinePnP refine;

	// estimate the camera motion up to a scale factor from two sets of point
	// correspondences
	private ModelMatcher motionEstimator;

	// location of tracks in the image that are included in the inlier set
	private List inlierTracks = new ArrayList();

	// transform from key frame to world frame
	private Se3_F64 keyToWorld = new Se3_F64();
	// transform from the current camera view to the key frame
	private Se3_F64 currToKey = new Se3_F64();
	// transform from the current camera view to the world frame
	private Se3_F64 currToWorld = new Se3_F64();

	// externally supplied pose; only its rotation is used, to re-seed the key
	// frame orientation (see concatMotion() and process())
	private Se3_F64 currentState = new Se3_F64();

	// is this the first camera view being processed?
	private boolean first = true;
	// number of frames processed.
	private long tick;

	// used when concating motion
	private Se3_F64 temp = new Se3_F64();

	// 3D location of the most recently spawned track (diagnostic value)
	private Point3D_F64 lastTrackAdded = new Point3D_F64();

	// low-pass filtered fit quality of the motion estimator (see estimateMotion())
	private double quality = 0;


	/**
	 * Configures magic numbers and estimation algorithms.
	 *
	 * @param thresholdAdd
	 *            Add new tracks when less than this number are in the inlier
	 *            set. Tracker dependent. Set to a value &le; 0 to add features
	 *            every frame.
	 * @param thresholdRetire
	 *            Discard a track if it is not in the inlier set after this many
	 *            updates. Try 2
	 * @param doublePass
	 *            Associate image features a second time using the estimated
	 *            model from the first try to improve results
	 * @param motionEstimator
	 *            PnP motion estimator. P3P algorithm is recommended/
	 * @param pixelTo3D
	 *            Computes the 3D location of pixels.
	 * @param refine
	 *            Optional algorithm for refining the pose estimate. Can be
	 *            null.
	 * @param tracker
	 *            Point feature tracker.
	 * @param pixelToNorm
	 *            Converts from raw image pixels into normalized image
	 *            coordinates.
	 * @param normToPixel
	 *            Converts from normalized image coordinates into raw pixels
	 */
	public MAVOdomPixelDepthPnP(int thresholdAdd, int thresholdRetire, boolean doublePass,
			ModelMatcher motionEstimator, ImagePixelTo3D pixelTo3D, RefinePnP refine,
			PointTrackerTwoPass tracker, PointTransform_F64 pixelToNorm, PointTransform_F64 normToPixel) {
		this.thresholdAdd = thresholdAdd;
		this.thresholdRetire = thresholdRetire;
		this.doublePass = doublePass;
		this.motionEstimator = motionEstimator;
		this.pixelTo3D = pixelTo3D;
		this.refine = refine;
		this.tracker = tracker;
		this.pixelToNorm = pixelToNorm;
		this.normToPixel = normToPixel;
	}



	/**
	 * Estimates the motion given the left camera image. The latest information
	 * required by ImagePixelTo3D should be passed to the class before invoking
	 * this function.
	 *
	 * @param image
	 *            Camera image.
	 * @param state
	 *            Optional externally supplied pose (may be null). Its rotation
	 *            re-seeds the key-frame orientation; the translation is not
	 *            used (see the TODO below).
	 * @return true if successful or false if it failed
	 */

	public boolean process(T image, Se3_F64 state) {

		if(state!=null)
			currentState.set(state);

		tracker.process(image);

		tick++;
		inlierTracks.clear();

		if (first) {
			// first frame: nothing to estimate against, just spawn tracks
			addNewTracks();
			first = false;
		} else {
			if (!estimateMotion()) {
				// estimation failed: keep the external orientation so the key
				// frame does not drift while tracking recovers
				if(state!=null)
					keyToWorld.R.set(state.R); // TODO: Try out with pos set
				return false;
			}

			dropUnusedTracks();
			int N = motionEstimator.getMatchSet().size();

			// too few inliers: promote the current frame to key frame and spawn tracks
			if (thresholdAdd <= 0 || N < thresholdAdd) {
				changePoseToReference();
				addNewTracks();
				// System.out.println("New KeyFrame...");
			}

			// System.out.println(" num inliers = "+N+" num dropped
			// "+numDropped+" total active "+tracker.getActivePairs().size());
		}

		return true;
	}

	/**
	 * Updates the relative position of all points so that the current frame is
	 * the reference frame. Mathematically this is not needed, but should help
	 * keep numbers from getting too large.
	 */
	private void changePoseToReference() {
		Se3_F64 keyToCurr = currToKey.invert(null);

		List all = tracker.getAllTracks(null);

		// move every track's 3D location into the new key (current) frame
		for (PointTrack t : all) {
			Point2D3DTrack p = t.getCookie();
			SePointOps_F64.transform(keyToCurr, p.location, p.location);
		}

		concatMotion();
	}

	/**
	 * Removes tracks which have not been included in the inlier set recently
	 *
	 * @return Number of dropped tracks
	 */
	private int dropUnusedTracks() {

		List all = tracker.getAllTracks(null);
		int num = 0;

		for (PointTrack t : all) {
			Point2D3DTrack p = t.getCookie();
			// lastInlier records the tick when the track was last an inlier
			if (tick - p.lastInlier > thresholdRetire) {
				tracker.dropTrack(t);
				num++;
			}
		}

		return num;
	}

	/**
	 * Detects new features and computes their 3D coordinates
	 */
	private void addNewTracks() {
		// System.out.println("----------- Adding new tracks ---------------");

		tracker.spawnTracks();
		// NOTE(review): broad catch below silently degrades to "no new tracks"
		// on any tracker error — confirm this best-effort behavior is intended
		try {
			List spawned = tracker.getNewTracks(null);

			// estimate 3D coordinate using stereo vision
			for (PointTrack t : spawned) {
				Point2D3DTrack p = t.getCookie();
				if (p == null) {
					t.cookie = p = new Point2D3DTrack();
				}

				// discard point if it can't be localized
				if (!pixelTo3D.process(t.x, t.y) || pixelTo3D.getW() == 0) {

					tracker.dropTrack(t);
				} else {
					Point3D_F64 X = p.getLocation();

					X.set(pixelTo3D.getX(), pixelTo3D.getY(), pixelTo3D.getZ());

					lastTrackAdded.set(X);

					// translate the point into the key frame
					SePointOps_F64.transform(currToKey, X, X);
					// not needed since the current frame was just set to be the key
					// frame

					p.lastInlier = tick;
					pixelToNorm.compute(t.x, t.y, p.observation);
				}
			}
		} catch(Exception e) {
			System.err.println("Add new tracks: "+e.getMessage());
		}
	}

	/**
	 * Estimates motion from the set of tracks and their 3D location
	 *
	 * @return true if successful.
	 */
	private boolean estimateMotion() {
		List active = tracker.getActiveTracks(null);
		List obs = new ArrayList();

		// refresh each track's observation in normalized image coordinates
		for (PointTrack t : active) {
			Point2D3D p = t.getCookie();
			pixelToNorm.compute(t.x, t.y, p.observation);
			obs.add(p);
		}

		// estimate the motion up to a scale factor in translation
		if (!motionEstimator.process(obs))
			return false;

		if (doublePass) {
			if (!performSecondPass(active, obs))
				return false;
		}
		tracker.finishTracking();

		Se3_F64 keyToCurr;

		if (refine != null) {
			keyToCurr = new Se3_F64();
			refine.fitModel(motionEstimator.getMatchSet(), motionEstimator.getModelParameters(), keyToCurr);
		} else {
			keyToCurr = motionEstimator.getModelParameters();
		}

		keyToCurr.invert(currToKey);

		// mark tracks as being inliers and add to inlier list
		int N = motionEstimator.getMatchSet().size();
		for (int i = 0; i < N; i++) {
			int index = motionEstimator.getInputIndex(i);
			Point2D3DTrack t = active.get(index).getCookie();
			t.lastInlier = tick;
			inlierTracks.add(t);
		}

		// IIR low-pass filter (weight 3/4 on history) of the estimator's fit quality
		this.quality = (3 * quality + motionEstimator.getFitQuality())/4f;

		return true;
	}

	/**
	 * Re-runs the tracker using the first-pass motion estimate as a hint for
	 * where each track should appear, then re-estimates the motion.
	 *
	 * @param active list of active tracks; cleared and refilled in place
	 * @param obs list of observations; cleared and refilled in place
	 * @return true if the second motion estimate succeeded
	 */
	private boolean performSecondPass(List active, List obs) {
		Se3_F64 keyToCurr = motionEstimator.getModelParameters();

		Point3D_F64 cameraPt = new Point3D_F64();
		Point2D_F64 predicted = new Point2D_F64();

		// predict where each track should be given the just estimated motion
		List all = tracker.getAllTracks(null);
		for (PointTrack t : all) {
			Point2D3D p = t.getCookie();

			SePointOps_F64.transform(keyToCurr, p.location, cameraPt);
			normToPixel.compute(cameraPt.x / cameraPt.z, cameraPt.y / cameraPt.z, predicted);
			tracker.setHint(predicted.x, predicted.y, t);
		}

		// redo tracking with the additional information
		tracker.performSecondPass();

		active.clear();
		obs.clear();
		tracker.getActiveTracks(active);

		for (PointTrack t : active) {
			Point2D3D p = t.getCookie();
			pixelToNorm.compute(t.x, t.y, p.observation);
			obs.add(p);
		}

		return motionEstimator.process(obs);
	}

	//MSP

	/**
	 * Returns the active track at the given index with its observation
	 * refreshed in normalized image coordinates.
	 *
	 * @param index index into the tracker's active-track list
	 * @return the track's 3D location / 2D observation pair
	 */
	public Point2D3D getObservation(int index) {
		List active = tracker.getActiveTracks(null);
		PointTrack t = active.get(index);
		Point2D3D p = t.getCookie();
		pixelToNorm.compute(t.x, t.y, p.observation);
		return p;
	}

	// MSP

	/**
	 * @return low-pass filtered fit quality of the motion estimator
	 */
	public double getQuality() {
		return this.quality;
	}

	/**
	 * Folds currToKey into keyToWorld when a new key frame is created and
	 * resets currToKey to identity.
	 */
	private void concatMotion() {
		currToKey.concat(keyToWorld, temp);
		keyToWorld.set(temp);
		// if a new key is set, take currentState rotation into account
		keyToWorld.R.set(currentState.R);
		currToKey.reset();
	}

	/**
	 * @return the current camera-to-world transform (currToKey composed with keyToWorld)
	 */
	public Se3_F64 getCurrToWorld() {
		currToKey.concat(keyToWorld, currToWorld);
		return currToWorld;
	}

	/**
	 * Resets the algorithm into its original state
	 */

	public void reset() {
		tracker.reset();
		keyToWorld.reset();
		currToKey.reset();
		first = true;
		tick = 0;
	}

	/**
	 * Resets the algorithm, seeding the key frame orientation from the given
	 * initial pose. Note: unlike {@link #reset()} the keyToWorld translation
	 * is not cleared here — only the rotation is overwritten.
	 *
	 * @param initialState initial pose; only its rotation is applied
	 */
	public void reset(Se3_F64 initialState) {
		tracker.reset();
		currentState.set(initialState);
		keyToWorld.R.set(initialState.R);
		currToKey.reset();
		first = true;
		tick = 0;
	}

	/**
	 * @return the underlying point feature tracker
	 */
	public PointTracker getTracker() {
		return tracker;
	}

	/**
	 * @return the RANSAC/PnP motion estimator
	 */
	public ModelMatcher getMotionEstimator() {
		return motionEstimator;
	}

	/**
	 * @return tracks that were in the inlier set of the most recent estimate
	 */
	public List getInlierTracks() {
		return inlierTracks;
	}

	/**
	 * Sets the pixel to normalized-image-coordinate transform.
	 */
	public void setPixelToNorm(PointTransform_F64 pixelToNorm) {
		this.pixelToNorm = pixelToNorm;
	}

	/**
	 * Sets the normalized-image-coordinate to pixel transform.
	 */
	public void setNormToPixel(PointTransform_F64 normToPixel) {
		this.normToPixel = normToPixel;
	}

	/**
	 * @return number of frames processed since the last reset
	 */
	public long getTick() {
		return tick;
	}
}
450 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vo/odometry/MAVOdomPixelDepthPnP_to_DepthVisualOdometry.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vo.odometry;
20 |
21 | import static boofcv.alg.distort.LensDistortionOps.transformPoint;
22 |
23 | import java.util.ArrayList;
24 | import java.util.List;
25 |
26 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
27 |
28 | import boofcv.abst.feature.tracker.PointTrack;
29 | import boofcv.abst.sfm.AccessPointTracks3D;
30 | import boofcv.abst.sfm.d3.DepthVisualOdometry;
31 | import boofcv.alg.geo.DistanceModelMonoPixels;
32 | import boofcv.alg.sfm.DepthSparse3D;
33 | import boofcv.alg.sfm.d3.VisOdomPixelDepthPnP;
34 | import boofcv.struct.calib.IntrinsicParameters;
35 | import boofcv.struct.distort.PixelTransform_F32;
36 | import boofcv.struct.distort.PointTransform_F64;
37 | import boofcv.struct.geo.Point2D3D;
38 | import boofcv.struct.image.ImageBase;
39 | import boofcv.struct.image.ImageGray;
40 | import boofcv.struct.image.ImageType;
41 | import boofcv.struct.sfm.Point2D3DTrack;
42 | import georegression.struct.point.Point2D_F64;
43 | import georegression.struct.point.Point3D_F64;
44 | import georegression.struct.se.Se3_F64;
45 |
46 | /**
47 | * Wrapper around {@link VisOdomPixelDepthPnP} for {@link DepthVisualOdometry}.
48 | *
49 | * @author Peter Abeles
50 | */
51 | // TODO WARNING! active list has been modified by dropping and adding tracks
52 | // this is probably true of other SFM algorithms
// TODO WARNING! active list has been modified by dropping and adding tracks
// this is probably true of other SFM algorithms
/**
 * NOTE(review): the generic parameters of this class appear to have been lost
 * in this copy — process() references undeclared types {@code Vis} and
 * {@code Depth}; confirm the original declaration against upstream BoofCV.
 */
public class MAVOdomPixelDepthPnP_to_DepthVisualOdometry
	implements MAVDepthVisualOdometry , AccessPointTracks3D
{
	// low level algorithm
	DepthSparse3D sparse3D;
	MAVOdomPixelDepthPnP alg;
	DistanceModelMonoPixels distance;
	ImageType visualType;
	Class depthType;
	// result of the most recent process() call; queried via isFault()
	boolean success;

	// pixel <-> normalized coordinate transforms, set in setCalibration()
	PointTransform_F64 leftPixelToNorm = null;
	PointTransform_F64 leftNormToPixel = null;

	// active tracks captured after the most recent process() call
	List active = new ArrayList();

	/**
	 * Wraps the low-level odometry algorithm and its depth lookup.
	 *
	 * @param sparse3D extracts depth for individual pixels from the depth image
	 * @param alg the underlying pixel-depth PnP visual odometry algorithm
	 * @param distance reprojection distance model; receives the intrinsics in setCalibration()
	 * @param visualType type of the visual input image
	 * @param depthType type of the depth input image
	 */
	public MAVOdomPixelDepthPnP_to_DepthVisualOdometry(DepthSparse3D sparse3D, MAVOdomPixelDepthPnP alg,
			DistanceModelMonoPixels distance,
			ImageType visualType, Class depthType) {
		this.sparse3D = sparse3D;
		this.alg = alg;
		this.distance = distance;
		this.visualType = visualType;
		this.depthType = depthType;
	}

	@Override
	public Point3D_F64 getTrackLocation(int index) {
		// 3D location of the active track at the given index (key-frame coordinates)
		return alg.getObservation(index).location;
	}

	@Override
	public double getQuality() {
		// low-pass filtered fit quality from the underlying estimator
		return alg.getQuality();
	}

	@Override
	public long getTrackId(int index) {
		return active.get(index).featureId;
	}

	@Override
	public List getAllTracks() {
		// NOTE(review): despite the name this returns only the tracks that were
		// active after the last process() call — confirm callers expect this
		return (List)active;
	}

	@Override
	public boolean isInlier(int index) {
		// a track is an inlier iff it was in the inlier set of the current tick
		Point2D3DTrack t = active.get(index).getCookie();
		return t.lastInlier == alg.getTick();
	}

	@Override
	public boolean isNew(int index) {
		// linear scan of the new-track list; O(n) per query
		PointTrack t = alg.getTracker().getActiveTracks(null).get(index);
		return alg.getTracker().getNewTracks(null).contains(t);
	}

	@Override
	public void setCalibration(IntrinsicParameters paramVisual, PixelTransform_F32 visToDepth) {
		sparse3D.configure(paramVisual,visToDepth);

		// build pixel <-> normalized transforms from the visual camera intrinsics
		leftPixelToNorm = transformPoint(paramVisual).undistort_F64(true,false);
		leftNormToPixel = transformPoint(paramVisual).distort_F64(false,true);

		alg.setPixelToNorm(leftPixelToNorm);
		alg.setNormToPixel(leftNormToPixel);


		distance.setIntrinsic(paramVisual.fx,paramVisual.fy,paramVisual.skew);
	}



	@Override
	public boolean process(Vis visual, Depth depth) {
		// delegate with no external pose estimate
		sparse3D.setDepthImage(depth);
		success = alg.process(visual, null);

		// snapshot the active tracks for the accessor methods above
		active.clear();
		alg.getTracker().getActiveTracks(active);

		return success;
	}

	@Override
	public boolean process(Vis visual, Depth depth, Se3_F64 state) {
		// same as above but forwards an externally supplied pose (e.g. from an EKF)
		sparse3D.setDepthImage(depth);
		success = alg.process(visual, state);

		active.clear();
		alg.getTracker().getActiveTracks(active);

		return success;
	}

	// offset in pixels for the fallback probes below
	private int dpx = 15;

	/**
	 * Looks up the 3D world point for a pixel, falling back to the four
	 * diagonal neighbours at +/- dpx when the exact pixel has no valid depth.
	 *
	 * NOTE(review): only the diagonals are probed; axis-aligned offsets
	 * (x +/- dpx, y) and (x, y +/- dpx) are never tried — confirm intended.
	 * The returned point is sparse3D's internal instance, not a copy.
	 *
	 * @return world point, or null if no probe produced a valid depth
	 */
	public Point3D_F64 getPoint3DFromPixel(int pixelx, int pixely) {

		if(sparse3D.process(pixelx, pixely))
			return sparse3D.getWorldPt();

		if(sparse3D.process(pixelx+dpx, pixely+dpx))
			return sparse3D.getWorldPt();

		if(sparse3D.process(pixelx-dpx, pixely-dpx))
			return sparse3D.getWorldPt();

		if(sparse3D.process(pixelx+dpx, pixely-dpx))
			return sparse3D.getWorldPt();

		if(sparse3D.process(pixelx-dpx, pixely+dpx))
			return sparse3D.getWorldPt();


		return null;
	}

	@Override
	public void reset() {
		alg.reset();
	}

	@Override
	public void reset(Se3_F64 initialState) {
		alg.reset(initialState);
	}

	@Override
	public boolean isFault() {
		// fault means the most recent process() call failed
		return !success;
	}

	@Override
	public Se3_F64 getCameraToWorld() {
		return alg.getCurrToWorld();
	}

	@Override
	public ImageType getVisualType() {
		return visualType;
	}

	@Override
	public Class getDepthType() {
		return depthType;
	}

	@Override
	public int getInlierCount() {
		return alg.getInlierTracks().size();
	}

}
207 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/boofcv/vo/tracker/FactoryMAVPointTrackerTwoPass.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011-2016, Peter Abeles. All Rights Reserved.
3 | *
4 | * This file is part of BoofCV (http://boofcv.org).
5 | *
6 | * Licensed under the Apache License, Version 2.0 (the "License");
7 | * you may not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing, software
13 | * distributed under the License is distributed on an "AS IS" BASIS,
14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | * See the License for the specific language governing permissions and
16 | * limitations under the License.
17 | */
18 |
19 | package com.comino.slam.boofcv.vo.tracker;
20 |
21 | import static boofcv.factory.feature.tracker.FactoryPointTracker.createShiTomasi;
22 |
23 | import boofcv.abst.feature.associate.AssociateDescription2D;
24 | import boofcv.abst.feature.describe.DescribeRegionPoint;
25 | import boofcv.abst.feature.detdesc.DetectDescribePoint;
26 | import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
27 | import boofcv.abst.feature.tracker.DdaManagerDetectDescribePoint;
28 | import boofcv.abst.feature.tracker.DdaManagerGeneralPoint;
29 | import boofcv.abst.feature.tracker.DetectDescribeAssociateTwoPass;
30 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
31 | import boofcv.abst.feature.tracker.PointTrackerTwoPassKltPyramid;
32 | import boofcv.abst.filter.derivative.ImageGradient;
33 | import boofcv.alg.feature.detect.interest.EasyGeneralFeatureDetector;
34 | import boofcv.alg.feature.detect.interest.GeneralFeatureDetector;
35 | import boofcv.alg.interpolate.InterpolateRectangle;
36 | import boofcv.alg.tracker.klt.PkltConfig;
37 | import boofcv.factory.filter.derivative.FactoryDerivative;
38 | import boofcv.factory.interpolate.FactoryInterpolation;
39 | import boofcv.factory.transform.pyramid.FactoryPyramid;
40 | import boofcv.struct.feature.TupleDesc;
41 | import boofcv.struct.image.ImageGray;
42 | import boofcv.struct.pyramid.PyramidDiscrete;
43 |
44 | /**
45 | * @author Peter Abeles
46 | */
/**
 * Factory for two-pass point trackers used by the visual odometry pipeline.
 *
 * NOTE(review): this class is byte-identical to FactoryMAVPointTrackerTwoPassVIO
 * in the vio package — consider consolidating the two.
 *
 * @author Peter Abeles
 */
public class FactoryMAVPointTrackerTwoPass {

	/**
	 * Pyramid KLT feature tracker.
	 *
	 * @param config Config for the tracker. Try PkltConfig.createDefault().
	 * @param configExtract Configuration for extracting features
	 * @param imageType Type of the input image.
	 * @param derivType Type of the image derivative.
	 * @return KLT based tracker.
	 */
	public static
	PointTrackerTwoPass klt(PkltConfig config, ConfigGeneralDetector configExtract,
			Class imageType, Class derivType) {

		// Shi-Tomasi corners seed the KLT tracks
		GeneralFeatureDetector detector = createShiTomasi(configExtract, derivType);

		// bilinear interpolation for the image and its derivative
		InterpolateRectangle interpInput = FactoryInterpolation.bilinearRectangle(imageType);
		InterpolateRectangle interpDeriv = FactoryInterpolation.bilinearRectangle(derivType);

		ImageGradient gradient = FactoryDerivative.sobel(imageType, derivType);

		// discrete Gaussian pyramid the tracker operates on
		PyramidDiscrete pyramid = FactoryPyramid.discreteGaussian(config.pyramidScaling,-1,2,true,imageType);

		return new PointTrackerTwoPassKltPyramid<>(config.config, config.templateRadius, pyramid, detector,
				gradient, interpInput, interpDeriv);
	}

	/**
	 * Detect-describe-associate tracker built from a stand-alone detector and a
	 * region descriptor. If associate2 is null, associate1 is used for both passes.
	 *
	 * @param detector Feature detector.
	 * @param describe Region descriptor.
	 * @param associate1 Association used on the first pass.
	 * @param associate2 Association used on the second pass. May be null.
	 * @param scale Scale at which features are described.
	 * @param imageType Type of the input image.
	 * @return two-pass DDA tracker
	 */
	public static
	PointTrackerTwoPass dda(GeneralFeatureDetector detector,
			DescribeRegionPoint describe,
			AssociateDescription2D associate1,
			AssociateDescription2D associate2,
			double scale,
			Class imageType) {

		EasyGeneralFeatureDetector easy = new EasyGeneralFeatureDetector<>(detector, imageType, null);

		DdaManagerGeneralPoint manager = new DdaManagerGeneralPoint<>(easy, describe, scale);

		// reuse the first-pass association if no second one is supplied
		if( associate2 == null )
			associate2 = associate1;

		return new DetectDescribeAssociateTwoPass<>(manager, associate1, associate2, false);
	}

	/**
	 * Detect-describe-associate tracker built from a combined detector/descriptor.
	 * If associate2 is null, associate1 is used for both passes.
	 *
	 * @param detectDescribe Combined feature detector and descriptor.
	 * @param associate1 Association used on the first pass.
	 * @param associate2 Association used on the second pass. May be null.
	 * @param updateDescription Forwarded to {@link DetectDescribeAssociateTwoPass}.
	 * @return two-pass DDA tracker
	 */
	public static
	PointTrackerTwoPass dda(DetectDescribePoint detectDescribe,
			AssociateDescription2D associate1 ,
			AssociateDescription2D associate2 ,
			boolean updateDescription ) {


		DdaManagerDetectDescribePoint manager = new DdaManagerDetectDescribePoint<>(detectDescribe);

		// reuse the first-pass association if no second one is supplied
		if( associate2 == null )
			associate2 = associate1;

		return new DetectDescribeAssociateTwoPass<>(manager, associate1, associate2, updateDescription);
	}
}
106 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/detectors/ISLAMDetector.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2016 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.detectors;
35 |
36 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
37 |
38 | import boofcv.struct.image.GrayU16;
39 | import boofcv.struct.image.GrayU8;
40 |
/**
 * Detector plugged into the visual-odometry pipeline. Implementations are
 * invoked for each processed camera frame and may derive additional
 * information from it (e.g. obstacle positions for a local map).
 */
public interface ISLAMDetector {

	/**
	 * Called for each processed frame.
	 *
	 * @param odometry current visual-odometry instance; gives access to the
	 *                 reconstructed 3D points (e.g. getPoint3DFromPixel) and feature tracks
	 * @param depth    16-bit depth image of the current frame
	 * @param gray     8-bit gray image of the current frame
	 */
	public void process(MAVDepthVisualOdometry odometry, GrayU16 depth, GrayU8 gray);

	/**
	 * Resets the detector state; called when the local position estimate is re-initialized.
	 *
	 * @param x re-initialized local x position
	 * @param y re-initialized local y position
	 * @param z re-initialized local z position
	 */
	public void reset(float x,float y, float z);

}
47 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/detectors/impl/DirectDepthDetector.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.detectors.impl;
35 |
36 |
37 | import org.mavlink.messages.MSP_CMD;
38 | import org.mavlink.messages.lquac.msg_msp_command;
39 |
40 | import com.comino.main.MSPConfig;
41 | import com.comino.mav.control.IMAVMSPController;
42 | import com.comino.msp.execution.autopilot.AutoPilotBase;
43 | import com.comino.msp.execution.control.listener.IMAVLinkListener;
44 | import com.comino.msp.model.DataModel;
45 | import com.comino.msp.slam.map2D.ILocalMap;
46 | import com.comino.msp.utils.MSP3DUtils;
47 | import com.comino.server.mjpeg.IVisualStreamHandler;
48 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
49 | import com.comino.slam.detectors.ISLAMDetector;
50 |
51 | import boofcv.struct.image.GrayU16;
52 | import boofcv.struct.image.GrayU8;
53 | import georegression.geometry.ConvertRotation3D_F64;
54 | import georegression.struct.EulerType;
55 | import georegression.struct.point.Point3D_F64;
56 | import georegression.struct.se.Se3_F64;
57 | import georegression.transform.se.SePointOps_F64;
58 |
59 | public class DirectDepthDetector implements ISLAMDetector {
60 |
61 | private static final float MIN_ALTITUDE = 0.3f;
62 |
63 | private static final int YBAND_LOW = 60;
64 | private static final int YBAND_HIGH = 180;
65 |
66 | private double max_distance = 5.0f;
67 | private double current_min_distance= 0.0f;
68 | private double min_altitude = 0;
69 |
70 | private DataModel model = null;
71 | private ILocalMap map = null;
72 |
73 | private Point3D_F64 point = null;
74 | private Point3D_F64 point_min = new Point3D_F64();
75 | private Point3D_F64 point_ned = new Point3D_F64();
76 |
77 | private Se3_F64 current = new Se3_F64();
78 |
79 |
80 | public DirectDepthDetector(IMAVMSPController control, MSPConfig config, IVisualStreamHandler streamer) {
81 |
82 | this.model = control.getCurrentModel();
83 | this.map = AutoPilotBase.getInstance().getMap2D();
84 |
85 | this.max_distance = config.getFloatProperty("map_max_distance", "3.00f");
86 | System.out.println("[col] Max planning distance set to "+max_distance);
87 | this.min_altitude = config.getFloatProperty("map_min_altitude", String.valueOf(MIN_ALTITUDE));
88 | System.out.println("[col] Min.altitude set to "+min_altitude);
89 |
90 | control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
91 | @Override
92 | public void received(Object o) {
93 | msg_msp_command cmd = (msg_msp_command)o;
94 | switch(cmd.command) {
95 | case MSP_CMD.MSP_TRANSFER_MICROSLAM:
96 | model.grid.invalidateTransfer();
97 | break;
98 | }
99 | }
100 | });
101 |
102 | if(streamer !=null)
103 | streamer.registerOverlayListener(ctx -> {
104 | if(current_min_distance odometry, GrayU16 depth, GrayU8 gray) {
111 |
112 | getModelToState(model,current);
113 |
114 | // Do not update map if loaded from storage or vehicle is lower than MIN_ALTITUDE
115 | // if((model.hud.ar < min_altitude || map.isLoaded()))
116 | // return;
117 |
118 | model.grid.tms = model.sys.getSynchronizedPX4Time_us();
119 |
120 | current_min_distance = Double.MAX_VALUE;
121 | for(int x = 0;x < gray.getWidth();x = x + 2) {
122 |
123 | point_min.set(0,0,Double.MAX_VALUE);
124 | for(int y = YBAND_LOW;y <= YBAND_HIGH;y = y + 5) {
125 | try {
126 | point = odometry.getPoint3DFromPixel(x,y);
127 | if(point != null && point.z < point_min.z)
128 | point_min.set(point);
129 | } catch(Exception e) {
130 | continue;
131 | }
132 | }
133 |
134 | if(point_min.z FwDirectDepthDetector(IMAVMSPController control, MSPConfig config, IVisualStreamHandler streamer) {
82 |
83 | this.model = control.getCurrentModel();
84 | this.map = AutoPilotBase.getInstance().getMap2D();
85 |
86 | this.max_distance = config.getFloatProperty("map_max_distance", "3.00f");
87 | System.out.println("[col] Max planning distance set to "+max_distance);
88 | this.min_altitude = config.getFloatProperty("map_min_altitude", String.valueOf(MIN_ALTITUDE));
89 | System.out.println("[col] Min.altitude set to "+min_altitude);
90 |
91 | control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
92 | @Override
93 | public void received(Object o) {
94 | msg_msp_command cmd = (msg_msp_command)o;
95 | switch(cmd.command) {
96 | case MSP_CMD.MSP_TRANSFER_MICROSLAM:
97 | model.grid.invalidateTransfer();
98 | break;
99 | }
100 | }
101 | });
102 |
103 | if(streamer !=null)
104 | streamer.registerOverlayListener(ctx -> {
105 | if(current_min_distance odometry, GrayU16 depth, GrayU8 gray) {
112 |
113 | getModelToState(model,current);
114 |
115 | // Do not update map if loaded from storage or vehicle is lower than MIN_ALTITUDE
116 | // if((model.hud.ar < min_altitude || map.isLoaded()))
117 | // return;
118 |
119 | model.grid.tms = model.sys.getSynchronizedPX4Time_us();
120 |
121 | current_min_distance = Double.MAX_VALUE;
122 | for(int x = 10;x < gray.getWidth()-10;x = x + 2) {
123 |
124 | point_min.set(0,0,Double.MAX_VALUE);
125 | for(int y = YBAND_LOW;y <= YBAND_HIGH;y = y + 5) {
126 | try {
127 | point = odometry.getPoint3DFromPixel(x,y);
128 | if(point != null && point.z < point_min.z)
129 | point_min.set(point);
130 | } catch(Exception e) {
131 | continue;
132 | }
133 | }
134 |
135 | if(point_min.z {
86 | ctx.drawOval((int)test.observation.x, (int)test.observation.y, 5, 5);
87 |
88 | });
89 |
90 | }
91 |
	// Debug/test implementation: reconstructs the 3D point under the image-center
	// pixel and prints its depth (z) to stdout.
	// NOTE(review): the fixed observation (160,120) equals the image center only
	// for a 320x240 stream — confirm the configured resolution.
	@Override
	public void process(MAVDepthVisualOdometry odometry, GrayU16 depth, GrayU8 gray) {

		// May store null if no depth is available at the center pixel;
		// the null check below guards the printout accordingly.
		test.setLocation(odometry.getPoint3DFromPixel(gray.width/2, gray.height/2));
		test.getObservation().set(160, 120);
		if(test.location!=null)
			System.out.println(test.location.z);

	}
102 |
103 |
	// No detector state to reset in this debug implementation; intentionally a no-op.
	@Override
	public void reset(float x, float y, float z) {
		// no-op
	}
109 |
110 | }
111 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/detectors/impl/VfhDynamicDirectDepthDetector.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.detectors.impl;
35 |
36 |
37 | import org.mavlink.messages.MSP_CMD;
38 | import org.mavlink.messages.lquac.msg_msp_command;
39 |
40 | import com.comino.main.MSPConfig;
41 | import com.comino.mav.control.IMAVMSPController;
42 | import com.comino.msp.execution.autopilot.AutoPilotBase;
43 | import com.comino.msp.execution.control.listener.IMAVLinkListener;
44 | import com.comino.msp.model.DataModel;
45 | import com.comino.msp.slam.map2D.ILocalMap;
46 | import com.comino.msp.utils.MSP3DUtils;
47 | import com.comino.server.mjpeg.IVisualStreamHandler;
48 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
49 | import com.comino.slam.detectors.ISLAMDetector;
50 |
51 | import boofcv.struct.image.GrayU16;
52 | import boofcv.struct.image.GrayU8;
53 | import georegression.geometry.ConvertRotation3D_F64;
54 | import georegression.struct.EulerType;
55 | import georegression.struct.point.Point3D_F64;
56 | import georegression.struct.se.Se3_F64;
57 | import georegression.transform.se.SePointOps_F64;
58 |
59 | public class VfhDynamicDirectDepthDetector implements ISLAMDetector {
60 |
61 | private static final float MIN_ALTITUDE = -0.4f;
62 | private static final float MAX_DEPTH = 4.0f;
63 |
64 | private float max_distance = 3.0f;
65 | private float min_altitude = 0.35f;
66 |
67 | private DataModel model = null;
68 | private ILocalMap map = null;
69 |
70 | private Point3D_F64 point = null;
71 | private Point3D_F64 point_min = new Point3D_F64();
72 | private Point3D_F64 point_ned = new Point3D_F64();
73 |
74 | private Se3_F64 current = new Se3_F64();
75 |
76 |
77 | public VfhDynamicDirectDepthDetector(IMAVMSPController control, MSPConfig config, IVisualStreamHandler streamer) {
78 |
79 | this.model = control.getCurrentModel();
80 | this.map = AutoPilotBase.getInstance().getMap2D();
81 |
82 | this.max_distance = config.getFloatProperty("feature_max_distance", "3.00f");
83 | System.out.println("[col] Max planning distance set to "+max_distance);
84 | this.min_altitude = config.getFloatProperty("faeture_min_altitude", "0.3f");
85 | System.out.println("[col] Min.altitude set to "+min_altitude);
86 |
87 | control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
88 | @Override
89 | public void received(Object o) {
90 | msg_msp_command cmd = (msg_msp_command)o;
91 | switch(cmd.command) {
92 | case MSP_CMD.MSP_TRANSFER_MICROSLAM:
93 | model.grid.invalidateTransfer();
94 | break;
95 | }
96 | }
97 | });
98 | }
99 |
100 | @Override
101 | public void process(MAVDepthVisualOdometry odometry, GrayU16 depth, GrayU8 gray) {
102 |
103 | int win_y = 0;
104 |
105 | getModelToState(model,current);
106 |
107 | // TODO: Narrow down window in Y axis with increasing depth
108 |
109 | for(int x = 0;x < gray.getWidth();x++) {
110 | point = odometry.getPoint3DFromPixel(x,180);
111 | win_y = (int)(15 / point.z);
112 | point_min.set(0,0,99);
113 | for(int dy = -win_y; dy <= win_y;dy=dy+5) {
114 | try {
115 | point = odometry.getPoint3DFromPixel(x,180+dy);
116 | if(point != null && point.z < point_min.z)
117 | point_min.set(point);
118 | } catch(Exception e) {
119 | e.printStackTrace();
120 | }
121 | }
122 |
123 | SePointOps_F64.transform(current,point_min,point_ned);
124 | MSP3DUtils.toNED(point_ned);
125 | if(point_ned.z > MIN_ALTITUDE)
126 | map.update(model.state.l_x, model.state.l_y,point_ned);
127 | }
128 | }
129 |
130 |
131 | public void reset(float x, float y, float z) {
132 | // reset map if local position was set to 0
133 | if(x==0 && y==0)
134 | map.reset();
135 | }
136 |
137 | private Se3_F64 getModelToState(DataModel m, Se3_F64 state) {
138 | ConvertRotation3D_F64.eulerToMatrix(EulerType.ZXY,
139 | m.attitude.r,
140 | m.attitude.p,
141 | m.attitude.y,
142 | state.getRotation());
143 |
144 | state.getTranslation().y = m.state.l_z;
145 | state.getTranslation().x = m.state.l_y;
146 | state.getTranslation().z = m.state.l_x;
147 |
148 | return state;
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/detectors/impl/VfhFeatureDetector.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.detectors.impl;
35 |
36 |
37 | import java.util.ArrayList;
38 | import java.util.List;
39 |
40 | import org.mavlink.messages.MSP_CMD;
41 | import org.mavlink.messages.lquac.msg_debug_vect;
42 | import org.mavlink.messages.lquac.msg_msp_command;
43 |
44 | import com.comino.main.MSPConfig;
45 | import com.comino.mav.control.IMAVMSPController;
46 | import com.comino.msp.execution.autopilot.AutoPilotBase;
47 | import com.comino.msp.execution.control.listener.IMAVLinkListener;
48 | import com.comino.msp.model.DataModel;
49 | import com.comino.msp.slam.map2D.ILocalMap;
50 | import com.comino.msp.utils.MSP3DUtils;
51 | import com.comino.server.mjpeg.IVisualStreamHandler;
52 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
53 | import com.comino.slam.detectors.ISLAMDetector;
54 |
55 | import boofcv.abst.sfm.AccessPointTracks3D;
56 | import boofcv.struct.image.GrayU16;
57 | import boofcv.struct.image.GrayU8;
58 | import georegression.geometry.ConvertRotation3D_F64;
59 | import georegression.struct.EulerType;
60 | import georegression.struct.point.Point2D_F64;
61 | import georegression.struct.point.Point3D_F64;
62 | import georegression.struct.point.Vector4D_F64;
63 | import georegression.struct.se.Se3_F64;
64 | import georegression.transform.se.SePointOps_F64;
65 |
66 | public class VfhFeatureDetector implements ISLAMDetector {
67 |
68 | private float max_distance = 3.0f;
69 | private float min_altitude = 0.35f;
70 |
71 | private DataModel model = null;
72 | private Point3D_F64 rel_ned = new Point3D_F64();
73 |
74 | private Se3_F64 current = new Se3_F64();
75 |
76 | private float debug1 = 0;
77 | private float debug2 = 0;
78 | private float debug3 = 0;
79 |
80 | private ILocalMap map = null;
81 |
82 | private Vector4D_F64 current_pos = new Vector4D_F64();
83 |
84 |
85 | private List nearestPoints = new ArrayList();
86 | private List lastFeatures_rel_ned = new ArrayList();
87 |
88 | private IMAVMSPController control = null;
89 |
90 | public VfhFeatureDetector(IMAVMSPController control, MSPConfig config, IVisualStreamHandler streamer) {
91 |
92 | this.model = control.getCurrentModel();
93 | this.control = control;
94 | this.map = AutoPilotBase.getInstance().getMap2D();
95 |
96 | this.max_distance = config.getFloatProperty("feature_max_distance", "3.00f");
97 | System.out.println("[col] Max planning distance set to "+max_distance);
98 | this.min_altitude = config.getFloatProperty("faeture_min_altitude", "0.3f");
99 | System.out.println("[col] Min.altitude set to "+min_altitude);
100 |
101 | control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
102 | @Override
103 | public void received(Object o) {
104 | msg_msp_command cmd = (msg_msp_command)o;
105 | switch(cmd.command) {
106 | case MSP_CMD.MSP_TRANSFER_MICROSLAM:
107 | model.grid.invalidateTransfer();
108 | break;
109 | }
110 | }
111 | });
112 |
113 | if(streamer!=null)
114 | streamer.registerOverlayListener(ctx -> {
115 | if(nearestPoints.size()>0) {
116 | for(Point2D_F64 n : nearestPoints) {
117 | ctx.drawRect((int)n.x-5, (int)n.y-5, 10,10);
118 | }
119 | }
120 | ctx.drawString(String.format("%.2f",debug1), 20, 35);
121 | ctx.drawString(String.format("%.2f",debug2), 20, 50);
122 | ctx.drawString(String.format("%.2f",debug3), 20, 65);
123 | });
124 |
125 | }
126 |
127 | @Override
128 | public void process(MAVDepthVisualOdometry odometry, GrayU16 depth, GrayU8 gray) {
129 | Point2D_F64 xy; Point3D_F64 p;
130 |
131 | AccessPointTracks3D points = (AccessPointTracks3D)odometry;
132 |
133 | nearestPoints.clear();
134 |
135 | current_pos.set(model.state.l_x, model.state.l_y, model.state.l_z, model.state.h);
136 |
137 | // current.set(odometry.getCameraToWorld());
138 | getAttitudeToState(model,current);
139 |
140 | boolean first = true;
141 |
142 | // int i = 0; {
143 | for( int i = 0; i < points.getAllTracks().size(); i++ ) {
144 |
145 | if(points.isInlier(i)) {
146 |
147 | // xy is the observation
148 | xy = points.getAllTracks().get(i);
149 | // p is the obstacle location in body-frame
150 | p = odometry.getTrackLocation(i);
151 |
152 |
153 | SePointOps_F64.transform(current,p,rel_ned);
154 | MSP3DUtils.toNED(rel_ned);
155 |
156 |
157 | // Search highest point (note: NED coordinate system)
158 |
159 | if( // model.raw.di > min_altitude &&
160 | p.z < max_distance && p.y< min_altitude && p.y > -min_altitude) {
161 |
162 | if(first) {
163 |
164 | debug1 = (float)(rel_ned.x);
165 | debug2 = (float)current.T.z;
166 | debug3 = (float)current_pos.x;
167 |
168 | msg_debug_vect v = new msg_debug_vect(2,1);
169 | v.x = debug1;
170 | v.y = debug2;
171 | v.z = debug3;
172 | control.sendMAVLinkMessage(v);
173 | first = false;
174 | }
175 |
176 |
177 |
178 | map.update(rel_ned, current_pos);
179 |
180 | nearestPoints.add(xy);
181 |
182 | // break;
183 |
184 | }
185 | }
186 | }
187 | }
188 |
189 |
190 | public void reset(float x, float y, float z) {
191 | nearestPoints.clear();
192 | }
193 |
194 | private Se3_F64 getAttitudeToState(DataModel m, Se3_F64 state) {
195 | ConvertRotation3D_F64.eulerToMatrix(EulerType.ZXY,
196 | m.attitude.r,
197 | m.attitude.p,
198 | m.attitude.y,
199 | state.getRotation());
200 | return state;
201 | }
202 |
203 |
204 |
205 |
206 | }
207 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/estimators/IPositionEstimator.java:
--------------------------------------------------------------------------------
1 | package com.comino.slam.estimators;
2 |
3 | import com.comino.server.mjpeg.IVisualStreamHandler;
4 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
5 | import com.comino.slam.detectors.ISLAMDetector;
6 |
7 | import boofcv.struct.image.GrayU16;
8 | import boofcv.struct.image.GrayU8;
9 |
/**
 * Contract for a visual position estimator that runs the odometry pipeline,
 * publishes position estimates and dispatches frames to registered detectors
 * and video streams.
 */
public interface IPositionEstimator {

	/** Registers a detector that is called for each processed frame. */
	void registerDetector(ISLAMDetector detector);

	/** Registers a video stream handler that receives the processed images. */
	void registerStreams(IVisualStreamHandler stream);

	/** Starts the estimation pipeline. */
	void start();

	/** Stops the estimation pipeline. */
	void stop();

	/** @return true while the estimation pipeline is running */
	boolean isRunning();

	/** Resets the estimator state (e.g. accumulated position). */
	void reset();

	/** Enables or disables the registered detectors. */
	void enableDetectors( boolean enable);

	/** @return the underlying depth visual odometry instance */
	MAVDepthVisualOdometry getOdometry();

}
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/estimators/vo/MAVVisualPositionEstimatorVO.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.estimators.vo;
35 |
36 |
37 | import java.awt.Color;
38 | import java.awt.Graphics;
39 | import java.util.ArrayList;
40 | import java.util.List;
41 |
42 | import org.mavlink.messages.MAV_SEVERITY;
43 | import org.mavlink.messages.MSP_CMD;
44 | import org.mavlink.messages.MSP_COMPONENT_CTRL;
45 | import org.mavlink.messages.lquac.msg_msp_command;
46 | import org.mavlink.messages.lquac.msg_msp_vision;
47 | import org.mavlink.messages.lquac.msg_vision_position_estimate;
48 |
49 | import com.comino.main.MSPConfig;
50 | import com.comino.mav.control.IMAVMSPController;
51 | import com.comino.msp.execution.control.listener.IMAVLinkListener;
52 | import com.comino.msp.model.DataModel;
53 | import com.comino.msp.model.segment.LogMessage;
54 | import com.comino.msp.model.segment.Status;
55 | import com.comino.msp.utils.ExecutorService;
56 | import com.comino.msp.utils.MSPMathUtils;
57 | import com.comino.realsense.boofcv.RealSenseInfo;
58 | import com.comino.realsense.boofcv.StreamRealSenseVisDepth;
59 | import com.comino.realsense.boofcv.StreamRealSenseVisDepth.Listener;
60 | import com.comino.server.mjpeg.IVisualStreamHandler;
61 | import com.comino.slam.boofcv.MAVDepthVisualOdometry;
62 | import com.comino.slam.boofcv.vo.FactoryMAVOdometry;
63 | import com.comino.slam.boofcv.vo.tracker.FactoryMAVPointTrackerTwoPass;
64 | import com.comino.slam.detectors.ISLAMDetector;
65 | import com.comino.slam.estimators.IPositionEstimator;
66 |
67 | import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
68 | import boofcv.abst.feature.tracker.PointTrackerTwoPass;
69 | import boofcv.abst.sfm.AccessPointTracks3D;
70 | import boofcv.alg.distort.DoNothingPixelTransform_F32;
71 | import boofcv.alg.sfm.DepthSparse3D;
72 | import boofcv.alg.tracker.klt.PkltConfig;
73 | import boofcv.core.image.ConvertImage;
74 | import boofcv.struct.image.GrayS16;
75 | import boofcv.struct.image.GrayU16;
76 | import boofcv.struct.image.GrayU8;
77 | import boofcv.struct.image.Planar;
78 | import georegression.geometry.ConvertRotation3D_F64;
79 | import georegression.geometry.GeometryMath_F64;
80 | import georegression.struct.EulerType;
81 | import georegression.struct.point.Vector3D_F64;
82 | import georegression.struct.se.Se3_F64;
83 | import georegression.struct.so.Quaternion_F64;
84 |
85 | public class MAVVisualPositionEstimatorVO implements IPositionEstimator {
86 |
87 | private static final int PUBLISH_RATE_MSP = 50 - 5;
88 | private static final int PUBLISH_RATE_PX4 = 15 - 5;
89 |
90 | private static final int INIT_COUNT = 1;
91 | private static final int MAX_ERRORS = 5;
92 | private static final int MAX_QUALITY_ERRORS = 10;
93 |
94 | private static final int MAX_SPEED = 50;
95 | private static final float VISION_POS_GATE = 0.25f;
96 | private static final float VISION_SPEED_GATE = 0.25f;
97 |
98 | private static final float INLIER_PIXEL_TOL = 1.5f;
99 | private static final int MAXTRACKS = 100;
100 | private static final int KLT_RADIUS = 3;
101 | private static final float KLT_THRESHOLD = 1f;
102 | private static final int RANSAC_ITERATIONS = 150;
103 | private static final int RETIRE_THRESHOLD = 2;
104 | private static final int ADD_THRESHOLD = 70;
105 | private static final int REFINE_ITERATIONS = 80;
106 |
107 | private StreamRealSenseVisDepth realsense = null;
108 | private MAVDepthVisualOdometry visualOdometry = null;
109 | private RealSenseInfo info = null;
110 |
111 | private GrayU8 gray = null;
112 |
113 | private double oldTimeDepth_us = 0;
114 | private double estTimeDepth_us = 0;
115 |
116 | private Vector3D_F64 pos_raw;
117 | private Vector3D_F64 pos_raw_old = new Vector3D_F64();
118 |
119 | private Se3_F64 speed_ned = new Se3_F64();
120 | private Se3_F64 pos_delta = new Se3_F64();
121 | private Se3_F64 pos_ned = new Se3_F64();
122 |
123 | private Se3_F64 rot_ned = new Se3_F64();
124 |
125 | private Se3_F64 current = new Se3_F64();
126 |
127 | private Quaternion_F64 att_q = new Quaternion_F64();
128 | private double[] visAttitude = new double[3];
129 |
130 | private long last_pos_tms = 0;
131 | private long last_msp_tms = 0;
132 | private long last_msg = 0;
133 |
134 | private DataModel model;
135 |
136 | private boolean debug = false;
137 | private boolean heading_init_enabled = false;
138 | private boolean isRunning = false;
139 |
140 |
141 | private int quality = 0;
142 | private int min_quality = 0;
143 |
144 | private long detector_tms = 0;
145 | private int detector_cycle_ms = 250;
146 |
147 | private float vision_pos_gate = 0;
148 | private float vision_speed_gate = 0;
149 |
150 | private float fps = 0;
151 | private long fps_tms = 0;
152 |
153 | private long publish_tms_us = 0;
154 |
155 |
156 | private int initialized_count = 0;
157 | private int error_count = 0;
158 |
159 | private boolean do_odometry = true;
160 |
161 | private boolean do_xy_position = false;
162 |
163 | private boolean do_xy_speed = false;
164 | private boolean do_attitude = false;
165 |
166 | private boolean do_covariances = false;
167 |
168 | private IMAVMSPController control = null;
169 | private List detectors = null;
170 | private List>> streams = null;
171 | private String last_reason = null;
172 |
173 | private final Color bgColor = new Color(128,128,128,130);
174 |
175 |
	/**
	 * Sets up the visual position estimator: reads configuration, wires MAVLink
	 * command/status listeners, opens the RealSense depth stream and builds the
	 * BoofCV depth-PnP visual odometry pipeline. The frame listener registered
	 * at the end performs the per-frame estimation, gating and publishing.
	 *
	 * NOTE(review): generic type parameters appear to have been stripped from
	 * this file by extraction (e.g. "ArrayList>>()"); restore them against the
	 * original sources before compiling.
	 *
	 * @param info    camera resolution/mode descriptor
	 * @param control MSP controller for MAVLink messaging (dereferenced below — must not be null)
	 * @param config  msp.properties accessor
	 * @param stream  optional video stream handler; null disables streaming
	 */
	public MAVVisualPositionEstimatorVO(RealSenseInfo info, IMAVMSPController control, MSPConfig config, IVisualStreamHandler stream) {

		this.info = info;
		this.control = control;
		this.detectors = new ArrayList();
		this.streams = new ArrayList>>();

		// --- read estimator settings from msp.properties and echo them ---
		System.out.println("Vision position estimator: "+this.getClass().getSimpleName());
		this.debug = config.getBoolProperty("vision_debug", "true");
		this.heading_init_enabled = config.getBoolProperty("vision_heading_init", "true");
		System.out.println("Vision debugging: "+debug);
		System.out.println("Initialize heading when landed: "+heading_init_enabled);
		System.out.println("Vision setup: MaxTracks="+MAXTRACKS+" RanSac="+RANSAC_ITERATIONS+ " KLTRadius="+KLT_RADIUS+ " KLTThreshold="+KLT_THRESHOLD);
		this.min_quality = config.getIntProperty("vision_min_quality", "50");
		System.out.println("Vision minimum quality: "+min_quality);

		this.vision_pos_gate = config.getFloatProperty("vision_pos_gate", String.valueOf(VISION_POS_GATE));
		System.out.println("Vision position gate: "+vision_pos_gate+"m");

		this.vision_speed_gate = config.getFloatProperty("vision_speed_gate", String.valueOf(VISION_SPEED_GATE));
		System.out.println("Vision speed gate: "+vision_speed_gate+"m/s");

		this.do_odometry = config.getBoolProperty("vision_enable", "true");
		System.out.println("Vision Odometry enabled: "+do_odometry);

		this.do_xy_position = config.getBoolProperty("vision_pub_pos_xy", "true");
		System.out.println("Vision publishes XY position: "+do_xy_position);

		this.do_xy_speed = config.getBoolProperty("vision_pub_speed_xy", "true");
		System.out.println("Vision publishes XY speed: "+do_xy_speed);

		this.do_attitude = config.getBoolProperty("vision_pub_attitude", "true");
		System.out.println("Vision publishes attitude: "+do_attitude);

		this.do_covariances = config.getBoolProperty("vision_pub_covariance", "true");
		System.out.println("Vision publishes covariances: "+do_covariances);


		// a cycle of 0 disables the SLAM/obstacle detectors entirely
		this.detector_cycle_ms = config.getIntProperty("vision_detector_cycle", "100");
		if(this.detector_cycle_ms > 0)
			System.out.printf("Vision detectors enablied with %d [ms] cycle \n",detector_cycle_ms);  // NOTE(review): typo "enablied" in log output

		System.out.println("Resolution: "+info.width+"x"+info.height);

		this.model = control.getCurrentModel();

		gray = new GrayU8(info.width,info.height);

		// MSP command interface: enable/disable/reset the estimator remotely
		control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
			@Override
			public void received(Object o) {
				msg_msp_command cmd = (msg_msp_command)o;
				switch(cmd.command) {
				case MSP_CMD.MSP_CMD_VISION:
					switch((int)cmd.param1) {
					case MSP_COMPONENT_CTRL.ENABLE:
						do_odometry = true; init("Init"); break;
					case MSP_COMPONENT_CTRL.DISABLE:
						do_odometry = false; break;
					case MSP_COMPONENT_CTRL.RESET:
						reset(); break;
					}
				}
			}
		});

		// reset vision when armed
		control.getStatusManager().addListener( Status.MSP_ARMED, (n) -> {
			if(n.isStatus(Status.MSP_ARMED)) {
				reset();
			}
		});

		//reset vision when GPOS gets valid
		control.getStatusManager().addListener(Status.MSP_GPOS_VALID, (n) -> {
			if((n.isStatus(Status.MSP_GPOS_VALID)))
				reset();
		});

		// open the camera; on failure the estimator stays constructed but inert
		try {
			realsense = new StreamRealSenseVisDepth(0,info);
		} catch(Exception e) {
			this.do_odometry = false;
			this.detector_cycle_ms = 0;
			System.out.println("Vision disabled due to: "+e.getMessage());
			return;
		}

		// KLT feature tracker over a 4-level image pyramid
		PkltConfig configKlt = new PkltConfig();
		configKlt.pyramidScaling = new int[]{ 1, 2, 4, 8 };
		configKlt.templateRadius = 3;

		PointTrackerTwoPass tracker =
				FactoryMAVPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(MAXTRACKS, KLT_RADIUS, KLT_THRESHOLD),
						GrayU8.class, GrayS16.class);

		// depth scale 1e-3 — presumably millimeters to meters; TODO confirm against camera driver
		DepthSparse3D sparseDepth = new DepthSparse3D.I(1e-3);


		visualOdometry = FactoryMAVOdometry.depthPnP(INLIER_PIXEL_TOL,
				ADD_THRESHOLD, RETIRE_THRESHOLD, RANSAC_ITERATIONS, REFINE_ITERATIONS, true,
				sparseDepth, tracker, GrayU8.class, GrayU16.class);

		visualOdometry.setCalibration(realsense.getIntrinsics(),new DoNothingPixelTransform_F32());

		if(stream!=null) {
			registerStreams(stream);

			// in debug mode draw tracked features onto the first registered stream
			if(debug && streams.get(0) !=null) {
				streams.get(0).registerOverlayListener(ctx -> {
					overlayFeatures(ctx);
				});
			}
		}

		initialized_count = 0;


		// per-frame processing: odometry, plausibility gating and publishing
		realsense.registerListener(new Listener() {

			double dt; int mf=0; int fpm;     // frame-rate accumulator state
			int qual_error_count=0;           // consecutive low-quality frames

			@Override
			public void process(Planar rgb, GrayU16 depth, long timeRgb, long timeDepth) {

				publish_tms_us = System.currentTimeMillis()*1000;

				// always feed the video streams, even when odometry is disabled
				for(IVisualStreamHandler> stream : streams)
					stream.addToStream(rgb, model, System.currentTimeMillis()*1000);

				if(!do_odometry || visualOdometry == null ) {
					return;
				}

				// averaged fps estimate over a 500ms window
				if(dt >0) {
					fpm += (int)(1f/dt+0.5f);
					if((System.currentTimeMillis() - fps_tms) > 500) {
						fps_tms = System.currentTimeMillis();
						if(mf>0)
							fps = fpm/mf;
						mf=0; fpm=0;
					}
					mf++;
				}

				try {

					// collapse RGB bands to the grayscale image used by the tracker
					ConvertImage.average(rgb, gray);

					// seed odometry with the current EKF state; a false return means tracking was lost
					if( !visualOdometry.process(gray,depth,setModelToState(model, current))) {
						init("Odometry");
						return;
					}

				} catch( Exception e) {
					if(debug)
						System.out.println("[vis] Odometry failure: "+e.getMessage());
					init("Exception");
					return;
				}

				// quality = percentage of the maximum track count, clamped to 1..100
				quality = (int)((visualOdometry.getQuality())*100f/MAXTRACKS);
				if(quality > 100) quality = 100; if(quality < 1) quality = 1;

				// get Measurement from odometry
				pos_raw = visualOdometry.getCameraToWorld().getT();

				// (re-)initialization phase: require INIT_COUNT consecutive good frames
				if(initialized_count < INIT_COUNT) {

					// reset speed and old measurements
					speed_ned.T.set(0,0,0);;
					pos_raw_old.set(0,0,0);

					if( quality > min_quality) {
						if(++initialized_count == INIT_COUNT) {
							oldTimeDepth_us = estTimeDepth_us;
							publishPX4Vision();
							// log the re-init reason once, rate-limited to 500ms
							if(debug && (System.currentTimeMillis() - last_msg) > 500 && last_reason != null) {
								last_msg = System.currentTimeMillis();
								control.writeLogMessage(new LogMessage("[vis] odometry re-init: "+last_reason,
										MAV_SEVERITY.MAV_SEVERITY_NOTICE));
							}
							error_count = 0;
						}
					} else {
						initialized_count = 0;
						return;
					}

				}

				rot_ned.setRotation(visualOdometry.getCameraToWorld().getR());
				estTimeDepth_us = timeDepth * 1000d;
				// estTimeDepth_us = publish_tms_us;
				dt = (estTimeDepth_us - oldTimeDepth_us)/1000000d;   // seconds between depth frames

				// first frame after init: no valid dt yet
				if(oldTimeDepth_us==0) {
					oldTimeDepth_us = estTimeDepth_us;
					return;
				}
				oldTimeDepth_us = estTimeDepth_us;

				if(!pos_raw_old.isIdentical(0, 0, 0) && dt > 0) {


					if(quality > min_quality ) {

						// speed.T = (pos_raw - pos_raw_old ) / dt
						GeometryMath_F64.sub(pos_raw, pos_raw_old, speed_ned.T);
						speed_ned.T.scale(1d/dt);


						// Check XY speed
						// (uses camera x/z as the horizontal plane — assumed frame remap, TODO confirm)
						if(Math.sqrt(speed_ned.getX()*speed_ned.getX()+speed_ned.getZ()*speed_ned.getZ())>MAX_SPEED) {
							init("Speed");
							return;
						}

					} else {
						// tolerate a few low-quality frames before forcing a re-init
						if(++qual_error_count > MAX_QUALITY_ERRORS) {
							qual_error_count=0;
							init("Quality");
						}
						return;
					}

					// pos_delta.T = speed.T * dt
					pos_delta.T.set(speed_ned.T); pos_delta.T.scale(dt);

					// pos.T = pos.T + pos_delta.T
					pos_ned.T.plusIP(pos_delta.T);

					// Todo: get Rid of visAttitude
					ConvertRotation3D_F64.matrixToEuler(rot_ned.R, EulerType.ZXY, visAttitude);
					ConvertRotation3D_F64.eulerToQuaternion(EulerType.XYZ,visAttitude[0],visAttitude[1], visAttitude[2], att_q);


					// while landed, a diverging vision heading forces a re-init (unless simulating)
					if(Math.abs(visAttitude[2] - model.attitude.y) > 0.1 && model.sys.isStatus(Status.MSP_LANDED)
							&& heading_init_enabled && !control.isSimulation()) {
						init("Heading div.");
						return;
					}
				}
				pos_raw_old.set(pos_raw);


				// plausibility gate: reject positions too far from the EKF estimate
				if( ( Math.abs(pos_ned.T.z - model.state.l_x) > vision_pos_gate ||
						Math.abs(pos_ned.T.x - model.state.l_y) > vision_pos_gate ) && !control.isSimulation()) {
					init("Vision pos. gate");
					return;
				}

				// TODO: Replace with vision speed gate -> needs to be tested

				//	if( ( Math.abs(speed_ned.T.z - model.state.l_vx) > vision_speed_gate ||
				//			Math.abs(speed_ned.T.x - model.state.l_vy) > vision_speed_gate ) ) {
				//		init("Vision speed gate");
				//		return;
				//	}

				publishPX4Vision();
				error_count=0;

				// run registered SLAM/obstacle detectors asynchronously every detector_cycle_ms
				if(detectors.size()>0 && detector_cycle_ms>0 && do_odometry) {
					if((System.currentTimeMillis() - detector_tms) > detector_cycle_ms) {
						detector_tms = System.currentTimeMillis();
						model.sys.setSensor(Status.MSP_SLAM_AVAILABILITY, true);
						ExecutorService.get().execute(() -> {
							for(ISLAMDetector d : detectors) {
								try {
									d.process(visualOdometry, depth, rgb.getBand(0));
								} catch(Exception e) {
									// best-effort: a failing detector only clears the SLAM flag
									model.sys.setSensor(Status.MSP_SLAM_AVAILABILITY, false);
									//System.out.println(timeDepth+"[vis] SLAM exception: "+e.getMessage());
								}
							}
						});
					}
				}

				updateInternalModel();

				// Publish MSP data
				publisMSPVision();


			}
		});
	}
467 |
468 | private void overlayFeatures(Graphics ctx) {
469 |
470 | AccessPointTracks3D points = (AccessPointTracks3D)visualOdometry;
471 | for( int i = 0; i < points.getAllTracks().size(); i++ ) {
472 | if(points.isInlier(i))
473 | ctx.drawRect((int)points.getAllTracks().get(i).x,(int)points.getAllTracks().get(i).y, 1, 1);
474 | }
475 |
476 | ctx.setColor(bgColor);
477 | ctx.fillRect(5, 5, info.width-10, 21);
478 | ctx.setColor(Color.white);
479 |
480 | if(points.getAllTracks().size()==0)
481 | ctx.drawString("No odometry", info.width-90, 20);
482 | else if(quality < min_quality)
483 | ctx.drawString("Low quality", info.width-85, 20);
484 | else
485 | ctx.drawString((int)fps+" fps", info.width-50, 20);
486 |
487 | if(!Float.isNaN(model.sys.t_armed_ms) && model.sys.isStatus(Status.MSP_ARMED))
488 | ctx.drawString(String.format("%.1f sec",model.sys.t_armed_ms/1000), 20, 20);
489 |
490 |
491 | // ctx.drawString("Message:"+model.msg.msg, 10, 20);
492 |
493 | }
494 |
	/**
	 * Standalone entry: 320x240 RGB stream, no controller and no streaming attached.
	 * NOTE(review): the delegated constructor dereferences the controller
	 * (control.getCurrentModel(), registerListener) — passing null here looks
	 * like it would NPE; verify standalone use.
	 */
	public MAVVisualPositionEstimatorVO() {
		this(new RealSenseInfo(320,240, RealSenseInfo.MODE_RGB), null, MSPConfig.getInstance(),null);
	}
498 |
499 | public void registerDetector(ISLAMDetector detector) {
500 | if(detector_cycle_ms>0) {
501 | System.out.println("[vis] Vision detector registered: "+detector.getClass().getSimpleName());
502 | detectors.add(detector);
503 | }
504 | }
505 |
	/** @return the underlying depth visual odometry instance (null if camera setup failed). */
	@Override
	public MAVDepthVisualOdometry getOdometry() {
		return visualOdometry;
	}
510 |
511 | public void registerStreams(IVisualStreamHandler stream) {
512 | System.out.println("[vis] Vision stream registered: "+stream.getClass().getSimpleName());
513 | streams.add(stream);
514 | }
515 |
516 | public void start() {
517 | isRunning = true;
518 | init("StartUp");
519 | if(realsense!=null)
520 | realsense.start();
521 | }
522 |
523 | public void stop() {
524 | if(isRunning) {
525 | realsense.stop();
526 | publisMSPVision();
527 | }
528 | isRunning=false;
529 | }
530 |
	/** @return true while the estimator is started. */
	public boolean isRunning() {
		return isRunning;
	}
534 |
	/** Re-initializes the odometry (invoked via MSP reset command, on arming and when GPOS becomes valid). */
	public void reset() {
		init("msp reset");
	}
538 |
	/** Intended to enable/disable the registered detectors at runtime; not implemented yet. */
	public void enableDetectors( boolean enable) {
		// TODO: Implement
	}
542 |
543 |
544 | private Se3_F64 setModelToState(DataModel m, Se3_F64 state) {
545 | if(!Float.isNaN(m.attitude.r) && !Float.isNaN(m.attitude.p))
546 | ConvertRotation3D_F64.eulerToMatrix(EulerType.ZXY,
547 | m.attitude.r,
548 | m.attitude.p,
549 | m.attitude.y,
550 | state.getRotation());
551 |
552 | if(!Float.isNaN(m.state.l_y) && !Float.isNaN(m.state.l_x)) {
553 | state.getTranslation().y = m.state.l_z;
554 | state.getTranslation().x = m.state.l_y;
555 | state.getTranslation().z = m.state.l_x;
556 | }
557 | return state;
558 | }
559 |
	/**
	 * Re-initializes the odometry from the current EKF state and resets the
	 * registered detectors. Counts consecutive errors; after MAX_ERRORS the
	 * optical-vision sensor is flagged unavailable. No-op before the odometry
	 * pipeline exists; does nothing further when odometry is disabled.
	 *
	 * @param reason short tag stored in last_reason and logged once the estimator recovers
	 */
	private void init(String reason) {

		if(visualOdometry==null)
			return;

		this.last_pos_tms = 0;
		this.last_reason = reason;

		if(do_odometry) {
			initialized_count = 0;
			// too many consecutive failures -> report vision as unavailable
			if(++error_count > MAX_ERRORS) {
				fps=0; quality=0;
				model.sys.setSensor(Status.MSP_OPCV_AVAILABILITY, false);
			}
			// re-seed odometry pose from the EKF state
			current = setModelToState(model,current);
			visualOdometry.reset(current);
			pos_ned.set(current);

			if(detectors.size()>0) {
				detector_tms = System.currentTimeMillis();
				for(ISLAMDetector d : detectors)
					d.reset(model.state.l_x, model.state.l_y, model.state.l_z);
			}

		}
	}
586 |
587 |
588 |
589 | private void publishPX4Vision() {
590 | if(do_odometry && (System.currentTimeMillis()-last_pos_tms) > PUBLISH_RATE_PX4) {
591 | last_pos_tms = System.currentTimeMillis();
592 |
593 | msg_vision_position_estimate sms = new msg_vision_position_estimate(1,2);
594 |
595 | sms.usec = (long)publish_tms_us;
596 | if(do_xy_position) {
597 | sms.x = (float) pos_ned.T.z;
598 | sms.y = (float) pos_ned.T.x;
599 | sms.z = (float) pos_ned.T.y;
600 | } else {
601 | sms.x = Float.NaN;
602 | sms.y = Float.NaN;
603 | sms.z = Float.NaN;
604 | }
605 |
606 |
607 | if(do_attitude) {
608 | sms.roll = (float)visAttitude[0];
609 | sms.pitch = (float)visAttitude[1];
610 | sms.yaw = (float)visAttitude[2];
611 | } else {
612 | sms.roll = Float.NaN;
613 | sms.pitch = Float.NaN;
614 | sms.yaw = Float.NaN;
615 | }
616 |
617 | sms.covariance[0] = Float.NaN;
618 |
619 | control.sendMAVLinkMessage(sms);
620 |
621 | model.sys.setSensor(Status.MSP_OPCV_AVAILABILITY, true);
622 |
623 | }
624 | }
625 |
626 | private void publisMSPVision() {
627 | if((System.currentTimeMillis()-last_msp_tms) > PUBLISH_RATE_MSP) {
628 | last_msp_tms = System.currentTimeMillis();
629 | msg_msp_vision msg = new msg_msp_vision(2,1);
630 | msg.x = (float) pos_ned.T.z;
631 | msg.y = (float) pos_ned.T.x;
632 | msg.z = (float) pos_ned.T.y;
633 | msg.vx = (float) speed_ned.T.z;
634 | msg.vy = (float) speed_ned.T.x;
635 | msg.vz = (float) speed_ned.T.y;
636 | msg.h = MSPMathUtils.fromRad((float)visAttitude[2]); //MSPMathUtils.fromRad((float)vis_init.getY());
637 | msg.p = (float)visAttitude[1];
638 | msg.r = (float)visAttitude[0];
639 |
640 | msg.quality = quality;
641 | msg.fps = fps;
642 | // msg.tms = (long)estTimeDepth_us;
643 | msg.tms = publish_tms_us;
644 | msg.errors = error_count;
645 | if(do_xy_position && do_odometry)
646 | msg.flags = msg.flags | 1;
647 | if(do_xy_speed && do_odometry)
648 | msg.flags = msg.flags | 2;
649 | if(do_attitude && do_odometry)
650 | msg.flags = msg.flags | 4;
651 | msg.tms = (long)estTimeDepth_us;
652 | control.sendMAVLinkMessage(msg);
653 | }
654 | }
655 |
	/**
	 * Mirrors the current vision estimate into the shared data model, using
	 * the same axis remap (z->x, x->y, y->z) as the publish methods, with the
	 * heading converted from radians.
	 */
	private void updateInternalModel() {

		model.vision.tms = model.sys.getSynchronizedPX4Time_us();
		model.vision.x = (float) pos_ned.T.z;
		model.vision.y = (float) pos_ned.T.x;
		model.vision.z = (float) pos_ned.T.y;
		model.vision.vx = (float) speed_ned.T.z;
		model.vision.vy = (float) speed_ned.T.x;
		model.vision.vz = (float) speed_ned.T.y;
		model.vision.h = MSPMathUtils.fromRad((float)visAttitude[2]);
		model.vision.p = (float)visAttitude[1];
		model.vision.r = (float)visAttitude[0];
		model.vision.qual = quality;
		model.vision.fps = fps;

	}
672 |
	/** Runs the estimator standalone via the no-arg constructor (no controller attached). */
	public static void main(String[] args) {
		new MAVVisualPositionEstimatorVO();
	}
676 |
677 | }
678 |
--------------------------------------------------------------------------------
/MAVSlam/src/com/comino/slam/main/StartUp.java:
--------------------------------------------------------------------------------
1 | /****************************************************************************
2 | *
3 | * Copyright (c) 2017 Eike Mansfeld ecm@gmx.de. All rights reserved.
4 | *
5 | * Redistribution and use in source and binary forms, with or without
6 | * modification, are permitted provided that the following conditions
7 | * are met:
8 | *
9 | * 1. Redistributions of source code must retain the above copyright
10 | * notice, this list of conditions and the following disclaimer.
11 | * 2. Redistributions in binary form must reproduce the above copyright
12 | * notice, this list of conditions and the following disclaimer in
13 | * the documentation and/or other materials provided with the
14 | * distribution.
15 | * 3. Neither the name of the copyright holder nor the names of its
16 | * contributors may be used to endorse or promote products derived
17 | * from this software without specific prior written permission.
18 | *
19 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 | * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 | * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 | * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
26 | * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
27 | * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 | * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 | * POSSIBILITY OF SUCH DAMAGE.
31 | *
32 | ****************************************************************************/
33 |
34 | package com.comino.slam.main;
35 |
36 | import java.io.IOException;
37 | import java.lang.management.MemoryMXBean;
38 | import java.lang.management.OperatingSystemMXBean;
39 | import java.net.InetSocketAddress;
40 |
41 | import org.mavlink.messages.MAV_SEVERITY;
42 | import org.mavlink.messages.MSP_CMD;
43 | import org.mavlink.messages.lquac.msg_msp_command;
44 | import org.mavlink.messages.lquac.msg_msp_micro_grid;
45 | import org.mavlink.messages.lquac.msg_msp_status;
46 | import org.mavlink.messages.lquac.msg_timesync;
47 |
48 | import com.comino.main.MSPConfig;
49 | import com.comino.mav.control.IMAVMSPController;
50 | import com.comino.mav.control.impl.MAVController;
51 | import com.comino.mav.control.impl.MAVProxyController;
52 | import com.comino.msp.execution.commander.MSPCommander;
53 | import com.comino.msp.execution.control.StatusManager;
54 | import com.comino.msp.execution.control.listener.IMAVLinkListener;
55 | import com.comino.msp.log.MSPLogger;
56 | import com.comino.msp.model.DataModel;
57 | import com.comino.msp.model.segment.Status;
58 | import com.comino.msp.utils.ExecutorService;
59 | import com.comino.msp.utils.linux.LinuxUtils;
60 | import com.comino.msp.utils.px4.DefaultTunes;
61 | import com.comino.msp.utils.upboard.CPUTemperature;
62 | import com.comino.msp.utils.upboard.UpLEDControl;
63 | import com.comino.msp.utils.upboard.WifiQuality;
64 | import com.comino.realsense.boofcv.RealSenseInfo;
65 | import com.comino.server.mjpeg.impl.HttpMJPEGHandler;
66 | import com.comino.slam.detectors.impl.FwDirectDepthDetector;
67 | import com.comino.slam.estimators.IPositionEstimator;
68 | import com.comino.slam.estimators.vio.MAVVisualPositionEstimatorVIO;
69 | import com.sun.net.httpserver.HttpServer;
70 |
71 | import georegression.struct.point.Point3D_F64;
72 |
/**
 * Companion-computer entry point: wires the MAVLink proxy controller, the
 * commander, the visual position estimator with MJPEG streaming on port 8080,
 * and a background worker that publishes system status and microSLAM grid data.
 */
public class StartUp implements Runnable {

	IMAVMSPController control = null;
	MSPConfig config = null;

	// JMX beans used for CPU-load/memory reporting in the status message
	private OperatingSystemMXBean osBean = null;
	private MemoryMXBean mxBean = null;

	// NOTE(review): generic parameters appear stripped in this file ("HttpMJPEGHandler>")
	private HttpMJPEGHandler> streamer = null;

	private MSPCommander commander = null;
	private final long startTime_ms = System.currentTimeMillis();

	IPositionEstimator vision = null;
	private boolean publish_microslam;   // forward occupancy-grid packets to the GCS
	private boolean is_simulation;       // SITL mode (any program argument enables it)

	private MSPLogger logger;

	/**
	 * Boots all services. Any command-line argument switches to SITL mode
	 * (config from the user home, MODE_SITL); otherwise the controller runs
	 * against real hardware with config from /home/up.
	 */
	public StartUp(String[] args) {

		ExecutorService.create();

		if(args.length != 0) {
			is_simulation = true;
		}

		RealSenseInfo info = null;

		if(is_simulation) {
			config = MSPConfig.getInstance(System.getProperty("user.home")+"/","msp.properties");
			control = new MAVProxyController(MAVController.MODE_SITL);
		}
		else {
			config = MSPConfig.getInstance("/home/up","msp.properties");
			control = new MAVProxyController(MAVController.MODE_NORMAL);
		}

		System.out.println("MSPControlService version "+config.getVersion());

		osBean = java.lang.management.ManagementFactory.getOperatingSystemMXBean();
		mxBean = java.lang.management.ManagementFactory.getMemoryMXBean();

		logger = MSPLogger.getInstance(control);

		commander = new MSPCommander(control,config);

		control.start();


		// inform the operator when SLAM drops out
		control.getStatusManager().addListener(StatusManager.TYPE_MSP_SERVICES,
				Status.MSP_SLAM_AVAILABILITY, StatusManager.EDGE_FALLING, (n) -> {
					logger.writeLocalMsg("[msp] SLAM disabled", MAV_SEVERITY.MAV_SEVERITY_INFO);
				});


		// stop the vision pipeline cleanly on JVM shutdown
		Runtime.getRuntime().addShutdownHook(new Thread() {
			public void run() {
				if(vision!=null)
					vision.stop();
			}
		});



		logger.writeLocalMsg("MAVProxy "+config.getVersion()+" loaded");
		//if(!is_simulation) {
		// status/grid worker (run() below), lowest priority
		Thread worker = new Thread(this);
		worker.setPriority(Thread.MIN_PRIORITY);
		worker.setName("Main");
		worker.start();
		//	}

		// Start services if required

		try {
			Thread.sleep(200);

			if(config.getBoolProperty("vision_enabled", "true")) {

				if(config.getBoolProperty("vision_highres", "false"))
					info = new RealSenseInfo(640,480, RealSenseInfo.MODE_RGB);
				else
					info = new RealSenseInfo(320,240, RealSenseInfo.MODE_RGB);


				streamer = new HttpMJPEGHandler(info, control.getCurrentModel());


				// vision = new MAVVisualPositionEstimatorVO(info, control, config, streamer);
				vision = new MAVVisualPositionEstimatorVIO(info, control, config, streamer);

				vision.registerDetector(new FwDirectDepthDetector(control,config,streamer));



				// MJPEG stream served at http://<host>:8080/mjpeg
				HttpServer server;
				try {
					server = HttpServer.create(new InetSocketAddress(8080),2);
					server.createContext("/mjpeg", streamer);
					server.setExecutor(null); // creates a default executor
					server.start();
				} catch (IOException e) {
					System.err.println(e.getMessage());
				}

			}
		} catch(Exception e) { System.out.println("No vision available: "+e.getMessage()); }


		this.publish_microslam = config.getBoolProperty("slam_publish_microslam", "true");
		System.out.println("[vis] Publishing microSlam enabled: "+publish_microslam);

		if(vision!=null && !vision.isRunning()) {
			vision.start();
		}

		// GCS command: resolve a 3D point from a pixel of the depth image
		control.registerListener(msg_msp_command.class, new IMAVLinkListener() {
			@Override
			public void received(Object o) {
				Point3D_F64 target = null;
				msg_msp_command cmd = (msg_msp_command)o;
				switch(cmd.command) {
				case MSP_CMD.SET_OPTICAL_TARGET:
					if(vision.getOdometry() == null || !vision.isRunning()) {
						return;
					}
					try {
						// no pixel given -> use the image center of the 320x240 stream
						if(Float.isNaN(cmd.param1) || Float.isNaN(cmd.param2))
							target = vision.getOdometry().getPoint3DFromPixel(160, 120);
						else
							target = vision.getOdometry().getPoint3DFromPixel((int)cmd.param1, (int)cmd.param2);

						// axis order x,z,y — presumably a camera->NED remap; TODO confirm
						if(target!=null && target.z < 15.0f)
							logger.writeLocalMsg(String.format("OpticalTarget: [%.2f %.2f %.2f]", target.x,target.z,target.y));
						else
							logger.writeLocalMsg("OpticalTarget could not be set)");   // NOTE(review): stray ')' in message text

					} catch(Exception e) {
						e.printStackTrace();
					}
					// TODO: Rotate into body_ned and transform to world (add LPOS)
					// commander.getAutopilot().moveto((float)target.x, (float)target.y, (float)target.z - 0.25f, Float.NaN);
					break;
				}
			}
		});

	}

	public static void main(String[] args) {

		// clear the UP-board status LED only on real hardware (no arguments)
		if(args.length==0)
			UpLEDControl.clear();

		new StartUp(args);

	}


	/**
	 * Background worker: reconnects the controller when the link is down,
	 * forwards pending microSLAM grid transfers, publishes an MSP status
	 * message roughly every 333ms and blinks the UP-board LED every 3s.
	 */
	@Override
	public void run() {
		long tms = System.currentTimeMillis();
		long blink = tms;
		boolean tune_played = false;
		int pack_count;

		DataModel model = control.getCurrentModel();

		WifiQuality wifi = new WifiQuality();
		CPUTemperature temp = new CPUTemperature();
		msg_msp_micro_grid grid = new msg_msp_micro_grid(2,1);
		msg_msp_status msg = new msg_msp_status(2,1);


		while(true) {
			try {

				if(!control.isConnected()) {
					Thread.sleep(200);
					control.connect();
					continue;
				}

				// forward up to 10 pending occupancy-grid packets per cycle
				// NOTE(review): publish_microslam is forced true here, overriding the config value read above
				pack_count = 0; publish_microslam = true;
				while(publish_microslam && model.grid.hasTransfers() && pack_count++ < 10) {
					if(model.grid.toArray(grid.data)) {
						grid.resolution = 0.05f;
						grid.extension = 0;
						grid.cx = model.grid.getIndicatorX();
						grid.cy = model.grid.getIndicatorY();
						grid.cz = model.grid.getIndicatorZ();
						grid.tms = model.grid.tms;
						grid.count = model.grid.count;
						control.sendMAVLinkMessage(grid);
						Thread.sleep(5);
					}
				}

				//  streamer.addToStream(Autopilot2D.getInstance().getMap2D().getMap().subimage(400-160, 400-120, 400+160, 400+120), model, System.currentTimeMillis()*1000);

				Thread.sleep(50);

				// throttle status publishing to roughly 3Hz
				if((System.currentTimeMillis()-tms) < 333)
					continue;

				tms = System.currentTimeMillis();


				if(!control.isSimulation()) {

					// play the startup tune once after the first connection
					if(!tune_played ) {
						DefaultTunes.play(control,"MFT200e8a8aE");
						tune_played = true;
					}

					// time sync message with tc1=0
					msg_timesync sync_s = new msg_timesync(255,1);
					sync_s.tc1 = 0;
					sync_s.ts1 = System.currentTimeMillis()*1000000L;
					control.sendMAVLinkMessage(sync_s);

					wifi.getQuality();
					temp.getTemperature();
				}

				// assemble and send the companion status message
				msg.load = LinuxUtils.getProcessCpuLoad();
				msg.memory = (int)(mxBean.getHeapMemoryUsage().getUsed() * 100 /mxBean.getHeapMemoryUsage().getMax());
				msg.wifi_quality = (byte)wifi.get();
				msg.threads = Thread.activeCount();
				msg.cpu_temp = (byte)temp.get();
				msg.com_error = control.getErrorCount();
				msg.autopilot_mode =control.getCurrentModel().sys.autopilot;
				msg.uptime_ms = System.currentTimeMillis() - startTime_ms;
				msg.status = control.getCurrentModel().sys.getStatus();
				msg.setVersion(config.getVersion()+"/"+config.getVersionDate().replace(".", ""));
				msg.setArch(osBean.getArch());
				msg.unix_time_us = System.currentTimeMillis() * 1000;
				control.sendMAVLinkMessage(msg);

				// LED blink every 3s (skipped in simulation)
				if((System.currentTimeMillis()-blink) < 3000 || is_simulation)
					continue;

				blink = System.currentTimeMillis();

				if(model.sys.isStatus(Status.MSP_ACTIVE))
					UpLEDControl.flash("green", 10);
				else
					UpLEDControl.flash("red", 200);

			} catch (Exception e) {
				e.printStackTrace();
				control.close();
			}
		}
	}
}
329 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # MAVSlam - Visual odometry & SLAM for PX4
2 |
3 | ##
4 |
5 | #### Onboard visual odometry for PX4
6 |
7 | (Deprecated)
8 |
Integrate the Intel® RealSense™ R200 depth camera as an onboard visual input source for PX4, using the [UP-Board](http://www.up-board.org) as a companion computer:
10 |
11 | - Estimate position and speed based on stereo depth images up to 60 fps for LPE /EKF2
12 | - Stream video to MAVGCL with overlays
13 | - Feature-based or depth obstacle detection
14 | - Integration with PX4 via MAVLink ([MAVComm](https://github.com/ecmnet/MAVComm) required)
15 |
16 | Prerequisites:
17 |
18 | - UP-Board running Ubilinux 3.0 with Java 8 (minimum) stack connected via serial link to PX4 controller
19 | - Intel® RealSense™ R200 (connected with external power supply)
20 |
21 | Video:
22 |
23 |
24 |
25 |
26 | Heading estimation:
27 |
28 | 
29 |
30 |
31 |
32 | XY Estimation:
33 |
34 | 
35 |
36 | Obstacle detection:
37 |
38 | 
39 |
40 |
41 |
42 | MicroSLAM:
43 |
44 | 
45 |
46 |
47 |
48 | #### Desktop odometry demo
49 |
50 | A first implementation using Intel® RealSense™ R200 with boofcv on OSX/Linux platforms. Based on https://github.com/IntelRealSense/librealsense and http://boofcv.org/index.php?title=Main_Page.
51 |
52 | 
--------------------------------------------------------------------------------