├── .gitignore
├── .travis.yml
├── README.md
├── app
├── build.gradle
├── proguard-rules.pro
└── src
│ ├── androidTest
│ └── java
│ │ └── org
│ │ └── hitlabnz
│ │ └── sensor_fusion_demo
│ │ └── androidTest
│ │ └── ApplicationTest.java
│ ├── main
│ ├── AndroidManifest.xml
│ ├── assets
│ │ └── about
│ │ │ ├── de
│ │ │ └── index.html
│ │ │ └── en
│ │ │ └── index.html
│ ├── java
│ │ └── org
│ │ │ └── hitlabnz
│ │ │ └── sensor_fusion_demo
│ │ │ ├── AboutActivity.java
│ │ │ ├── Cube.java
│ │ │ ├── CubeRenderer.java
│ │ │ ├── HardwareChecker.java
│ │ │ ├── OrientationVisualisationFragment.java
│ │ │ ├── SensorChecker.java
│ │ │ ├── SensorSelectionActivity.java
│ │ │ ├── orientationProvider
│ │ │ ├── AccelerometerCompassProvider.java
│ │ │ ├── CalibratedGyroscopeProvider.java
│ │ │ ├── GravityCompassProvider.java
│ │ │ ├── ImprovedOrientationSensor1Provider.java
│ │ │ ├── ImprovedOrientationSensor2Provider.java
│ │ │ ├── MadgwickProvider.java
│ │ │ ├── OrientationProvider.java
│ │ │ └── RotationVectorProvider.java
│ │ │ └── representation
│ │ │ ├── Matrix.java
│ │ │ ├── MatrixF4x4.java
│ │ │ ├── Quaternion.java
│ │ │ ├── Vector3f.java
│ │ │ └── Vector4f.java
│ └── res
│ │ ├── drawable-hdpi
│ │ └── ic_launcher.png
│ │ ├── drawable-mdpi
│ │ └── ic_launcher.png
│ │ ├── drawable-xhdpi
│ │ └── ic_launcher.png
│ │ ├── drawable-xxhdpi
│ │ └── ic_launcher.png
│ │ ├── layout
│ │ ├── activity_about.xml
│ │ └── activity_sensor_selection.xml
│ │ ├── menu
│ │ └── sensor_selection.xml
│ │ ├── values-de
│ │ └── strings.xml
│ │ ├── values-v11
│ │ └── styles.xml
│ │ ├── values-v14
│ │ └── styles.xml
│ │ └── values
│ │ ├── strings.xml
│ │ └── styles.xml
│ └── test
│ └── java
│ └── org
│ └── hitlabnz
│ └── sensor_fusion_demo
│ └── test
│ └── QuaternionTest.java
├── build.gradle
├── docs
├── .gitignore
├── Makefile
├── conf.py
├── index.rst
├── make.bat
└── requirements.txt
├── gradle
└── wrapper
│ ├── gradle-wrapper.jar
│ └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── ic_launcher-web.png
├── ic_launcher-web_functional.pdn
├── ic_launcher-web_functional.png
└── settings.gradle
/.gitignore:
--------------------------------------------------------------------------------
1 | # Built application files
2 | *.apk
3 | *.ap_
4 |
5 | # Files for the Dalvik VM
6 | *.dex
7 |
8 | # Java class files
9 | *.class
10 |
11 | # Generated files
12 | bin/
13 | gen/
14 | out/
15 |
16 | # Gradle files
17 | .gradle/
18 | build/
19 | /build
20 |
21 | # Local configuration file (sdk path, etc)
22 | local.properties
23 |
24 | # Proguard folder generated by Eclipse
25 | proguard/
26 |
27 | # Log Files
28 | *.log
29 |
30 | # Android Studio Navigation editor temp files
31 | .navigation/
32 |
33 | # Android Studio captures folder
34 | captures/
35 |
36 | # Android Studio
37 | *.iml
38 | .idea
39 | #.idea/workspace.xml - remove # and delete .idea if it better suit your needs.
40 |
41 | # Intellij
42 | *.iml
43 | /.idea/workspace.xml
44 | /.idea/libraries
45 |
46 | .DS_Store
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: android
2 | jdk: oraclejdk8
3 |
4 | android:
5 | components:
6 | - platform-tools
7 | - tools
8 |
9 | # The BuildTools version used by your project
10 | - build-tools-25.0.2
11 |
12 | # The SDK version used to compile your project
13 | - android-25
14 |
15 | # Additional components
16 | - extra-google-google_play_services
17 | - extra-google-m2repository
18 | - extra-android-m2repository
19 | - sys-img-armeabi-v7a-android-25
20 |
21 | before_script:
22 | # Create and start emulator
23 | # - echo no | android create avd --force -n test -t android-24 --abi armeabi-v7a
24 | # - emulator -avd test -no-skin -no-audio -no-window &
25 | # - android-wait-for-emulator
26 | # - adb shell input keyevent 82 &
27 |
28 | script: ./gradlew assembleRelease
29 | #connectedAndroidTest
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Sensor fusion demo for Android
2 |
3 | [](https://www.bitrise.io/app/46b5cf7adea1286f)
4 | [](https://travis-ci.org/apacha/sensor-fusion-demo)
5 | [](http://sensor-fusion-demo.readthedocs.io/en/latest/?badge=latest)
6 |
7 | This application demonstrates the capabilities of various sensors and sensor-fusions. Data from the Gyroscope, Accelerometer and compass are combined in different ways and the result is shown as a cube that can be rotated by rotating the device.
8 |
9 |
10 | Read the full documentation [here](http://sensor-fusion-demo.readthedocs.io) and [here](https://github.com/maddevsio/mad-location-manager/wiki/Theory).
11 | The following sensors are available for comparison:
12 |
13 | - Improved Orientation Sensor 1 (Sensor fusion of Android Rotation Vector and Calibrated Gyroscope - less stable but more accurate)
14 | - Improved Orientation Sensor 2 (Sensor fusion of Android Rotation Vector and Calibrated Gyroscope - more stable but less accurate)
15 | - Android Rotation Vector (Kalman filter fusion of Accelerometer + Gyroscope + Compass)
16 | - Madgwick AHRS
17 | - Gravity + Compass
18 | - Accelerometer + Compass
19 | - Calibrated Gyroscope (Separate result of Kalman filter fusion of Accelerometer + Gyroscope + Compass)
20 |
21 | This application was developed for demonstrating the sensor fusion approach developed for [Master Thesis "Sensor fusion for robust outdoor Augmented Reality tracking on mobile devices"](https://alexanderpacha.files.wordpress.com/2017/05/masterthesis-pacha.pdf) at the [Human Interface Technology Laboratory New Zealand](http://www.hitlabnz.org). Was modified by maddevs.io for MadgwickAHRS demonstration.
22 |
23 | ## Build and Install
24 |
25 | This project is a Gradle-based Android Studio project. It is also published in the [Google Play Store](https://play.google.com/store/apps/details?id=org.hitlabnz.sensor_fusion_demo), if you just want to try it out.
26 |
27 | ## License
28 |
29 | Released under the MIT license.
30 |
31 | Copyright, 2017, by [Alexander Pacha](http://alexanderpacha.com) and the [Human Interface Technology Laboratory New Zealand](http://www.hitlabnz.org).
32 |
33 | Permission is hereby granted, free of charge, to any person obtaining a copy
34 | of this software and associated documentation files (the "Software"), to deal
35 | in the Software without restriction, including without limitation the rights
36 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
37 | copies of the Software, and to permit persons to whom the Software is
38 | furnished to do so, subject to the following conditions:
39 |
40 | The above copyright notice and this permission notice shall be included in
41 | all copies or substantial portions of the Software.
42 |
43 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
46 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
47 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
48 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
49 | THE SOFTWARE.
50 |
51 | This application also uses parts from the Android Open Source Project, licensed under the [Apache License, Version 2.0]( http://www.apache.org/licenses/LICENSE-2.0).
52 |
53 | ## Data privacy statement
54 |
55 | This application does not store or transmit any data.
56 |
--------------------------------------------------------------------------------
/app/build.gradle:
--------------------------------------------------------------------------------
apply plugin: 'com.android.application'

// Version scheme: versionCode = today's date (yyMMdd) * 10000 + CI build number,
// so every CI build on a given day produces a strictly increasing version code.
def date = new Date()
def formattedDate = date.format('yyMMdd')
def code = formattedDate.toInteger() * 10000
// Build number injected by Bitrise CI; falls back to 0 for local builds.
def buildNumber = System.getenv("BITRISE_BUILD_NUMBER") as Integer ?: 0
// NOTE(review): yyMMdd * 10000 exceeds Integer.MAX_VALUE (and Google Play's
// 2100000000 versionCode cap) for build dates from 2021/2022 onward — verify
// that this versioning scheme still produces a valid, positive versionCode.

android {
    compileSdkVersion 26
    // buildToolsVersion "26.0.2"

    defaultConfig {
        applicationId "org.hitlabnz.sensor_fusion_demo"
        minSdkVersion 18
        targetSdkVersion 26
        versionCode code + buildNumber
        versionName "1.4." + buildNumber
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
    }

    buildTypes {
        release {
            // Minification disabled; the ProGuard files are listed but effectively unused.
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }

    lintOptions {
        // Do not fail CI builds on lint findings.
        abortOnError false
    }
}

dependencies {
    implementation 'com.android.support:support-v4:25.0.1'

    // Local unit tests (JVM).
    testImplementation 'junit:junit:4.12'
    testImplementation 'org.mockito:mockito-core:2.2.27'

    // Instrumented tests (on-device), with support-annotations excluded to
    // avoid version conflicts with the support-v4 dependency above.
    androidTestImplementation 'com.android.support:support-annotations:25.0.1'

    androidTestImplementation 'org.hamcrest:hamcrest-library:1.3'
    androidTestImplementation 'com.android.support.test.uiautomator:uiautomator-v18:2.1.2'

    androidTestImplementation ('com.android.support.test.espresso:espresso-core:2.2.2') {
        exclude group: 'com.android.support', module: 'support-annotations'
    }
    androidTestImplementation ('com.android.support.test:runner:0.5') {
        exclude group: 'com.android.support', module: 'support-annotations'
    }
    androidTestImplementation ('com.android.support.test:rules:0.5') {
        exclude group: 'com.android.support', module: 'support-annotations'
    }
}
54 |
--------------------------------------------------------------------------------
/app/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # By default, the flags in this file are appended to flags specified
3 | # in /Users/alex/Library/Developer/Xamarin/android-sdk-macosx/tools/proguard/proguard-android.txt
4 | # You can edit the include path and order by changing the proguardFiles
5 | # directive in build.gradle.
6 | #
7 | # For more details, see
8 | # http://developer.android.com/guide/developing/tools/proguard.html
9 |
10 | # Add any project specific keep options here:
11 |
12 | # If your project uses WebView with JS, uncomment the following
13 | # and specify the fully qualified class name to the JavaScript interface
14 | # class:
15 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
16 | # public *;
17 | #}
18 |
--------------------------------------------------------------------------------
/app/src/androidTest/java/org/hitlabnz/sensor_fusion_demo/androidTest/ApplicationTest.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.androidTest;
2 |
3 | import android.app.Application;
4 | import android.support.test.espresso.NoMatchingViewException;
5 | import android.support.test.espresso.matcher.ViewMatchers;
6 | import android.support.test.rule.ActivityTestRule;
7 | import android.support.test.runner.AndroidJUnit4;
8 | import android.test.ApplicationTestCase;
9 | import android.test.suitebuilder.annotation.LargeTest;
10 |
11 | import org.hitlabnz.sensor_fusion_demo.R;
12 | import org.hitlabnz.sensor_fusion_demo.SensorSelectionActivity;
13 | import org.junit.Rule;
14 | import org.junit.Test;
15 | import org.junit.runner.RunWith;
16 |
17 | import static android.support.test.espresso.Espresso.onView;
18 | import static android.support.test.espresso.action.ViewActions.click;
19 | import static android.support.test.espresso.assertion.ViewAssertions.matches;
20 | import static android.support.test.espresso.matcher.ViewMatchers.hasLinks;
21 | import static android.support.test.espresso.matcher.ViewMatchers.withId;
22 | import static android.support.test.espresso.matcher.ViewMatchers.withText;
23 |
24 | ///**
25 | // * Testing Fundamentals
26 | // */
27 | //public class ApplicationTest extends ApplicationTestCase {
28 | // public ApplicationTest() {
29 | // super(Application.class);
30 | // }
31 | //}
32 |
33 | @RunWith(AndroidJUnit4.class)
34 | @LargeTest
35 | public class ApplicationTest {
36 |
37 | @Rule
38 | public ActivityTestRule mActivityRule = new ActivityTestRule<>(
39 | SensorSelectionActivity.class);
40 |
41 | @Test
42 | public void checkAssertMenuExists(){
43 | DiscardGyroscopeWarning();
44 |
45 | // Act & Assert
46 | AssertThatAboutActionMenuExists();
47 | }
48 |
49 | @Test
50 | public void clickAboutMenu_expectAboutToBeDisplayed(){
51 | DiscardGyroscopeWarning();
52 |
53 | // Act
54 | onView(withId(R.id.action_about)).perform(click());
55 |
56 | onView(withId(R.id.webViewAbout)).check(matches(ViewMatchers.isDisplayed()));
57 | }
58 |
59 |
60 | public void AssertThatAboutActionMenuExists() {
61 | onView(withId(R.id.action_about)).check(matches(withText("About")));
62 | }
63 |
64 | public static void DiscardGyroscopeWarning() {
65 | try {
66 | onView(withText("OK")).perform(click());
67 | } catch (NoMatchingViewException e) {
68 | //view not displayed logic
69 | }
70 | }
71 | }
--------------------------------------------------------------------------------
/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
10 |
14 |
15 |
16 |
17 |
18 |
19 |
22 |
23 |
24 |
25 |
26 |
--------------------------------------------------------------------------------
/app/src/main/assets/about/de/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Diese App wurde von Alexander Pacha am Human Interface Technology
7 | Laboratory New Zealand entwickelt und demonstriert die
8 | Leistungsfähigkeit von verschiedenen Sensoren und Sensorfusionen.
9 | Messungen von dem Gyroskop, Akzelerometer und Kompass werden in
10 | verschiedenen Weisen kombiniert und das Ergebnis wird als Würfel
11 | visualisiert, der durch rotieren des Gerätes gedreht werden kann.
12 |
13 |
14 | Die große Neuheit in dieser Applikation ist die Fusion von zwei
15 | virtuellen Sensoren: Improved Orientation Sensor 1 und Improved
16 | Orientation Sensor 2 nutzen den Android Rotation Vector mit dem
17 | kalibrierten Gyroskopsensor und erreichen eine nie zuvor dagewesene Präzision
18 | und Reaktionsfähigkeit.
19 |
20 |
21 | Neben diesen beiden Sensorfusionen gibt es noch weitere Sensoren
22 | zum Vergleich:
23 |
24 | - Improved Orientation Sensor 1 (Sensorfusion des Android
25 | Rotation Vector und des kalibrierten Gyroskops - weniger stabil,
26 | dafür genauer)
27 | - Improved Orientation Sensor 2 (Sensorfusion des Android
28 | Rotation Vector und des kalibrierten Gyroskops - stabiler, dafür
29 | ungenauer)
30 | - Android Rotation Vector (Kalmanfilterfusion von Akzelerometer
31 | + Gyroskop + Kompass) - die bisher beste verfügbare Fusion!
32 | - Kalibriertes Gyroskop (Weiteres Ergebnis der
33 | Kalmanfilterfusion von Akzelerometer + Gyroskop + Kompass). Liefert
34 | nur relative Rotation, kann daher von den anderen Sensoren abweichen.
35 | - Gravitation + Kompass
36 | - Akzelerometer + Kompass
37 |
38 |
39 |
40 | Die Anwendung wurde entwickelt, um die im Rahmen der Masterarbeit "Sensor
42 | fusion for robust outdoor Augmented Reality tracking on mobile devices"
43 | (download)
44 | entwickelte Sensorfusion zu demonstrieren. Die App wurde am Human Interface Technology
46 | Laboratory New Zealand entwickelt.
47 |
48 |
49 | Lange auf den Würfel gedrückt halten, um kurzfristig in den Raummodus zu wechseln.
50 |
51 |
52 | Der Quellcode ist öffentlich verfügbar auf Bitbucket
54 | unter der MIT Lizenz.
55 |
56 |
57 |
58 | Weitere Mitarbeit:
59 |
62 |
63 |
64 |
--------------------------------------------------------------------------------
/app/src/main/assets/about/en/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | This application was developed by Alexander Pacha at the Human Interface
5 | Technology Laboratory New Zealand to demonstrate the
6 | capabilities of various sensors and sensor fusion approaches. Data
7 | from the Gyroscope, Accelerometer and compass are combined in
8 | different ways and the result is shown as a cube that can be rotated
9 | by rotating the device.
10 |
11 | The major novelty in this application is the fusion of virtual
12 | sensors: Improved Orientation Sensor 1 and Improved
13 | Orientation Sensor 2 fuse the Android Rotation Vector with the
14 | virtual Gyroscope sensor to achieve a pose estimation with a
15 | previously unknown stability and precision.
16 |
17 |
18 | Apart from these two sensors, the following sensors are
19 | available for comparison:
20 |
21 | - Improved Orientation Sensor 1 (Sensor fusion of Android
22 | Rotation Vector and Calibrated Gyroscope - less stable but more
23 | accurate)
24 | - Improved Orientation Sensor 2 (Sensor fusion of Android
25 | Rotation Vector and Calibrated Gyroscope - more stable but less
26 | accurate)
27 | - Android Rotation Vector (Kalman filter fusion of
28 | Accelerometer + Gyroscope + Compass)
29 | - Calibrated Gyroscope (Separate result of Kalman filter fusion
30 | of Accelerometer + Gyroscope + Compass)
31 | - Gravity + Compass
32 | - Accelerometer + Compass
33 | - Accelerometer+Gyroscope+Magnetometer fused by Madgwick
34 |
35 |
36 |
37 | This application was developed for demonstrating the sensor fusion
38 | approach developed for my Master thesis "Sensor fusion for robust outdoor Augmented Reality tracking on mobile
40 | devices"
41 | (download) at the Human Interface
42 | Technology Laboratory New Zealand
43 |
44 |
45 | Long-click on a cube to temporarily change into the space-mode for this fusion.
46 |
47 |
48 | The source-code is publicly available at Bitbucket
50 | and licensed under the MIT license.
51 |
52 |
53 |
54 | Contributors:
55 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/AboutActivity.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import java.util.Locale;
4 |
5 | import android.app.Activity;
6 | import android.os.Bundle;
7 | import android.view.MenuItem;
8 | import android.webkit.WebView;
9 |
10 | /**
11 | * Activity, that displays a single WebView with the text shown under the section About in the settings
12 | *
13 | * @author Alexander Pacha
14 | *
15 | */
16 | public class AboutActivity extends Activity {
17 |
18 | @Override
19 | protected void onCreate(Bundle savedInstanceState) {
20 | super.onCreate(savedInstanceState);
21 | setContentView(R.layout.activity_about);
22 |
23 | // Get the locale substring to access the localised assets
24 | String localPrefix = Locale.getDefault().getLanguage().substring(0, 2).toLowerCase(Locale.US);
25 |
26 | // Load the website as the only action for this activity
27 | WebView webView = (WebView) findViewById(R.id.webViewAbout);
28 | webView.loadUrl("file:///android_asset/about/" + localPrefix + "/index.html");
29 |
30 | // Enable the logo in the top left corner to bring the user back to another activity.
31 | getActionBar().setDisplayHomeAsUpEnabled(true);
32 | }
33 |
34 | @Override
35 | public boolean onOptionsItemSelected(MenuItem item) {
36 | switch (item.getItemId()) {
37 | case android.R.id.home:
38 | finish();
39 | return true;
40 | }
41 | return super.onOptionsItemSelected(item);
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/Cube.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import java.nio.ByteBuffer;
4 | import java.nio.ByteOrder;
5 | import java.nio.FloatBuffer;
6 |
7 | import javax.microedition.khronos.opengles.GL10;
8 |
9 | /**
10 | * A simple colour-cube that is used for drawing the current rotation of the device
11 | *
12 | */
13 | public class Cube {
14 | /**
15 | * Buffer for the vertices
16 | */
17 | private FloatBuffer mVertexBuffer;
18 | /**
19 | * Buffer for the colours
20 | */
21 | private FloatBuffer mColorBuffer;
22 | /**
23 | * Buffer for indices
24 | */
25 | private ByteBuffer mIndexBuffer;
26 |
27 | /**
28 | * Initialises a new instance of the cube
29 | */
30 | public Cube() {
31 | final float vertices[] = {
32 | -1, -1, -1,
33 | 1, -1, -1,
34 | 1, 1, -1,
35 | -1, 1, -1,
36 | -1, -1, 1,
37 | 1, -1, 1,
38 | 1, 1, 1,
39 | -1, 1, 1, };
40 |
41 | final float colors[] = {
42 | 0, 0, 0, 1,
43 | 1, 0, 0, 1,
44 | 1, 1, 0, 1,
45 | 0, 1, 0, 1,
46 | 0, 0, 1, 1,
47 | 1, 0, 1, 1,
48 | 1, 1, 1, 1,
49 | 0, 1, 1, 1, };
50 |
51 | final byte indices[] = {
52 | 0, 4, 5, 0,
53 | 5, 1, 1, 5,
54 | 6, 1, 6, 2,
55 | 2, 6, 7, 2,
56 | 7, 3, 3, 7,
57 | 4, 3, 4, 0,
58 | 4, 7, 6, 4,
59 | 6, 5, 3, 0,
60 | 1, 3, 1, 2 };
61 |
62 | ByteBuffer vbb = ByteBuffer.allocateDirect(vertices.length * 4);
63 | vbb.order(ByteOrder.nativeOrder());
64 | mVertexBuffer = vbb.asFloatBuffer();
65 | mVertexBuffer.put(vertices);
66 | mVertexBuffer.position(0);
67 |
68 | ByteBuffer cbb = ByteBuffer.allocateDirect(colors.length * 4);
69 | cbb.order(ByteOrder.nativeOrder());
70 | mColorBuffer = cbb.asFloatBuffer();
71 | mColorBuffer.put(colors);
72 | mColorBuffer.position(0);
73 |
74 | mIndexBuffer = ByteBuffer.allocateDirect(indices.length);
75 | mIndexBuffer.put(indices);
76 | mIndexBuffer.position(0);
77 | }
78 |
79 | /**
80 | * Draws this cube of the given GL-Surface
81 | *
82 | * @param gl The GL-Surface this cube should be drawn upon.
83 | */
84 | public void draw(GL10 gl) {
85 | gl.glEnable(GL10.GL_CULL_FACE);
86 | gl.glFrontFace(GL10.GL_CW);
87 | gl.glShadeModel(GL10.GL_SMOOTH);
88 | gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVertexBuffer);
89 | gl.glColorPointer(4, GL10.GL_FLOAT, 0, mColorBuffer);
90 | gl.glDrawElements(GL10.GL_TRIANGLES, 36, GL10.GL_UNSIGNED_BYTE, mIndexBuffer);
91 | }
92 | }
93 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/CubeRenderer.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import javax.microedition.khronos.egl.EGLConfig;
4 | import javax.microedition.khronos.opengles.GL10;
5 |
6 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.OrientationProvider;
7 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
8 |
9 | import android.opengl.GLSurfaceView;
10 |
11 | /**
12 | * Class that implements the rendering of a cube with the current rotation of the device that is provided by a
13 | * OrientationProvider
14 | *
15 | * @author Alexander Pacha
16 | *
17 | */
18 | public class CubeRenderer implements GLSurfaceView.Renderer {
19 | /**
20 | * The colour-cube that is drawn repeatedly
21 | */
22 | private Cube mCube;
23 |
24 | /**
25 | * The current provider of the device orientation.
26 | */
27 | private OrientationProvider orientationProvider = null;
28 | private Quaternion quaternion = new Quaternion();
29 |
30 | /**
31 | * Initialises a new CubeRenderer
32 | */
33 | public CubeRenderer() {
34 | mCube = new Cube();
35 | }
36 |
37 | /**
38 | * Sets the orientationProvider of this renderer. Use this method to change which sensor fusion should be currently
39 | * used for rendering the cube. Simply exchange it with another orientationProvider and the cube will be rendered
40 | * with another approach.
41 | *
42 | * @param orientationProvider The new orientation provider that delivers the current orientation of the device
43 | */
44 | public void setOrientationProvider(OrientationProvider orientationProvider) {
45 | this.orientationProvider = orientationProvider;
46 | }
47 |
48 | /**
49 | * Perform the actual rendering of the cube for each frame
50 | *
51 | * @param gl The surface on which the cube should be rendered
52 | */
53 | public void onDrawFrame(GL10 gl) {
54 | // clear screen
55 | gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
56 |
57 | // set-up modelview matrix
58 | gl.glMatrixMode(GL10.GL_MODELVIEW);
59 | gl.glLoadIdentity();
60 |
61 | if (showCubeInsideOut) {
62 | float dist = 3;
63 | gl.glTranslatef(0, 0, -dist);
64 |
65 | if (orientationProvider != null) {
66 | // All Orientation providers deliver Quaternion as well as rotation matrix.
67 | // Use your favourite representation:
68 |
69 | // Get the rotation from the current orientationProvider as rotation matrix
70 | //gl.glMultMatrixf(orientationProvider.getRotationMatrix().getMatrix(), 0);
71 |
72 | // Get the rotation from the current orientationProvider as quaternion
73 | orientationProvider.getQuaternion(quaternion);
74 | gl.glRotatef((float) (2.0f * Math.acos(quaternion.getW()) * 180.0f / Math.PI), quaternion.getX(), quaternion.getY(), quaternion.getZ());
75 | }
76 |
77 | // draw our object
78 | gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
79 | gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
80 |
81 | mCube.draw(gl);
82 | } else {
83 |
84 | if (orientationProvider != null) {
85 | // All Orientation providers deliver Quaternion as well as rotation matrix.
86 | // Use your favourite representation:
87 |
88 | // Get the rotation from the current orientationProvider as rotation matrix
89 | //gl.glMultMatrixf(orientationProvider.getRotationMatrix().getMatrix(), 0);
90 |
91 | // Get the rotation from the current orientationProvider as quaternion
92 | orientationProvider.getQuaternion(quaternion);
93 | gl.glRotatef((float) (2.0f * Math.acos(quaternion.getW()) * 180.0f / Math.PI), quaternion.getX(), quaternion.getY(), quaternion.getZ());
94 | }
95 |
96 | float dist = 3;
97 | drawTranslatedCube(gl, 0, 0, -dist);
98 | drawTranslatedCube(gl, 0, 0, dist);
99 | drawTranslatedCube(gl, 0, -dist, 0);
100 | drawTranslatedCube(gl, 0, dist, 0);
101 | drawTranslatedCube(gl, -dist, 0, 0);
102 | drawTranslatedCube(gl, dist, 0, 0);
103 | }
104 |
105 | // draw our object
106 | gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
107 | gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
108 |
109 | mCube.draw(gl);
110 | }
111 |
112 | /**
113 | * Draws a translated cube
114 | *
115 | * @param gl the surface
116 | * @param translateX x-translation
117 | * @param translateY y-translation
118 | * @param translateZ z-translation
119 | */
120 | private void drawTranslatedCube(GL10 gl, float translateX, float translateY, float translateZ) {
121 | gl.glPushMatrix();
122 | gl.glTranslatef(translateX, translateY, translateZ);
123 |
124 | // draw our object
125 | gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
126 | gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
127 |
128 | mCube.draw(gl);
129 | gl.glPopMatrix();
130 | }
131 |
132 | /**
133 | * Update view-port with the new surface
134 | *
135 | * @param gl the surface
136 | * @param width new width
137 | * @param height new height
138 | */
139 | public void onSurfaceChanged(GL10 gl, int width, int height) {
140 | // set view-port
141 | gl.glViewport(0, 0, width, height);
142 | // set projection matrix
143 | float ratio = (float) width / height;
144 | gl.glMatrixMode(GL10.GL_PROJECTION);
145 | gl.glLoadIdentity();
146 | gl.glFrustumf(-ratio, ratio, -1, 1, 1, 10);
147 | }
148 |
149 | @Override
150 | public void onSurfaceCreated(GL10 gl, EGLConfig config) {
151 | // dither is enabled by default, we don't need it
152 | gl.glDisable(GL10.GL_DITHER);
153 | // clear screen in black
154 | gl.glClearColor(0, 0, 0, 1);
155 | }
156 |
157 | /**
158 | * Flag indicating whether you want to view inside out, or outside in
159 | */
160 | private boolean showCubeInsideOut = true;
161 |
162 | /**
163 | * Toggles whether the cube will be shown inside-out or outside in.
164 | */
165 | public void toggleShowCubeInsideOut() {
166 | this.showCubeInsideOut = !showCubeInsideOut;
167 | }
168 | }
169 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/HardwareChecker.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import android.hardware.Sensor;
4 | import android.hardware.SensorManager;
5 |
6 | /**
7 | * Class that tests availability of hardware sensors.
8 | *
9 | * @author Alex
10 | *
11 | */
12 | public class HardwareChecker implements SensorChecker {
13 |
14 | boolean gyroscopeIsAvailable = false;
15 |
16 | public HardwareChecker (SensorManager sensorManager) {
17 | if(sensorManager.getSensorList(Sensor.TYPE_GYROSCOPE).size() > 0) {
18 | gyroscopeIsAvailable = true;
19 | }
20 | }
21 |
22 | @Override
23 | public boolean IsGyroscopeAvailable() {
24 | return gyroscopeIsAvailable;
25 | }
26 |
27 | }
28 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/OrientationVisualisationFragment.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.AccelerometerCompassProvider;
4 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.CalibratedGyroscopeProvider;
5 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.GravityCompassProvider;
6 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.ImprovedOrientationSensor1Provider;
7 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.ImprovedOrientationSensor2Provider;
8 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.MadgwickProvider;
9 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.OrientationProvider;
10 | import org.hitlabnz.sensor_fusion_demo.orientationProvider.RotationVectorProvider;
11 |
12 | import android.hardware.SensorManager;
13 | import android.opengl.GLSurfaceView;
14 | import android.os.Bundle;
15 | import android.support.v4.app.Fragment;
16 | import android.view.LayoutInflater;
17 | import android.view.View;
18 | import android.view.ViewGroup;
19 | import android.view.View.OnLongClickListener;
20 |
21 | /**
22 | * A fragment that contains the same visualisation for different orientation providers
23 | */
24 | public class OrientationVisualisationFragment extends Fragment {
25 | /**
26 | * The surface that will be drawn upon
27 | */
28 | private GLSurfaceView mGLSurfaceView;
29 | /**
30 | * The class that renders the cube
31 | */
32 | private CubeRenderer mRenderer;
33 | /**
34 | * The current orientation provider that delivers device orientation.
35 | */
36 | private OrientationProvider currentOrientationProvider;
37 |
38 | /**
39 | * The fragment argument representing the section number for this
40 | * fragment.
41 | */
42 | public static final String ARG_SECTION_NUMBER = "section_number";
43 |
44 | @Override
45 | public void onResume() {
46 | // Ideally a game should implement onResume() and onPause()
47 | // to take appropriate action when the activity looses focus
48 | super.onResume();
49 | currentOrientationProvider.start();
50 | mGLSurfaceView.onResume();
51 | }
52 |
53 | @Override
54 | public void onPause() {
55 | // Ideally a game should implement onResume() and onPause()
56 | // to take appropriate action when the activity looses focus
57 | super.onPause();
58 | currentOrientationProvider.stop();
59 | mGLSurfaceView.onPause();
60 | }
61 |
62 | @Override
63 | public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
64 | // Initialise the orientationProvider
65 | switch (getArguments().getInt(ARG_SECTION_NUMBER)) {
66 | case 1:
67 | currentOrientationProvider = new ImprovedOrientationSensor1Provider((SensorManager) getActivity()
68 | .getSystemService(SensorSelectionActivity.SENSOR_SERVICE));
69 | break;
70 | case 2:
71 | currentOrientationProvider = new ImprovedOrientationSensor2Provider((SensorManager) getActivity()
72 | .getSystemService(SensorSelectionActivity.SENSOR_SERVICE));
73 | break;
74 | case 3:
75 | currentOrientationProvider = new RotationVectorProvider((SensorManager) getActivity().getSystemService(
76 | SensorSelectionActivity.SENSOR_SERVICE));
77 | break;
78 | case 7:
79 | currentOrientationProvider = new CalibratedGyroscopeProvider((SensorManager) getActivity()
80 | .getSystemService(SensorSelectionActivity.SENSOR_SERVICE));
81 | break;
82 | case 5:
83 | currentOrientationProvider = new GravityCompassProvider((SensorManager) getActivity().getSystemService(
84 | SensorSelectionActivity.SENSOR_SERVICE));
85 | break;
86 | case 6:
87 | currentOrientationProvider = new AccelerometerCompassProvider((SensorManager) getActivity()
88 | .getSystemService(SensorSelectionActivity.SENSOR_SERVICE));
89 | break;
90 | case 4:
91 | currentOrientationProvider = new MadgwickProvider((SensorManager) getActivity()
92 | .getSystemService(SensorSelectionActivity.SENSOR_SERVICE), 0.07f, 50.0f);
93 | break;
94 | default:
95 | break;
96 | }
97 |
98 | // Create our Preview view and set it as the content of our Activity
99 | mRenderer = new CubeRenderer();
100 | mRenderer.setOrientationProvider(currentOrientationProvider);
101 | mGLSurfaceView = new GLSurfaceView(getActivity());
102 | mGLSurfaceView.setEGLConfigChooser(8, 8, 8, 8, 16, 0);
103 | mGLSurfaceView.setRenderer(mRenderer);
104 |
105 | mGLSurfaceView.setOnLongClickListener(new OnLongClickListener() {
106 |
107 | @Override
108 | public boolean onLongClick(View v) {
109 | mRenderer.toggleShowCubeInsideOut();
110 | return true;
111 | }
112 | });
113 |
114 | return mGLSurfaceView;
115 | }
116 | }
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/SensorChecker.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
/**
 * Abstraction for probing the sensor hardware of the current device.
 * Implemented by {@code HardwareChecker}, which queries the Android SensorManager.
 */
public interface SensorChecker {

    /**
     * Checks if the device that is currently running the application has a hardware gyroscope built into it.
     *
     * @return True, if a gyroscope is available. False otherwise.
     */
    // NOTE(review): the name violates Java's lowerCamelCase method convention
    // (should be isGyroscopeAvailable); renaming would break existing implementers
    // and callers, so it is kept as-is.
    public boolean IsGyroscopeAvailable();
}
12 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/SensorSelectionActivity.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo;
2 |
3 | import java.util.Locale;
4 |
5 | import android.app.AlertDialog;
6 | import android.content.DialogInterface;
7 | import android.content.Intent;
8 | import android.hardware.SensorManager;
9 | import android.os.Bundle;
10 | import android.support.v4.app.Fragment;
11 | import android.support.v4.app.FragmentActivity;
12 | import android.support.v4.app.FragmentManager;
13 | import android.support.v4.app.FragmentPagerAdapter;
14 | import android.support.v4.view.ViewPager;
15 | import android.view.Menu;
16 | import android.view.MenuItem;
17 |
18 | /**
19 | * The main activity where the user can select which sensor-fusion he wants to try out
20 | *
21 | * @author Alexander Pacha
22 | *
23 | */
24 | public class SensorSelectionActivity extends FragmentActivity {
25 |
26 | /**
27 | * The {@link android.support.v4.view.PagerAdapter} that will provide
28 | * fragments for each of the sections. We use a {@link android.support.v4.app.FragmentPagerAdapter} derivative,
29 | * which
30 | * will keep every loaded fragment in memory. If this becomes too memory
31 | * intensive, it may be best to switch to a {@link android.support.v4.app.FragmentStatePagerAdapter}.
32 | */
33 | SectionsPagerAdapter mSectionsPagerAdapter;
34 |
35 | /**
36 | * The {@link ViewPager} that will host the section contents.
37 | */
38 | ViewPager mViewPager;
39 |
40 | @Override
41 | protected void onCreate(Bundle savedInstanceState) {
42 | super.onCreate(savedInstanceState);
43 | setContentView(R.layout.activity_sensor_selection);
44 |
45 | // Create the adapter that will return a fragment for each of the three
46 | // primary sections of the app.
47 | mSectionsPagerAdapter = new SectionsPagerAdapter(getSupportFragmentManager());
48 |
49 | // Set up the ViewPager with the sections adapter.
50 | mViewPager = (ViewPager) findViewById(R.id.pager);
51 | mViewPager.setAdapter(mSectionsPagerAdapter);
52 |
53 | // Check if device has a hardware gyroscope
54 | SensorChecker checker = new HardwareChecker((SensorManager) getSystemService(SENSOR_SERVICE));
55 | if(!checker.IsGyroscopeAvailable()) {
56 | // If a gyroscope is unavailable, display a warning.
57 | displayHardwareMissingWarning();
58 | }
59 | }
60 |
61 | private void displayHardwareMissingWarning() {
62 | AlertDialog ad = new AlertDialog.Builder(this).create();
63 | ad.setCancelable(false); // This blocks the 'BACK' button
64 | ad.setTitle(getResources().getString(R.string.gyroscope_missing));
65 | ad.setMessage(getResources().getString(R.string.gyroscope_missing_message));
66 | ad.setButton(DialogInterface.BUTTON_NEUTRAL, getResources().getString(R.string.OK), new DialogInterface.OnClickListener() {
67 | @Override
68 | public void onClick(DialogInterface dialog, int which) {
69 | dialog.dismiss();
70 | }
71 | });
72 | ad.show();
73 | }
74 |
75 | @Override
76 | public boolean onCreateOptionsMenu(Menu menu) {
77 | // Inflate the menu; this adds items to the action bar if it is present.
78 | getMenuInflater().inflate(R.menu.sensor_selection, menu);
79 | return true;
80 | }
81 |
82 | @Override
83 | public boolean onOptionsItemSelected(MenuItem item) {
84 | // Handle item selection
85 | switch (item.getItemId()) {
86 | case R.id.action_about:
87 | Intent intent = new Intent(this, AboutActivity.class);
88 | startActivity(intent);
89 | return true;
90 | }
91 | return false;
92 | }
93 |
94 | /**
95 | * A {@link FragmentPagerAdapter} that returns a fragment corresponding to
96 | * one of the sections/tabs/pages.
97 | */
98 | public class SectionsPagerAdapter extends FragmentPagerAdapter {
99 |
100 | /**
101 | * Initialises a new sectionPagerAdapter
102 | *
103 | * @param fm the fragment Manager
104 | */
105 | public SectionsPagerAdapter(FragmentManager fm) {
106 | super(fm);
107 | }
108 |
109 | @Override
110 | public Fragment getItem(int position) {
111 | // getItem is called to instantiate the fragment for the given page.
112 | // Return a DummySectionFragment (defined as a static inner class
113 | // below) with the page number as its lone argument.
114 | Fragment fragment = new OrientationVisualisationFragment();
115 | Bundle args = new Bundle();
116 | args.putInt(OrientationVisualisationFragment.ARG_SECTION_NUMBER, position + 1);
117 | fragment.setArguments(args);
118 | return fragment;
119 | }
120 |
121 | @Override
122 | public int getCount() {
123 | // Show 6 total pages.
124 | return 7;
125 | }
126 |
127 | @Override
128 | public CharSequence getPageTitle(int position) {
129 | Locale l = Locale.getDefault();
130 | switch (position) {
131 | case 0:
132 | return getString(R.string.title_section1).toUpperCase(l);
133 | case 1:
134 | return getString(R.string.title_section2).toUpperCase(l);
135 | case 2:
136 | return getString(R.string.title_section3).toUpperCase(l);
137 | case 3:
138 | return "Madgwick".toUpperCase(l);
139 | case 4:
140 | return getString(R.string.title_section5).toUpperCase(l);
141 | case 5:
142 | return getString(R.string.title_section6).toUpperCase(l);
143 | case 6:
144 | return getString(R.string.title_section4).toUpperCase(l);
145 | }
146 | return null;
147 | }
148 | }
149 |
150 | }
151 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/AccelerometerCompassProvider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import android.hardware.Sensor;
4 | import android.hardware.SensorEvent;
5 | import android.hardware.SensorManager;
6 |
7 | /**
8 | * The orientation provider that delivers the current orientation from the {@link Sensor#TYPE_ACCELEROMETER
9 | * Accelerometer} and {@link Sensor#TYPE_MAGNETIC_FIELD Compass}.
10 | *
11 | * @author Alexander Pacha
12 | *
13 | */
14 | public class AccelerometerCompassProvider extends OrientationProvider {
15 |
16 | /**
17 | * Compass values
18 | */
19 | final private float[] magnitudeValues = new float[3];
20 |
21 | /**
22 | * Accelerometer values
23 | */
24 | final private float[] accelerometerValues = new float[3];
25 |
26 | /**
27 | * Inclination values
28 | */
29 | final float[] inclinationValues = new float[16];
30 |
31 | /**
32 | * Initialises a new AccelerometerCompassProvider
33 | *
34 | * @param sensorManager The android sensor manager
35 | */
36 | public AccelerometerCompassProvider(SensorManager sensorManager) {
37 | super(sensorManager);
38 |
39 | //Add the compass and the accelerometer
40 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER));
41 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD));
42 | }
43 |
44 | @Override
45 | public void onSensorChanged(SensorEvent event) {
46 |
47 | // we received a sensor event. it is a good practice to check
48 | // that we received the proper event
49 | if (event.sensor.getType() == Sensor.TYPE_MAGNETIC_FIELD) {
50 | System.arraycopy(event.values, 0, magnitudeValues, 0, magnitudeValues.length);
51 | } else if (event.sensor.getType() == Sensor.TYPE_ACCELEROMETER) {
52 | System.arraycopy(event.values, 0, accelerometerValues, 0, accelerometerValues.length);
53 | }
54 |
55 | if (magnitudeValues != null && accelerometerValues != null) {
56 | // Fuse accelerometer with compass
57 | SensorManager.getRotationMatrix(currentOrientationRotationMatrix.matrix, inclinationValues, accelerometerValues,
58 | magnitudeValues);
59 | // Transform rotation matrix to quaternion
60 | currentOrientationQuaternion.setRowMajor(currentOrientationRotationMatrix.matrix);
61 | }
62 | }
63 | }
64 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/CalibratedGyroscopeProvider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
4 |
5 | import android.hardware.Sensor;
6 | import android.hardware.SensorEvent;
7 | import android.hardware.SensorManager;
8 |
9 | /**
10 | * The orientation provider that delivers the relative orientation from the {@link Sensor#TYPE_GYROSCOPE
11 | * Gyroscope}. This sensor does not deliver an absolute orientation (with respect to magnetic north and gravity) but
12 | * only a relative measurement starting from the point where it started.
13 | *
14 | * @author Alexander Pacha
15 | *
16 | */
17 | public class CalibratedGyroscopeProvider extends OrientationProvider {
18 |
19 | /**
20 | * Constant specifying the factor between a Nano-second and a second
21 | */
22 | private static final float NS2S = 1.0f / 1000000000.0f;
23 |
24 | /**
25 | * The quaternion that stores the difference that is obtained by the gyroscope.
26 | * Basically it contains a rotational difference encoded into a quaternion.
27 | *
28 | * To obtain the absolute orientation one must add this into an initial position by
29 | * multiplying it with another quaternion
30 | */
31 | private final Quaternion deltaQuaternion = new Quaternion();
32 |
33 | /**
34 | * The time-stamp being used to record the time when the last gyroscope event occurred.
35 | */
36 | private long timestamp;
37 |
38 | /**
39 | * This is a filter-threshold for discarding Gyroscope measurements that are below a certain level and
40 | * potentially are only noise and not real motion. Values from the gyroscope are usually between 0 (stop) and
41 | * 10 (rapid rotation), so 0.1 seems to be a reasonable threshold to filter noise (usually smaller than 0.1) and
42 | * real motion (usually > 0.1). Note that there is a chance of missing real motion, if the use is turning the
43 | * device really slowly, so this value has to find a balance between accepting noise (threshold = 0) and missing
44 | * slow user-action (threshold > 0.5). 0.1 seems to work fine for most applications.
45 | *
46 | */
47 | private static final double EPSILON = 0.1f;
48 |
49 | /**
50 | * Value giving the total velocity of the gyroscope (will be high, when the device is moving fast and low when
51 | * the device is standing still). This is usually a value between 0 and 10 for normal motion. Heavy shaking can
52 | * increase it to about 25. Keep in mind, that these values are time-depended, so changing the sampling rate of
53 | * the sensor will affect this value!
54 | */
55 | private double gyroscopeRotationVelocity = 0;
56 |
57 | /**
58 | * Temporary variable to save allocations.
59 | */
60 | private Quaternion correctedQuaternion = new Quaternion();
61 |
62 | /**
63 | * Initialises a new CalibratedGyroscopeProvider
64 | *
65 | * @param sensorManager The android sensor manager
66 | */
67 | public CalibratedGyroscopeProvider(SensorManager sensorManager) {
68 | super(sensorManager);
69 |
70 | //Add the gyroscope
71 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE));
72 | }
73 |
74 | @Override
75 | public void onSensorChanged(SensorEvent event) {
76 |
77 | // we received a sensor event. it is a good practice to check
78 | // that we received the proper event
79 | if (event.sensor.getType() == Sensor.TYPE_GYROSCOPE) {
80 |
81 | // This timestamps delta rotation to be multiplied by the current rotation
82 | // after computing it from the gyro sample data.
83 | if (timestamp != 0) {
84 | final float dT = (event.timestamp - timestamp) * NS2S;
85 | // Axis of the rotation sample, not normalized yet.
86 | float axisX = event.values[0];
87 | float axisY = event.values[1];
88 | float axisZ = event.values[2];
89 |
90 | // Calculate the angular speed of the sample
91 | gyroscopeRotationVelocity = Math.sqrt(axisX * axisX + axisY * axisY + axisZ * axisZ);
92 |
93 | // Normalize the rotation vector if it's big enough to get the axis
94 | if (gyroscopeRotationVelocity > EPSILON) {
95 | axisX /= gyroscopeRotationVelocity;
96 | axisY /= gyroscopeRotationVelocity;
97 | axisZ /= gyroscopeRotationVelocity;
98 | }
99 |
100 | // Integrate around this axis with the angular speed by the timestep
101 | // in order to get a delta rotation from this sample over the timestep
102 | // We will convert this axis-angle representation of the delta rotation
103 | // into a quaternion before turning it into the rotation matrix.
104 | double thetaOverTwo = gyroscopeRotationVelocity * dT / 2.0f;
105 | double sinThetaOverTwo = Math.sin(thetaOverTwo);
106 | double cosThetaOverTwo = Math.cos(thetaOverTwo);
107 | deltaQuaternion.setX((float) (sinThetaOverTwo * axisX));
108 | deltaQuaternion.setY((float) (sinThetaOverTwo * axisY));
109 | deltaQuaternion.setZ((float) (sinThetaOverTwo * axisZ));
110 | deltaQuaternion.setW(-(float) cosThetaOverTwo);
111 |
112 | // Matrix rendering in CubeRenderer does not seem to have this problem.
113 | synchronized (synchronizationToken) {
114 | // Move current gyro orientation if gyroscope should be used
115 | deltaQuaternion.multiplyByQuat(currentOrientationQuaternion, currentOrientationQuaternion);
116 | }
117 |
118 | correctedQuaternion.set(currentOrientationQuaternion);
119 | // We inverted w in the deltaQuaternion, because currentOrientationQuaternion required it.
120 | // Before converting it back to matrix representation, we need to revert this process
121 | correctedQuaternion.w(-correctedQuaternion.w());
122 |
123 | synchronized (synchronizationToken) {
124 | // Set the rotation matrix as well to have both representations
125 | SensorManager.getRotationMatrixFromVector(currentOrientationRotationMatrix.matrix,
126 | correctedQuaternion.array());
127 | }
128 | }
129 | timestamp = event.timestamp;
130 | }
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/GravityCompassProvider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import android.hardware.Sensor;
4 | import android.hardware.SensorEvent;
5 | import android.hardware.SensorManager;
6 |
7 | /**
8 | * The orientation provider that delivers the current orientation from the {@link Sensor#TYPE_GRAVITY
9 | * Gravity} and {@link Sensor#TYPE_MAGNETIC_FIELD Compass}.
10 | *
11 | * @author Alexander Pacha
12 | *
13 | */
14 | public class GravityCompassProvider extends OrientationProvider {
15 |
16 | /**
17 | * Compass values
18 | */
19 | final private float[] magnitudeValues = new float[3];
20 |
21 | /**
22 | * Gravity values
23 | */
24 | final private float[] gravityValues = new float[3];
25 |
26 | /**
27 | * Inclination values
28 | */
29 | float[] inclinationValues = new float[16];
30 |
31 | /**
32 | * Initialises a new GravityCompassProvider
33 | *
34 | * @param sensorManager The android sensor manager
35 | */
36 | public GravityCompassProvider(SensorManager sensorManager) {
37 | super(sensorManager);
38 |
39 | //Add the compass and the gravity sensor
40 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GRAVITY));
41 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD));
42 | }
43 |
44 | @Override
45 | public void onSensorChanged(SensorEvent event) {
46 |
47 | // we received a sensor event. it is a good practice to check
48 | // that we received the proper event
49 | if (event.sensor.getType() == Sensor.TYPE_MAGNETIC_FIELD) {
50 | System.arraycopy(event.values, 0, magnitudeValues, 0, magnitudeValues.length);
51 | } else if (event.sensor.getType() == Sensor.TYPE_GRAVITY) {
52 | System.arraycopy(event.values, 0, gravityValues, 0, gravityValues.length);
53 | }
54 |
55 | if (magnitudeValues != null && gravityValues != null) {
56 | // Fuse gravity-sensor (virtual sensor) with compass
57 | SensorManager.getRotationMatrix(currentOrientationRotationMatrix.matrix, inclinationValues, gravityValues, magnitudeValues);
58 | // Transform rotation matrix to quaternion
59 | currentOrientationQuaternion.setRowMajor(currentOrientationRotationMatrix.matrix);
60 | }
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/ImprovedOrientationSensor1Provider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
4 |
5 | import android.hardware.Sensor;
6 | import android.hardware.SensorEvent;
7 | import android.hardware.SensorManager;
8 | import android.util.Log;
9 |
10 | /**
11 | * The orientation provider that delivers the absolute orientation from the {@link Sensor#TYPE_GYROSCOPE
12 | * Gyroscope} and {@link Sensor#TYPE_ROTATION_VECTOR Android Rotation Vector sensor}.
13 | *
14 | * It mainly relies on the gyroscope, but corrects with the Android Rotation Vector which also provides an absolute
15 | * estimation of current orientation. The correction is a static weight.
16 | *
17 | * @author Alexander Pacha
18 | *
19 | */
public class ImprovedOrientationSensor1Provider extends OrientationProvider {

    /**
     * Constant specifying the factor between a Nano-second and a second
     */
    private static final float NS2S = 1.0f / 1000000000.0f;

    /**
     * The quaternion that stores the difference that is obtained by the gyroscope.
     * Basically it contains a rotational difference encoded into a quaternion.
     *
     * To obtain the absolute orientation one must add this into an initial position by
     * multiplying it with another quaternion
     */
    private final Quaternion deltaQuaternion = new Quaternion();

    /**
     * The Quaternions that contain the current rotation (Angle and axis in Quaternion format) of the Gyroscope
     */
    private Quaternion quaternionGyroscope = new Quaternion();

    /**
     * The quaternion that contains the absolute orientation as obtained by the rotationVector sensor.
     */
    private Quaternion quaternionRotationVector = new Quaternion();

    /**
     * The time-stamp being used to record the time when the last gyroscope event occurred.
     */
    private long timestamp;

    /**
     * This is a filter-threshold for discarding Gyroscope measurements that are below a certain level and
     * potentially are only noise and not real motion. Values from the gyroscope are usually between 0 (stop) and
     * 10 (rapid rotation), so 0.1 seems to be a reasonable threshold to filter noise (usually smaller than 0.1) and
     * real motion (usually > 0.1). Note that there is a chance of missing real motion, if the user is turning the
     * device really slowly, so this value has to find a balance between accepting noise (threshold = 0) and missing
     * slow user-action (threshold > 0.5). 0.1 seems to work fine for most applications.
     *
     */
    private static final double EPSILON = 0.1f;

    /**
     * Value giving the total velocity of the gyroscope (will be high, when the device is moving fast and low when
     * the device is standing still). This is usually a value between 0 and 10 for normal motion. Heavy shaking can
     * increase it to about 25. Keep in mind, that these values are time-dependent, so changing the sampling rate of
     * the sensor will affect this value!
     */
    private double gyroscopeRotationVelocity = 0;

    /**
     * Flag indicating, whether the orientations were initialised from the rotation vector or not. If false, the
     * gyroscope can not be used (since it's only meaningful to calculate differences from an initial state). If
     * true,
     * the gyroscope can be used normally.
     */
    private boolean positionInitialised = false;

    /**
     * Counter that sums the number of consecutive frames, where the rotationVector and the gyroscope were
     * significantly different (and the dot-product was smaller than 0.7). This event can either happen when the
     * angles of the rotation vector explode (e.g. during fast tilting) or when the device was shaken heavily and
     * the gyroscope is now completely off.
     */
    private int panicCounter;

    /**
     * This weight determines directly how much the rotation sensor will be used to correct (in
     * Sensor-fusion-scenario 1 - SensorSelection.GyroscopeAndRotationVector). Must be a value between 0 and 1.
     * 0 means that the system entirely relies on the gyroscope, whereas 1 means that the system relies entirely on
     * the rotationVector.
     */
    private static final float DIRECT_INTERPOLATION_WEIGHT = 0.005f;

    /**
     * The threshold that indicates an outlier of the rotation vector. If the dot-product between the two vectors
     * (gyroscope orientation and rotationVector orientation) falls below this threshold (ideally it should be 1,
     * if they are exactly the same) the system falls back to the gyroscope values only and just ignores the
     * rotation vector.
     *
     * This value should be quite high (> 0.7) to filter even the slightest discrepancies that cause jumps when
     * tilting the device. Possible values are between 0 and 1, where a value close to 1 means that even a very small
     * difference between the two sensors will be treated as outlier, whereas a value close to zero means that
     * almost any discrepancy between the two sensors is tolerated.
     */
    private static final float OUTLIER_THRESHOLD = 0.85f;

    /**
     * The threshold that indicates a massive discrepancy between the rotation vector and the gyroscope orientation.
     * If the dot-product between the two vectors
     * (gyroscope orientation and rotationVector orientation) falls below this threshold (ideally it should be 1, if
     * they are exactly the same), the system will start increasing the panic counter (that probably indicates a
     * gyroscope failure).
     *
     * This value should be lower than OUTLIER_THRESHOLD (0.5 - 0.7) to only start increasing the panic counter,
     * when there is a
     * huge discrepancy between the two fused sensors.
     */
    private static final float OUTLIER_PANIC_THRESHOLD = 0.65f;

    /**
     * The threshold that indicates that a chaos state has been established rather than just a temporary peak in the
     * rotation vector (caused by exploding angles during fast tilting).
     *
     * If the chaosCounter is bigger than this threshold, the current position will be reset to whatever the
     * rotation vector indicates.
     */
    private static final int PANIC_THRESHOLD = 60;

    /**
     * Some temporary variables to save allocations
     */
    final private float[] temporaryQuaternion = new float[4];
    final private Quaternion correctedQuaternion = new Quaternion();
    final private Quaternion interpolatedQuaternion = new Quaternion();

    /**
     * Initialises a new ImprovedOrientationSensor1Provider
     *
     * @param sensorManager The android sensor manager
     */
    public ImprovedOrientationSensor1Provider(SensorManager sensorManager) {
        super(sensorManager);

        //Add the gyroscope and rotation Vector
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE));
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_ROTATION_VECTOR));
    }

    @Override
    public void onSensorChanged(SensorEvent event) {

        if (event.sensor.getType() == Sensor.TYPE_ROTATION_VECTOR) {
            // Process rotation vector (just save it)
            // Calculate angle. Starting with API_18, Android will provide this value as event.values[3], but if not, we have to calculate it manually.
            SensorManager.getQuaternionFromVector(temporaryQuaternion, event.values);

            // Store in quaternion. Note the reordering: getQuaternionFromVector returns
            // (w, x, y, z), while this class keeps (x, y, z, -w) — w is negated on purpose;
            // it is negated again before matrix conversion in setOrientationQuaternionAndMatrix.
            quaternionRotationVector.setXYZW(temporaryQuaternion[1], temporaryQuaternion[2], temporaryQuaternion[3], -temporaryQuaternion[0]);
            if (!positionInitialised) {
                // Override the gyroscope orientation once with the first absolute reading,
                // so subsequent gyroscope deltas integrate from a meaningful starting point.
                quaternionGyroscope.set(quaternionRotationVector);
                positionInitialised = true;
            }

        } else if (event.sensor.getType() == Sensor.TYPE_GYROSCOPE) {
            // Process Gyroscope and perform fusion

            // This timestep's delta rotation to be multiplied by the current rotation
            // after computing it from the gyro sample data.
            if (timestamp != 0) {
                final float dT = (event.timestamp - timestamp) * NS2S;
                // Axis of the rotation sample, not normalized yet.
                float axisX = event.values[0];
                float axisY = event.values[1];
                float axisZ = event.values[2];

                // Calculate the angular speed of the sample
                gyroscopeRotationVelocity = Math.sqrt(axisX * axisX + axisY * axisY + axisZ * axisZ);

                // Normalize the rotation vector if it's big enough to get the axis
                if (gyroscopeRotationVelocity > EPSILON) {
                    axisX /= gyroscopeRotationVelocity;
                    axisY /= gyroscopeRotationVelocity;
                    axisZ /= gyroscopeRotationVelocity;
                }

                // Integrate around this axis with the angular speed by the timestep
                // in order to get a delta rotation from this sample over the timestep
                // We will convert this axis-angle representation of the delta rotation
                // into a quaternion before turning it into the rotation matrix.
                double thetaOverTwo = gyroscopeRotationVelocity * dT / 2.0f;
                double sinThetaOverTwo = Math.sin(thetaOverTwo);
                double cosThetaOverTwo = Math.cos(thetaOverTwo);
                deltaQuaternion.setX((float) (sinThetaOverTwo * axisX));
                deltaQuaternion.setY((float) (sinThetaOverTwo * axisY));
                deltaQuaternion.setZ((float) (sinThetaOverTwo * axisZ));
                deltaQuaternion.setW(-(float) cosThetaOverTwo);

                // Move current gyro orientation
                deltaQuaternion.multiplyByQuat(quaternionGyroscope, quaternionGyroscope);

                // Calculate dot-product to calculate whether the two orientation sensors have diverged
                // (if the dot-product is closer to 0 than to 1), because it should be close to 1 if both are the same.
                float dotProd = quaternionGyroscope.dotProduct(quaternionRotationVector);

                // If they have diverged, rely on gyroscope only (this happens on some devices when the rotation vector "jumps").
                if (Math.abs(dotProd) < OUTLIER_THRESHOLD) {
                    // Increase panic counter
                    if (Math.abs(dotProd) < OUTLIER_PANIC_THRESHOLD) {
                        panicCounter++;
                    }

                    // Directly use Gyro
                    setOrientationQuaternionAndMatrix(quaternionGyroscope);

                } else {
                    // Both are nearly saying the same. Perform normal fusion.

                    // Interpolate with a fixed weight between the two absolute quaternions obtained from gyro and rotation vector sensors
                    // The weight should be quite low, so the rotation vector corrects the gyro only slowly, and the output keeps responsive.
                    quaternionGyroscope.slerp(quaternionRotationVector, interpolatedQuaternion, DIRECT_INTERPOLATION_WEIGHT);

                    // Use the interpolated value between gyro and rotationVector
                    setOrientationQuaternionAndMatrix(interpolatedQuaternion);
                    // Override current gyroscope-orientation
                    quaternionGyroscope.copyVec4(interpolatedQuaternion);

                    // Reset the panic counter because both sensors are saying the same again
                    panicCounter = 0;
                }

                if (panicCounter > PANIC_THRESHOLD) {
                    Log.d("Rotation Vector",
                            "Panic counter is bigger than threshold; this indicates a Gyroscope failure. Panic reset is imminent.");

                    // Only reset while the device is (nearly) at rest; during heavy shaking
                    // the rotation vector itself is unreliable, so the reset is deferred.
                    if (gyroscopeRotationVelocity < 3) {
                        Log.d("Rotation Vector",
                                "Performing Panic-reset. Resetting orientation to rotation-vector value.");

                        // Manually set position to whatever rotation vector says.
                        setOrientationQuaternionAndMatrix(quaternionRotationVector);
                        // Override current gyroscope-orientation with corrected value
                        quaternionGyroscope.copyVec4(quaternionRotationVector);

                        panicCounter = 0;
                    } else {
                        Log.d("Rotation Vector",
                                String.format(
                                        "Panic reset delayed due to ongoing motion (user is still shaking the device). Gyroscope Velocity: %.2f > 3",
                                        gyroscopeRotationVelocity));
                    }
                }
            }
            timestamp = event.timestamp;
        }
    }

    /**
     * Sets the output quaternion and matrix with the provided quaternion and synchronises the setting
     *
     * @param quaternion The Quaternion to set (the result of the sensor fusion)
     */
    private void setOrientationQuaternionAndMatrix(Quaternion quaternion) {
        correctedQuaternion.set(quaternion);
        // We inverted w in the deltaQuaternion, because currentOrientationQuaternion required it.
        // Before converting it back to matrix representation, we need to revert this process
        correctedQuaternion.w(-correctedQuaternion.w());

        synchronized (synchronizationToken) {
            // Use gyro only
            currentOrientationQuaternion.copyVec4(quaternion);

            // Set the rotation matrix as well to have both representations
            SensorManager.getRotationMatrixFromVector(currentOrientationRotationMatrix.matrix, correctedQuaternion.array());
        }
    }
}
278 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/ImprovedOrientationSensor2Provider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
4 |
5 | import android.hardware.Sensor;
6 | import android.hardware.SensorEvent;
7 | import android.hardware.SensorManager;
8 | import android.util.Log;
9 |
10 | /**
11 | * The orientation provider that delivers the absolute orientation from the {@link Sensor#TYPE_GYROSCOPE
12 | * Gyroscope} and {@link Sensor#TYPE_ROTATION_VECTOR Android Rotation Vector sensor}.
13 | *
14 | * It mainly relies on the gyroscope, but corrects with the Android Rotation Vector which also provides an absolute
15 | * estimation of current orientation. The correction is a static weight.
16 | *
17 | * @author Alexander Pacha
18 | *
19 | */
public class ImprovedOrientationSensor2Provider extends OrientationProvider {

    /**
     * Constant specifying the factor between a Nano-second and a second
     */
    private static final float NS2S = 1.0f / 1000000000.0f;

    /**
     * The quaternion that stores the difference that is obtained by the gyroscope.
     * Basically it contains a rotational difference encoded into a quaternion.
     *
     * To obtain the absolute orientation one must add this into an initial position by
     * multiplying it with another quaternion
     */
    private final Quaternion deltaQuaternion = new Quaternion();

    /**
     * The Quaternion that contains the current rotation (angle and axis in Quaternion format) of the Gyroscope
     */
    private Quaternion quaternionGyroscope = new Quaternion();

    /**
     * The quaternion that contains the absolute orientation as obtained by the rotationVector sensor.
     */
    private Quaternion quaternionRotationVector = new Quaternion();

    /**
     * The time-stamp being used to record the time when the last gyroscope event occurred.
     */
    private long timestamp;

    /**
     * This is a filter-threshold for discarding Gyroscope measurements that are below a certain level and
     * potentially are only noise and not real motion. Values from the gyroscope are usually between 0 (stop) and
     * 10 (rapid rotation), so 0.1 seems to be a reasonable threshold to filter noise (usually smaller than 0.1) and
     * real motion (usually > 0.1). Note that there is a chance of missing real motion, if the user is turning the
     * device really slowly, so this value has to find a balance between accepting noise (threshold = 0) and missing
     * slow user-action (threshold > 0.5). 0.1 seems to work fine for most applications.
     */
    private static final double EPSILON = 0.1f;

    /**
     * Value giving the total velocity of the gyroscope (will be high, when the device is moving fast and low when
     * the device is standing still). This is usually a value between 0 and 10 for normal motion. Heavy shaking can
     * increase it to about 25. Keep in mind, that these values are time-dependent, so changing the sampling rate of
     * the sensor will affect this value!
     */
    private double gyroscopeRotationVelocity = 0;

    /**
     * Flag indicating, whether the orientations were initialised from the rotation vector or not. If false, the
     * gyroscope can not be used (since it's only meaningful to calculate differences from an initial state). If
     * true, the gyroscope can be used normally.
     */
    private boolean positionInitialised = false;

    /**
     * Counter that sums the number of consecutive frames, where the rotationVector and the gyroscope were
     * significantly different (and the dot-product was smaller than 0.7). This event can either happen when the
     * angles of the rotation vector explode (e.g. during fast tilting) or when the device was shaken heavily and
     * the gyroscope is now completely off.
     */
    private int panicCounter;

    /**
     * This weight determines indirectly how much the rotation sensor will be used to correct. This weight will be
     * multiplied by the velocity to obtain the actual weight. (in sensor-fusion-scenario 2 -
     * SensorSelection.GyroscopeAndRotationVector2).
     * Must be a value between 0 and approx. 0.04 (because, if multiplied with a velocity of up to 25, should be still
     * less than 1, otherwise the SLERP will not correctly interpolate). Should be close to zero.
     */
    private static final float INDIRECT_INTERPOLATION_WEIGHT = 0.01f;

    /**
     * The threshold that indicates an outlier of the rotation vector. If the dot-product between the two vectors
     * (gyroscope orientation and rotationVector orientation) falls below this threshold (ideally it should be 1,
     * if they are exactly the same) the system falls back to the gyroscope values only and just ignores the
     * rotation vector.
     *
     * This value should be quite high (> 0.7) to filter even the slightest discrepancies that cause jumps when
     * tilting the device. Possible values are between 0 and 1, where a value close to 1 means that even a very small
     * difference between the two sensors will be treated as outlier, whereas a value close to zero means that
     * almost any discrepancy between the two sensors is tolerated.
     */
    private static final float OUTLIER_THRESHOLD = 0.85f;

    /**
     * The threshold that indicates a massive discrepancy between the rotation vector and the gyroscope orientation.
     * If the dot-product between the two vectors
     * (gyroscope orientation and rotationVector orientation) falls below this threshold (ideally it should be 1, if
     * they are exactly the same), the system will start increasing the panic counter (that probably indicates a
     * gyroscope failure).
     *
     * This value should be lower than OUTLIER_THRESHOLD (0.5 - 0.7) to only start increasing the panic counter,
     * when there is a huge discrepancy between the two fused sensors.
     */
    private static final float OUTLIER_PANIC_THRESHOLD = 0.75f;

    /**
     * The threshold that indicates that a chaos state has been established rather than just a temporary peak in the
     * rotation vector (caused by exploding angles during fast tilting).
     *
     * If the panicCounter is bigger than this threshold, the current position will be reset to whatever the
     * rotation vector indicates.
     */
    private static final int PANIC_THRESHOLD = 60;

    // Temporary variables reused across sensor events to avoid per-event allocations
    // (onSensorChanged runs at sensor rate, so garbage here would cause GC churn).
    final private float[] temporaryQuaternion = new float[4];
    final private Quaternion correctedQuaternion = new Quaternion();
    final private Quaternion interpolatedQuaternion = new Quaternion();

    /**
     * Initialises a new ImprovedOrientationSensor2Provider
     *
     * @param sensorManager The android sensor manager
     */
    public ImprovedOrientationSensor2Provider(SensorManager sensorManager) {
        super(sensorManager);

        // Add the gyroscope and rotation Vector
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE));
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_ROTATION_VECTOR));
    }

    @Override
    public void onSensorChanged(SensorEvent event) {

        if (event.sensor.getType() == Sensor.TYPE_ROTATION_VECTOR) {
            // Process rotation vector (just save it)
            // Calculate angle. Starting with API_18, Android will provide this value as event.values[3], but if not, we have to calculate it manually.
            SensorManager.getQuaternionFromVector(temporaryQuaternion, event.values);

            // Store in quaternion. getQuaternionFromVector delivers [w, x, y, z]; this class
            // keeps quaternions as (x, y, z, -w) internally (see setOrientationQuaternionAndMatrix).
            quaternionRotationVector.setXYZW(temporaryQuaternion[1], temporaryQuaternion[2], temporaryQuaternion[3], -temporaryQuaternion[0]);
            if (!positionInitialised) {
                // Override the gyro orientation once with an absolute reference, so that
                // subsequent gyro deltas integrate from a meaningful starting point.
                quaternionGyroscope.set(quaternionRotationVector);
                positionInitialised = true;
            }

        } else if (event.sensor.getType() == Sensor.TYPE_GYROSCOPE) {
            // Process Gyroscope and perform fusion

            // This timestep's delta rotation to be multiplied by the current rotation
            // after computing it from the gyro sample data.
            if (timestamp != 0) {
                final float dT = (event.timestamp - timestamp) * NS2S;
                // Axis of the rotation sample, not normalized yet.
                float axisX = event.values[0];
                float axisY = event.values[1];
                float axisZ = event.values[2];

                // Calculate the angular speed of the sample
                gyroscopeRotationVelocity = Math.sqrt(axisX * axisX + axisY * axisY + axisZ * axisZ);

                // Normalize the rotation vector if it's big enough to get the axis.
                // NOTE(review): when the velocity is <= EPSILON the axis stays unnormalised
                // but is still integrated below (matches the Android SDK gyroscope sample) --
                // confirm this is intentional noise suppression.
                if (gyroscopeRotationVelocity > EPSILON) {
                    axisX /= gyroscopeRotationVelocity;
                    axisY /= gyroscopeRotationVelocity;
                    axisZ /= gyroscopeRotationVelocity;
                }

                // Integrate around this axis with the angular speed by the timestep
                // in order to get a delta rotation from this sample over the timestep
                // We will convert this axis-angle representation of the delta rotation
                // into a quaternion before turning it into the rotation matrix.
                double thetaOverTwo = gyroscopeRotationVelocity * dT / 2.0f;
                double sinThetaOverTwo = Math.sin(thetaOverTwo);
                double cosThetaOverTwo = Math.cos(thetaOverTwo);
                deltaQuaternion.setX((float) (sinThetaOverTwo * axisX));
                deltaQuaternion.setY((float) (sinThetaOverTwo * axisY));
                deltaQuaternion.setZ((float) (sinThetaOverTwo * axisZ));
                // w is negated to match the internal (x, y, z, -w) convention used throughout.
                deltaQuaternion.setW(-(float) cosThetaOverTwo);

                // Move current gyro orientation: apply the delta rotation to the running estimate.
                deltaQuaternion.multiplyByQuat(quaternionGyroscope, quaternionGyroscope);

                // Calculate dot-product to calculate whether the two orientation sensors have diverged
                // (if the dot-product is closer to 0 than to 1), because it should be close to 1 if both are the same.
                float dotProd = quaternionGyroscope.dotProduct(quaternionRotationVector);

                // If they have diverged, rely on gyroscope only (this happens on some devices when the rotation vector "jumps").
                if (Math.abs(dotProd) < OUTLIER_THRESHOLD) {
                    // Increase panic counter
                    if (Math.abs(dotProd) < OUTLIER_PANIC_THRESHOLD) {
                        panicCounter++;
                    }

                    // Directly use Gyro
                    setOrientationQuaternionAndMatrix(quaternionGyroscope);

                } else {
                    // Both are nearly saying the same. Perform normal fusion.

                    // Interpolate with a fixed weight between the two absolute quaternions obtained from gyro and rotation vector sensors
                    // The weight should be quite low, so the rotation vector corrects the gyro only slowly, and the output keeps responsive.
                    quaternionGyroscope.slerp(quaternionRotationVector, interpolatedQuaternion,
                            (float) (INDIRECT_INTERPOLATION_WEIGHT * gyroscopeRotationVelocity));

                    // Use the interpolated value between gyro and rotationVector
                    setOrientationQuaternionAndMatrix(interpolatedQuaternion);
                    // Override current gyroscope-orientation
                    quaternionGyroscope.copyVec4(interpolatedQuaternion);

                    // Reset the panic counter because both sensors are saying the same again
                    panicCounter = 0;
                }

                if (panicCounter > PANIC_THRESHOLD) {
                    Log.d("Rotation Vector",
                            "Panic counter is bigger than threshold; this indicates a Gyroscope failure. Panic reset is imminent.");

                    // Only reset while the device is (almost) at rest, otherwise the rotation
                    // vector itself may be unreliable during heavy motion.
                    if (gyroscopeRotationVelocity < 3) {
                        Log.d("Rotation Vector",
                                "Performing Panic-reset. Resetting orientation to rotation-vector value.");

                        // Manually set position to whatever rotation vector says.
                        setOrientationQuaternionAndMatrix(quaternionRotationVector);
                        // Override current gyroscope-orientation with corrected value
                        quaternionGyroscope.copyVec4(quaternionRotationVector);

                        panicCounter = 0;
                    } else {
                        Log.d("Rotation Vector",
                                String.format(
                                        "Panic reset delayed due to ongoing motion (user is still shaking the device). Gyroscope Velocity: %.2f > 3",
                                        gyroscopeRotationVelocity));
                    }
                }
            }
            timestamp = event.timestamp;
        }
    }

    /**
     * Sets the output quaternion and matrix with the provided quaternion and synchronises the setting
     *
     * @param quaternion The Quaternion to set (the result of the sensor fusion)
     */
    private void setOrientationQuaternionAndMatrix(Quaternion quaternion) {
        correctedQuaternion.set(quaternion);
        // We inverted w in the deltaQuaternion, because currentOrientationQuaternion required it.
        // Before converting it back to matrix representation, we need to revert this process.
        correctedQuaternion.w(-correctedQuaternion.w());

        synchronized (synchronizationToken) {
            // Publish the fused quaternion (still in the internal negated-w convention).
            currentOrientationQuaternion.copyVec4(quaternion);

            // Keep the matrix representation in sync so callers can use either form.
            SensorManager.getRotationMatrixFromVector(currentOrientationRotationMatrix.matrix, correctedQuaternion.array());
        }
    }
}
279 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/MadgwickProvider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import android.hardware.Sensor;
4 | import android.hardware.SensorEvent;
5 | import android.hardware.SensorManager;
6 |
7 | import org.hitlabnz.sensor_fusion_demo.representation.MatrixF4x4;
8 |
9 | /**
10 | * Created by lezh1k on 1/5/18.
11 | */
12 |
public class MadgwickProvider extends OrientationProvider {

    // Filter gain ("beta" in Madgwick's report): how strongly the accelerometer
    // corrects the gyroscope integration on every update step.
    private float gain;
    // Expected sensor sampling frequency in Hz; the integration step is 1.0f / sampleFreq.
    private float sampleFreq;
    // Current orientation estimate as a quaternion (w, x, y, z), normalised at the
    // end of every MadgwickAHRSupdateIMU call.
    private float qW, qX, qY, qZ; //quaternion

    // Most recent accelerometer / gyroscope samples (x, y, z; fourth slot unused).
    private float acc[] = new float[4];
    private float gyr[] = new float[4];

    /**
     * Initialises a new MadgwickProvider that fuses accelerometer and gyroscope data
     * using Madgwick's IMU filter.
     *
     * @param sensorManager The android sensor manager
     * @param gain The filter gain controlling accelerometer correction strength
     * @param sampleFreq The expected sampling frequency in Hz
     */
    public MadgwickProvider(SensorManager sensorManager,
                            float gain,
                            float sampleFreq) {
        super(sensorManager);
        this.gain = gain;
        this.sampleFreq = sampleFreq;
        qW = 1.0f;
        qX = 0.0f;
        qY = 0.0f;
        // NOTE(review): the identity quaternion is (1, 0, 0, 0); initialising qZ to 1.0f
        // gives a non-unit quaternion that the first update normalises into a 90-degree
        // rotation about z. Yaw is not observable from the accelerometer, so that offset
        // persists -- confirm whether this is intentional or should be 0.0f.
        qZ = 1.0f;
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER));
        sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE));
    }

    // 1/sqrt(x); plain wrapper around Math.sqrt (no fast-inverse-sqrt bit trick).
    private float invSqrt(float x) {
        return (float) (1.0f / Math.sqrt(x));
    }

    /**
     * Performs one step of Madgwick's IMU (gyroscope + accelerometer) fusion update on the
     * internal quaternion (qW, qX, qY, qZ).
     *
     * @param gx gyroscope rate about x (presumably rad/s as delivered by TYPE_GYROSCOPE -- confirm)
     * @param gy gyroscope rate about y
     * @param gz gyroscope rate about z
     * @param ax accelerometer x component (any consistent unit; normalised internally)
     * @param ay accelerometer y component
     * @param az accelerometer z component
     */
    public void MadgwickAHRSupdateIMU(float gx, float gy, float gz, float ax, float ay, float az) {
        float recipNorm;
        float s0, s1, s2, s3;
        float qDot1, qDot2, qDot3, qDot4;
        float _2q0, _2q1, _2q2, _2q3, _4q0, _4q1, _4q2 ,_8q1, _8q2, q0q0, q1q1, q2q2, q3q3;

        // Rate of change of quaternion from gyroscope
        qDot1 = 0.5f * (-qX * gx - qY * gy - qZ * gz);
        qDot2 = 0.5f * (qW * gx + qY * gz - qZ * gy);
        qDot3 = 0.5f * (qW * gy - qX * gz + qZ * gx);
        qDot4 = 0.5f * (qW * gz + qX * gy - qY * gx);

        // Compute feedback only if accelerometer measurement valid (avoids NaN in accelerometer normalisation)
        if(!((ax == 0.0f) && (ay == 0.0f) && (az == 0.0f))) {

            // Normalise accelerometer measurement
            recipNorm = invSqrt(ax * ax + ay * ay + az * az);
            ax *= recipNorm;
            ay *= recipNorm;
            az *= recipNorm;

            // Auxiliary variables to avoid repeated arithmetic
            _2q0 = 2.0f * qW;
            _2q1 = 2.0f * qX;
            _2q2 = 2.0f * qY;
            _2q3 = 2.0f * qZ;
            _4q0 = 4.0f * qW;
            _4q1 = 4.0f * qX;
            _4q2 = 4.0f * qY;
            _8q1 = 8.0f * qX;
            _8q2 = 8.0f * qY;
            q0q0 = qW * qW;
            q1q1 = qX * qX;
            q2q2 = qY * qY;
            q3q3 = qZ * qZ;

            // Gradient descent algorithm corrective step (Madgwick's IMU formulation)
            s0 = _4q0 * q2q2 + _2q2 * ax + _4q0 * q1q1 - _2q1 * ay;
            s1 = _4q1 * q3q3 - _2q3 * ax + 4.0f * q0q0 * qX - _2q0 * ay - _4q1 + _8q1 * q1q1 + _8q1 * q2q2 + _4q1 * az;
            s2 = 4.0f * q0q0 * qY + _2q0 * ax + _4q2 * q3q3 - _2q3 * ay - _4q2 + _8q2 * q1q1 + _8q2 * q2q2 + _4q2 * az;
            s3 = 4.0f * q1q1 * qZ - _2q1 * ax + 4.0f * q2q2 * qZ - _2q2 * ay;
            recipNorm = invSqrt(s0 * s0 + s1 * s1 + s2 * s2 + s3 * s3); // normalise step magnitude
            s0 *= recipNorm;
            s1 *= recipNorm;
            s2 *= recipNorm;
            s3 *= recipNorm;

            // Apply feedback step
            qDot1 -= gain * s0;
            qDot2 -= gain * s1;
            qDot3 -= gain * s2;
            qDot4 -= gain * s3;
        }

        // Integrate rate of change of quaternion to yield quaternion
        qW += qDot1 * (1.0f / sampleFreq);
        qX += qDot2 * (1.0f / sampleFreq);
        qY += qDot3 * (1.0f / sampleFreq);
        qZ += qDot4 * (1.0f / sampleFreq);

        // Normalise quaternion
        recipNorm = invSqrt(qW * qW + qX * qX + qY * qY + qZ * qZ);
        qW *= recipNorm;
        qX *= recipNorm;
        qY *= recipNorm;
        qZ *= recipNorm;
    }

    // Flags marking that a fresh accelerometer (init0) / gyroscope (init1) sample has
    // arrived since the last fusion step; one update runs per matched pair of samples.
    boolean init0 = false;
    boolean init1 = false;

    @Override
    public void onSensorChanged(SensorEvent event) {
        switch(event.sensor.getType()) {
            case Sensor.TYPE_ACCELEROMETER:
                System.arraycopy(event.values, 0, acc, 0, 3);
                init0 = true;
                break;
            case Sensor.TYPE_GYROSCOPE:
                System.arraycopy(event.values, 0, gyr, 0, 3);
                init1 = true;
                break;
            default:
                return;
        }

        // Only fuse once both a gyro and an accelerometer sample are available.
        if (init0 && init1) {
            MadgwickAHRSupdateIMU(gyr[0], gyr[1], gyr[2], acc[0], acc[1], acc[2]);
            currentOrientationQuaternion.setXYZW(qX, qY, qZ, -qW); //-q for cube rotation inversion
            init0 = init1 = false;
        }
    }
}
138 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/OrientationProvider.java:
--------------------------------------------------------------------------------
1 | /**
2 | *
3 | */
4 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
5 |
6 | import java.util.ArrayList;
7 | import java.util.List;
8 |
9 | import org.hitlabnz.sensor_fusion_demo.representation.MatrixF4x4;
10 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
11 |
12 | import android.hardware.Sensor;
13 | import android.hardware.SensorEventListener;
14 | import android.hardware.SensorManager;
15 |
16 | /**
17 | * Classes implementing this interface provide an orientation of the device
18 | * either by directly accessing hardware, using Android sensor fusion or fusing
19 | * sensors itself.
20 | *
21 | * The orientation can be provided as rotation matrix or quaternion.
22 | *
23 | * @author Alexander Pacha
24 | *
25 | */
26 | public abstract class OrientationProvider implements SensorEventListener {
27 | /**
28 | * Sync-token for syncing read/write to sensor-data from sensor manager and
29 | * fusion algorithm
30 | */
31 | protected final Object synchronizationToken = new Object();
32 |
33 | /**
34 | * The list of sensors used by this provider
35 | */
36 | protected List sensorList = new ArrayList();
37 |
38 | /**
39 | * The matrix that holds the current rotation
40 | */
41 | protected final MatrixF4x4 currentOrientationRotationMatrix;
42 |
43 | /**
44 | * The quaternion that holds the current rotation
45 | */
46 | protected final Quaternion currentOrientationQuaternion;
47 |
48 | /**
49 | * The sensor manager for accessing android sensors
50 | */
51 | protected SensorManager sensorManager;
52 |
53 | /**
54 | * Initialises a new OrientationProvider
55 | *
56 | * @param sensorManager
57 | * The android sensor manager
58 | */
59 | public OrientationProvider(SensorManager sensorManager) {
60 | this.sensorManager = sensorManager;
61 |
62 | // Initialise with identity
63 | currentOrientationRotationMatrix = new MatrixF4x4();
64 |
65 | // Initialise with identity
66 | currentOrientationQuaternion = new Quaternion();
67 | }
68 |
69 | /**
70 | * Starts the sensor fusion (e.g. when resuming the activity)
71 | */
72 | public void start() {
73 | // enable our sensor when the activity is resumed, ask for
74 | // 10 ms updates.
75 | for (Sensor sensor : sensorList) {
76 | // enable our sensors when the activity is resumed, ask for
77 | // 20 ms updates (Sensor_delay_game)
78 | sensorManager.registerListener(this, sensor,
79 | SensorManager.SENSOR_DELAY_GAME);
80 | }
81 | }
82 |
83 | /**
84 | * Stops the sensor fusion (e.g. when pausing/suspending the activity)
85 | */
86 | public void stop() {
87 | // make sure to turn our sensors off when the activity is paused
88 | for (Sensor sensor : sensorList) {
89 | sensorManager.unregisterListener(this, sensor);
90 | }
91 | }
92 |
93 | @Override
94 | public void onAccuracyChanged(Sensor sensor, int accuracy) {
95 | // Not doing anything
96 | }
97 |
98 | /**
99 | * Get the current rotation of the device in the rotation matrix format (4x4 matrix)
100 | */
101 | public void getRotationMatrix(MatrixF4x4 matrix) {
102 | synchronized (synchronizationToken) {
103 | matrix.set(currentOrientationRotationMatrix);
104 | }
105 | }
106 |
107 | /**
108 | * Get the current rotation of the device in the quaternion format (vector4f)
109 | */
110 | public void getQuaternion(Quaternion quaternion) {
111 | synchronized (synchronizationToken) {
112 | quaternion.set(currentOrientationQuaternion);
113 | }
114 | }
115 |
116 | /**
117 | * Get the current rotation of the device in the Euler angles
118 | */
119 | public void getEulerAngles(float angles[]) {
120 | synchronized (synchronizationToken) {
121 | SensorManager.getOrientation(currentOrientationRotationMatrix.matrix, angles);
122 | }
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/orientationProvider/RotationVectorProvider.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.orientationProvider;
2 |
3 | import android.hardware.Sensor;
4 | import android.hardware.SensorEvent;
5 | import android.hardware.SensorManager;
6 | import android.util.Log;
7 |
8 | /**
9 | * The orientation provider that delivers the current orientation from the {@link Sensor#TYPE_ROTATION_VECTOR Android
10 | * Rotation Vector sensor}.
11 | *
12 | * @author Alexander Pacha
13 | *
14 | */
15 | public class RotationVectorProvider extends OrientationProvider {
16 |
17 | /**
18 | * Temporary quaternion to store the values obtained from the SensorManager
19 | */
20 | final private float[] temporaryQuaternion = new float[4];
21 |
22 | /**
23 | * Initialises a new RotationVectorProvider
24 | *
25 | * @param sensorManager The android sensor manager
26 | */
27 | public RotationVectorProvider(SensorManager sensorManager) {
28 | super(sensorManager);
29 |
30 | //The rotation vector sensor that is being used for this provider to get device orientation
31 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_ROTATION_VECTOR));
32 | sensorList.add(sensorManager.getDefaultSensor(Sensor.TYPE_GRAVITY));
33 | }
34 |
35 | private float R[] = new float[16];
36 | private float RI[] = new float[16];
37 | private float accAxis[] = new float[4];
38 | private float acc[] = new float[4];
39 | @Override
40 | public void onSensorChanged(SensorEvent event) {
41 | // we received a sensor event. it is a good practice to check
42 | // that we received the proper event
43 | if (event.sensor.getType() == Sensor.TYPE_ROTATION_VECTOR) {
44 | // convert the rotation-vector to a 4x4 matrix. the matrix
45 | // is interpreted by Open GL as the inverse of the
46 | // rotation-vector, which is what we want.
47 | // SensorManager.getRotationMatrixFromVector(currentOrientationRotationMatrix.matrix, event.values);
48 |
49 | // Get Quaternion
50 | // Calculate angle. Starting with API_18, Android will provide this value as event.values[3], but if not, we have to calculate it manually.
51 |
52 | SensorManager.getRotationMatrixFromVector(R, event.values);
53 |
54 |
55 | SensorManager.getQuaternionFromVector(temporaryQuaternion, event.values);
56 | // Log.i("RotVect", String.format("%f %f %f %f\n", temporaryQuaternion[0],
57 | // temporaryQuaternion[1], temporaryQuaternion[2], temporaryQuaternion[3]));
58 | currentOrientationQuaternion.setXYZW(temporaryQuaternion[1], temporaryQuaternion[2],
59 | temporaryQuaternion[3], -temporaryQuaternion[0]);
60 | } else if (event.sensor.getType() == Sensor.TYPE_GRAVITY) {
61 | System.arraycopy(event.values, 0, acc, 0, event.values.length);
62 | android.opengl.Matrix.multiplyMV(accAxis, 0, R,
63 | 0, acc, 0);
64 | // Log.i("RotVect", String.format("%f %f %f %f", accAxis[0], accAxis[1], accAxis[2], accAxis[3]));
65 | }
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/representation/Matrix.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.representation;
2 |
3 | /**
4 | * Matrix math utilities. These methods operate on OpenGL ES format
5 | * matrices and vectors stored in float arrays.
6 | *
7 | * Matrices are 4 x 4 column-vector matrices stored in column-major
8 | * order:
9 | *
10 | *
11 | * m[offset + 0] m[offset + 4] m[offset + 8] m[offset + 12]
12 | * m[offset + 1] m[offset + 5] m[offset + 9] m[offset + 13]
13 | * m[offset + 2] m[offset + 6] m[offset + 10] m[offset + 14]
14 | * m[offset + 3] m[offset + 7] m[offset + 11] m[offset + 15]
15 | *
16 | *
17 | * Vectors are 4 row x 1 column column-vectors stored in order:
18 | *
19 | *
20 | * v[offset + 0]
21 | * v[offset + 1]
22 | * v[offset + 2]
23 | * v[offset + 3]
24 | *
25 | *
26 | */
27 | public class Matrix {
28 |
29 | /**
30 | * Temporary memory for operations that need temporary matrix data.
31 | */
32 | private static final float[] TEMP_MATRIX_ARRAY = new float[32];
33 |
34 | /**
35 | * Multiply two 4x4 matrices together and store the result in a third 4x4
36 | * matrix. In matrix notation: result = lhs x rhs. Due to the way
37 | * matrix multiplication works, the result matrix will have the same
38 | * effect as first multiplying by the rhs matrix, then multiplying by
39 | * the lhs matrix. This is the opposite of what you might expect.
40 | *
41 | * The same float array may be passed for result, lhs, and/or rhs. However,
42 | * the result element values are undefined if the result elements overlap
43 | * either the lhs or rhs elements.
44 | *
45 | * @param result The float array that holds the result.
46 | * @param resultOffset The offset into the result array where the result is
47 | * stored.
48 | * @param lhs The float array that holds the left-hand-side matrix.
49 | * @param lhsOffset The offset into the lhs array where the lhs is stored
50 | * @param rhs The float array that holds the right-hand-side matrix.
51 | * @param rhsOffset The offset into the rhs array where the rhs is stored.
52 | *
53 | * @throws IllegalArgumentException if result, lhs, or rhs are null, or if
54 | * resultOffset + 16 > result.length or lhsOffset + 16 > lhs.length or
55 | * rhsOffset + 16 > rhs.length.
56 | */
57 | /**
58 | * public static void multiplyMM(float[] result, int resultOffset,
59 | * float[] lhs, int lhsOffset, float[] rhs, int rhsOffset){
60 | * android.opengl.Matrix.multiplyMM(result, resultOffset, lhs, lhsOffset, rhs, rhsOffset);
61 | * }
62 | */
63 |
64 | public static void multiplyMM(float[] output, int outputOffset, float[] lhs, int lhsOffset, float[] rhs,
65 | int rhsOffset) {
66 | //for(int i = 0; i < 4; i++){
67 | // for(int j = 0; j < 4; j++){
68 |
69 | // int k = i * 4;
70 | // output[outputOffset + 0 + j] += lhs[lhsOffset + k + j] * rhs[rhsOffset + 0 * 4 + i];
71 | // output[outputOffset + 1 * 4 + j] += lhs[lhsOffset +k + j] * rhs[rhsOffset + 1 * 4 + i];
72 | // output[outputOffset + 2 * 4 + j] += lhs[lhsOffset +k + j] * rhs[rhsOffset + 2 * 4 + i];
73 | // output[outputOffset + 3 * 4 + j] += lhs[lhsOffset +k + j] * rhs[rhsOffset + 3 * 4 + i];
74 | // }
75 | //}
76 | output[outputOffset + 0] = lhs[lhsOffset + 0] * rhs[rhsOffset + 0] + lhs[lhsOffset + 4] * rhs[rhsOffset + 1]
77 | + lhs[lhsOffset + 8] * rhs[rhsOffset + 2] + lhs[lhsOffset + 12] * rhs[rhsOffset + 3];
78 | output[outputOffset + 1] = lhs[lhsOffset + 1] * rhs[rhsOffset + 0] + lhs[lhsOffset + 5] * rhs[rhsOffset + 1]
79 | + lhs[lhsOffset + 9] * rhs[rhsOffset + 2] + lhs[lhsOffset + 13] * rhs[rhsOffset + 3];
80 | output[outputOffset + 2] = lhs[lhsOffset + 2] * rhs[rhsOffset + 0] + lhs[lhsOffset + 6] * rhs[rhsOffset + 1]
81 | + lhs[lhsOffset + 10] * rhs[rhsOffset + 2] + lhs[lhsOffset + 14] * rhs[rhsOffset + 3];
82 | output[outputOffset + 3] = lhs[lhsOffset + 3] * rhs[rhsOffset + 0] + lhs[lhsOffset + 7] * rhs[rhsOffset + 1]
83 | + lhs[lhsOffset + 11] * rhs[rhsOffset + 2] + lhs[lhsOffset + 15] * rhs[rhsOffset + 3];
84 |
85 | output[outputOffset + 4] = lhs[lhsOffset + 0] * rhs[rhsOffset + 4] + lhs[lhsOffset + 4] * rhs[rhsOffset + 5]
86 | + lhs[lhsOffset + 8] * rhs[rhsOffset + 6] + lhs[lhsOffset + 12] * rhs[rhsOffset + 7];
87 | output[outputOffset + 5] = lhs[lhsOffset + 1] * rhs[rhsOffset + 4] + lhs[lhsOffset + 5] * rhs[rhsOffset + 5]
88 | + lhs[lhsOffset + 9] * rhs[rhsOffset + 6] + lhs[lhsOffset + 13] * rhs[rhsOffset + 7];
89 | output[outputOffset + 6] = lhs[lhsOffset + 2] * rhs[rhsOffset + 4] + lhs[lhsOffset + 6] * rhs[rhsOffset + 5]
90 | + lhs[lhsOffset + 10] * rhs[rhsOffset + 6] + lhs[lhsOffset + 14] * rhs[rhsOffset + 7];
91 | output[outputOffset + 7] = lhs[lhsOffset + 3] * rhs[rhsOffset + 4] + lhs[lhsOffset + 7] * rhs[rhsOffset + 5]
92 | + lhs[lhsOffset + 11] * rhs[rhsOffset + 6] + lhs[lhsOffset + 15] * rhs[rhsOffset + 7];
93 |
94 | output[outputOffset + 8] = lhs[lhsOffset + 0] * rhs[rhsOffset + 8] + lhs[lhsOffset + 4] * rhs[rhsOffset + 9]
95 | + lhs[lhsOffset + 8] * rhs[rhsOffset + 10] + lhs[lhsOffset + 12] * rhs[rhsOffset + 11];
96 | output[outputOffset + 9] = lhs[lhsOffset + 1] * rhs[rhsOffset + 8] + lhs[lhsOffset + 5] * rhs[rhsOffset + 9]
97 | + lhs[lhsOffset + 9] * rhs[rhsOffset + 10] + lhs[lhsOffset + 13] * rhs[rhsOffset + 11];
98 | output[outputOffset + 10] = lhs[lhsOffset + 2] * rhs[rhsOffset + 8] + lhs[lhsOffset + 6] * rhs[rhsOffset + 9]
99 | + lhs[lhsOffset + 10] * rhs[rhsOffset + 10] + lhs[lhsOffset + 14] * rhs[rhsOffset + 11];
100 | output[outputOffset + 11] = lhs[lhsOffset + 3] * rhs[rhsOffset + 8] + lhs[lhsOffset + 7] * rhs[rhsOffset + 9]
101 | + lhs[lhsOffset + 11] * rhs[rhsOffset + 10] + lhs[lhsOffset + 15] * rhs[rhsOffset + 11];
102 |
103 | output[outputOffset + 12] = lhs[lhsOffset + 0] * rhs[rhsOffset + 12] + lhs[lhsOffset + 4] * rhs[rhsOffset + 13]
104 | + lhs[lhsOffset + 8] * rhs[rhsOffset + 14] + lhs[lhsOffset + 12] * rhs[rhsOffset + 15];
105 | output[outputOffset + 13] = lhs[lhsOffset + 1] * rhs[rhsOffset + 12] + lhs[lhsOffset + 5] * rhs[rhsOffset + 13]
106 | + lhs[lhsOffset + 9] * rhs[rhsOffset + 14] + lhs[lhsOffset + 13] * rhs[rhsOffset + 15];
107 | output[outputOffset + 14] = lhs[lhsOffset + 2] * rhs[rhsOffset + 12] + lhs[lhsOffset + 6] * rhs[rhsOffset + 13]
108 | + lhs[lhsOffset + 10] * rhs[rhsOffset + 14] + lhs[lhsOffset + 14] * rhs[rhsOffset + 15];
109 | output[outputOffset + 15] = lhs[lhsOffset + 3] * rhs[rhsOffset + 12] + lhs[lhsOffset + 7] * rhs[rhsOffset + 13]
110 | + lhs[lhsOffset + 11] * rhs[rhsOffset + 14] + lhs[lhsOffset + 15] * rhs[rhsOffset + 15];
111 | }
112 |
113 | public static void multiplyMM(float[] output, float[] lhs, float[] rhs) {
114 | output[0] = lhs[0] * rhs[0] + lhs[4] * rhs[1] + lhs[8] * rhs[2] + lhs[12] * rhs[3];
115 | output[1] = lhs[1] * rhs[0] + lhs[5] * rhs[1] + lhs[9] * rhs[2] + lhs[13] * rhs[3];
116 | output[2] = lhs[2] * rhs[0] + lhs[6] * rhs[1] + lhs[10] * rhs[2] + lhs[14] * rhs[3];
117 | output[3] = lhs[3] * rhs[0] + lhs[7] * rhs[1] + lhs[11] * rhs[2] + lhs[15] * rhs[3];
118 |
119 | output[4] = lhs[0] * rhs[4] + lhs[4] * rhs[5] + lhs[8] * rhs[6] + lhs[12] * rhs[7];
120 | output[5] = lhs[1] * rhs[4] + lhs[5] * rhs[5] + lhs[9] * rhs[6] + lhs[13] * rhs[7];
121 | output[6] = lhs[2] * rhs[4] + lhs[6] * rhs[5] + lhs[10] * rhs[6] + lhs[14] * rhs[7];
122 | output[7] = lhs[3] * rhs[4] + lhs[7] * rhs[5] + lhs[11] * rhs[6] + lhs[15] * rhs[7];
123 |
124 | output[8] = lhs[0] * rhs[8] + lhs[4] * rhs[9] + lhs[8] * rhs[10] + lhs[12] * rhs[11];
125 | output[9] = lhs[1] * rhs[8] + lhs[5] * rhs[9] + lhs[9] * rhs[10] + lhs[13] * rhs[11];
126 | output[10] = lhs[2] * rhs[8] + lhs[6] * rhs[9] + lhs[10] * rhs[10] + lhs[14] * rhs[11];
127 | output[11] = lhs[3] * rhs[8] + lhs[7] * rhs[9] + lhs[11] * rhs[10] + lhs[15] * rhs[11];
128 |
129 | output[12] = lhs[0] * rhs[12] + lhs[4] * rhs[13] + lhs[8] * rhs[14] + lhs[12] * rhs[15];
130 | output[13] = lhs[1] * rhs[12] + lhs[5] * rhs[13] + lhs[9] * rhs[14] + lhs[13] * rhs[15];
131 | output[14] = lhs[2] * rhs[12] + lhs[6] * rhs[13] + lhs[10] * rhs[14] + lhs[14] * rhs[15];
132 | output[15] = lhs[3] * rhs[12] + lhs[7] * rhs[13] + lhs[11] * rhs[14] + lhs[15] * rhs[15];
133 | }
134 |
135 | /**
136 | * Multiply a 4 element vector by a 4x4 matrix and store the result in a 4
137 | * element column vector. In matrix notation: result = lhs x rhs
138 | *
139 | * The same float array may be passed for resultVec, lhsMat, and/or rhsVec.
140 | * However, the resultVec element values are undefined if the resultVec
141 | * elements overlap either the lhsMat or rhsVec elements.
142 | *
143 | * @param resultVec The float array that holds the result vector.
144 | * @param resultVecOffset The offset into the result array where the result
145 | * vector is stored.
146 | * @param lhsMat The float array that holds the left-hand-side matrix.
147 | * @param lhsMatOffset The offset into the lhs array where the lhs is stored
148 | * @param rhsVec The float array that holds the right-hand-side vector.
149 | * @param rhsVecOffset The offset into the rhs vector where the rhs vector
150 | * is stored.
151 | *
152 | * @throws IllegalArgumentException if resultVec, lhsMat,
153 | * or rhsVec are null, or if resultVecOffset + 4 > resultVec.length
154 | * or lhsMatOffset + 16 > lhsMat.length or
155 | * rhsVecOffset + 4 > rhsVec.length.
156 | */
157 | /* public static void multiplyMV(float[] resultVec,
158 | * int resultVecOffset, float[] lhsMat, int lhsMatOffset,
159 | * float[] rhsVec, int rhsVecOffset){
160 | * android.opengl.Matrix.multiplyMV(resultVec, resultVecOffset, lhsMat, lhsMatOffset, rhsVec, rhsVecOffset);
161 | * } */
162 | public static void multiplyMV(float[] output, int outputOffset, float[] lhs, int lhsOffset, float[] rhs,
163 | int rhsOffset) {
164 | /* wrong implementation (this is for row major matrices)
165 | * output[outputOffset +0] = lhs[lhsOffset + 0] * rhs[rhsOffset + 0] + lhs[lhsOffset + 1] * rhs[rhsOffset + 1]
166 | * + lhs[lhsOffset + 2] * rhs[rhsOffset + 2] + lhs[lhsOffset + 3] * rhs[rhsOffset + 3];
167 | * output[outputOffset +1] = lhs[lhsOffset + 4] * rhs[rhsOffset + 0] + lhs[lhsOffset + 5] * rhs[rhsOffset + 1] +
168 | * lhs[lhsOffset + 6] * rhs[rhsOffset + 2] + lhs[lhsOffset + 7] * rhs[rhsOffset + 3];
169 | * output[outputOffset +2] = lhs[lhsOffset + 8] * rhs[rhsOffset + 0] + lhs[lhsOffset + 9] * rhs[rhsOffset + 1] +
170 | * lhs[lhsOffset + 10] * rhs[rhsOffset + 2] + lhs[lhsOffset + 11] * rhs[rhsOffset + 3];
171 | * output[outputOffset +3] = lhs[lhsOffset + 12] * rhs[rhsOffset + 0] + lhs[lhsOffset + 13] * rhs[rhsOffset + 1]
172 | * + lhs[lhsOffset + 14] * rhs[rhsOffset + 2] + lhs[lhsOffset + 15] * rhs[rhsOffset + 3]; */
173 | // correct implementation for column major matrices (which is for OpenGL)
174 | output[outputOffset + 0] = lhs[lhsOffset + 0] * rhs[rhsOffset + 0] + lhs[lhsOffset + 4] * rhs[rhsOffset + 1]
175 | + lhs[lhsOffset + 8] * rhs[rhsOffset + 2] + lhs[lhsOffset + 12] * rhs[rhsOffset + 3];
176 | output[outputOffset + 1] = lhs[lhsOffset + 1] * rhs[rhsOffset + 0] + lhs[lhsOffset + 5] * rhs[rhsOffset + 1]
177 | + lhs[lhsOffset + 9] * rhs[rhsOffset + 2] + lhs[lhsOffset + 13] * rhs[rhsOffset + 3];
178 | output[outputOffset + 2] = lhs[lhsOffset + 2] * rhs[rhsOffset + 0] + lhs[lhsOffset + 6] * rhs[rhsOffset + 1]
179 | + lhs[lhsOffset + 10] * rhs[rhsOffset + 2] + lhs[lhsOffset + 14] * rhs[rhsOffset + 3];
180 | output[outputOffset + 3] = lhs[lhsOffset + 3] * rhs[rhsOffset + 0] + lhs[lhsOffset + 7] * rhs[rhsOffset + 1]
181 | + lhs[lhsOffset + 11] * rhs[rhsOffset + 2] + lhs[lhsOffset + 15] * rhs[rhsOffset + 3];
182 |
183 | }
184 |
185 | public static void multiplyMV(float[] outputV, float[] inputM, float[] inputV) {
186 | outputV[0] = inputM[0] * inputV[0] + inputM[4] * inputV[1] + inputM[8] * inputV[2] + inputM[12] * inputV[3];
187 | outputV[1] = inputM[1] * inputV[0] + inputM[5] * inputV[1] + inputM[9] * inputV[2] + inputM[13] * inputV[3];
188 | outputV[2] = inputM[2] * inputV[0] + inputM[6] * inputV[1] + inputM[10] * inputV[2] + inputM[14] * inputV[3];
189 | outputV[3] = inputM[3] * inputV[0] + inputM[7] * inputV[1] + inputM[11] * inputV[2] + inputM[15] * inputV[3];
190 | }
191 |
192 | public static void multiplyMV3(float[] outputV, float[] inputM, float[] inputV, float w) {
193 | outputV[0] = inputM[0] * inputV[0] + inputM[4] * inputV[1] + inputM[8] * inputV[2] + inputM[12] * w;
194 | outputV[1] = inputM[1] * inputV[0] + inputM[5] * inputV[1] + inputM[9] * inputV[2] + inputM[13] * w;
195 | outputV[2] = inputM[2] * inputV[0] + inputM[6] * inputV[1] + inputM[10] * inputV[2] + inputM[14] * w;
196 | }
197 |
198 | /**
199 | * Transposes a 4 x 4 matrix.
200 | *
201 | * @param mTrans the array that holds the output inverted matrix
202 | * @param mTransOffset an offset into mInv where the inverted matrix is
203 | * stored.
204 | * @param m the input array
205 | * @param mOffset an offset into m where the matrix is stored.
206 | */
207 | public static void transposeM(float[] mTrans, int mTransOffset, float[] m, int mOffset) {
208 | for (int i = 0; i < 4; i++) {
209 | int mBase = i * 4 + mOffset;
210 | mTrans[i + mTransOffset] = m[mBase];
211 | mTrans[i + 4 + mTransOffset] = m[mBase + 1];
212 | mTrans[i + 8 + mTransOffset] = m[mBase + 2];
213 | mTrans[i + 12 + mTransOffset] = m[mBase + 3];
214 | }
215 | }
216 |
217 | /**
218 | * Inverts a 4 x 4 matrix.
219 | *
220 | * @param mInv the array that holds the output inverted matrix
221 | * @param mInvOffset an offset into mInv where the inverted matrix is
222 | * stored.
223 | * @param m the input array
224 | * @param mOffset an offset into m where the matrix is stored.
225 | * @return true if the matrix could be inverted, false if it could not.
226 | */
227 | public static boolean invertM(float[] mInv, int mInvOffset, float[] m, int mOffset) {
228 | // Invert a 4 x 4 matrix using Cramer's Rule
229 |
230 | // transpose matrix
231 | final float src0 = m[mOffset + 0];
232 | final float src4 = m[mOffset + 1];
233 | final float src8 = m[mOffset + 2];
234 | final float src12 = m[mOffset + 3];
235 |
236 | final float src1 = m[mOffset + 4];
237 | final float src5 = m[mOffset + 5];
238 | final float src9 = m[mOffset + 6];
239 | final float src13 = m[mOffset + 7];
240 |
241 | final float src2 = m[mOffset + 8];
242 | final float src6 = m[mOffset + 9];
243 | final float src10 = m[mOffset + 10];
244 | final float src14 = m[mOffset + 11];
245 |
246 | final float src3 = m[mOffset + 12];
247 | final float src7 = m[mOffset + 13];
248 | final float src11 = m[mOffset + 14];
249 | final float src15 = m[mOffset + 15];
250 |
251 | // calculate pairs for first 8 elements (cofactors)
252 | final float atmp0 = src10 * src15;
253 | final float atmp1 = src11 * src14;
254 | final float atmp2 = src9 * src15;
255 | final float atmp3 = src11 * src13;
256 | final float atmp4 = src9 * src14;
257 | final float atmp5 = src10 * src13;
258 | final float atmp6 = src8 * src15;
259 | final float atmp7 = src11 * src12;
260 | final float atmp8 = src8 * src14;
261 | final float atmp9 = src10 * src12;
262 | final float atmp10 = src8 * src13;
263 | final float atmp11 = src9 * src12;
264 |
265 | // calculate first 8 elements (cofactors)
266 | final float dst0 = (atmp0 * src5 + atmp3 * src6 + atmp4 * src7) - (atmp1 * src5 + atmp2 * src6 + atmp5 * src7);
267 | final float dst1 = (atmp1 * src4 + atmp6 * src6 + atmp9 * src7) - (atmp0 * src4 + atmp7 * src6 + atmp8 * src7);
268 | final float dst2 = (atmp2 * src4 + atmp7 * src5 + atmp10 * src7)
269 | - (atmp3 * src4 + atmp6 * src5 + atmp11 * src7);
270 | final float dst3 = (atmp5 * src4 + atmp8 * src5 + atmp11 * src6)
271 | - (atmp4 * src4 + atmp9 * src5 + atmp10 * src6);
272 | final float dst4 = (atmp1 * src1 + atmp2 * src2 + atmp5 * src3) - (atmp0 * src1 + atmp3 * src2 + atmp4 * src3);
273 | final float dst5 = (atmp0 * src0 + atmp7 * src2 + atmp8 * src3) - (atmp1 * src0 + atmp6 * src2 + atmp9 * src3);
274 | final float dst6 = (atmp3 * src0 + atmp6 * src1 + atmp11 * src3)
275 | - (atmp2 * src0 + atmp7 * src1 + atmp10 * src3);
276 | final float dst7 = (atmp4 * src0 + atmp9 * src1 + atmp10 * src2)
277 | - (atmp5 * src0 + atmp8 * src1 + atmp11 * src2);
278 |
279 | // calculate pairs for second 8 elements (cofactors)
280 | final float btmp0 = src2 * src7;
281 | final float btmp1 = src3 * src6;
282 | final float btmp2 = src1 * src7;
283 | final float btmp3 = src3 * src5;
284 | final float btmp4 = src1 * src6;
285 | final float btmp5 = src2 * src5;
286 | final float btmp6 = src0 * src7;
287 | final float btmp7 = src3 * src4;
288 | final float btmp8 = src0 * src6;
289 | final float btmp9 = src2 * src4;
290 | final float btmp10 = src0 * src5;
291 | final float btmp11 = src1 * src4;
292 |
293 | // calculate second 8 elements (cofactors)
294 | final float dst8 = (btmp0 * src13 + btmp3 * src14 + btmp4 * src15)
295 | - (btmp1 * src13 + btmp2 * src14 + btmp5 * src15);
296 | final float dst9 = (btmp1 * src12 + btmp6 * src14 + btmp9 * src15)
297 | - (btmp0 * src12 + btmp7 * src14 + btmp8 * src15);
298 | final float dst10 = (btmp2 * src12 + btmp7 * src13 + btmp10 * src15)
299 | - (btmp3 * src12 + btmp6 * src13 + btmp11 * src15);
300 | final float dst11 = (btmp5 * src12 + btmp8 * src13 + btmp11 * src14)
301 | - (btmp4 * src12 + btmp9 * src13 + btmp10 * src14);
302 | final float dst12 = (btmp2 * src10 + btmp5 * src11 + btmp1 * src9)
303 | - (btmp4 * src11 + btmp0 * src9 + btmp3 * src10);
304 | final float dst13 = (btmp8 * src11 + btmp0 * src8 + btmp7 * src10)
305 | - (btmp6 * src10 + btmp9 * src11 + btmp1 * src8);
306 | final float dst14 = (btmp6 * src9 + btmp11 * src11 + btmp3 * src8)
307 | - (btmp10 * src11 + btmp2 * src8 + btmp7 * src9);
308 | final float dst15 = (btmp10 * src10 + btmp4 * src8 + btmp9 * src9)
309 | - (btmp8 * src9 + btmp11 * src10 + btmp5 * src8);
310 |
311 | // calculate determinant
312 | final float det = src0 * dst0 + src1 * dst1 + src2 * dst2 + src3 * dst3;
313 |
314 | if (det == 0.0f) {
315 | return false;
316 | }
317 |
318 | // calculate matrix inverse
319 | final float invdet = 1.0f / det;
320 | mInv[mInvOffset] = dst0 * invdet;
321 | mInv[1 + mInvOffset] = dst1 * invdet;
322 | mInv[2 + mInvOffset] = dst2 * invdet;
323 | mInv[3 + mInvOffset] = dst3 * invdet;
324 |
325 | mInv[4 + mInvOffset] = dst4 * invdet;
326 | mInv[5 + mInvOffset] = dst5 * invdet;
327 | mInv[6 + mInvOffset] = dst6 * invdet;
328 | mInv[7 + mInvOffset] = dst7 * invdet;
329 |
330 | mInv[8 + mInvOffset] = dst8 * invdet;
331 | mInv[9 + mInvOffset] = dst9 * invdet;
332 | mInv[10 + mInvOffset] = dst10 * invdet;
333 | mInv[11 + mInvOffset] = dst11 * invdet;
334 |
335 | mInv[12 + mInvOffset] = dst12 * invdet;
336 | mInv[13 + mInvOffset] = dst13 * invdet;
337 | mInv[14 + mInvOffset] = dst14 * invdet;
338 | mInv[15 + mInvOffset] = dst15 * invdet;
339 |
340 | return true;
341 | }
342 |
343 | /**
344 | * Computes an orthographic projection matrix.
345 | *
346 | * @param m returns the result
347 | * @param mOffset
348 | * @param left
349 | * @param right
350 | * @param bottom
351 | * @param top
352 | * @param near
353 | * @param far
354 | */
355 |
356 | public static void orthoM(float[] m, int mOffset, float left, float right, float bottom, float top, float near,
357 | float far) {
358 | if (left == right) {
359 | throw new IllegalArgumentException("left == right");
360 | }
361 | if (bottom == top) {
362 | throw new IllegalArgumentException("bottom == top");
363 | }
364 | if (near == far) {
365 | throw new IllegalArgumentException("near == far");
366 | }
367 |
368 | final float r_width = 1.0f / (right - left);
369 | final float r_height = 1.0f / (top - bottom);
370 | final float r_depth = 1.0f / (far - near);
371 | final float x = 2.0f * (r_width);
372 | final float y = 2.0f * (r_height);
373 | final float z = -2.0f * (r_depth);
374 | final float tx = -(right + left) * r_width;
375 | final float ty = -(top + bottom) * r_height;
376 | final float tz = -(far + near) * r_depth;
377 | m[mOffset + 0] = x;
378 | m[mOffset + 5] = y;
379 | m[mOffset + 10] = z;
380 | m[mOffset + 12] = tx;
381 | m[mOffset + 13] = ty;
382 | m[mOffset + 14] = tz;
383 | m[mOffset + 15] = 1.0f;
384 | m[mOffset + 1] = 0.0f;
385 | m[mOffset + 2] = 0.0f;
386 | m[mOffset + 3] = 0.0f;
387 | m[mOffset + 4] = 0.0f;
388 | m[mOffset + 6] = 0.0f;
389 | m[mOffset + 7] = 0.0f;
390 | m[mOffset + 8] = 0.0f;
391 | m[mOffset + 9] = 0.0f;
392 | m[mOffset + 11] = 0.0f;
393 | }
394 |
395 | /**
396 | * Define a projection matrix in terms of six clip planes
397 | *
398 | * @param m the float array that holds the perspective matrix
399 | * @param offset the offset into float array m where the perspective
400 | * matrix data is written
401 | * @param left
402 | * @param right
403 | * @param bottom
404 | * @param top
405 | * @param near
406 | * @param far
407 | */
408 |
409 | public static void frustumM(float[] m, int offset, float left, float right, float bottom, float top, float near,
410 | float far) {
411 | if (left == right) {
412 | throw new IllegalArgumentException("left == right");
413 | }
414 | if (top == bottom) {
415 | throw new IllegalArgumentException("top == bottom");
416 | }
417 | if (near == far) {
418 | throw new IllegalArgumentException("near == far");
419 | }
420 | if (near <= 0.0f) {
421 | throw new IllegalArgumentException("near <= 0.0f");
422 | }
423 | if (far <= 0.0f) {
424 | throw new IllegalArgumentException("far <= 0.0f");
425 | }
426 | final float r_width = 1.0f / (right - left);
427 | final float r_height = 1.0f / (top - bottom);
428 | final float r_depth = 1.0f / (near - far);
429 | final float x = 2.0f * (near * r_width);
430 | final float y = 2.0f * (near * r_height);
431 | final float A = 2.0f * ((right + left) * r_width);
432 | final float B = (top + bottom) * r_height;
433 | final float C = (far + near) * r_depth;
434 | final float D = 2.0f * (far * near * r_depth);
435 | m[offset + 0] = x;
436 | m[offset + 5] = y;
437 | m[offset + 8] = A;
438 | m[offset + 9] = B;
439 | m[offset + 10] = C;
440 | m[offset + 14] = D;
441 | m[offset + 11] = -1.0f;
442 | m[offset + 1] = 0.0f;
443 | m[offset + 2] = 0.0f;
444 | m[offset + 3] = 0.0f;
445 | m[offset + 4] = 0.0f;
446 | m[offset + 6] = 0.0f;
447 | m[offset + 7] = 0.0f;
448 | m[offset + 12] = 0.0f;
449 | m[offset + 13] = 0.0f;
450 | m[offset + 15] = 0.0f;
451 | }
452 |
453 | /**
454 | * Define a projection matrix in terms of a field of view angle, an
455 | * aspect ratio, and z clip planes
456 | *
457 | * @param m the float array that holds the perspective matrix
458 | * @param offset the offset into float array m where the perspective
459 | * matrix data is written
460 | * @param fovy field of view in y direction, in degrees
461 | * @param aspect width to height aspect ratio of the viewport
462 | * @param zNear
463 | * @param zFar
464 | */
465 | public static void perspectiveM(float[] m, int offset, float fovy, float aspect, float zNear, float zFar) {
466 | float f = 1.0f / (float) Math.tan(fovy * (Math.PI / 360.0));
467 | float rangeReciprocal = 1.0f / (zNear - zFar);
468 |
469 | m[offset + 0] = f / aspect;
470 | m[offset + 1] = 0.0f;
471 | m[offset + 2] = 0.0f;
472 | m[offset + 3] = 0.0f;
473 |
474 | m[offset + 4] = 0.0f;
475 | m[offset + 5] = f;
476 | m[offset + 6] = 0.0f;
477 | m[offset + 7] = 0.0f;
478 |
479 | m[offset + 8] = 0.0f;
480 | m[offset + 9] = 0.0f;
481 | m[offset + 10] = (zFar + zNear) * rangeReciprocal;
482 | m[offset + 11] = -1.0f;
483 |
484 | m[offset + 12] = 0.0f;
485 | m[offset + 13] = 0.0f;
486 | m[offset + 14] = 2.0f * zFar * zNear * rangeReciprocal;
487 | m[offset + 15] = 0.0f;
488 | }
489 |
490 | /**
491 | * Computes the length of a vector
492 | *
493 | * @param x x coordinate of a vector
494 | * @param y y coordinate of a vector
495 | * @param z z coordinate of a vector
496 | * @return the length of a vector
497 | */
498 | public static float length(float x, float y, float z) {
499 | return (float) Math.sqrt(x * x + y * y + z * z);
500 | }
501 |
502 | /**
503 | * Sets matrix m to the identity matrix.
504 | *
505 | * @param sm returns the result
506 | * @param smOffset index into sm where the result matrix starts
507 | */
508 | public static void setIdentityM(float[] sm, int smOffset) {
509 | for (int i = 0; i < 16; i++) {
510 | sm[smOffset + i] = 0;
511 | }
512 | for (int i = 0; i < 16; i += 5) {
513 | sm[smOffset + i] = 1.0f;
514 | }
515 | }
516 |
517 | /**
518 | * Scales matrix m by x, y, and z, putting the result in sm
519 | *
520 | * @param sm returns the result
521 | * @param smOffset index into sm where the result matrix starts
522 | * @param m source matrix
523 | * @param mOffset index into m where the source matrix starts
524 | * @param x scale factor x
525 | * @param y scale factor y
526 | * @param z scale factor z
527 | */
528 | public static void scaleM(float[] sm, int smOffset, float[] m, int mOffset, float x, float y, float z) {
529 | for (int i = 0; i < 4; i++) {
530 | int smi = smOffset + i;
531 | int mi = mOffset + i;
532 | sm[smi] = m[mi] * x;
533 | sm[4 + smi] = m[4 + mi] * y;
534 | sm[8 + smi] = m[8 + mi] * z;
535 | sm[12 + smi] = m[12 + mi];
536 | }
537 | }
538 |
539 | /**
540 | * Scales matrix m in place by sx, sy, and sz
541 | *
542 | * @param m matrix to scale
543 | * @param mOffset index into m where the matrix starts
544 | * @param x scale factor x
545 | * @param y scale factor y
546 | * @param z scale factor z
547 | */
548 | public static void scaleM(float[] m, int mOffset, float x, float y, float z) {
549 | for (int i = 0; i < 4; i++) {
550 | int mi = mOffset + i;
551 | m[mi] *= x;
552 | m[4 + mi] *= y;
553 | m[8 + mi] *= z;
554 | }
555 | }
556 |
557 | /**
558 | * Translates matrix m by x, y, and z, putting the result in tm
559 | *
560 | * @param tm returns the result
561 | * @param tmOffset index into sm where the result matrix starts
562 | * @param m source matrix
563 | * @param mOffset index into m where the source matrix starts
564 | * @param x translation factor x
565 | * @param y translation factor y
566 | * @param z translation factor z
567 | */
568 | public static void translateM(float[] tm, int tmOffset, float[] m, int mOffset, float x, float y, float z) {
569 | for (int i = 0; i < 12; i++) {
570 | tm[tmOffset + i] = m[mOffset + i];
571 | }
572 | for (int i = 0; i < 4; i++) {
573 | int tmi = tmOffset + i;
574 | int mi = mOffset + i;
575 | tm[12 + tmi] = m[mi] * x + m[4 + mi] * y + m[8 + mi] * z + m[12 + mi];
576 | }
577 | }
578 |
579 | /**
580 | * Translates matrix m by x, y, and z in place.
581 | *
582 | * @param m matrix
583 | * @param mOffset index into m where the matrix starts
584 | * @param x translation factor x
585 | * @param y translation factor y
586 | * @param z translation factor z
587 | */
588 | public static void translateM(float[] m, int mOffset, float x, float y, float z) {
589 | for (int i = 0; i < 4; i++) {
590 | int mi = mOffset + i;
591 | m[12 + mi] += m[mi] * x + m[4 + mi] * y + m[8 + mi] * z;
592 | }
593 | }
594 |
595 | /**
596 | * Rotates matrix m by angle a (in degrees) around the axis (x, y, z)
597 | *
598 | * @param rm returns the result
599 | * @param rmOffset index into rm where the result matrix starts
600 | * @param m source matrix
601 | * @param mOffset index into m where the source matrix starts
602 | * @param a angle to rotate in degrees
603 | * @param x scale factor x
604 | * @param y scale factor y
605 | * @param z scale factor z
606 | */
607 | public static void rotateM(float[] rm, int rmOffset, float[] m, int mOffset, float a, float x, float y, float z) {
608 | synchronized (TEMP_MATRIX_ARRAY) {
609 | setRotateM(TEMP_MATRIX_ARRAY, 0, a, x, y, z);
610 | multiplyMM(rm, rmOffset, m, mOffset, TEMP_MATRIX_ARRAY, 0);
611 | }
612 | }
613 |
614 | /**
615 | * Rotates matrix m in place by angle a (in degrees)
616 | * around the axis (x, y, z)
617 | *
618 | * @param m source matrix
619 | * @param mOffset index into m where the matrix starts
620 | * @param a angle to rotate in degrees
621 | * @param x scale factor x
622 | * @param y scale factor y
623 | * @param z scale factor z
624 | */
625 | public static void rotateM(float[] m, int mOffset, float a, float x, float y, float z) {
626 | synchronized (TEMP_MATRIX_ARRAY) {
627 | setRotateM(TEMP_MATRIX_ARRAY, 0, a, x, y, z);
628 | multiplyMM(TEMP_MATRIX_ARRAY, 16, m, mOffset, TEMP_MATRIX_ARRAY, 0);
629 | System.arraycopy(TEMP_MATRIX_ARRAY, 16, m, mOffset, 16);
630 | }
631 | }
632 |
633 | /**
634 | * Rotates matrix m by angle a (in degrees) around the axis (x, y, z)
635 | *
636 | * @param rm returns the result
637 | * @param rmOffset index into rm where the result matrix starts
638 | * @param a angle to rotate in degrees
639 | * @param x scale factor x
640 | * @param y scale factor y
641 | * @param z scale factor z
642 | */
643 | public static void setRotateM(float[] rm, int rmOffset, float a, float x, float y, float z) {
644 | rm[rmOffset + 3] = 0;
645 | rm[rmOffset + 7] = 0;
646 | rm[rmOffset + 11] = 0;
647 | rm[rmOffset + 12] = 0;
648 | rm[rmOffset + 13] = 0;
649 | rm[rmOffset + 14] = 0;
650 | rm[rmOffset + 15] = 1;
651 | a *= (float) (Math.PI / 180.0f);
652 | float s = (float) Math.sin(a);
653 | float c = (float) Math.cos(a);
654 | if (1.0f == x && 0.0f == y && 0.0f == z) {
655 | rm[rmOffset + 5] = c;
656 | rm[rmOffset + 10] = c;
657 | rm[rmOffset + 6] = s;
658 | rm[rmOffset + 9] = -s;
659 | rm[rmOffset + 1] = 0;
660 | rm[rmOffset + 2] = 0;
661 | rm[rmOffset + 4] = 0;
662 | rm[rmOffset + 8] = 0;
663 | rm[rmOffset + 0] = 1;
664 | } else if (0.0f == x && 1.0f == y && 0.0f == z) {
665 | rm[rmOffset + 0] = c;
666 | rm[rmOffset + 10] = c;
667 | rm[rmOffset + 8] = s;
668 | rm[rmOffset + 2] = -s;
669 | rm[rmOffset + 1] = 0;
670 | rm[rmOffset + 4] = 0;
671 | rm[rmOffset + 6] = 0;
672 | rm[rmOffset + 9] = 0;
673 | rm[rmOffset + 5] = 1;
674 | } else if (0.0f == x && 0.0f == y && 1.0f == z) {
675 | rm[rmOffset + 0] = c;
676 | rm[rmOffset + 5] = c;
677 | rm[rmOffset + 1] = s;
678 | rm[rmOffset + 4] = -s;
679 | rm[rmOffset + 2] = 0;
680 | rm[rmOffset + 6] = 0;
681 | rm[rmOffset + 8] = 0;
682 | rm[rmOffset + 9] = 0;
683 | rm[rmOffset + 10] = 1;
684 | } else {
685 | float len = length(x, y, z);
686 | if (1.0f != len) {
687 | float recipLen = 1.0f / len;
688 | x *= recipLen;
689 | y *= recipLen;
690 | z *= recipLen;
691 | }
692 | float nc = 1.0f - c;
693 | float xy = x * y;
694 | float yz = y * z;
695 | float zx = z * x;
696 | float xs = x * s;
697 | float ys = y * s;
698 | float zs = z * s;
699 | rm[rmOffset + 0] = x * x * nc + c;
700 | rm[rmOffset + 4] = xy * nc - zs;
701 | rm[rmOffset + 8] = zx * nc + ys;
702 | rm[rmOffset + 1] = xy * nc + zs;
703 | rm[rmOffset + 5] = y * y * nc + c;
704 | rm[rmOffset + 9] = yz * nc - xs;
705 | rm[rmOffset + 2] = zx * nc - ys;
706 | rm[rmOffset + 6] = yz * nc + xs;
707 | rm[rmOffset + 10] = z * z * nc + c;
708 | }
709 | }
710 |
711 | /**
712 | * Converts Euler angles to a rotation matrix
713 | *
714 | * @param rm returns the result
715 | * @param rmOffset index into rm where the result matrix starts
716 | * @param x angle of rotation, in degrees
717 | * @param y angle of rotation, in degrees
718 | * @param z angle of rotation, in degrees
719 | */
720 | public static void setRotateEulerM(float[] rm, int rmOffset, float x, float y, float z) {
721 | x *= (float) (Math.PI / 180.0f);
722 | y *= (float) (Math.PI / 180.0f);
723 | z *= (float) (Math.PI / 180.0f);
724 | float cx = (float) Math.cos(x);
725 | float sx = (float) Math.sin(x);
726 | float cy = (float) Math.cos(y);
727 | float sy = (float) Math.sin(y);
728 | float cz = (float) Math.cos(z);
729 | float sz = (float) Math.sin(z);
730 | float cxsy = cx * sy;
731 | float sxsy = sx * sy;
732 |
733 | rm[rmOffset + 0] = cy * cz;
734 | rm[rmOffset + 1] = -cy * sz;
735 | rm[rmOffset + 2] = sy;
736 | rm[rmOffset + 3] = 0.0f;
737 |
738 | rm[rmOffset + 4] = cxsy * cz + cx * sz;
739 | rm[rmOffset + 5] = -cxsy * sz + cx * cz;
740 | rm[rmOffset + 6] = -sx * cy;
741 | rm[rmOffset + 7] = 0.0f;
742 |
743 | rm[rmOffset + 8] = -sxsy * cz + sx * sz;
744 | rm[rmOffset + 9] = sxsy * sz + sx * cz;
745 | rm[rmOffset + 10] = cx * cy;
746 | rm[rmOffset + 11] = 0.0f;
747 |
748 | rm[rmOffset + 12] = 0.0f;
749 | rm[rmOffset + 13] = 0.0f;
750 | rm[rmOffset + 14] = 0.0f;
751 | rm[rmOffset + 15] = 1.0f;
752 | }
753 |
754 | /**
755 | * Define a viewing transformation in terms of an eye point, a center of
756 | * view, and an up vector.
757 | *
758 | * @param rm returns the result
759 | * @param rmOffset index into rm where the result matrix starts
760 | * @param eyeX eye point X
761 | * @param eyeY eye point Y
762 | * @param eyeZ eye point Z
763 | * @param centerX center of view X
764 | * @param centerY center of view Y
765 | * @param centerZ center of view Z
766 | * @param upX up vector X
767 | * @param upY up vector Y
768 | * @param upZ up vector Z
769 | */
770 | public static void setLookAtM(float[] rm, int rmOffset, float eyeX, float eyeY, float eyeZ, float centerX,
771 | float centerY, float centerZ, float upX, float upY, float upZ) {
772 |
773 | // See the OpenGL GLUT documentation for gluLookAt for a description
774 | // of the algorithm. We implement it in a straightforward way:
775 |
776 | float fx = centerX - eyeX;
777 | float fy = centerY - eyeY;
778 | float fz = centerZ - eyeZ;
779 |
780 | // Normalize f
781 | float rlf = 1.0f / Matrix.length(fx, fy, fz);
782 | fx *= rlf;
783 | fy *= rlf;
784 | fz *= rlf;
785 |
786 | // compute s = f x up (x means "cross product")
787 | float sx = fy * upZ - fz * upY;
788 | float sy = fz * upX - fx * upZ;
789 | float sz = fx * upY - fy * upX;
790 |
791 | // and normalize s
792 | float rls = 1.0f / Matrix.length(sx, sy, sz);
793 | sx *= rls;
794 | sy *= rls;
795 | sz *= rls;
796 |
797 | // compute u = s x f
798 | float ux = sy * fz - sz * fy;
799 | float uy = sz * fx - sx * fz;
800 | float uz = sx * fy - sy * fx;
801 |
802 | rm[rmOffset + 0] = sx;
803 | rm[rmOffset + 1] = ux;
804 | rm[rmOffset + 2] = -fx;
805 | rm[rmOffset + 3] = 0.0f;
806 |
807 | rm[rmOffset + 4] = sy;
808 | rm[rmOffset + 5] = uy;
809 | rm[rmOffset + 6] = -fy;
810 | rm[rmOffset + 7] = 0.0f;
811 |
812 | rm[rmOffset + 8] = sz;
813 | rm[rmOffset + 9] = uz;
814 | rm[rmOffset + 10] = -fz;
815 | rm[rmOffset + 11] = 0.0f;
816 |
817 | rm[rmOffset + 12] = 0.0f;
818 | rm[rmOffset + 13] = 0.0f;
819 | rm[rmOffset + 14] = 0.0f;
820 | rm[rmOffset + 15] = 1.0f;
821 |
822 | translateM(rm, rmOffset, -eyeX, -eyeY, -eyeZ);
823 | }
824 | }
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/representation/MatrixF4x4.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.representation;
2 |
3 | import android.util.Log;
4 |
5 | /**
6 | * The Class MatrixF4x4.
7 | *
 * Internally the matrix is structured as
9 | *
10 | * [ x0 , y0 , z0 , w0 ] [ x1 , y1 , z1 , w1 ] [ x2 , y2 , z2 , w2 ] [ x3 , y3 , z3 , w3 ]
11 | *
 * it is recommended that when setting the matrix values individually you use the set{x,#} methods, where 'x' is
 * either x, y, z or w and # is either 0, 1, 2 or 3, setY1 for example. The reason you should use these functions is
 * that they map directly to that part of the matrix regardless of whether the internal matrix is column
 * major or not. If the matrix is either of length 9 or 16 it will be able to determine if it can set the value or not.
 * If the matrix is of size 9 but you set say w2, the value will not be set and the set method will return without any
 * error.
18 | *
19 | */
20 | public class MatrixF4x4 {
21 |
22 | public static final int[] matIndCol9_3x3 = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
23 | public static final int[] matIndCol16_3x3 = { 0, 1, 2, 4, 5, 6, 8, 9, 10 };
24 | public static final int[] matIndRow9_3x3 = { 0, 3, 6, 1, 4, 7, 3, 5, 8 };
25 | public static final int[] matIndRow16_3x3 = { 0, 4, 8, 1, 5, 9, 2, 6, 10 };
26 |
27 | public static final int[] matIndCol16_4x4 = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
28 | public static final int[] matIndRow16_4x4 = { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 };
29 |
30 | private boolean colMaj = true;
31 |
32 | /** The matrix. */
33 | public float[] matrix;
34 |
35 | /**
36 | * Instantiates a new matrixf4x4. The Matrix is assumed to be Column major, however you can change this by using the
37 | * setColumnMajor function to false and it will operate like a row major matrix.
38 | */
39 | public MatrixF4x4() {
40 | // The matrix is defined as float[column][row]
41 | this.matrix = new float[16];
42 | Matrix.setIdentityM(this.matrix, 0);
43 | }
44 |
45 | /**
46 | * Gets the matrix.
47 | *
48 | * @return the matrix, can be null if the matrix is invalid
49 | */
50 | public float[] getMatrix() {
51 | return this.matrix;
52 | }
53 |
54 | public int size() {
55 | return matrix.length;
56 | }
57 |
58 | /**
59 | * Sets the matrix from a float[16] array. If the matrix you set isn't 16 long then the matrix will be set as
60 | * invalid.
61 | *
62 | * @param matrix the new matrix
63 | */
64 | public void setMatrix(float[] matrix) {
65 | if (matrix.length == 16 || matrix.length == 9)
66 | this.matrix = matrix;
67 | else {
68 | throw new IllegalArgumentException("Matrix set is invalid, size is " + matrix.length + " expected 9 or 16");
69 | }
70 | }
71 |
    /**
     * Copies the values of another matrix into this one. Exactly this
     * matrix's length worth of elements is copied, so the source's backing
     * array must be at least as long as this one's.
     *
     * @param source the matrix to copy values from
     */
    public void set(MatrixF4x4 source) {
        System.arraycopy(source.matrix, 0, matrix, 0, matrix.length);
    }
75 |
76 | /**
77 | * Set whether the internal data is col major by passing true, or false for a row major matrix. The matrix is column
78 | * major by default.
79 | *
80 | * @param colMajor
81 | */
82 | public void setColumnMajor(boolean colMajor) {
83 | this.colMaj = colMajor;
84 | }
85 |
86 | /**
87 | * Find out if the stored matrix is column major
88 | *
89 | * @return
90 | */
91 | public boolean isColumnMajor() {
92 | return colMaj;
93 | }
94 |
95 | /**
96 | * Multiply the given vector by this matrix. This should only be used if the matrix is of size 16 (use the
97 | * matrix.size() method).
98 | *
99 | * @param vector A vector of length 4.
100 | */
101 | public void multiplyVector4fByMatrix(Vector4f vector) {
102 |
103 | if (matrix.length == 16) {
104 | float x = 0;
105 | float y = 0;
106 | float z = 0;
107 | float w = 0;
108 |
109 | float[] vectorArray = vector.array();
110 |
111 | if (colMaj) {
112 | for (int i = 0; i < 4; i++) {
113 |
114 | int k = i * 4;
115 |
116 | x += this.matrix[k + 0] * vectorArray[i];
117 | y += this.matrix[k + 1] * vectorArray[i];
118 | z += this.matrix[k + 2] * vectorArray[i];
119 | w += this.matrix[k + 3] * vectorArray[i];
120 | }
121 | } else {
122 | for (int i = 0; i < 4; i++) {
123 |
124 | x += this.matrix[0 + i] * vectorArray[i];
125 | y += this.matrix[4 + i] * vectorArray[i];
126 | z += this.matrix[8 + i] * vectorArray[i];
127 | w += this.matrix[12 + i] * vectorArray[i];
128 | }
129 | }
130 |
131 | vector.setX(x);
132 | vector.setY(y);
133 | vector.setZ(z);
134 | vector.setW(w);
135 | } else
136 | Log.e("matrix", "Matrix is invalid, is " + matrix.length + " long, this equation expects a 16 value matrix");
137 | }
138 |
139 | /**
140 | * Multiply the given vector by this matrix. This should only be used if the matrix is of size 9 (use the
141 | * matrix.size() method).
142 | *
143 | * @param vector A vector of length 3.
144 | */
145 | public void multiplyVector3fByMatrix(Vector3f vector) {
146 |
147 | if (matrix.length == 9) {
148 | float x = 0;
149 | float y = 0;
150 | float z = 0;
151 |
152 | float[] vectorArray = vector.toArray();
153 |
154 | if (!colMaj) {
155 | for (int i = 0; i < 3; i++) {
156 |
157 | int k = i * 3;
158 |
159 | x += this.matrix[k + 0] * vectorArray[i];
160 | y += this.matrix[k + 1] * vectorArray[i];
161 | z += this.matrix[k + 2] * vectorArray[i];
162 | }
163 | } else {
164 | for (int i = 0; i < 3; i++) {
165 |
166 | x += this.matrix[0 + i] * vectorArray[i];
167 | y += this.matrix[3 + i] * vectorArray[i];
168 | z += this.matrix[6 + i] * vectorArray[i];
169 | }
170 | }
171 |
172 | vector.setX(x);
173 | vector.setY(y);
174 | vector.setZ(z);
175 | } else
176 | Log.e("matrix", "Matrix is invalid, is " + matrix.length
177 | + " long, this function expects the internal matrix to be of size 9");
178 | }
179 |
180 | /**
181 | * Multiply matrix4x4 by matrix.
182 | *
183 | * @param matrixf the matrixf
184 | */
185 | public void multiplyMatrix4x4ByMatrix(MatrixF4x4 matrixf) {
186 |
187 | // TODO implement Strassen Algorithm in place of this slower naive one.
188 | float[] bufferMatrix = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
189 | float[] matrix = matrixf.getMatrix();
190 |
191 | /**
192 | * for(int i = 0; i < 4; i++){ for(int j = 0; j < 4; j++){
193 | *
194 | * int k = i * 4; bufferMatrix[0 + j] += this.matrix[k + j] * matrix[0 * 4 + i]; bufferMatrix[1 * 4 + j] +=
195 | * this.matrix[k + j] * matrix[1 * 4 + i]; bufferMatrix[2 * 4 + j] += this.matrix[k + j] * matrix[2 * 4 +
196 | * i]; bufferMatrix[3 * 4 + j] += this.matrix[k + j] * matrix[3 * 4 + i]; } }
197 | */
198 |
199 | multiplyMatrix(matrix, 0, bufferMatrix, 0);
200 | matrixf.setMatrix(bufferMatrix);
201 | }
202 |
203 | public void multiplyMatrix(float[] input, int inputOffset, float[] output, int outputOffset) {
204 | float[] bufferMatrix = output;
205 | float[] matrix = input;
206 |
207 | for (int i = 0; i < 4; i++) {
208 | for (int j = 0; j < 4; j++) {
209 |
210 | int k = i * 4;
211 | bufferMatrix[outputOffset + 0 + j] += this.matrix[k + j] * matrix[inputOffset + 0 * 4 + i];
212 | bufferMatrix[outputOffset + 1 * 4 + j] += this.matrix[k + j] * matrix[inputOffset + 1 * 4 + i];
213 | bufferMatrix[outputOffset + 2 * 4 + j] += this.matrix[k + j] * matrix[inputOffset + 2 * 4 + i];
214 | bufferMatrix[outputOffset + 3 * 4 + j] += this.matrix[k + j] * matrix[inputOffset + 3 * 4 + i];
215 | }
216 | }
217 | }
218 |
219 | /**
220 | * This will rearrange the internal structure of the matrix. Be careful though as this is an expensive operation.
221 | */
222 | public void transpose() {
223 | if (this.matrix.length == 16) {
224 | float[] newMatrix = new float[16];
225 | for (int i = 0; i < 4; i++) {
226 |
227 | int k = i * 4;
228 |
229 | newMatrix[k] = matrix[i];
230 | newMatrix[k + 1] = matrix[4 + i];
231 | newMatrix[k + 2] = matrix[8 + i];
232 | newMatrix[k + 3] = matrix[12 + i];
233 | }
234 | matrix = newMatrix;
235 |
236 | } else {
237 | float[] newMatrix = new float[9];
238 | for (int i = 0; i < 3; i++) {
239 |
240 | int k = i * 3;
241 |
242 | newMatrix[k] = matrix[i];
243 | newMatrix[k + 1] = matrix[3 + i];
244 | newMatrix[k + 2] = matrix[6 + i];
245 | }
246 | matrix = newMatrix;
247 | }
248 |
249 | }
250 |
251 | public void setX0(float value) {
252 |
253 | if (matrix.length == 16) {
254 | if (colMaj)
255 | matrix[matIndCol16_3x3[0]] = value;
256 | else
257 | matrix[matIndRow16_3x3[0]] = value;
258 | } else {
259 | if (colMaj)
260 | matrix[matIndCol9_3x3[0]] = value;
261 | else
262 | matrix[matIndRow9_3x3[0]] = value;
263 | }
264 | }
265 |
266 | public void setX1(float value) {
267 |
268 | if (matrix.length == 16) {
269 | if (colMaj)
270 | matrix[matIndCol16_3x3[1]] = value;
271 | else
272 | matrix[matIndRow16_3x3[1]] = value;
273 | } else {
274 | if (colMaj)
275 | matrix[matIndCol9_3x3[1]] = value;
276 | else
277 | matrix[matIndRow9_3x3[1]] = value;
278 | }
279 | }
280 |
281 | public void setX2(float value) {
282 |
283 | if (matrix.length == 16) {
284 | if (colMaj)
285 | matrix[matIndCol16_3x3[2]] = value;
286 | else
287 | matrix[matIndRow16_3x3[2]] = value;
288 | } else {
289 | if (colMaj)
290 | matrix[matIndCol9_3x3[2]] = value;
291 | else
292 | matrix[matIndRow9_3x3[2]] = value;
293 | }
294 | }
295 |
296 | public void setY0(float value) {
297 |
298 | if (matrix.length == 16) {
299 | if (colMaj)
300 | matrix[matIndCol16_3x3[3]] = value;
301 | else
302 | matrix[matIndRow16_3x3[3]] = value;
303 | } else {
304 | if (colMaj)
305 | matrix[matIndCol9_3x3[3]] = value;
306 | else
307 | matrix[matIndRow9_3x3[3]] = value;
308 | }
309 | }
310 |
311 | public void setY1(float value) {
312 |
313 | if (matrix.length == 16) {
314 | if (colMaj)
315 | matrix[matIndCol16_3x3[4]] = value;
316 | else
317 | matrix[matIndRow16_3x3[4]] = value;
318 | } else {
319 | if (colMaj)
320 | matrix[matIndCol9_3x3[4]] = value;
321 | else
322 | matrix[matIndRow9_3x3[4]] = value;
323 | }
324 | }
325 |
326 | public void setY2(float value) {
327 |
328 | if (matrix.length == 16) {
329 | if (colMaj)
330 | matrix[matIndCol16_3x3[5]] = value;
331 | else
332 | matrix[matIndRow16_3x3[5]] = value;
333 | } else {
334 | if (colMaj)
335 | matrix[matIndCol9_3x3[5]] = value;
336 | else
337 | matrix[matIndRow9_3x3[5]] = value;
338 | }
339 | }
340 |
341 | public void setZ0(float value) {
342 |
343 | if (matrix.length == 16) {
344 | if (colMaj)
345 | matrix[matIndCol16_3x3[6]] = value;
346 | else
347 | matrix[matIndRow16_3x3[6]] = value;
348 | } else {
349 | if (colMaj)
350 | matrix[matIndCol9_3x3[6]] = value;
351 | else
352 | matrix[matIndRow9_3x3[6]] = value;
353 | }
354 | }
355 |
356 | public void setZ1(float value) {
357 |
358 | if (matrix.length == 16) {
359 | if (colMaj)
360 | matrix[matIndCol16_3x3[7]] = value;
361 | else
362 | matrix[matIndRow16_3x3[7]] = value;
363 | } else {
364 | if (colMaj)
365 | matrix[matIndCol9_3x3[7]] = value;
366 | else
367 | matrix[matIndRow9_3x3[7]] = value;
368 | }
369 | }
370 |
371 | public void setZ2(float value) {
372 |
373 | if (matrix.length == 16) {
374 | if (colMaj)
375 | matrix[matIndCol16_3x3[8]] = value;
376 | else
377 | matrix[matIndRow16_3x3[8]] = value;
378 | } else {
379 | if (colMaj)
380 | matrix[matIndCol9_3x3[8]] = value;
381 | else
382 | matrix[matIndRow9_3x3[8]] = value;
383 | }
384 | }
385 |
386 | public void setX3(float value) {
387 |
388 | if (matrix.length == 16) {
389 | if (colMaj)
390 | matrix[matIndCol16_4x4[3]] = value;
391 | else
392 | matrix[matIndRow16_4x4[3]] = value;
393 | }else
394 | throw new IllegalStateException("length of matrix should be 16");
395 | }
396 |
397 | public void setY3(float value) {
398 |
399 | if (matrix.length == 16) {
400 | if (colMaj)
401 | matrix[matIndCol16_4x4[7]] = value;
402 | else
403 | matrix[matIndRow16_4x4[7]] = value;
404 | }else
405 | throw new IllegalStateException("length of matrix should be 16");
406 | }
407 |
408 | public void setZ3(float value) {
409 |
410 | if (matrix.length == 16) {
411 | if (colMaj)
412 | matrix[matIndCol16_4x4[11]] = value;
413 | else
414 | matrix[matIndRow16_4x4[11]] = value;
415 | }else
416 | throw new IllegalStateException("length of matrix should be 16");
417 | }
418 |
419 | public void setW0(float value) {
420 |
421 | if (matrix.length == 16) {
422 | if (colMaj)
423 | matrix[matIndCol16_4x4[12]] = value;
424 | else
425 | matrix[matIndRow16_4x4[12]] = value;
426 | }else
427 | throw new IllegalStateException("length of matrix should be 16");
428 | }
429 |
430 | public void setW1(float value) {
431 |
432 | if (matrix.length == 16) {
433 | if (colMaj)
434 | matrix[matIndCol16_4x4[13]] = value;
435 | else
436 | matrix[matIndRow16_4x4[13]] = value;
437 | }else
438 | throw new IllegalStateException("length of matrix should be 16");
439 | }
440 |
441 | public void setW2(float value) {
442 |
443 | if (matrix.length == 16) {
444 | if (colMaj)
445 | matrix[matIndCol16_4x4[14]] = value;
446 | else
447 | matrix[matIndRow16_4x4[14]] = value;
448 | }else
449 | throw new IllegalStateException("length of matrix should be 16");
450 | }
451 |
452 | public void setW3(float value) {
453 |
454 | if (matrix.length == 16) {
455 | if (colMaj)
456 | matrix[matIndCol16_4x4[15]] = value;
457 | else
458 | matrix[matIndRow16_4x4[15]] = value;
459 | }else
460 | throw new IllegalStateException("length of matrix should be 16");
461 | }
462 |
463 | }
464 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/representation/Quaternion.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.representation;
2 |
3 | /**
4 | * The Quaternion class. A Quaternion is a four-dimensional vector that is used to represent rotations of a rigid body
5 | * in the 3D space. It is very similar to a rotation vector; it contains an angle, encoded into the w component
6 | * and three components to describe the rotation-axis (encoded into x, y, z).
7 | *
8 | *
9 | * Quaternions allow for elegant descriptions of 3D rotations, interpolations as well as extrapolations and compared to
10 | * Euler angles, they don't suffer from gimbal lock. Interpolations between two Quaternions are called SLERP (Spherical
11 | * Linear Interpolation).
12 | *
13 | *
14 | *
15 |  * This class additionally caches the same rotation as a 4x4 rotation-matrix representation.
16 | *
17 | *
18 | * @author Leigh Beattie, Alexander Pacha
19 | *
20 | */
21 | public class Quaternion extends Vector4f {
22 |
23 | /**
24 | * Rotation matrix that contains the same rotation as the Quaternion in a 4x4 homogenised rotation matrix.
25 | * Remember that for performance reasons, this matrix is only updated, when it is accessed and not on every change
26 | * of the quaternion-values.
27 | */
28 | private MatrixF4x4 matrix;
29 |
30 | /**
31 | * This variable is used to synchronise the rotation matrix with the current quaternion values. If someone has
32 | * changed the
33 | * quaternion numbers then the matrix will need to be updated. To save on processing we only really want to update
34 | * the matrix when someone wants to fetch it, instead of whenever someone sets a quaternion value.
35 | */
36 | private boolean dirty = false;
37 |
38 | private Vector4f tmpVector = new Vector4f();
39 | private Quaternion tmpQuaternion;
40 |
41 | /**
42 | * Creates a new Quaternion object and initialises it with the identity Quaternion
43 | */
44 | public Quaternion() {
45 | super();
46 | matrix = new MatrixF4x4();
47 | loadIdentityQuat();
48 | }
49 |
50 | /**
51 | * Normalise this Quaternion into a unity Quaternion.
52 | */
53 | public void normalise() {
54 | this.dirty = true;
55 | float mag = (float) Math.sqrt(points[3] * points[3] + points[0] * points[0] + points[1] * points[1] + points[2]
56 | * points[2]);
57 | points[3] = points[3] / mag;
58 | points[0] = points[0] / mag;
59 | points[1] = points[1] / mag;
60 | points[2] = points[2] / mag;
61 | }
62 |
63 | @Override
64 | public void normalize() {
65 | normalise();
66 | }
67 |
68 | /**
69 | * Copies the values from the given quaternion to this one
70 | *
71 | * @param quat The quaternion to copy from
72 | */
73 | public void set(Quaternion quat) {
74 | this.dirty = true;
75 | copyVec4(quat);
76 | }
77 |
78 | /**
79 | * Multiply this quaternion by the input quaternion and store the result in the out quaternion
80 | *
81 | * @param input
82 | * @param output
83 | */
84 | public void multiplyByQuat(Quaternion input, Quaternion output) {
85 |
86 | if (input != output) {
87 | output.points[3] = (points[3] * input.points[3] - points[0] * input.points[0] - points[1] * input.points[1] - points[2]
88 | * input.points[2]); //w = w1w2 - x1x2 - y1y2 - z1z2
89 | output.points[0] = (points[3] * input.points[0] + points[0] * input.points[3] + points[1] * input.points[2] - points[2]
90 | * input.points[1]); //x = w1x2 + x1w2 + y1z2 - z1y2
91 | output.points[1] = (points[3] * input.points[1] + points[1] * input.points[3] + points[2] * input.points[0] - points[0]
92 | * input.points[2]); //y = w1y2 + y1w2 + z1x2 - x1z2
93 | output.points[2] = (points[3] * input.points[2] + points[2] * input.points[3] + points[0] * input.points[1] - points[1]
94 | * input.points[0]); //z = w1z2 + z1w2 + x1y2 - y1x2
95 | } else {
96 | tmpVector.points[0] = input.points[0];
97 | tmpVector.points[1] = input.points[1];
98 | tmpVector.points[2] = input.points[2];
99 | tmpVector.points[3] = input.points[3];
100 |
101 | output.points[3] = (points[3] * tmpVector.points[3] - points[0] * tmpVector.points[0] - points[1]
102 | * tmpVector.points[1] - points[2] * tmpVector.points[2]); //w = w1w2 - x1x2 - y1y2 - z1z2
103 | output.points[0] = (points[3] * tmpVector.points[0] + points[0] * tmpVector.points[3] + points[1]
104 | * tmpVector.points[2] - points[2] * tmpVector.points[1]); //x = w1x2 + x1w2 + y1z2 - z1y2
105 | output.points[1] = (points[3] * tmpVector.points[1] + points[1] * tmpVector.points[3] + points[2]
106 | * tmpVector.points[0] - points[0] * tmpVector.points[2]); //y = w1y2 + y1w2 + z1x2 - x1z2
107 | output.points[2] = (points[3] * tmpVector.points[2] + points[2] * tmpVector.points[3] + points[0]
108 | * tmpVector.points[1] - points[1] * tmpVector.points[0]); //z = w1z2 + z1w2 + x1y2 - y1x2
109 | }
110 | }
111 |
112 | /**
113 | * Multiply this quaternion by the input quaternion and store the result in the out quaternion
114 | *
115 | * @param input
116 | */
117 | public void multiplyByQuat(Quaternion input) {
118 | this.dirty = true;
119 | if(tmpQuaternion == null) tmpQuaternion = new Quaternion();
120 | tmpQuaternion.copyVec4(this);
121 | multiplyByQuat(input, tmpQuaternion);
122 | this.copyVec4(tmpQuaternion);
123 | }
124 |
125 | /**
126 | * Multiplies this Quaternion with a scalar
127 | *
128 | * @param scalar the value that the vector should be multiplied with
129 | */
130 | public void multiplyByScalar(float scalar) {
131 | this.dirty = true;
132 | multiplyByScalar(scalar);
133 | }
134 |
135 | /**
136 | * Add a quaternion to this quaternion
137 | *
138 | * @param input The quaternion that you want to add to this one
139 | */
140 | public void addQuat(Quaternion input) {
141 | this.dirty = true;
142 | addQuat(input, this);
143 | }
144 |
145 | /**
146 | * Add this quaternion and another quaternion together and store the result in the output quaternion
147 | *
148 | * @param input The quaternion you want added to this quaternion
149 | * @param output The quaternion you want to store the output in.
150 | */
151 | public void addQuat(Quaternion input, Quaternion output) {
152 | output.setX(getX() + input.getX());
153 | output.setY(getY() + input.getY());
154 | output.setZ(getZ() + input.getZ());
155 | output.setW(getW() + input.getW());
156 | }
157 |
158 | /**
159 | * Subtract a quaternion to this quaternion
160 | *
161 | * @param input The quaternion that you want to subtracted from this one
162 | */
163 | public void subQuat(Quaternion input) {
164 | this.dirty = true;
165 | subQuat(input, this);
166 | }
167 |
168 | /**
169 | * Subtract another quaternion from this quaternion and store the result in the output quaternion
170 | *
171 | * @param input The quaternion you want subtracted from this quaternion
172 | * @param output The quaternion you want to store the output in.
173 | */
174 | public void subQuat(Quaternion input, Quaternion output) {
175 | output.setX(getX() - input.getX());
176 | output.setY(getY() - input.getY());
177 | output.setZ(getZ() - input.getZ());
178 | output.setW(getW() - input.getW());
179 | }
180 |
181 | /**
182 | * Converts this Quaternion into the Rotation-Matrix representation which can be accessed by
183 | * {@link Quaternion#getMatrix4x4 getMatrix4x4}
184 | */
185 | private void convertQuatToMatrix() {
186 | float x = points[0];
187 | float y = points[1];
188 | float z = points[2];
189 | float w = points[3];
190 |
191 | matrix.setX0(1 - 2 * (y * y) - 2 * (z * z)); //1 - 2y2 - 2z2
192 | matrix.setX1(2 * (x * y) + 2 * (w * z)); // 2xy - 2wz
193 | matrix.setX2(2 * (x * z) - 2 * (w * y)); //2xz + 2wy
194 | matrix.setX3(0);
195 | matrix.setY0(2 * (x * y) - 2 * (w * z)); //2xy + 2wz
196 | matrix.setY1(1 - 2 * (x * x) - 2 * (z * z)); //1 - 2x2 - 2z2
197 | matrix.setY2(2 * (y * z) + 2 * (w * x)); // 2yz + 2wx
198 | matrix.setY3(0);
199 | matrix.setZ0(2 * (x * z) + 2 * (w * y)); //2xz + 2wy
200 | matrix.setZ1(2 * (y * z) - 2 * (w * x)); //2yz - 2wx
201 | matrix.setZ2(1 - 2 * (x * x) - 2 * (y * y)); //1 - 2x2 - 2y2
202 | matrix.setZ3(0);
203 | matrix.setW0(0);
204 | matrix.setW1(0);
205 | matrix.setW2(0);
206 | matrix.setW3(1);
207 | }
208 |
209 | /**
210 | * Get an axis angle representation of this quaternion.
211 | *
212 | * @param output Vector4f axis angle.
213 | */
214 | public void toAxisAngle(Vector4f output) {
215 | if (getW() > 1) {
216 | normalise(); // if w>1 acos and sqrt will produce errors, this cant happen if quaternion is normalised
217 | }
218 | float angle = 2 * (float) Math.toDegrees(Math.acos(getW()));
219 | float x;
220 | float y;
221 | float z;
222 |
223 | float s = (float) Math.sqrt(1 - getW() * getW()); // assuming quaternion normalised then w is less than 1, so term always positive.
224 | if (s < 0.001) { // test to avoid divide by zero, s is always positive due to sqrt
225 | // if s close to zero then direction of axis not important
226 | x = points[0]; // if it is important that axis is normalised then replace with x=1; y=z=0;
227 | y = points[1];
228 | z = points[2];
229 | } else {
230 | x = points[0] / s; // normalise axis
231 | y = points[1] / s;
232 | z = points[2] / s;
233 | }
234 |
235 | output.points[0] = x;
236 | output.points[1] = y;
237 | output.points[2] = z;
238 | output.points[3] = angle;
239 | }
240 |
241 | /**
242 | * Returns the heading, attitude and bank of this quaternion as euler angles in the double array respectively
243 | *
244 | * @return An array of size 3 containing the euler angles for this quaternion
245 | */
246 | public double[] toEulerAngles() {
247 | double[] ret = new double[3];
248 |
249 | ret[0] = Math.atan2(2 * points[1] * getW() - 2 * points[0] * points[2], 1 - 2 * (points[1] * points[1]) - 2
250 | * (points[2] * points[2])); // atan2(2*qy*qw-2*qx*qz , 1 - 2*qy2 - 2*qz2)
251 | ret[1] = Math.asin(2 * points[0] * points[1] + 2 * points[2] * getW()); // asin(2*qx*qy + 2*qz*qw)
252 | ret[2] = Math.atan2(2 * points[0] * getW() - 2 * points[1] * points[2], 1 - 2 * (points[0] * points[0]) - 2
253 | * (points[2] * points[2])); // atan2(2*qx*qw-2*qy*qz , 1 - 2*qx2 - 2*qz2)
254 |
255 | return ret;
256 | }
257 |
258 | /**
259 | * Sets the quaternion to an identity quaternion of 0,0,0,1.
260 | */
261 | public void loadIdentityQuat() {
262 | this.dirty = true;
263 | setX(0);
264 | setY(0);
265 | setZ(0);
266 | setW(1);
267 | }
268 |
269 | @Override
270 | public String toString() {
271 | return "{X: " + getX() + ", Y:" + getY() + ", Z:" + getZ() + ", W:" + getW() + "}";
272 | }
273 |
274 | /**
275 | * This is an internal method used to build a quaternion from a rotation matrix and then sets the current quaternion
276 | * from that matrix.
277 | *
278 | */
279 | private void generateQuaternionFromMatrix() {
280 |
281 | float qx;
282 | float qy;
283 | float qz;
284 | float qw;
285 |
286 | float[] mat = matrix.getMatrix();
287 | int[] indices = null;
288 |
289 | if (this.matrix.size() == 16) {
290 | if (this.matrix.isColumnMajor()) {
291 | indices = MatrixF4x4.matIndCol16_3x3;
292 | } else {
293 | indices = MatrixF4x4.matIndRow16_3x3;
294 | }
295 | } else {
296 | if (this.matrix.isColumnMajor()) {
297 | indices = MatrixF4x4.matIndCol9_3x3;
298 | } else {
299 | indices = MatrixF4x4.matIndRow9_3x3;
300 | }
301 | }
302 |
303 | int m00 = indices[0];
304 | int m01 = indices[1];
305 | int m02 = indices[2];
306 |
307 | int m10 = indices[3];
308 | int m11 = indices[4];
309 | int m12 = indices[5];
310 |
311 | int m20 = indices[6];
312 | int m21 = indices[7];
313 | int m22 = indices[8];
314 |
315 | float tr = mat[m00] + mat[m11] + mat[m22];
316 | if (tr > 0) {
317 | float s = (float) Math.sqrt(tr + 1.0) * 2; // S=4*qw
318 | qw = 0.25f * s;
319 | qx = (mat[m21] - mat[m12]) / s;
320 | qy = (mat[m02] - mat[m20]) / s;
321 | qz = (mat[m10] - mat[m01]) / s;
322 | } else if ((mat[m00] > mat[m11]) & (mat[m00] > mat[m22])) {
323 | float s = (float) Math.sqrt(1.0 + mat[m00] - mat[m11] - mat[m22]) * 2; // S=4*qx
324 | qw = (mat[m21] - mat[m12]) / s;
325 | qx = 0.25f * s;
326 | qy = (mat[m01] + mat[m10]) / s;
327 | qz = (mat[m02] + mat[m20]) / s;
328 | } else if (mat[m11] > mat[m22]) {
329 | float s = (float) Math.sqrt(1.0 + mat[m11] - mat[m00] - mat[m22]) * 2; // S=4*qy
330 | qw = (mat[m02] - mat[m20]) / s;
331 | qx = (mat[m01] + mat[m10]) / s;
332 | qy = 0.25f * s;
333 | qz = (mat[m12] + mat[m21]) / s;
334 | } else {
335 | float s = (float) Math.sqrt(1.0 + mat[m22] - mat[m00] - mat[m11]) * 2; // S=4*qz
336 | qw = (mat[m10] - mat[m01]) / s;
337 | qx = (mat[m02] + mat[m20]) / s;
338 | qy = (mat[m12] + mat[m21]) / s;
339 | qz = 0.25f * s;
340 | }
341 |
342 | setX(qx);
343 | setY(qy);
344 | setZ(qz);
345 | setW(qw);
346 | }
347 |
348 | /**
349 | * You can set the values for this quaternion based off a rotation matrix. If the matrix you supply is not a
350 | * rotation matrix this will fail. You MUST provide a 4x4 matrix.
351 | *
352 | * @param matrix A column major rotation matrix
353 | */
354 | public void setColumnMajor(float[] matrix) {
355 |
356 | this.matrix.setMatrix(matrix);
357 | this.matrix.setColumnMajor(true);
358 |
359 | generateQuaternionFromMatrix();
360 | }
361 |
362 | /**
363 | * You can set the values for this quaternion based off a rotation matrix. If the matrix you supply is not a
364 | * rotation matrix this will fail.
365 | *
366 | * @param matrix A column major rotation matrix
367 | */
368 | public void setRowMajor(float[] matrix) {
369 |
370 | this.matrix.setMatrix(matrix);
371 | this.matrix.setColumnMajor(false);
372 |
373 | generateQuaternionFromMatrix();
374 | }
375 |
376 | /**
377 | * Set this quaternion from axis angle values. All rotations are in degrees.
378 | *
379 | * @param azimuth The rotation around the z axis
380 | * @param pitch The rotation around the y axis
381 | * @param roll The rotation around the x axis
382 | */
383 | public void setEulerAngle(float azimuth, float pitch, float roll) {
384 |
385 | double heading = Math.toRadians(roll);
386 | double attitude = Math.toRadians(pitch);
387 | double bank = Math.toRadians(azimuth);
388 |
389 | double c1 = Math.cos(heading / 2);
390 | double s1 = Math.sin(heading / 2);
391 | double c2 = Math.cos(attitude / 2);
392 | double s2 = Math.sin(attitude / 2);
393 | double c3 = Math.cos(bank / 2);
394 | double s3 = Math.sin(bank / 2);
395 | double c1c2 = c1 * c2;
396 | double s1s2 = s1 * s2;
397 | setW((float) (c1c2 * c3 - s1s2 * s3));
398 | setX((float) (c1c2 * s3 + s1s2 * c3));
399 | setY((float) (s1 * c2 * c3 + c1 * s2 * s3));
400 | setZ((float) (c1 * s2 * c3 - s1 * c2 * s3));
401 |
402 | dirty = true;
403 | }
404 |
405 | /**
406 | * Rotation is in degrees. Set this quaternion from the supplied axis angle.
407 | *
408 | * @param vec The vector of rotation
409 | * @param rot The angle of rotation around that vector in degrees.
410 | */
411 | public void setAxisAngle(Vector3f vec, float rot) {
412 | double s = Math.sin(Math.toRadians(rot / 2));
413 | setX(vec.getX() * (float) s);
414 | setY(vec.getY() * (float) s);
415 | setZ(vec.getZ() * (float) s);
416 | setW((float) Math.cos(Math.toRadians(rot / 2)));
417 |
418 | dirty = true;
419 | }
420 |
421 | public void setAxisAngleRad(Vector3f vec, double rot) {
422 | double s = rot / 2;
423 | setX(vec.getX() * (float) s);
424 | setY(vec.getY() * (float) s);
425 | setZ(vec.getZ() * (float) s);
426 | setW((float) rot / 2);
427 |
428 | dirty = true;
429 | }
430 |
431 | /**
432 | * @return Returns this Quaternion in the Rotation Matrix representation
433 | */
434 | public MatrixF4x4 getMatrix4x4() {
435 | //toMatrixColMajor();
436 | if (dirty) {
437 | convertQuatToMatrix();
438 | dirty = false;
439 | }
440 | return this.matrix;
441 | }
442 |
443 | public void copyFromVec3(Vector3f vec, float w) {
444 | copyFromV3f(vec, w);
445 | }
446 |
447 | /**
448 | * Get a linear interpolation between this quaternion and the input quaternion, storing the result in the output
449 | * quaternion.
450 | *
451 | * @param input The quaternion to be slerped with this quaternion.
452 | * @param output The quaternion to store the result in.
453 | * @param t The ratio between the two quaternions where 0 <= t <= 1.0 . Increase value of t will bring rotation
454 | * closer to the input quaternion.
455 | */
456 | public void slerp(Quaternion input, Quaternion output, float t) {
457 | // Calculate angle between them.
458 | //double cosHalftheta = this.dotProduct(input);
459 | Quaternion bufferQuat;
460 | float cosHalftheta = this.dotProduct(input);
461 |
462 | if (cosHalftheta < 0) {
463 | if(tmpQuaternion == null) tmpQuaternion = new Quaternion();
464 | bufferQuat = tmpQuaternion;
465 | cosHalftheta = -cosHalftheta;
466 | bufferQuat.points[0] = (-input.points[0]);
467 | bufferQuat.points[1] = (-input.points[1]);
468 | bufferQuat.points[2] = (-input.points[2]);
469 | bufferQuat.points[3] = (-input.points[3]);
470 | } else {
471 | bufferQuat = input;
472 | }
473 | /**
474 | * if(dot < 0.95f){
475 | * double angle = Math.acos(dot);
476 | * double ratioA = Math.sin((1 - t) * angle);
477 | * double ratioB = Math.sin(t * angle);
478 | * double divisor = Math.sin(angle);
479 | *
480 | * //Calculate Quaternion
481 | * output.setW((float)((this.getW() * ratioA + input.getW() * ratioB)/divisor));
482 | * output.setX((float)((this.getX() * ratioA + input.getX() * ratioB)/divisor));
483 | * output.setY((float)((this.getY() * ratioA + input.getY() * ratioB)/divisor));
484 | * output.setZ((float)((this.getZ() * ratioA + input.getZ() * ratioB)/divisor));
485 | * }
486 | * else{
487 | * lerp(input, output, t);
488 | * }
489 | */
490 | // if qa=qb or qa=-qb then theta = 0 and we can return qa
491 | if (Math.abs(cosHalftheta) >= 1.0) {
492 | output.points[0] = (this.points[0]);
493 | output.points[1] = (this.points[1]);
494 | output.points[2] = (this.points[2]);
495 | output.points[3] = (this.points[3]);
496 | } else {
497 | double sinHalfTheta = Math.sqrt(1.0 - cosHalftheta * cosHalftheta);
498 | // if theta = 180 degrees then result is not fully defined
499 | // we could rotate around any axis normal to qa or qb
500 | //if(Math.abs(sinHalfTheta) < 0.001){
501 | //output.setW(this.getW() * 0.5f + input.getW() * 0.5f);
502 | //output.setX(this.getX() * 0.5f + input.getX() * 0.5f);
503 | //output.setY(this.getY() * 0.5f + input.getY() * 0.5f);
504 | //output.setZ(this.getZ() * 0.5f + input.getZ() * 0.5f);
505 | // lerp(bufferQuat, output, t);
506 | //}
507 | //else{
508 | double halfTheta = Math.acos(cosHalftheta);
509 |
510 | double ratioA = Math.sin((1 - t) * halfTheta) / sinHalfTheta;
511 | double ratioB = Math.sin(t * halfTheta) / sinHalfTheta;
512 |
513 | //Calculate Quaternion
514 | output.points[3] = ((float) (points[3] * ratioA + bufferQuat.points[3] * ratioB));
515 | output.points[0] = ((float) (this.points[0] * ratioA + bufferQuat.points[0] * ratioB));
516 | output.points[1] = ((float) (this.points[1] * ratioA + bufferQuat.points[1] * ratioB));
517 | output.points[2] = ((float) (this.points[2] * ratioA + bufferQuat.points[2] * ratioB));
518 |
519 | //}
520 | }
521 | }
522 |
523 | }
524 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/representation/Vector3f.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.representation;
2 |
3 | /**
4 |  * 3-dimensional vector with convenient getters and setters, backed by a float array for
5 |  * cheap conversion to/from array form.
6 | public class Vector3f {
7 |
8 | /**
9 | * A float array was chosen instead of individual variables due to performance concerns. Converting the points into
10 | * an array at run time can cause slowness so instead we use one array and extract the individual variables with get
11 | * methods.
12 | */
13 | protected float[] points = new float[3];
14 |
15 | /**
16 | * Initialises the vector with the given values
17 | *
18 | * @param x the x-component
19 | * @param y the y-component
20 | * @param z the z-component
21 | */
22 | public Vector3f(float x, float y, float z) {
23 | this.points[0] = x;
24 | this.points[1] = y;
25 | this.points[2] = z;
26 | }
27 |
28 | /**
29 | * Initialises all components of this vector with the given same value.
30 | *
31 | * @param value Initialisation value for all components
32 | */
33 | public Vector3f(float value) {
34 | this.points[0] = value;
35 | this.points[1] = value;
36 | this.points[2] = value;
37 | }
38 |
39 | /**
40 | * Instantiates a new vector3f.
41 | */
    public Vector3f() {
        // All components default to 0 (Java zero-initialises the backing array).
    }
44 |
45 | /**
46 | * Copy constructor
47 | */
48 | public Vector3f(Vector3f vector) {
49 | this.points[0] = vector.points[0];
50 | this.points[1] = vector.points[1];
51 | this.points[2] = vector.points[2];
52 | }
53 |
54 | /**
55 | * Initialises this vector from a 4-dimensional vector. If the fourth component is not zero, a normalisation of all
56 | * components will be performed.
57 | *
58 | * @param vector The 4-dimensional vector that should be used for initialisation
59 | */
60 | public Vector3f(Vector4f vector) {
61 | if (vector.w() != 0) {
62 | this.points[0] = vector.x() / vector.w();
63 | this.points[1] = vector.y() / vector.w();
64 | this.points[2] = vector.z() / vector.w();
65 | } else {
66 | this.points[0] = vector.x();
67 | this.points[1] = vector.y();
68 | this.points[2] = vector.z();
69 | }
70 | }
71 |
72 | /**
73 | * Returns this vector as float-array.
74 | *
75 | * @return the float[]
76 | */
    public float[] toArray() {
        // Returns the internal backing array itself (not a copy) — callers that
        // mutate the result mutate this vector.
        return this.points;
    }
80 |
81 | /**
82 | * Adds a vector to this vector
83 | *
84 | * @param summand the vector that should be added component-wise
85 | */
86 | public void add(Vector3f summand) {
87 | this.points[0] += summand.points[0];
88 | this.points[1] += summand.points[1];
89 | this.points[2] += summand.points[2];
90 | }
91 |
92 | /**
93 | * Adds the value to all components of this vector
94 | *
95 | * @param summand The value that should be added to all components
96 | */
97 | public void add(float summand) {
98 | this.points[0] += summand;
99 | this.points[1] += summand;
100 | this.points[2] += summand;
101 | }
102 |
103 | /**
104 | *
105 | * @param subtrahend
106 | */
107 | public void subtract(Vector3f subtrahend) {
108 | this.points[0] -= subtrahend.points[0];
109 | this.points[1] -= subtrahend.points[1];
110 | this.points[2] -= subtrahend.points[2];
111 | }
112 |
113 | /**
114 | * Multiply by scalar.
115 | *
116 | * @param scalar the scalar
117 | */
118 | public void multiplyByScalar(float scalar) {
119 | this.points[0] *= scalar;
120 | this.points[1] *= scalar;
121 | this.points[2] *= scalar;
122 | }
123 |
124 | /**
125 | * Normalize.
126 | */
127 | public void normalize() {
128 |
129 | double a = Math.sqrt(points[0] * points[0] + points[1] * points[1] + points[2] * points[2]);
130 | this.points[0] = (float) (this.points[0] / a);
131 | this.points[1] = (float) (this.points[1] / a);
132 | this.points[2] = (float) (this.points[2] / a);
133 |
134 | }
135 |
136 | /**
137 | * Gets the x.
138 | *
139 | * @return the x
140 | */
141 | public float getX() {
142 | return points[0];
143 | }
144 |
    /**
     * @return the y-component of this vector
     */
    public float getY() {
        return points[1];
    }
153 |
    /**
     * @return the z-component of this vector
     */
    public float getZ() {
        return points[2];
    }
162 |
    /**
     * @param x the new x-component of this vector
     */
    public void setX(float x) {
        this.points[0] = x;
    }
171 |
    /**
     * @param y the new y-component of this vector
     */
    public void setY(float y) {
        this.points[1] = y;
    }
180 |
    /**
     * @param z the new z-component of this vector
     */
    public void setZ(float z) {
        this.points[2] = z;
    }
189 |
190 | /**
191 | * Functions for convenience
192 | */
193 |
    /** @return the x-component (short alias for {@link #getX()}). */
    public float x() {
        return this.points[0];
    }
197 |
    /** @return the y-component (short alias for {@link #getY()}). */
    public float y() {
        return this.points[1];
    }
201 |
    /** @return the z-component (short alias for {@link #getZ()}). */
    public float z() {
        return this.points[2];
    }
205 |
    /** Sets the x-component (short alias for {@link #setX(float)}). */
    public void x(float x) {
        this.points[0] = x;
    }
209 |
    /** Sets the y-component (short alias for {@link #setY(float)}). */
    public void y(float y) {
        this.points[1] = y;
    }
213 |
    /** Sets the z-component (short alias for {@link #setZ(float)}). */
    public void z(float z) {
        this.points[2] = z;
    }
217 |
    /**
     * Sets all three components of this vector at once.
     *
     * @param x the new x-component
     * @param y the new y-component
     * @param z the new z-component
     */
    public void setXYZ(float x, float y, float z) {
        this.points[0] = x;
        this.points[1] = y;
        this.points[2] = z;
    }
223 |
224 | /**
225 | * Return the dot product of this vector with the input vector
226 | *
227 | * @param inputVec The vector you want to do the dot product with against this vector.
228 | * @return Float value representing the scalar of the dot product operation
229 | */
230 | public float dotProduct(Vector3f inputVec) {
231 | return points[0] * inputVec.points[0] + points[1] * inputVec.points[1] + points[2] * inputVec.points[2];
232 |
233 | }
234 |
235 | /**
236 | * Get the cross product of this vector and another vector. The result will be stored in the output vector.
237 | *
238 | * @param inputVec The vector you want to get the dot product of against this vector.
239 | * @param outputVec The vector to store the result in.
240 | */
241 | public void crossProduct(Vector3f inputVec, Vector3f outputVec) {
242 | outputVec.setX(points[1] * inputVec.points[2] - points[2] * inputVec.points[1]);
243 | outputVec.setY(points[2] * inputVec.points[0] - points[0] * inputVec.points[2]);
244 | outputVec.setZ(points[0] * inputVec.points[1] - points[1] * inputVec.points[0]);
245 | }
246 |
247 | /**
248 | * If you need to get the length of a vector then use this function.
249 | *
250 | * @return The length of the vector
251 | */
252 | public float getLength() {
253 | return (float) Math.sqrt(points[0] * points[0] + points[1] * points[1] + points[2] * points[2]);
254 | }
255 |
256 | @Override
257 | public String toString() {
258 | return "X:" + points[0] + " Y:" + points[1] + " Z:" + points[2];
259 | }
260 |
    /**
     * Copies the component values of the given vector into this vector.
     *
     * @param source the vector whose x, y and z values are copied
     */
    public void set(Vector3f source) {
        set(source.points);
    }
269 |
    /**
     * Copies the first three values of the given array into this vector.
     *
     * @param source an array with at least three elements; only indices 0-2 are read
     */
    public void set(float[] source) {
        System.arraycopy(source, 0, points, 0, 3);
    }
278 | }
279 |
--------------------------------------------------------------------------------
/app/src/main/java/org/hitlabnz/sensor_fusion_demo/representation/Vector4f.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.representation;
2 |
3 | /**
4 | * Representation of a four-dimensional float-vector
5 | */
6 | public class Vector4f {
7 |
8 | /** The points. */
9 | protected float points[] = { 0, 0, 0, 0 };
10 |
11 | /**
12 | * Instantiates a new vector4f.
13 | *
14 | * @param x the x
15 | * @param y the y
16 | * @param z the z
17 | * @param w the w
18 | */
19 | public Vector4f(float x, float y, float z, float w) {
20 | this.points[0] = x;
21 | this.points[1] = y;
22 | this.points[2] = z;
23 | this.points[3] = w;
24 | }
25 |
26 | /**
27 | * Instantiates a new vector4f.
28 | */
29 | public Vector4f() {
30 | this.points[0] = 0;
31 | this.points[1] = 0;
32 | this.points[2] = 0;
33 | this.points[3] = 0;
34 | }
35 |
36 | public Vector4f(Vector3f vector3f, float w) {
37 | this.points[0] = vector3f.x();
38 | this.points[1] = vector3f.y();
39 | this.points[2] = vector3f.z();
40 | this.points[3] = w;
41 | }
42 |
43 | /**
44 | * To array.
45 | *
46 | * @return the float[]
47 | */
48 | public float[] array() {
49 | return points;
50 | }
51 |
52 | public void copyVec4(Vector4f vec) {
53 | this.points[0] = vec.points[0];
54 | this.points[1] = vec.points[1];
55 | this.points[2] = vec.points[2];
56 | this.points[3] = vec.points[3];
57 | }
58 |
59 | /**
60 | * Adds the.
61 | *
62 | * @param vector the vector
63 | */
64 | public void add(Vector4f vector) {
65 | this.points[0] += vector.points[0];
66 | this.points[1] += vector.points[1];
67 | this.points[2] += vector.points[2];
68 | this.points[3] += vector.points[3];
69 | }
70 |
71 | public void add(Vector3f vector, float w) {
72 | this.points[0] += vector.x();
73 | this.points[1] += vector.y();
74 | this.points[2] += vector.z();
75 | this.points[3] += w;
76 | }
77 |
78 | public void subtract(Vector4f vector) {
79 | this.points[0] -= vector.points[0];
80 | this.points[1] -= vector.points[1];
81 | this.points[2] -= vector.points[2];
82 | this.points[3] -= vector.points[3];
83 | }
84 |
85 | public void subtract(Vector4f vector, Vector4f output) {
86 | output.setXYZW(this.points[0] - vector.points[0], this.points[1] - vector.points[1], this.points[2]
87 | - vector.points[2], this.points[3] - vector.points[3]);
88 | }
89 |
90 | public void subdivide(Vector4f vector) {
91 | this.points[0] /= vector.points[0];
92 | this.points[1] /= vector.points[1];
93 | this.points[2] /= vector.points[2];
94 | this.points[3] /= vector.points[3];
95 | }
96 |
97 | /**
98 | * Multiply by scalar.
99 | *
100 | * @param scalar the scalar
101 | */
102 | public void multiplyByScalar(float scalar) {
103 | this.points[0] *= scalar;
104 | this.points[1] *= scalar;
105 | this.points[2] *= scalar;
106 | this.points[3] *= scalar;
107 | }
108 |
109 | public float dotProduct(Vector4f input) {
110 | return this.points[0] * input.points[0] + this.points[1] * input.points[1] + this.points[2] * input.points[2]
111 | + this.points[3] * input.points[3];
112 | }
113 |
114 | /**
115 | * Linear interpolation between two vectors storing the result in the output variable.
116 | *
117 | * @param input
118 | * @param output
119 | * @param t
120 | */
121 | public void lerp(Vector4f input, Vector4f output, float t) {
122 | output.points[0] = (points[0] * (1.0f * t) + input.points[0] * t);
123 | output.points[1] = (points[1] * (1.0f * t) + input.points[1] * t);
124 | output.points[2] = (points[2] * (1.0f * t) + input.points[2] * t);
125 | output.points[3] = (points[3] * (1.0f * t) + input.points[3] * t);
126 |
127 | }
128 |
129 | /**
130 | * Normalize.
131 | */
132 | public void normalize() {
133 | if (points[3] == 0)
134 | return;
135 |
136 | points[0] /= points[3];
137 | points[1] /= points[3];
138 | points[2] /= points[3];
139 |
140 | double a = Math.sqrt(this.points[0] * this.points[0] + this.points[1] * this.points[1] + this.points[2]
141 | * this.points[2]);
142 | points[0] = (float) (this.points[0] / a);
143 | points[1] = (float) (this.points[1] / a);
144 | points[2] = (float) (this.points[2] / a);
145 | }
146 |
147 | /**
148 | * Gets the x.
149 | *
150 | * @return the x
151 | */
152 | public float getX() {
153 | return this.points[0];
154 | }
155 |
156 | /**
157 | * Gets the y.
158 | *
159 | * @return the y
160 | */
161 | public float getY() {
162 | return this.points[1];
163 | }
164 |
165 | /**
166 | * Gets the z.
167 | *
168 | * @return the z
169 | */
170 | public float getZ() {
171 | return this.points[2];
172 | }
173 |
174 | /**
175 | * Gets the w.
176 | *
177 | * @return the w
178 | */
179 | public float getW() {
180 | return this.points[3];
181 | }
182 |
183 | /**
184 | * Sets the x.
185 | *
186 | * @param x the new x
187 | */
188 | public void setX(float x) {
189 | this.points[0] = x;
190 | }
191 |
192 | /**
193 | * Sets the y.
194 | *
195 | * @param y the new y
196 | */
197 | public void setY(float y) {
198 | this.points[1] = y;
199 | }
200 |
201 | /**
202 | * Sets the z.
203 | *
204 | * @param z the new z
205 | */
206 | public void setZ(float z) {
207 | this.points[2] = z;
208 | }
209 |
210 | /**
211 | * Sets the w.
212 | *
213 | * @param w the new w
214 | */
215 | public void setW(float w) {
216 | this.points[3] = w;
217 | }
218 |
219 | public float x() {
220 | return this.points[0];
221 | }
222 |
223 | public float y() {
224 | return this.points[1];
225 | }
226 |
227 | public float z() {
228 | return this.points[2];
229 | }
230 |
231 | public float w() {
232 | return this.points[3];
233 | }
234 |
235 | public void x(float x) {
236 | this.points[0] = x;
237 | }
238 |
239 | public void y(float y) {
240 | this.points[1] = y;
241 | }
242 |
243 | public void z(float z) {
244 | this.points[2] = z;
245 | }
246 |
247 | public void w(float w) {
248 | this.points[3] = w;
249 | }
250 |
251 | public void setXYZW(float x, float y, float z, float w) {
252 | this.points[0] = x;
253 | this.points[1] = y;
254 | this.points[2] = z;
255 | this.points[3] = w;
256 | }
257 |
258 | /**
259 | * Compare this vector4f to the supplied one
260 | *
261 | * @param rhs True if they match, false other wise.
262 | * @return
263 | */
264 | public boolean compareTo(Vector4f rhs) {
265 | boolean ret = false;
266 | if (this.points[0] == rhs.points[0] && this.points[1] == rhs.points[1] && this.points[2] == rhs.points[2]
267 | && this.points[3] == rhs.points[3])
268 | ret = true;
269 | return ret;
270 | }
271 |
272 | /**
273 | * Copies the data from the supplied vec3 into this vec4 plus the supplied w.
274 | *
275 | * @param input The x y z values to copy in.
276 | * @param w The extra w element to copy in
277 | */
278 | public void copyFromV3f(Vector3f input, float w) {
279 | points[0] = (input.x());
280 | points[1] = (input.y());
281 | points[2] = (input.z());
282 | points[3] = (w);
283 | }
284 |
285 | @Override
286 | public String toString() {
287 | return "X:" + points[0] + " Y:" + points[1] + " Z:" + points[2] + " W:" + points[3];
288 | }
289 |
290 | }
--------------------------------------------------------------------------------
/app/src/main/res/drawable-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/app/src/main/res/drawable-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/drawable-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/app/src/main/res/drawable-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/drawable-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/app/src/main/res/drawable-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/drawable-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/app/src/main/res/drawable-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/layout/activity_about.xml:
--------------------------------------------------------------------------------
1 |
6 |
7 |
13 |
14 |
--------------------------------------------------------------------------------
/app/src/main/res/layout/activity_sensor_selection.xml:
--------------------------------------------------------------------------------
1 |
7 |
8 |
12 |
13 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/app/src/main/res/menu/sensor_selection.xml:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/app/src/main/res/values-de/strings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Sensorfusions Demo
5 | Über
6 | Improved Orientation Sensor 1
7 | Improved Orientation Sensor 2
8 | Android Rotation Vector
9 | Kalibriertes Gyroskop
10 | Gravitation und Kompass
11 | Akzelerometer und Kompass
12 | Sensorfusions Demo
13 | Über
14 | Fehlendes Gyroskop
15 | Dieses Gerät hat keinen Gyroskop-Sensor der aber notwendig ist, damit diese App richtig funktioniert. Bitte verwende ein aktuelleres Gerät, das ein Gyroskop hat.
16 | OK
17 |
18 |
19 |
--------------------------------------------------------------------------------
/app/src/main/res/values-v11/styles.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/app/src/main/res/values-v14/styles.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
8 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/app/src/main/res/values/strings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Sensor fusion demo
5 | About
6 | Improved Orientation Sensor 1
7 | Improved Orientation Sensor 2
8 | Android Rotation Vector
9 | Calibrated Gyroscope
10 | Gravity and Compass
11 | Accelerometer and Compass
12 | Sensor fusion demo
13 | About
14 | Gyroscope Missing
15 | Your device has no hardware gyroscope sensor, which would be necessary for this app to work properly. Please run it on a newer device that has a gyroscope.
16 | OK
17 |
18 |
19 |
--------------------------------------------------------------------------------
/app/src/main/res/values/styles.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
7 |
14 |
15 |
16 |
19 |
20 |
21 |
--------------------------------------------------------------------------------
/app/src/test/java/org/hitlabnz/sensor_fusion_demo/test/QuaternionTest.java:
--------------------------------------------------------------------------------
1 | package org.hitlabnz.sensor_fusion_demo.test;
2 |
3 | import org.hitlabnz.sensor_fusion_demo.representation.Quaternion;
4 | import org.junit.Test;
5 |
6 | import static org.hamcrest.CoreMatchers.*;
7 | import static org.junit.Assert.*;
8 |
9 | /**
10 | * To work on unit tests, switch the Test Artifact in the Build Variants view.
11 | */
12 | public class QuaternionTest {
13 |
14 | @Test
15 | public void quaternion_loadUnityQuaternion_expectCorrectValues() throws Exception {
16 |
17 | // Arrange
18 | Quaternion q = new Quaternion();
19 |
20 | // Act
21 | q.loadIdentityQuat();
22 |
23 | // Assert
24 | assertThat(q.w(), is(equalTo(1.0f)));
25 | assertThat(q.x(), is(equalTo(0.0f)));
26 | assertThat(q.y(), is(equalTo(0.0f)));
27 | assertThat(q.z(), is(equalTo(0.0f)));
28 | }
29 |
30 |
31 | @Test
32 | public void quaternion_performSlerp_expectCorrectValues() throws Exception {
33 |
34 | // Arrange
35 | Quaternion q1 = new Quaternion();
36 | Quaternion q2 = new Quaternion();
37 | Quaternion q3 = new Quaternion();
38 | q1.loadIdentityQuat();
39 | q2.setXYZW(0.5f, 0.5f, 0.5f, 1);
40 | q2.normalise();
41 |
42 | // Act
43 | q2.slerp(q1, q3, 0.5f);
44 |
45 | // Assert
46 | assertThat(q3.w(), is(not(equalTo(1.0f))));
47 | assertThat(q3.x(), is(not(equalTo(0.0f))));
48 | assertThat(q3.y(), is(not(equalTo(0.0f))));
49 | assertThat(q3.z(), is(not(equalTo(0.0f))));
50 | }
51 |
52 | }
53 |
54 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
// Top-level build file where you can add configuration options common to all sub-projects/modules.
apply plugin: 'com.github.ben-manes.versions'

buildscript {
    repositories {
        jcenter()
        // Fix: Maven Central no longer serves plain HTTP (repo1.maven.org
        // returns 501 for http://); the repository must be accessed via HTTPS.
        maven { url 'https://repo1.maven.org/maven2' }
        maven {
            url 'https://maven.google.com/'
            name 'Google'
        }
        google()
    }
    dependencies {
        classpath 'com.android.tools.build:gradle:3.3.2'
        classpath 'com.github.ben-manes:gradle-versions-plugin:0.13.0'
    }
}

allprojects {
    repositories {
        jcenter()
        // Same HTTPS fix as in the buildscript block above.
        maven { url 'https://repo1.maven.org/maven2' }
        maven {
            url 'https://maven.google.com/'
            name 'Google'
        }
    }
}

// Removes the root build directory on `gradlew clean`.
task clean(type: Delete) {
    delete rootProject.buildDir
}
34 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = Sensor-fusionDemo
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # Sensor-fusion Demo documentation build configuration file, created by
5 | # sphinx-quickstart on Mon Apr 3 16:50:08 2017.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | # If extensions (or modules to document with autodoc) are in another directory,
17 | # add these directories to sys.path here. If the directory is relative to the
18 | # documentation root, use os.path.abspath to make it absolute, like shown here.
19 | #
20 | # import os
21 | # import sys
22 | # sys.path.insert(0, os.path.abspath('.'))
23 |
24 |
25 | # -- General configuration ------------------------------------------------
26 |
27 | # If your documentation needs a minimal Sphinx version, state it here.
28 | #
29 | # needs_sphinx = '1.0'
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = ['sphinx.ext.mathjax',
35 | 'sphinx.ext.viewcode',
36 | 'sphinx.ext.githubpages']
37 |
38 | # Add any paths that contain templates here, relative to this directory.
39 | templates_path = ['_templates']
40 |
41 | # The suffix(es) of source filenames.
42 | # You can specify multiple suffix as a list of string:
43 | #
44 | # source_suffix = ['.rst', '.md']
45 | source_suffix = '.rst'
46 |
47 | # The master toctree document.
48 | master_doc = 'index'
49 |
50 | # General information about the project.
51 | project = 'Sensor-fusion Demo'
52 | copyright = '2017, Alexander Pacha'
53 | author = 'Alexander Pacha'
54 |
55 | # The version info for the project you're documenting, acts as replacement for
56 | # |version| and |release|, also used in various other places throughout the
57 | # built documents.
58 | #
# The short X.Y version.
version = '1.2'
# The full version, including alpha/beta/rc tags.
release = '1.2'
63 |
64 | # The language for content autogenerated by Sphinx. Refer to documentation
65 | # for a list of supported languages.
66 | #
67 | # This is also used if you do content translation via gettext catalogs.
68 | # Usually you set "language" from the command line for these cases.
69 | language = None
70 |
71 | # List of patterns, relative to source directory, that match files and
72 | # directories to ignore when looking for source files.
73 | # This patterns also effect to html_static_path and html_extra_path
74 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
75 |
76 | # The name of the Pygments (syntax highlighting) style to use.
77 | pygments_style = 'sphinx'
78 |
79 | # If true, `todo` and `todoList` produce output, else they produce nothing.
80 | todo_include_todos = False
81 |
82 |
83 | # -- Options for HTML output ----------------------------------------------
84 |
85 | # The theme to use for HTML and HTML Help pages. See the documentation for
86 | # a list of builtin themes.
87 | #
88 | import sphinx_rtd_theme
89 |
90 | html_theme = "sphinx_rtd_theme"
91 |
92 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
93 |
94 | # Theme options are theme-specific and customize the look and feel of a theme
95 | # further. For a list of options available for each theme, see the
96 | # documentation.
97 | #
98 | # html_theme_options = {}
99 |
100 | # Add any paths that contain custom static files (such as style sheets) here,
101 | # relative to this directory. They are copied after the builtin static files,
102 | # so a file named "default.css" will overwrite the builtin "default.css".
103 | html_static_path = ['_static']
104 |
105 |
106 | # -- Options for HTMLHelp output ------------------------------------------
107 |
108 | # Output file base name for HTML help builder.
109 | htmlhelp_basename = 'Sensor-fusion-Demo-doc'
110 |
111 |
112 | # -- Options for LaTeX output ---------------------------------------------
113 |
114 | latex_elements = {
115 | # The paper size ('letterpaper' or 'a4paper').
116 | #
117 | # 'papersize': 'letterpaper',
118 |
119 | # The font size ('10pt', '11pt' or '12pt').
120 | #
121 | # 'pointsize': '10pt',
122 |
123 | # Additional stuff for the LaTeX preamble.
124 | #
125 | # 'preamble': '',
126 |
127 | # Latex figure (float) alignment
128 | #
129 | # 'figure_align': 'htbp',
130 | }
131 |
132 | # Grouping the document tree into LaTeX files. List of tuples
133 | # (source start file, target name, title,
134 | # author, documentclass [howto, manual, or own class]).
135 | latex_documents = [
136 | (master_doc, 'Sensor-fusion-Demo.tex', 'Sensor-fusion Demo Documentation',
137 | 'Alexander Pacha', 'manual'),
138 | ]
139 |
140 |
141 | # -- Options for manual page output ---------------------------------------
142 |
143 | # One entry per manual page. List of tuples
144 | # (source start file, name, description, authors, manual section).
145 | man_pages = [
146 | (master_doc, 'sensor-fusion-demo', 'Sensor-fusion Demo Documentation',
147 | [author], 1)
148 | ]
149 |
150 |
151 | # -- Options for Texinfo output -------------------------------------------
152 |
153 | # Grouping the document tree into Texinfo files. List of tuples
154 | # (source start file, target name, title, author,
155 | # dir menu entry, description, category)
156 | texinfo_documents = [
157 | (master_doc, 'Sensor-fusion-Demo', 'Sensor-fusion Demo Documentation',
158 | author, 'Sensor-fusion-Demo', 'Android application that demonstrates various Sensor-fusion algorithms by rotating a virtual cube.',
159 | 'Miscellaneous'),
160 | ]
161 |
162 |
163 |
164 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Sensor-fusion Demo documentation master file, created by
2 | sphinx-quickstart on Mon Apr 3 16:50:08 2017.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to Sensor-fusion Demo's documentation!
7 | ==============================================
8 |
9 | This application demonstrates the capabilities of various sensors and sensor-fusions. Data from the Gyroscope, Accelerometer and compass are combined in different ways and the result is shown as a cube that can be rotated by rotating the device.
10 |
11 | The major novelty in this application is the fusion of virtual sensors: **Improved Orientation Sensor 1** and **Improved Orientation Sensor 2** fuse the Android Rotation Vector with the virtual Gyroscope sensor to achieve a pose estimation with a previously unknown stability and precision.
12 |
13 | Apart from these two sensors, the following sensors are available for comparison:
14 |
15 | - Improved Orientation Sensor 1 (Sensor fusion of Android Rotation Vector and Calibrated Gyroscope - less stable but more accurate)
16 | - Improved Orientation Sensor 2 (Sensor fusion of Android Rotation Vector and Calibrated Gyroscope - more stable but less accurate)
17 | - Android Rotation Vector (Kalman filter fusion of Accelerometer + Gyroscope + Compass)
18 | - Calibrated Gyroscope (Separate result of Kalman filter fusion of Accelerometer + Gyroscope + Compass)
19 | - Gravity + Compass
20 | - Accelerometer + Compass
21 |
22 | This application was developed for demonstrating the sensor fusion approach developed for my `Master Thesis "Sensor fusion for robust outdoor Augmented Reality tracking on mobile devices" `_ at the `Human Interface Technology Laboratory New Zealand `_.
23 |
24 |
25 | Euler Angles
26 | ============
27 |
28 | Euler-Angles are also often referred to as Azimuth, Pitch and Roll and describe the rotation of an object in the three-dimensional space with respect to three axis that are simple to understand and visualize. However, they have certain limitations and have therefore been removed from this project.
29 |
30 | If you want to obtain the rotation using Euler-Angles, check out `the last tag that contained the Euler angles `_. Notice that they have been removed, because many people don't understand Euler Angles fully and are surprised, when they get results, that they did not expect (because the representation is ambiguous and suffers from Gimbal Lock). Try to use a Rotation Matrix, a Rotation Vector or Quaternions instead.
31 |
32 | A short summary can also be found in this `Stackoverflow answer `_.
33 |
34 |
35 | Installation
36 | ============
37 |
38 | This project is an Gradle-based Android Studio project. It is also published in the `Google Play Store `_, if you just want to try it out.
39 |
40 | Contribute
41 | ==========
42 |
43 | - `Issue Tracker `_
44 | - `Source Code `_
45 |
46 | 1. Fork it
47 | 2. Create your feature branch (`git checkout -b my-new-feature`)
48 | 3. Commit your changes (`git commit -am 'Add some feature'`)
49 | 4. Push to the branch (`git push origin my-new-feature`)
50 | 5. Create new Pull Request
51 |
52 | License
53 | =======
54 |
55 | Released under the MIT license.
56 |
57 | Copyright, 2016, by `Alexander Pacha `_ and the `Human Interface Technology Laboratory New Zealand `_.
58 |
59 | Permission is hereby granted, free of charge, to any person obtaining a copy
60 | of this software and associated documentation files (the "Software"), to deal
61 | in the Software without restriction, including without limitation the rights
62 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
63 | copies of the Software, and to permit persons to whom the Software is
64 | furnished to do so, subject to the following conditions:
65 |
66 | The above copyright notice and this permission notice shall be included in
67 | all copies or substantial portions of the Software.
68 |
69 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
70 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
71 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
72 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
73 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
74 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
75 | THE SOFTWARE.
76 |
77 | This application also uses parts from the Android Open Source Project, licensed under the `Apache License, Version 2.0 `_.
78 |
79 |
80 | .. toctree::
81 | :maxdepth: 2
82 | :caption: Contents:
83 |
84 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 | set SPHINXPROJ=Sensor-fusionDemo
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx_rtd_theme
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Thu Apr 11 15:46:00 KGT 2019
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.1-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

##############################################################################
##
## Gradle start up script for UN*X
##
##############################################################################

# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS=""

# Display name used for the macOS dock entry (Darwin branch below).
APP_NAME="Gradle"
# Script name without its path; forwarded to Gradle as org.gradle.appname.
APP_BASE_NAME=`basename "$0"`

# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD="maximum"
# Emit a warning line on stdout without aborting the script.
warn () {
    echo "$*"
}
21 |
# Print an error message framed by blank lines, then abort with status 1.
die () {
    echo ""
    echo "$*"
    echo ""
    exit 1
}
28 |
# OS specific support (must be 'true' or 'false').
# At most one of these flags is flipped to true based on `uname`.
cygwin=false
msys=false
darwin=false
case "`uname`" in
CYGWIN* )
cygwin=true
;;
Darwin* )
darwin=true
;;
MINGW* )
msys=true
;;
esac

# Attempt to set APP_HOME
# Resolve links: $0 may be a link
PRG="$0"
# Need this for relative symlinks.
# Follow the symlink chain until PRG names the real script file.
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
# Absolute link target: use it directly.
PRG="$link"
else
# Relative link target: resolve it against the link's own directory.
PRG=`dirname "$PRG"`"/$link"
fi
done
# Remember the caller's directory, cd into the script's directory to read its
# absolute physical path (-P resolves symlinks), then return where we started.
SAVED="`pwd`"
cd "`dirname \"$PRG\"`/" >/dev/null
APP_HOME="`pwd -P`"
cd "$SAVED" >/dev/null
62 |
# The wrapper jar that bootstraps the requested Gradle distribution.
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar

# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD="$JAVA_HOME/jre/sh/java"
else
JAVACMD="$JAVA_HOME/bin/java"
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
# No JAVA_HOME set: fall back to `java` on the PATH, failing fast if absent.
JAVACMD="java"
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.

Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
86 |
# Increase the maximum file descriptors if we can.
# Skipped on Cygwin and Darwin; both warn (not die) on failure so the build
# still proceeds with the default limit.
if [ "$cygwin" = "false" -a "$darwin" = "false" ] ; then
MAX_FD_LIMIT=`ulimit -H -n`
if [ $? -eq 0 ] ; then
if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
# "maximum"/"max" means: raise the soft limit to the hard limit.
MAX_FD="$MAX_FD_LIMIT"
fi
ulimit -n $MAX_FD
if [ $? -ne 0 ] ; then
warn "Could not set maximum file descriptor limit: $MAX_FD"
fi
else
warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
fi
fi
102 |
# For Darwin, add options to specify how the application appears in the dock
if $darwin; then
GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
fi

# For Cygwin, switch paths to Windows format before running java
if $cygwin ; then
APP_HOME=`cygpath --path --mixed "$APP_HOME"`
CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
JAVACMD=`cygpath --unix "$JAVACMD"`

# We build the pattern for arguments to be converted via cygpath
# (an alternation of every directory directly under /, e.g. "(^(/bin|/usr|...))").
ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
SEP=""
for dir in $ROOTDIRSRAW ; do
ROOTDIRS="$ROOTDIRS$SEP$dir"
SEP="|"
done
OURCYGPATTERN="(^($ROOTDIRS))"
# Add a user-defined pattern to the cygpath arguments
if [ "$GRADLE_CYGPATTERN" != "" ] ; then
OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
fi
# Now convert the arguments - kludge to limit ourselves to /bin/sh
# Each positional argument that matches the pattern (and is not an option
# starting with "-") is rewritten to Windows "mixed" form; results land in
# args0..args8 and are re-installed with `set --` below.
i=0
for arg in "$@" ; do
CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option

if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
else
eval `echo args$i`="\"$arg\""
fi
i=$((i+1))
done
# Re-assemble the (possibly converted) positional parameters; the /bin/sh
# kludge above caps support at 9 arguments.
case $i in
(0) set -- ;;
(1) set -- "$args0" ;;
(2) set -- "$args0" "$args1" ;;
(3) set -- "$args0" "$args1" "$args2" ;;
(4) set -- "$args0" "$args1" "$args2" "$args3" ;;
(5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
(6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
(7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
(8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
(9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
esac
fi
152 |
# Split up the JVM_OPTS And GRADLE_OPTS values into an array, following the shell quoting and substitution rules
# NOTE: the eval below deliberately leaves the option variables unquoted so the
# shell re-parses them, letting quoted substrings inside DEFAULT_JVM_OPTS /
# JAVA_OPTS / GRADLE_OPTS survive as single array elements.
function splitJvmOpts() {
JVM_OPTS=("$@")
}
eval splitJvmOpts $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS
# Append the script name as a system property, used by Gradle for display.
JVM_OPTS[${#JVM_OPTS[*]}]="-Dorg.gradle.appname=$APP_BASE_NAME"

# Hand control to the wrapper main class; exec replaces this shell process.
exec "$JAVACMD" "${JVM_OPTS[@]}" -classpath "$CLASSPATH" org.gradle.wrapper.GradleWrapperMain "$@"
161 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

@rem APP_HOME is the directory containing this script (and the wrapper jar).
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

@rem No JAVA_HOME: probe for a working java.exe on the PATH.
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto init

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
@rem Strip any quotes from JAVA_HOME before composing the java.exe path.
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto init

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:init
@rem Get command-line arguments, handling Windows variants

if not "%OS%" == "Windows_NT" goto win9xME_args
if "%@eval[2+2]" == "4" goto 4NT_args

:win9xME_args
@rem Slurp the command line arguments.
set CMD_LINE_ARGS=
set _SKIP=2

:win9xME_args_slurp
if "x%~1" == "x" goto execute

set CMD_LINE_ARGS=%*
goto execute

:4NT_args
@rem Get arguments from the 4NT Shell from JP Software
set CMD_LINE_ARGS=%$

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar

@rem Execute Gradle via the wrapper main class, forwarding all arguments.
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega
--------------------------------------------------------------------------------
/ic_launcher-web.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/ic_launcher-web.png
--------------------------------------------------------------------------------
/ic_launcher-web_functional.pdn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/ic_launcher-web_functional.pdn
--------------------------------------------------------------------------------
/ic_launcher-web_functional.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/maddevsio/sensor-fusion-demo/d1d23683582c71a3510ec6f256ed0f7d0402e06a/ic_launcher-web_functional.png
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
// Register the ':app' module (the Android application) with this Gradle build.
include ':app'
2 |
--------------------------------------------------------------------------------