├── .gitattributes
├── .gitignore
├── .idea
├── encodings.xml
├── gradle.xml
├── misc.xml
├── runConfigurations.xml
└── vcs.xml
├── README.md
├── app
├── .gitignore
├── CMakeLists.txt
├── build.gradle
├── proguard-rules.pro
└── src
│ ├── androidTest
│ └── java
│ │ └── com
│ │ └── webrtc
│ │ └── ExampleInstrumentedTest.java
│ ├── main
│ ├── AndroidManifest.xml
│ ├── assets
│ │ └── record
│ │ │ ├── recorded_audio.pcm
│ │ │ ├── recorded_audio_16k.pcm
│ │ │ ├── recorded_audio_32k.pcm
│ │ │ └── test_32k.pcm
│ ├── cpp
│ │ ├── _android_log_print.h
│ │ ├── analog_agc.c
│ │ ├── analog_agc.h
│ │ ├── config.h
│ │ ├── copy_set_operations.c
│ │ ├── cpu_features_wrapper.h
│ │ ├── defines.h
│ │ ├── digital_agc.c
│ │ ├── digital_agc.h
│ │ ├── division_operations.c
│ │ ├── dot_product_with_scale.c
│ │ ├── downsample_fast.c
│ │ ├── fft4g.c
│ │ ├── fft4g.h
│ │ ├── gain_control.h
│ │ ├── noise_suppression.c
│ │ ├── noise_suppression.h
│ │ ├── ns_core.c
│ │ ├── ns_core.h
│ │ ├── real_fft.h
│ │ ├── resample_by_2.c
│ │ ├── signal_processing_library.h
│ │ ├── spl_inl.h
│ │ ├── spl_inl_armv7.h
│ │ ├── spl_inl_mips.h
│ │ ├── spl_sqrt.c
│ │ ├── splitting_filter.c
│ │ ├── typedefs.h
│ │ ├── web_rtc.cpp
│ │ └── windows_private.h
│ ├── java
│ │ └── com
│ │ │ └── webrtc
│ │ │ ├── WebRtcActivity.java
│ │ │ └── jni
│ │ │ └── WebRtcUtils.java
│ └── res
│ │ ├── drawable-v24
│ │ └── ic_launcher_foreground.xml
│ │ ├── drawable
│ │ └── ic_launcher_background.xml
│ │ ├── layout
│ │ └── activity_main.xml
│ │ ├── mipmap-anydpi-v26
│ │ ├── ic_launcher.xml
│ │ └── ic_launcher_round.xml
│ │ ├── mipmap-hdpi
│ │ ├── ic_launcher.png
│ │ └── ic_launcher_round.png
│ │ ├── mipmap-mdpi
│ │ ├── ic_launcher.png
│ │ └── ic_launcher_round.png
│ │ ├── mipmap-xhdpi
│ │ ├── ic_launcher.png
│ │ └── ic_launcher_round.png
│ │ ├── mipmap-xxhdpi
│ │ ├── ic_launcher.png
│ │ └── ic_launcher_round.png
│ │ ├── mipmap-xxxhdpi
│ │ ├── ic_launcher.png
│ │ └── ic_launcher_round.png
│ │ └── values
│ │ ├── colors.xml
│ │ ├── strings.xml
│ │ └── styles.xml
│ └── test
│ └── java
│ └── com
│ └── webrtc
│ └── ExampleUnitTest.java
├── build.gradle
├── gradle.properties
├── gradle
└── wrapper
│ ├── gradle-wrapper.jar
│ └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
└── settings.gradle
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.js linguist-language=Java
2 | *.css linguist-language=Java
3 | *.html linguist-language=Java
4 | *.h linguist-language=Java
5 | *.m linguist-language=Java
6 | *.c linguist-language=Java
7 | *.cpp linguist-language=Java
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea/libraries
5 | /.idea/modules.xml
6 | /.idea/workspace.xml
7 | .DS_Store
8 | /build
9 | /captures
10 | .externalNativeBuild
11 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/gradle.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
17 |
18 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/runConfigurations.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # webrtc
2 |
--------------------------------------------------------------------------------
/app/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea
5 | .DS_Store
6 | /build
7 | /captures
8 | .externalNativeBuild
9 |
10 |
--------------------------------------------------------------------------------
/app/CMakeLists.txt:
--------------------------------------------------------------------------------
# Sets the minimum version of CMake required to build the native
# library. 3.4.1 is the version bundled with the Android SDK.
cmake_minimum_required(VERSION 3.4.1)

# Creates and names a library, sets it as either STATIC
# or SHARED, and provides the relative paths to its source code.
# Gradle automatically packages shared libraries with your APK.
add_library(WRtcAudio
            SHARED
            src/main/cpp/web_rtc.cpp

            # Noise suppression
            src/main/cpp/noise_suppression.c
            src/main/cpp/fft4g.c
            src/main/cpp/ns_core.c

            # Automatic gain control + signal-processing helpers
            src/main/cpp/analog_agc.c
            src/main/cpp/digital_agc.c
            src/main/cpp/division_operations.c
            src/main/cpp/copy_set_operations.c
            src/main/cpp/dot_product_with_scale.c
            src/main/cpp/downsample_fast.c
            src/main/cpp/resample_by_2.c
            src/main/cpp/spl_sqrt.c
            src/main/cpp/splitting_filter.c)

# Target-scoped usage requirements instead of the directory-scoped
# include_directories()/add_definitions() and the CMAKE_C_FLAGS string
# mutation: these apply only to WRtcAudio and cannot leak into other
# targets added later in the build.
target_include_directories(WRtcAudio PRIVATE ${PROJECT_SOURCE_DIR}/include)
target_compile_definitions(WRtcAudio PRIVATE FIXED_POINT USE_KISS_FFT HAVE_CONFIG_H)
target_compile_options(WRtcAudio PRIVATE -Wall -pedantic)

# Searches for a specified prebuilt library and stores the path as a
# variable. Because system libraries are included in the search path by
# default, you only need to specify the name of the public NDK library
# you want to add. CMake verifies that the library exists before
# completing its build.
find_library(log-lib log)

# Links the NDK log library into the target. The PRIVATE keyword is
# required to avoid the legacy keyword-less target_link_libraries
# signature.
target_link_libraries(WRtcAudio PRIVATE ${log-lib})
--------------------------------------------------------------------------------
/app/build.gradle:
--------------------------------------------------------------------------------
// Android application module for the WebRTC audio-processing demo.
apply plugin: 'com.android.application'

android {
    compileSdkVersion 26
    defaultConfig {
        // NOTE: the applicationId differs from the Java package (com.webrtc);
        // instrumented tests assert "com.sws" as the package name.
        applicationId "com.sws"
        minSdkVersion 18
        targetSdkVersion 26
        versionCode 1
        versionName "1.0"
        testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
        externalNativeBuild {
            cmake {
                // No extra C++ flags; compiler options live in CMakeLists.txt.
                cppFlags ""
            }

            ndk{
                // ABIs to build the native WRtcAudio library for.
                // NOTE(review): "armeabi" is removed in newer NDKs (r17+) —
                // confirm the NDK in use still supports it.
                abiFilters "armeabi", "armeabi-v7a", "x86", "x86_64", "arm64-v8a"
                // abiFilters "armeabi", "armeabi-v7a"
            }
        }

    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
        }
    }
    // Points Gradle at the CMake script that builds the native library.
    externalNativeBuild {
        cmake {
            path "CMakeLists.txt"
        }
    }
}

dependencies {
    implementation fileTree(dir: 'libs', include: ['*.jar'])
    implementation 'com.android.support:appcompat-v7:26.1.0'
    testImplementation 'junit:junit:4.12'
    androidTestImplementation 'com.android.support.test:runner:1.0.1'
    androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.1'
}
44 |
--------------------------------------------------------------------------------
/app/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # You can control the set of applied configuration files using the
3 | # proguardFiles setting in build.gradle.
4 | #
5 | # For more details, see
6 | # http://developer.android.com/guide/developing/tools/proguard.html
7 |
8 | # If your project uses WebView with JS, uncomment the following
9 | # and specify the fully qualified class name to the JavaScript interface
10 | # class:
11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12 | # public *;
13 | #}
14 |
15 | # Uncomment this to preserve the line number information for
16 | # debugging stack traces.
17 | #-keepattributes SourceFile,LineNumberTable
18 |
19 | # If you keep the line number information, uncomment this to
20 | # hide the original source file name.
21 | #-renamesourcefileattribute SourceFile
22 |
--------------------------------------------------------------------------------
/app/src/androidTest/java/com/webrtc/ExampleInstrumentedTest.java:
--------------------------------------------------------------------------------
package com.webrtc;

import android.content.Context;
import android.support.test.InstrumentationRegistry;
import android.support.test.runner.AndroidJUnit4;

import org.junit.Test;
import org.junit.runner.RunWith;

import static org.junit.Assert.*;

/**
 * Instrumented test, which will execute on an Android device.
 *
 * @see Testing documentation
 */
@RunWith(AndroidJUnit4.class)
public class ExampleInstrumentedTest {
    @Test
    public void useAppContext() {
        // Context of the app under test.
        Context appContext = InstrumentationRegistry.getTargetContext();

        // "com.sws" is the applicationId from app/build.gradle, which is why
        // it differs from the Java package name "com.webrtc".
        assertEquals("com.sws", appContext.getPackageName());
    }
}
--------------------------------------------------------------------------------
/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/app/src/main/assets/record/recorded_audio.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/assets/record/recorded_audio.pcm
--------------------------------------------------------------------------------
/app/src/main/assets/record/recorded_audio_16k.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/assets/record/recorded_audio_16k.pcm
--------------------------------------------------------------------------------
/app/src/main/assets/record/recorded_audio_32k.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/assets/record/recorded_audio_32k.pcm
--------------------------------------------------------------------------------
/app/src/main/assets/record/test_32k.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/assets/record/test_32k.pcm
--------------------------------------------------------------------------------
/app/src/main/cpp/_android_log_print.h:
--------------------------------------------------------------------------------
1 | #ifndef NDKDEMO_ANDROID_LOG_PRINT_H
2 | #define NDKDEMO_ANDROID_LOG_PRINT_H
3 |
4 | #endif //NDKDEMO_ANDROID_LOG_PRINT_H
5 |
6 | #include
7 |
8 | //#define IS_DEBUG
9 |
10 | #ifdef IS_DEBUG
11 |
12 | #define LOG_TAG ("SWS_LOG_TEST")
13 |
14 | #define LOGV(...) ((void)__android_log_print(ANDROID_LOG_VERBOSE, LOG_TAG, __VA_ARGS__))
15 |
16 | #define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
17 |
18 | #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__))
19 |
20 | #define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, LOG_TAG, __VA_ARGS__))
21 |
22 | #define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__))
23 |
24 | #else
25 |
26 | #define LOGV(LOG_TAG, ...) NULL
27 |
28 | #define LOGD(LOG_TAG, ...) NULL
29 |
30 | #define LOGI(LOG_TAG, ...) NULL
31 |
32 | #define LOGW(LOG_TAG, ...) NULL
33 |
34 | #define LOGE(LOG_TAG, ...) NULL
35 |
36 | #endif
37 |
38 |
--------------------------------------------------------------------------------
/app/src/main/cpp/analog_agc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
12 | #define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
13 |
14 | #include "digital_agc.h"
15 | #include "gain_control.h"
16 | #include "typedefs.h"
17 |
18 | //#define AGC_DEBUG
19 | //#define MIC_LEVEL_FEEDBACK
20 | #ifdef AGC_DEBUG
21 | #include
22 | #endif
23 |
24 | /* Analog Automatic Gain Control variables:
25 | * Constant declarations (inner limits inside which no changes are done)
26 | * In the beginning the range is narrower to widen as soon as the measure
27 | * 'Rxx160_LP' is inside it. Currently the starting limits are -22.2+/-1dBm0
28 | * and the final limits -22.2+/-2.5dBm0. These levels makes the speech signal
29 | * go towards -25.4dBm0 (-31.4dBov). Tuned with wbfile-31.4dBov.pcm
30 | * The limits are created by running the AGC with a file having the desired
31 | * signal level and thereafter plotting Rxx160_LP in the dBm0-domain defined
32 | * by out=10*log10(in/260537279.7); Set the target level to the average level
33 | * of our measure Rxx160_LP. Remember that the levels are in blocks of 16 in
34 | * Q(-7). (Example matlab code: round(db2pow(-21.2)*16/2^7) )
35 | */
36 | #define RXX_BUFFER_LEN 10
37 |
38 | static const int16_t kMsecSpeechInner = 520;
39 | static const int16_t kMsecSpeechOuter = 340;
40 |
41 | static const int16_t kNormalVadThreshold = 400;
42 |
43 | static const int16_t kAlphaShortTerm = 6; // 1 >> 6 = 0.0156
44 | static const int16_t kAlphaLongTerm = 10; // 1 >> 10 = 0.000977
45 |
46 | typedef struct
47 | {
48 | // Configurable parameters/variables
49 | uint32_t fs; // Sampling frequency
50 | int16_t compressionGaindB; // Fixed gain level in dB
51 | int16_t targetLevelDbfs; // Target level in -dBfs of envelope (default -3)
52 | int16_t agcMode; // Hard coded mode (adaptAna/adaptDig/fixedDig)
53 | uint8_t limiterEnable; // Enabling limiter (on/off (default off))
54 | WebRtcAgc_config_t defaultConfig;
55 | WebRtcAgc_config_t usedConfig;
56 |
57 | // General variables
58 | int16_t initFlag;
59 | int16_t lastError;
60 |
61 | // Target level parameters
62 | // Based on the above: analogTargetLevel = round((32767*10^(-22/20))^2*16/2^7)
63 | int32_t analogTargetLevel; // = RXX_BUFFER_LEN * 846805; -22 dBfs
64 | int32_t startUpperLimit; // = RXX_BUFFER_LEN * 1066064; -21 dBfs
65 | int32_t startLowerLimit; // = RXX_BUFFER_LEN * 672641; -23 dBfs
66 | int32_t upperPrimaryLimit; // = RXX_BUFFER_LEN * 1342095; -20 dBfs
67 | int32_t lowerPrimaryLimit; // = RXX_BUFFER_LEN * 534298; -24 dBfs
68 | int32_t upperSecondaryLimit;// = RXX_BUFFER_LEN * 2677832; -17 dBfs
69 | int32_t lowerSecondaryLimit;// = RXX_BUFFER_LEN * 267783; -27 dBfs
70 | uint16_t targetIdx; // Table index for corresponding target level
71 | #ifdef MIC_LEVEL_FEEDBACK
72 | uint16_t targetIdxOffset; // Table index offset for level compensation
73 | #endif
74 | int16_t analogTarget; // Digital reference level in ENV scale
75 |
76 | // Analog AGC specific variables
77 | int32_t filterState[8]; // For downsampling wb to nb
78 | int32_t upperLimit; // Upper limit for mic energy
79 | int32_t lowerLimit; // Lower limit for mic energy
80 | int32_t Rxx160w32; // Average energy for one frame
81 | int32_t Rxx16_LPw32; // Low pass filtered subframe energies
82 | int32_t Rxx160_LPw32; // Low pass filtered frame energies
83 | int32_t Rxx16_LPw32Max; // Keeps track of largest energy subframe
84 | int32_t Rxx16_vectorw32[RXX_BUFFER_LEN];// Array with subframe energies
85 | int32_t Rxx16w32_array[2][5];// Energy values of microphone signal
86 | int32_t env[2][10]; // Envelope values of subframes
87 |
88 | int16_t Rxx16pos; // Current position in the Rxx16_vectorw32
89 | int16_t envSum; // Filtered scaled envelope in subframes
90 | int16_t vadThreshold; // Threshold for VAD decision
91 | int16_t inActive; // Inactive time in milliseconds
92 | int16_t msTooLow; // Milliseconds of speech at a too low level
93 | int16_t msTooHigh; // Milliseconds of speech at a too high level
94 | int16_t changeToSlowMode; // Change to slow mode after some time at target
95 | int16_t firstCall; // First call to the process-function
96 | int16_t msZero; // Milliseconds of zero input
97 | int16_t msecSpeechOuterChange;// Min ms of speech between volume changes
98 | int16_t msecSpeechInnerChange;// Min ms of speech between volume changes
99 | int16_t activeSpeech; // Milliseconds of active speech
100 | int16_t muteGuardMs; // Counter to prevent mute action
101 | int16_t inQueue; // 10 ms batch indicator
102 |
103 | // Microphone level variables
104 | int32_t micRef; // Remember ref. mic level for virtual mic
105 | uint16_t gainTableIdx; // Current position in virtual gain table
106 | int32_t micGainIdx; // Gain index of mic level to increase slowly
107 | int32_t micVol; // Remember volume between frames
108 | int32_t maxLevel; // Max possible vol level, incl dig gain
109 | int32_t maxAnalog; // Maximum possible analog volume level
110 | int32_t maxInit; // Initial value of "max"
111 | int32_t minLevel; // Minimum possible volume level
112 | int32_t minOutput; // Minimum output volume level
113 | int32_t zeroCtrlMax; // Remember max gain => don't amp low input
114 |
115 | int16_t scale; // Scale factor for internal volume levels
116 | #ifdef MIC_LEVEL_FEEDBACK
117 | int16_t numBlocksMicLvlSat;
118 | uint8_t micLvlSat;
119 | #endif
120 | // Structs for VAD and digital_agc
121 | AgcVad_t vadMic;
122 | DigitalAgc_t digitalAgc;
123 |
124 | #ifdef AGC_DEBUG
125 | FILE* fpt;
126 | FILE* agcLog;
127 | int32_t fcount;
128 | #endif
129 |
130 | int16_t lowLevelSignal;
131 | } Agc_t;
132 |
133 | #endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_ANALOG_AGC_H_
134 |
--------------------------------------------------------------------------------
/app/src/main/cpp/config.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | // TODO(pbos): Move Config from common.h to here.
12 |
13 | #ifndef WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
14 | #define WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
15 |
16 | #include
17 | #include
18 |
19 | #include "typedefs.h"
20 |
21 | namespace webrtc {
22 |
23 | struct RtpStatistics {
24 | RtpStatistics()
25 | : ssrc(0),
26 | fraction_loss(0),
27 | cumulative_loss(0),
28 | extended_max_sequence_number(0) {}
29 | uint32_t ssrc;
30 | int fraction_loss;
31 | int cumulative_loss;
32 | int extended_max_sequence_number;
33 | std::string c_name;
34 | };
35 |
36 | // Settings for NACK, see RFC 4585 for details.
37 | struct NackConfig {
38 | NackConfig() : rtp_history_ms(0) {}
39 | // Send side: the time RTP packets are stored for retransmissions.
40 | // Receive side: the time the receiver is prepared to wait for
41 | // retransmissions.
42 | // Set to '0' to disable.
43 | int rtp_history_ms;
44 | };
45 |
46 | // Settings for forward error correction, see RFC 5109 for details. Set the
47 | // payload types to '-1' to disable.
48 | struct FecConfig {
49 | FecConfig() : ulpfec_payload_type(-1), red_payload_type(-1) {}
50 | // Payload type used for ULPFEC packets.
51 | int ulpfec_payload_type;
52 |
53 | // Payload type used for RED packets.
54 | int red_payload_type;
55 | };
56 |
57 | // Settings for RTP retransmission payload format, see RFC 4588 for details.
58 | struct RtxConfig {
59 | RtxConfig() : rtx_payload_type(0), video_payload_type(0) {}
60 | // SSRCs to use for the RTX streams.
61 | std::vector ssrcs;
62 |
63 | // Payload type to use for the RTX stream.
64 | int rtx_payload_type;
65 |
66 | // Original video payload this RTX stream is used for.
67 | int video_payload_type;
68 | };
69 |
70 | // RTP header extension to use for the video stream, see RFC 5285.
71 | struct RtpExtension {
72 | static const char* kTOffset;
73 | static const char* kAbsSendTime;
74 | RtpExtension(const char* name, int id) : name(name), id(id) {}
75 | // TODO(mflodman) Add API to query supported extensions.
76 | std::string name;
77 | int id;
78 | };
79 | } // namespace webrtc
80 |
81 | #endif // WEBRTC_VIDEO_ENGINE_NEW_INCLUDE_CONFIG_H_
82 |
--------------------------------------------------------------------------------
/app/src/main/cpp/copy_set_operations.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | /*
13 | * This file contains the implementation of functions
14 | * WebRtcSpl_MemSetW16()
15 | * WebRtcSpl_MemSetW32()
16 | * WebRtcSpl_MemCpyReversedOrder()
17 | * WebRtcSpl_CopyFromEndW16()
18 | * WebRtcSpl_ZerosArrayW16()
19 | * WebRtcSpl_ZerosArrayW32()
20 | * WebRtcSpl_OnesArrayW16()
21 | * WebRtcSpl_OnesArrayW32()
22 | *
23 | * The description header can be found in signal_processing_library.h
24 | *
25 | */
26 |
27 | #include
28 | #include "signal_processing_library.h"
29 |
30 |
// Fills |length| 16-bit words starting at |ptr| with |set_value|.
// A zero or negative |length| writes nothing.
void WebRtcSpl_MemSetW16(int16_t *ptr, int16_t set_value, int length)
{
    int16_t *cursor = ptr;
    int16_t *end = ptr + (length > 0 ? length : 0);

    while (cursor != end)
    {
        *cursor++ = set_value;
    }
}
41 |
// Fills |length| 32-bit words starting at |ptr| with |set_value|.
// A zero or negative |length| writes nothing.
void WebRtcSpl_MemSetW32(int32_t *ptr, int32_t set_value, int length)
{
    int remaining = length;
    int32_t *out = ptr;

    while (remaining > 0)
    {
        *out++ = set_value;
        remaining--;
    }
}
52 |
// Copies |length| samples from |source| into a destination region in
// reversed order. |dest| points at the LAST element of the destination;
// writing proceeds backwards (dest[0], dest[-1], ...).
void WebRtcSpl_MemCpyReversedOrder(int16_t* dest, int16_t* source, int length)
{
    int k;

    for (k = 0; k < length; k++)
    {
        dest[-k] = source[k];
    }
}
64 |
// Copies the last |samples| elements of |vector_in| (total length |length|)
// into |vector_out| and returns the number of samples copied.
// NOTE(review): WEBRTC_SPL_MEMCPY_W16 comes from signal_processing_library.h;
// presumably a memcpy of 16-bit words — confirm against that header.
int16_t WebRtcSpl_CopyFromEndW16(const int16_t *vector_in,
                                 int16_t length,
                                 int16_t samples,
                                 int16_t *vector_out)
{
    // Copy the last of the input vector to vector_out
    WEBRTC_SPL_MEMCPY_W16(vector_out, &vector_in[length - samples], samples);

    return samples;
}
75 |
// Zeroes the first |length| elements of |vector| and returns |length|.
int16_t WebRtcSpl_ZerosArrayW16(int16_t *vector, int16_t length)
{
    int16_t k;

    for (k = 0; k < length; k++)
    {
        vector[k] = 0;
    }
    return length;
}
81 |
// Zeroes the first |length| elements of |vector| and returns |length|.
int16_t WebRtcSpl_ZerosArrayW32(int32_t *vector, int16_t length)
{
    int16_t remaining = length;
    int32_t *out = vector;

    while (remaining > 0)
    {
        *out++ = 0;
        remaining--;
    }
    return length;
}
87 |
// Sets the first |length| elements of |vector| to 1 and returns |length|.
int16_t WebRtcSpl_OnesArrayW16(int16_t *vector, int16_t length)
{
    int16_t remaining = length;
    int16_t *out = vector;

    while (remaining > 0)
    {
        *out++ = 1;
        remaining--;
    }
    return length;
}
98 |
// Sets the first |length| elements of |vector| to 1 and returns |length|.
int16_t WebRtcSpl_OnesArrayW32(int32_t *vector, int16_t length)
{
    int16_t k;

    for (k = 0; k < length; k++)
    {
        vector[k] = 1;
    }
    return length;
}
109 |
--------------------------------------------------------------------------------
/app/src/main/cpp/cpu_features_wrapper.h:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// Runtime CPU-feature detection interface (x86 SSE levels, ARM features).

#ifndef WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_FEATURES_WRAPPER_H_
#define WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_FEATURES_WRAPPER_H_

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

#include "typedefs.h"

// List of features in x86.
typedef enum {
  kSSE2,
  kSSE3
} CPUFeature;

// List of features in ARM.
enum {
  kCPUFeatureARMv7 = (1 << 0),
  kCPUFeatureVFPv3 = (1 << 1),
  kCPUFeatureNEON = (1 << 2),
  kCPUFeatureLDREXSTREX = (1 << 3)
};

// Signature for a feature-query function; the globals below are function
// pointers of this type, not plain functions.
typedef int (*WebRtc_CPUInfo)(CPUFeature feature);

// Returns true if the CPU supports the feature.
extern WebRtc_CPUInfo WebRtc_GetCPUInfo;

// No CPU feature is available => straight C path.
extern WebRtc_CPUInfo WebRtc_GetCPUInfoNoASM;

// Return the features in an ARM device.
// It detects the features in the hardware platform, and returns supported
// values in the above enum definition as a bitmask.
extern uint64_t WebRtc_GetCPUFeaturesARM(void);

#if defined(__cplusplus) || defined(c_plusplus)
}  // extern "C"
#endif

#endif  // WEBRTC_SYSTEM_WRAPPERS_INTERFACE_CPU_FEATURES_WRAPPER_H_
--------------------------------------------------------------------------------
/app/src/main/cpp/defines.h:
--------------------------------------------------------------------------------
/*
 * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

// Tuning constants for the noise-suppression module (ns_core.c).

#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_

//#define PROCESS_FLOW_0    // Use the traditional method.
//#define PROCESS_FLOW_1    // Use traditional with DD estimate of prior SNR.
#define PROCESS_FLOW_2    // Use the new method of speech/noise classification.

#define BLOCKL_MAX          160 // max processing block length: 160
#define ANAL_BLOCKL_MAX     256 // max analysis block length: 256
#define HALF_ANAL_BLOCKL    129 // half max analysis block length + 1

#define QUANTILE            (float)0.25

#define SIMULT              3
#define END_STARTUP_LONG    200
#define END_STARTUP_SHORT   50
#define FACTOR              (float)40.0
#define WIDTH               (float)0.01

#define SMOOTH              (float)0.75 // filter smoothing
// Length of fft work arrays.
#define IP_LENGTH (ANAL_BLOCKL_MAX >> 1) // must be at least ceil(2 + sqrt(ANAL_BLOCKL_MAX/2))
#define W_LENGTH (ANAL_BLOCKL_MAX >> 1)

//PARAMETERS FOR NEW METHOD (PROCESS_FLOW_2)
#define DD_PR_SNR           (float)0.98 // DD update of prior SNR
#define LRT_TAVG            (float)0.50 // tavg parameter for LRT (previously 0.90)
#define SPECT_FL_TAVG       (float)0.30 // tavg parameter for spectral flatness measure
#define SPECT_DIFF_TAVG     (float)0.30 // tavg parameter for spectral difference measure
#define PRIOR_UPDATE        (float)0.10 // update parameter of prior model
#define NOISE_UPDATE        (float)0.90 // update parameter for noise
#define SPEECH_UPDATE       (float)0.99 // update parameter when likely speech
#define WIDTH_PR_MAP        (float)4.0  // width parameter in sigmoid map for prior model
#define LRT_FEATURE_THR     (float)0.5  // default threshold for LRT feature
#define SF_FEATURE_THR      (float)0.5  // default threshold for Spectral Flatness feature
#define SD_FEATURE_THR      (float)0.5  // default threshold for Spectral Difference feature
#define PROB_RANGE          (float)0.20 // probability threshold for noise state in
                                        // speech/noise likelihood
#define HIST_PAR_EST         1000       // histogram size for estimation of parameters
#define GAMMA_PAUSE         (float)0.05 // update for conservative noise estimate
//
#define B_LIM               (float)0.5  // threshold in final energy gain factor calculation
#endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_DEFINES_H_
--------------------------------------------------------------------------------
/app/src/main/cpp/digital_agc.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | /* digital_agc.c
12 | *
13 | */
14 |
15 | #include "digital_agc.h"
16 |
17 | #include
18 | #include
19 | #ifdef AGC_DEBUG
20 | #include
21 | #endif
22 |
23 | #include "gain_control.h"
24 |
25 | // To generate the gaintable, copy&paste the following lines to a Matlab window:
26 | // MaxGain = 6; MinGain = 0; CompRatio = 3; Knee = 1;
27 | // zeros = 0:31; lvl = 2.^(1-zeros);
28 | // A = -10*log10(lvl) * (CompRatio - 1) / CompRatio;
29 | // B = MaxGain - MinGain;
30 | // gains = round(2^16*10.^(0.05 * (MinGain + B * ( log(exp(-Knee*A)+exp(-Knee*B)) - log(1+exp(-Knee*B)) ) / log(1/(1+exp(Knee*B))))));
31 | // fprintf(1, '\t%i, %i, %i, %i,\n', gains);
32 | // % Matlab code for plotting the gain and input/output level characteristic (copy/paste the following 3 lines):
33 | // in = 10*log10(lvl); out = 20*log10(gains/65536);
34 | // subplot(121); plot(in, out); axis([-30, 0, -5, 20]); grid on; xlabel('Input (dB)'); ylabel('Gain (dB)');
35 | // subplot(122); plot(in, in+out); axis([-30, 0, -30, 5]); grid on; xlabel('Input (dB)'); ylabel('Output (dB)');
36 | // zoom on;
37 |
// Generator table for y=log2(1+e^x) in Q8.
// Number of entries in the lookup table below.
enum { kGenFuncTableSize = 128 };
// Values were produced with the Matlab recipe in the comment above;
// the table is sampled uniformly and stored in Q8 fixed point.
static const uint16_t kGenFuncTable[kGenFuncTableSize] = {
          256,   485,   786,  1126,  1484,  1849,  2217,  2586,
         2955,  3324,  3693,  4063,  4432,  4801,  5171,  5540,
         5909,  6279,  6648,  7017,  7387,  7756,  8125,  8495,
         8864,  9233,  9603,  9972, 10341, 10711, 11080, 11449,
        11819, 12188, 12557, 12927, 13296, 13665, 14035, 14404,
        14773, 15143, 15512, 15881, 16251, 16620, 16989, 17359,
        17728, 18097, 18466, 18836, 19205, 19574, 19944, 20313,
        20682, 21052, 21421, 21790, 22160, 22529, 22898, 23268,
        23637, 24006, 24376, 24745, 25114, 25484, 25853, 26222,
        26592, 26961, 27330, 27700, 28069, 28438, 28808, 29177,
        29546, 29916, 30285, 30654, 31024, 31393, 31762, 32132,
        32501, 32870, 33240, 33609, 33978, 34348, 34717, 35086,
        35456, 35825, 36194, 36564, 36933, 37302, 37672, 38041,
        38410, 38780, 39149, 39518, 39888, 40257, 40626, 40996,
        41365, 41734, 42104, 42473, 42842, 43212, 43581, 43950,
        44320, 44689, 45058, 45428, 45797, 46166, 46536, 46905
};

// Decay time of the slow average used by the digital AGC.
static const int16_t kAvgDecayTime = 250;     // frames; < 3000
60 |
// Generates the 32-entry compressor gain table (Q16) used by the fixed
// digital part of the AGC. The table maps the input level (one entry per
// half-log2 step, see the Matlab notes above) to a linear gain.
//
// gainTable       (o) 32 gain values in Q16.
// digCompGaindB   (i) compression gain, Q0 (dB).
// targetLevelDbfs (i) target level, Q0 (dBFS magnitude).
// limiterEnable   (i) non-zero enables the soft-limiter region of the table.
// analogTarget    (i) analog target level, Q0 (dB).
//
// Returns 0 on success, -1 if the derived |diffGain| falls outside
// kGenFuncTable (invalid parameter combination).
int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
                                     int16_t digCompGaindB, // Q0
                                     int16_t targetLevelDbfs,// Q0
                                     uint8_t limiterEnable,
                                     int16_t analogTarget) // Q0
{
    // This function generates the compressor gain table used in the fixed digital part.
    uint32_t tmpU32no1, tmpU32no2, absInLevel, logApprox;
    int32_t inLevel, limiterLvl;
    int32_t tmp32, tmp32no1, tmp32no2, numFIX, den, y32;
    const uint16_t kLog10 = 54426; // log2(10) in Q14
    const uint16_t kLog10_2 = 49321; // 10*log10(2) in Q14
    const uint16_t kLogE_1 = 23637; // log2(e) in Q14
    uint16_t constMaxGain;
    uint16_t tmpU16, intPart, fracPart;
    const int16_t kCompRatio = 3;
    const int16_t kSoftLimiterLeft = 1;
    int16_t limiterOffset = 0; // Limiter offset
    int16_t limiterIdx, limiterLvlX;
    int16_t constLinApprox, zeroGainLvl, maxGain, diffGain;
    int16_t i, tmp16, tmp16no1;
    int zeros, zerosScale;

    // Constants
    //  kLogE_1 = 23637; // log2(e) in Q14
    //  kLog10 = 54426; // log2(10) in Q14
    //  kLog10_2 = 49321; // 10*log10(2) in Q14

    // Calculate maximum digital gain and zero gain level
    // (divisions below add half the divisor first, i.e. round to nearest).
    tmp32no1 = WEBRTC_SPL_MUL_16_16(digCompGaindB - analogTarget, kCompRatio - 1);
    tmp16no1 = analogTarget - targetLevelDbfs;
    tmp16no1 += WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
    maxGain = WEBRTC_SPL_MAX(tmp16no1, (analogTarget - targetLevelDbfs));
    tmp32no1 = WEBRTC_SPL_MUL_16_16(maxGain, kCompRatio);
    zeroGainLvl = digCompGaindB;
    zeroGainLvl -= WebRtcSpl_DivW32W16ResW16(tmp32no1 + ((kCompRatio - 1) >> 1),
                                             kCompRatio - 1);
    if ((digCompGaindB <= analogTarget) && (limiterEnable))
    {
        zeroGainLvl += (analogTarget - digCompGaindB + kSoftLimiterLeft);
        limiterOffset = 0;
    }

    // Calculate the difference between maximum gain and gain at 0dB0v:
    //  diffGain = maxGain + (compRatio-1)*zeroGainLvl/compRatio
    //           = (compRatio-1)*digCompGaindB/compRatio
    tmp32no1 = WEBRTC_SPL_MUL_16_16(digCompGaindB, kCompRatio - 1);
    diffGain = WebRtcSpl_DivW32W16ResW16(tmp32no1 + (kCompRatio >> 1), kCompRatio);
    if (diffGain < 0 || diffGain >= kGenFuncTableSize)
    {
        // diffGain is used as an index into kGenFuncTable below.
        assert(0);
        return -1;
    }

    // Calculate the limiter level and index:
    //  limiterLvlX = analogTarget - limiterOffset
    //  limiterLvl  = targetLevelDbfs + limiterOffset/compRatio
    limiterLvlX = analogTarget - limiterOffset;
    limiterIdx = 2
            + WebRtcSpl_DivW32W16ResW16(WEBRTC_SPL_LSHIFT_W32((int32_t)limiterLvlX, 13),
                                        WEBRTC_SPL_RSHIFT_U16(kLog10_2, 1));
    tmp16no1 = WebRtcSpl_DivW32W16ResW16(limiterOffset + (kCompRatio >> 1), kCompRatio);
    limiterLvl = targetLevelDbfs + tmp16no1;

    // Calculate (through table lookup):
    //  constMaxGain = log2(1+2^(log2(e)*diffGain)); (in Q8)
    constMaxGain = kGenFuncTable[diffGain]; // in Q8

    // Calculate a parameter used to approximate the fractional part of 2^x with a
    // piecewise linear function in Q14:
    //  constLinApprox = round(3/2*(4*(3-2*sqrt(2))/(log(2)^2)-0.5)*2^14);
    constLinApprox = 22817; // in Q14

    // Calculate a denominator used in the exponential part to convert from dB to linear scale:
    //  den = 20*constMaxGain (in Q8)
    den = WEBRTC_SPL_MUL_16_U16(20, constMaxGain); // in Q8

    // Fill in one table entry per iteration.
    for (i = 0; i < 32; i++)
    {
        // Calculate scaled input level (compressor):
        //  inLevel = fix((-constLog10_2*(compRatio-1)*(1-i)+fix(compRatio/2))/compRatio)
        tmp16 = (int16_t)WEBRTC_SPL_MUL_16_16(kCompRatio - 1, i - 1); // Q0
        tmp32 = WEBRTC_SPL_MUL_16_U16(tmp16, kLog10_2) + 1; // Q14
        inLevel = WebRtcSpl_DivW32W16(tmp32, kCompRatio); // Q14

        // Calculate diffGain-inLevel, to map using the genFuncTable
        inLevel = WEBRTC_SPL_LSHIFT_W32((int32_t)diffGain, 14) - inLevel; // Q14

        // Make calculations on abs(inLevel) and compensate for the sign afterwards.
        absInLevel = (uint32_t)WEBRTC_SPL_ABS_W32(inLevel); // Q14

        // LUT with interpolation: split into integer index and Q14 fraction.
        intPart = (uint16_t)WEBRTC_SPL_RSHIFT_U32(absInLevel, 14);
        fracPart = (uint16_t)(absInLevel & 0x00003FFF); // extract the fractional part
        tmpU16 = kGenFuncTable[intPart + 1] - kGenFuncTable[intPart]; // Q8
        tmpU32no1 = WEBRTC_SPL_UMUL_16_16(tmpU16, fracPart); // Q22
        tmpU32no1 += WEBRTC_SPL_LSHIFT_U32((uint32_t)kGenFuncTable[intPart], 14); // Q22
        logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 8); // Q14
        // Compensate for negative exponent using the relation:
        //  log2(1 + 2^-x) = log2(1 + 2^x) - x
        if (inLevel < 0)
        {
            // Choose shifts depending on the headroom of absInLevel so the
            // subtraction below stays in range.
            zeros = WebRtcSpl_NormU32(absInLevel);
            zerosScale = 0;
            if (zeros < 15)
            {
                // Not enough space for multiplication
                tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(absInLevel, 15 - zeros); // Q(zeros-1)
                tmpU32no2 = WEBRTC_SPL_UMUL_32_16(tmpU32no2, kLogE_1); // Q(zeros+13)
                if (zeros < 9)
                {
                    tmpU32no1 = WEBRTC_SPL_RSHIFT_U32(tmpU32no1, 9 - zeros); // Q(zeros+13)
                    zerosScale = 9 - zeros;
                } else
                {
                    tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, zeros - 9); // Q22
                }
            } else
            {
                tmpU32no2 = WEBRTC_SPL_UMUL_32_16(absInLevel, kLogE_1); // Q28
                tmpU32no2 = WEBRTC_SPL_RSHIFT_U32(tmpU32no2, 6); // Q22
            }
            logApprox = 0;
            if (tmpU32no2 < tmpU32no1)
            {
                logApprox = WEBRTC_SPL_RSHIFT_U32(tmpU32no1 - tmpU32no2, 8 - zerosScale); //Q14
            }
        }
        numFIX = WEBRTC_SPL_LSHIFT_W32(WEBRTC_SPL_MUL_16_U16(maxGain, constMaxGain), 6); // Q14
        numFIX -= WEBRTC_SPL_MUL_32_16((int32_t)logApprox, diffGain); // Q14

        // Calculate ratio
        // Shift |numFIX| as much as possible.
        // Ensure we avoid wrap-around in |den| as well.
        if (numFIX > (den >> 8))  // |den| is Q8.
        {
            zeros = WebRtcSpl_NormW32(numFIX);
        } else
        {
            zeros = WebRtcSpl_NormW32(den) + 8;
        }
        numFIX = WEBRTC_SPL_LSHIFT_W32(numFIX, zeros); // Q(14+zeros)

        // Shift den so we end up in Qy1
        tmp32no1 = WEBRTC_SPL_SHIFT_W32(den, zeros - 8); // Q(zeros)
        // Add/subtract half the divisor for round-to-nearest division.
        if (numFIX < 0)
        {
            numFIX -= WEBRTC_SPL_RSHIFT_W32(tmp32no1, 1);
        } else
        {
            numFIX += WEBRTC_SPL_RSHIFT_W32(tmp32no1, 1);
        }
        y32 = WEBRTC_SPL_DIV(numFIX, tmp32no1); // in Q14
        if (limiterEnable && (i < limiterIdx))
        {
            // Inside the limiter region: fixed output level instead of the
            // compressor curve.
            tmp32 = WEBRTC_SPL_MUL_16_U16(i - 1, kLog10_2); // Q14
            tmp32 -= WEBRTC_SPL_LSHIFT_W32(limiterLvl, 14); // Q14
            y32 = WebRtcSpl_DivW32W16(tmp32 + 10, 20);
        }
        if (y32 > 39000)
        {
            // Pre-shift to avoid overflow in the Q14*Q14 multiplication.
            tmp32 = WEBRTC_SPL_MUL(y32 >> 1, kLog10) + 4096; // in Q27
            tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 13); // in Q14
        } else
        {
            tmp32 = WEBRTC_SPL_MUL(y32, kLog10) + 8192; // in Q28
            tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 14); // in Q14
        }
        tmp32 += WEBRTC_SPL_LSHIFT_W32(16, 14); // in Q14 (Make sure final output is in Q16)

        // Calculate power: gainTable[i] = 2^tmp32 with a piecewise linear
        // approximation of the fractional power.
        if (tmp32 > 0)
        {
            intPart = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 14);
            fracPart = (uint16_t)(tmp32 & 0x00003FFF); // in Q14
            if (WEBRTC_SPL_RSHIFT_W32(fracPart, 13))
            {
                // Upper half of the fraction: mirror the approximation.
                tmp16 = WEBRTC_SPL_LSHIFT_W16(2, 14) - constLinApprox;
                tmp32no2 = WEBRTC_SPL_LSHIFT_W32(1, 14) - fracPart;
                tmp32no2 = WEBRTC_SPL_MUL_32_16(tmp32no2, tmp16);
                tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 13);
                tmp32no2 = WEBRTC_SPL_LSHIFT_W32(1, 14) - tmp32no2;
            } else
            {
                tmp16 = constLinApprox - WEBRTC_SPL_LSHIFT_W16(1, 14);
                tmp32no2 = WEBRTC_SPL_MUL_32_16(fracPart, tmp16);
                tmp32no2 = WEBRTC_SPL_RSHIFT_W32(tmp32no2, 13);
            }
            fracPart = (uint16_t)tmp32no2;
            gainTable[i] = WEBRTC_SPL_LSHIFT_W32(1, intPart)
                    + WEBRTC_SPL_SHIFT_W32(fracPart, intPart - 14);
        } else
        {
            gainTable[i] = 0;
        }
    }

    return 0;
}
260 |
261 | int32_t WebRtcAgc_InitDigital(DigitalAgc_t *stt, int16_t agcMode)
262 | {
263 |
264 | if (agcMode == kAgcModeFixedDigital)
265 | {
266 | // start at minimum to find correct gain faster
267 | stt->capacitorSlow = 0;
268 | } else
269 | {
270 | // start out with 0 dB gain
271 | stt->capacitorSlow = 134217728; // (int32_t)(0.125f * 32768.0f * 32768.0f);
272 | }
273 | stt->capacitorFast = 0;
274 | stt->gain = 65536;
275 | stt->gatePrevious = 0;
276 | stt->agcMode = agcMode;
277 | #ifdef AGC_DEBUG
278 | stt->frameCounter = 0;
279 | #endif
280 |
281 | // initialize VADs
282 | WebRtcAgc_InitVad(&stt->vadNearend);
283 | WebRtcAgc_InitVad(&stt->vadFarend);
284 |
285 | return 0;
286 | }
287 |
288 | int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *stt, const int16_t *in_far,
289 | int16_t nrSamples)
290 | {
291 | // Check for valid pointer
292 | if (&stt->vadFarend == NULL)
293 | {
294 | return -1;
295 | }
296 |
297 | // VAD for far end
298 | WebRtcAgc_ProcessVad(&stt->vadFarend, in_far, nrSamples);
299 |
300 | return 0;
301 | }
302 |
// Applies the digital gain to one 10 ms frame of near-end audio.
//
// Pipeline: near-end VAD -> per-subframe envelope tracking -> gain lookup in
// stt->gainTable -> gate (gain reduction during speech absence) -> overload
// limiting -> sample-wise gain ramping.
//
// stt            (i/o) digital AGC state.
// in_near        (i) near-end input, low band, 10 ms.
// in_near_H      (i) near-end input, high band (used only when FS == 32000).
// out            (o) gained low-band output; may alias |in_near|.
// out_H          (o) gained high-band output; may alias |in_near_H|.
// FS             (i) sample rate: 8000, 16000 or 32000 Hz.
// lowlevelSignal (i) non-zero forces zero gain decay (adaptive modes only).
//
// Returns 0 on success, -1 on an unsupported sample rate.
int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *stt, const int16_t *in_near,
                                 const int16_t *in_near_H, int16_t *out,
                                 int16_t *out_H, uint32_t FS,
                                 int16_t lowlevelSignal)
{
    // array for gains (one value per ms, incl start & end)
    int32_t gains[11];

    int32_t out_tmp, tmp32;
    int32_t env[10];
    int32_t nrg, max_nrg;
    int32_t cur_level;
    int32_t gain32, delta;
    int16_t logratio;
    int16_t lower_thr, upper_thr;
    int16_t zeros, zeros_fast, frac;
    int16_t decay;
    int16_t gate, gain_adj;
    int16_t k, n;
    int16_t L, L2; // samples/subframe

    // determine number of samples per ms
    if (FS == 8000)
    {
        L = 8;
        L2 = 3;
    } else if (FS == 16000)
    {
        L = 16;
        L2 = 4;
    } else if (FS == 32000)
    {
        // At 32 kHz each band is processed at 16 kHz.
        L = 16;
        L2 = 4;
    } else
    {
        return -1;
    }

    // TODO(andrew): again, we don't need input and output pointers...
    if (in_near != out)
    {
        // Only needed if they don't already point to the same place.
        memcpy(out, in_near, 10 * L * sizeof(int16_t));
    }
    if (FS == 32000)
    {
        if (in_near_H != out_H)
        {
            memcpy(out_H, in_near_H, 10 * L * sizeof(int16_t));
        }
    }
    // VAD for near end
    logratio = WebRtcAgc_ProcessVad(&stt->vadNearend, out, L * 10);

    // Account for far end VAD: blend in the far-end log-ratio when the
    // far-end VAD has seen enough frames.
    if (stt->vadFarend.counter > 10)
    {
        tmp32 = WEBRTC_SPL_MUL_16_16(3, logratio);
        logratio = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 - stt->vadFarend.logRatio, 2);
    }

    // Determine decay factor depending on VAD
    //  upper_thr = 1.0f;
    //  lower_thr = 0.25f;
    upper_thr = 1024; // Q10
    lower_thr = 0; // Q10
    if (logratio > upper_thr)
    {
        // decay = -2^17 / DecayTime;  ->  -65
        decay = -65;
    } else if (logratio < lower_thr)
    {
        decay = 0;
    } else
    {
        // decay = (int16_t)(((lower_thr - logratio)
        //       * (2^27/(DecayTime*(upper_thr-lower_thr)))) >> 10);
        // SUBSTITUTED: 2^27/(DecayTime*(upper_thr-lower_thr))  ->  65
        tmp32 = WEBRTC_SPL_MUL_16_16((lower_thr - logratio), 65);
        decay = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 10);
    }

    // adjust decay factor for long silence (detected as low standard deviation)
    // This is only done in the adaptive modes
    if (stt->agcMode != kAgcModeFixedDigital)
    {
        if (stt->vadNearend.stdLongTerm < 4000)
        {
            decay = 0;
        } else if (stt->vadNearend.stdLongTerm < 8096)
        {
            // decay = (int16_t)(((stt->vadNearend.stdLongTerm - 4000) * decay) >> 12);
            tmp32 = WEBRTC_SPL_MUL_16_16((stt->vadNearend.stdLongTerm - 4000), decay);
            decay = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
        }

        if (lowlevelSignal != 0)
        {
            decay = 0;
        }
    }
#ifdef AGC_DEBUG
    stt->frameCounter++;
    fprintf(stt->logFile, "%5.2f\t%d\t%d\t%d\t", (float)(stt->frameCounter) / 100, logratio, decay, stt->vadNearend.stdLongTerm);
#endif
    // Find max amplitude per sub frame
    // iterate over sub frames
    for (k = 0; k < 10; k++)
    {
        // iterate over samples
        max_nrg = 0;
        for (n = 0; n < L; n++)
        {
            nrg = WEBRTC_SPL_MUL_16_16(out[k * L + n], out[k * L + n]);
            if (nrg > max_nrg)
            {
                max_nrg = nrg;
            }
        }
        env[k] = max_nrg;
    }

    // Calculate gain per sub frame
    gains[0] = stt->gain;
    for (k = 0; k < 10; k++)
    {
        // Fast envelope follower
        //  decay time = -131000 / -1000 = 131 (ms)
        stt->capacitorFast = AGC_SCALEDIFF32(-1000, stt->capacitorFast, stt->capacitorFast);
        if (env[k] > stt->capacitorFast)
        {
            stt->capacitorFast = env[k];
        }
        // Slow envelope follower
        if (env[k] > stt->capacitorSlow)
        {
            // increase capacitorSlow
            stt->capacitorSlow
                    = AGC_SCALEDIFF32(500, (env[k] - stt->capacitorSlow), stt->capacitorSlow);
        } else
        {
            // decrease capacitorSlow
            stt->capacitorSlow
                    = AGC_SCALEDIFF32(decay, stt->capacitorSlow, stt->capacitorSlow);
        }

        // use maximum of both capacitors as current level
        if (stt->capacitorFast > stt->capacitorSlow)
        {
            cur_level = stt->capacitorFast;
        } else
        {
            cur_level = stt->capacitorSlow;
        }
        // Translate signal level into gain, using a piecewise linear approximation
        // find number of leading zeros
        zeros = WebRtcSpl_NormU32((uint32_t)cur_level);
        if (cur_level == 0)
        {
            zeros = 31;
        }
        tmp32 = (WEBRTC_SPL_LSHIFT_W32(cur_level, zeros) & 0x7FFFFFFF);
        frac = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 19); // Q12
        // Interpolate between the two nearest gain-table entries.
        tmp32 = WEBRTC_SPL_MUL((stt->gainTable[zeros-1] - stt->gainTable[zeros]), frac);
        gains[k + 1] = stt->gainTable[zeros] + WEBRTC_SPL_RSHIFT_W32(tmp32, 12);
#ifdef AGC_DEBUG
        if (k == 0)
        {
            fprintf(stt->logFile, "%d\t%d\t%d\t%d\t%d\n", env[0], cur_level, stt->capacitorFast, stt->capacitorSlow, zeros);
        }
#endif
    }

    // Gate processing (lower gain during absence of speech)
    zeros = WEBRTC_SPL_LSHIFT_W16(zeros, 9) - WEBRTC_SPL_RSHIFT_W16(frac, 3);
    // find number of leading zeros
    zeros_fast = WebRtcSpl_NormU32((uint32_t)stt->capacitorFast);
    if (stt->capacitorFast == 0)
    {
        zeros_fast = 31;
    }
    tmp32 = (WEBRTC_SPL_LSHIFT_W32(stt->capacitorFast, zeros_fast) & 0x7FFFFFFF);
    zeros_fast = WEBRTC_SPL_LSHIFT_W16(zeros_fast, 9);
    zeros_fast -= (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 22);

    gate = 1000 + zeros_fast - zeros - stt->vadNearend.stdShortTerm;

    if (gate < 0)
    {
        stt->gatePrevious = 0;
    } else
    {
        // First-order smoothing of the gate value (7/8 old + 1/8 new).
        tmp32 = WEBRTC_SPL_MUL_16_16(stt->gatePrevious, 7);
        gate = (int16_t)WEBRTC_SPL_RSHIFT_W32((int32_t)gate + tmp32, 3);
        stt->gatePrevious = gate;
    }
    // gate < 0     -> no gate
    // gate > 2500  -> max gate
    if (gate > 0)
    {
        if (gate < 2500)
        {
            gain_adj = WEBRTC_SPL_RSHIFT_W16(2500 - gate, 5);
        } else
        {
            gain_adj = 0;
        }
        for (k = 0; k < 10; k++)
        {
            if ((gains[k + 1] - stt->gainTable[0]) > 8388608)
            {
                // To prevent wraparound
                tmp32 = WEBRTC_SPL_RSHIFT_W32((gains[k+1] - stt->gainTable[0]), 8);
                tmp32 = WEBRTC_SPL_MUL(tmp32, (178 + gain_adj));
            } else
            {
                tmp32 = WEBRTC_SPL_MUL((gains[k+1] - stt->gainTable[0]), (178 + gain_adj));
                tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 8);
            }
            gains[k + 1] = stt->gainTable[0] + tmp32;
        }
    }

    // Limit gain to avoid overload distortion
    for (k = 0; k < 10; k++)
    {
        // To prevent wrap around
        zeros = 10;
        if (gains[k + 1] > 47453132)
        {
            zeros = 16 - WebRtcSpl_NormW32(gains[k + 1]);
        }
        gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
        gain32 = WEBRTC_SPL_MUL(gain32, gain32);
        // check for overflow; reduce the gain in -0.1 dB steps until the
        // amplified envelope fits in 16 bits.
        while (AGC_MUL32(WEBRTC_SPL_RSHIFT_W32(env[k], 12) + 1, gain32)
                > WEBRTC_SPL_SHIFT_W32((int32_t)32767, 2 * (1 - zeros + 10)))
        {
            // multiply by 253/256 ==> -0.1 dB
            if (gains[k + 1] > 8388607)
            {
                // Prevent wrap around
                gains[k + 1] = WEBRTC_SPL_MUL(WEBRTC_SPL_RSHIFT_W32(gains[k+1], 8), 253);
            } else
            {
                gains[k + 1] = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL(gains[k+1], 253), 8);
            }
            gain32 = WEBRTC_SPL_RSHIFT_W32(gains[k+1], zeros) + 1;
            gain32 = WEBRTC_SPL_MUL(gain32, gain32);
        }
    }
    // gain reductions should be done 1 ms earlier than gain increases
    for (k = 1; k < 10; k++)
    {
        if (gains[k] > gains[k + 1])
        {
            gains[k] = gains[k + 1];
        }
    }
    // save start gain for next frame
    stt->gain = gains[10];

    // Apply gain
    // handle first sub frame separately (with saturation checks; the
    // remaining subframes are assumed safe after the limiting loop above)
    delta = WEBRTC_SPL_LSHIFT_W32(gains[1] - gains[0], (4 - L2));
    gain32 = WEBRTC_SPL_LSHIFT_W32(gains[0], 4);
    // iterate over samples
    for (n = 0; n < L; n++)
    {
        // For lower band
        tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
        out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
        if (out_tmp > 4095)
        {
            out[n] = (int16_t)32767;
        } else if (out_tmp < -4096)
        {
            out[n] = (int16_t)-32768;
        } else
        {
            tmp32 = WEBRTC_SPL_MUL((int32_t)out[n], WEBRTC_SPL_RSHIFT_W32(gain32, 4));
            out[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
        }
        // For higher band
        if (FS == 32000)
        {
            tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n],
                                   WEBRTC_SPL_RSHIFT_W32(gain32 + 127, 7));
            out_tmp = WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            if (out_tmp > 4095)
            {
                out_H[n] = (int16_t)32767;
            } else if (out_tmp < -4096)
            {
                out_H[n] = (int16_t)-32768;
            } else
            {
                tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[n],
                                       WEBRTC_SPL_RSHIFT_W32(gain32, 4));
                out_H[n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            }
        }
        //

        gain32 += delta;
    }
    // iterate over subframes
    for (k = 1; k < 10; k++)
    {
        delta = WEBRTC_SPL_LSHIFT_W32(gains[k+1] - gains[k], (4 - L2));
        gain32 = WEBRTC_SPL_LSHIFT_W32(gains[k], 4);
        // iterate over samples
        for (n = 0; n < L; n++)
        {
            // For lower band
            tmp32 = WEBRTC_SPL_MUL((int32_t)out[k * L + n],
                                   WEBRTC_SPL_RSHIFT_W32(gain32, 4));
            out[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            // For higher band
            if (FS == 32000)
            {
                tmp32 = WEBRTC_SPL_MUL((int32_t)out_H[k * L + n],
                                       WEBRTC_SPL_RSHIFT_W32(gain32, 4));
                out_H[k * L + n] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32 , 16);
            }
            gain32 += delta;
        }
    }

    return 0;
}
635 |
636 | void WebRtcAgc_InitVad(AgcVad_t *state)
637 | {
638 | int16_t k;
639 |
640 | state->HPstate = 0; // state of high pass filter
641 | state->logRatio = 0; // log( P(active) / P(inactive) )
642 | // average input level (Q10)
643 | state->meanLongTerm = WEBRTC_SPL_LSHIFT_W16(15, 10);
644 |
645 | // variance of input level (Q8)
646 | state->varianceLongTerm = WEBRTC_SPL_LSHIFT_W32(500, 8);
647 |
648 | state->stdLongTerm = 0; // standard deviation of input level in dB
649 | // short-term average input level (Q10)
650 | state->meanShortTerm = WEBRTC_SPL_LSHIFT_W16(15, 10);
651 |
652 | // short-term variance of input level (Q8)
653 | state->varianceShortTerm = WEBRTC_SPL_LSHIFT_W32(500, 8);
654 |
655 | state->stdShortTerm = 0; // short-term standard deviation of input level in dB
656 | state->counter = 3; // counts updates
657 | for (k = 0; k < 8; k++)
658 | {
659 | // downsampling filter
660 | state->downState[k] = 0;
661 | }
662 | }
663 |
// Runs the level-based VAD over one 10 ms block and returns the updated
// voice activity measure, log( P(active) / P(inactive) ), in Q10
// (clamped to [-2048, 2048]).
//
// The input is downsampled to 4 kHz, high-pass filtered, and its energy is
// converted to a dB-domain level whose short/long-term statistics drive the
// activity measure. nrSamples is expected to be 160 (16 kHz input, averaged
// in pairs first) or 80 (8 kHz input) — only these two rates are handled.
int16_t WebRtcAgc_ProcessVad(AgcVad_t *state, // (i) VAD state
                             const int16_t *in, // (i) Speech signal
                             int16_t nrSamples) // (i) number of samples
{
    int32_t out, nrg, tmp32, tmp32b;
    uint16_t tmpU16;
    int16_t k, subfr, tmp16;
    int16_t buf1[8];
    int16_t buf2[4];
    int16_t HPstate;
    int16_t zeros, dB;

    // process in 10 sub frames of 1 ms (to save on memory)
    nrg = 0;
    HPstate = state->HPstate;
    for (subfr = 0; subfr < 10; subfr++)
    {
        // downsample to 4 kHz
        if (nrSamples == 160)
        {
            // 16 kHz input: average sample pairs down to 8 kHz first.
            for (k = 0; k < 8; k++)
            {
                tmp32 = (int32_t)in[2 * k] + (int32_t)in[2 * k + 1];
                tmp32 = WEBRTC_SPL_RSHIFT_W32(tmp32, 1);
                buf1[k] = (int16_t)tmp32;
            }
            in += 16;

            WebRtcSpl_DownsampleBy2(buf1, 8, buf2, state->downState);
        } else
        {
            WebRtcSpl_DownsampleBy2(in, 8, buf2, state->downState);
            in += 8;
        }

        // high pass filter and compute energy
        for (k = 0; k < 4; k++)
        {
            out = buf2[k] + HPstate;
            tmp32 = WEBRTC_SPL_MUL(600, out);
            HPstate = (int16_t)(WEBRTC_SPL_RSHIFT_W32(tmp32, 10) - buf2[k]);
            tmp32 = WEBRTC_SPL_MUL(out, out);
            nrg += WEBRTC_SPL_RSHIFT_W32(tmp32, 6);
        }
    }
    state->HPstate = HPstate;

    // find number of leading zeros (binary search over the bits of |nrg|)
    if (!(0xFFFF0000 & nrg))
    {
        zeros = 16;
    } else
    {
        zeros = 0;
    }
    if (!(0xFF000000 & (nrg << zeros)))
    {
        zeros += 8;
    }
    if (!(0xF0000000 & (nrg << zeros)))
    {
        zeros += 4;
    }
    if (!(0xC0000000 & (nrg << zeros)))
    {
        zeros += 2;
    }
    if (!(0x80000000 & (nrg << zeros)))
    {
        zeros += 1;
    }

    // energy level (range {-32..30}) (Q10)
    dB = WEBRTC_SPL_LSHIFT_W16(15 - zeros, 11);

    // Update statistics

    if (state->counter < kAvgDecayTime)
    {
        // decay time = AvgDecTime * 10 ms
        state->counter++;
    }

    // update short-term estimate of mean energy level (Q10)
    // (exponential average with weight 15/16 on the old value)
    tmp32 = (WEBRTC_SPL_MUL_16_16(state->meanShortTerm, 15) + (int32_t)dB);
    state->meanShortTerm = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 4);

    // update short-term estimate of variance in energy level (Q8)
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
    tmp32 += WEBRTC_SPL_MUL(state->varianceShortTerm, 15);
    state->varianceShortTerm = WEBRTC_SPL_RSHIFT_W32(tmp32, 4);

    // update short-term estimate of standard deviation in energy level (Q10)
    // std = sqrt(E[x^2] - E[x]^2)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanShortTerm, state->meanShortTerm);
    tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceShortTerm, 12) - tmp32;
    state->stdShortTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);

    // update long-term estimate of mean energy level (Q10)
    // (running average weighted by the update counter)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->counter) + (int32_t)dB;
    state->meanLongTerm = WebRtcSpl_DivW32W16ResW16(tmp32,
                                                    WEBRTC_SPL_ADD_SAT_W16(state->counter, 1));

    // update long-term estimate of variance in energy level (Q8)
    tmp32 = WEBRTC_SPL_RSHIFT_W32(WEBRTC_SPL_MUL_16_16(dB, dB), 12);
    tmp32 += WEBRTC_SPL_MUL(state->varianceLongTerm, state->counter);
    state->varianceLongTerm = WebRtcSpl_DivW32W16(tmp32,
                                                  WEBRTC_SPL_ADD_SAT_W16(state->counter, 1));

    // update long-term estimate of standard deviation in energy level (Q10)
    tmp32 = WEBRTC_SPL_MUL_16_16(state->meanLongTerm, state->meanLongTerm);
    tmp32 = WEBRTC_SPL_LSHIFT_W32(state->varianceLongTerm, 12) - tmp32;
    state->stdLongTerm = (int16_t)WebRtcSpl_Sqrt(tmp32);

    // update voice activity measure (Q10): blend the normalized deviation of
    // the current level from the long-term mean into the previous log ratio.
    tmp16 = WEBRTC_SPL_LSHIFT_W16(3, 12);
    tmp32 = WEBRTC_SPL_MUL_16_16(tmp16, (dB - state->meanLongTerm));
    tmp32 = WebRtcSpl_DivW32W16(tmp32, state->stdLongTerm);
    tmpU16 = WEBRTC_SPL_LSHIFT_U16((uint16_t)13, 12);
    tmp32b = WEBRTC_SPL_MUL_16_U16(state->logRatio, tmpU16);
    tmp32 += WEBRTC_SPL_RSHIFT_W32(tmp32b, 10);

    state->logRatio = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp32, 6);

    // limit
    if (state->logRatio > 2048)
    {
        state->logRatio = 2048;
    }
    if (state->logRatio < -2048)
    {
        state->logRatio = -2048;
    }

    return state->logRatio; // Q10
}
799 |
--------------------------------------------------------------------------------
/app/src/main/cpp/digital_agc.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
12 | #define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
13 |
14 | #ifdef AGC_DEBUG
#include <stdio.h>
16 | #endif
17 | #include "signal_processing_library.h"
18 | #include "typedefs.h"
19 |
// AGC_MUL32(A, B): the 32 most significant bits of A(19) * B(26) >> 13.
// The low 13 bits of B are multiplied separately to avoid overflow.
#define AGC_MUL32(A, B) (((B)>>13)*(A) + ( ((0x00001FFF & (B))*(A)) >> 13 ))
// AGC_SCALEDIFF32(A, B, C): C + the 32 most significant bits of A * B,
// with B split into high/low halves to keep the products in range.
#define AGC_SCALEDIFF32(A, B, C) ((C) + ((B)>>16)*(A) + ( ((0x0000FFFF & (B))*(A)) >> 16 ))

// State of one level-based voice activity detector
// (see WebRtcAgc_ProcessVad in digital_agc.c).
typedef struct
{
    int32_t downState[8];       // downsampling filter state
    int16_t HPstate;            // high pass filter state
    int16_t counter;            // number of statistics updates performed
    int16_t logRatio; // log( P(active) / P(inactive) ) (Q10)
    int16_t meanLongTerm; // Q10
    int32_t varianceLongTerm; // Q8
    int16_t stdLongTerm; // Q10
    int16_t meanShortTerm; // Q10
    int32_t varianceShortTerm; // Q8
    int16_t stdShortTerm; // Q10
} AgcVad_t; // total = 54 bytes

// State of the digital AGC part.
typedef struct
{
    int32_t capacitorSlow;      // slow envelope follower state
    int32_t capacitorFast;      // fast envelope follower state
    int32_t gain;               // gain carried over to the next frame (Q16)
    int32_t gainTable[32];      // compressor gain table (Q16)
    int16_t gatePrevious;       // smoothed gate value from the previous frame
    int16_t agcMode;            // operating mode (e.g. kAgcModeFixedDigital)
    AgcVad_t vadNearend;        // near-end voice activity detector
    AgcVad_t vadFarend;         // far-end voice activity detector
#ifdef AGC_DEBUG
    FILE* logFile;
    int frameCounter;
#endif
} DigitalAgc_t;

int32_t WebRtcAgc_InitDigital(DigitalAgc_t *digitalAgcInst, int16_t agcMode);

int32_t WebRtcAgc_ProcessDigital(DigitalAgc_t *digitalAgcInst,
                                 const int16_t *inNear, const int16_t *inNear_H,
                                 int16_t *out, int16_t *out_H, uint32_t FS,
                                 int16_t lowLevelSignal);

int32_t WebRtcAgc_AddFarendToDigital(DigitalAgc_t *digitalAgcInst,
                                     const int16_t *inFar,
                                     int16_t nrSamples);

void WebRtcAgc_InitVad(AgcVad_t *vadInst);

int16_t WebRtcAgc_ProcessVad(AgcVad_t *vadInst, // (i) VAD state
                             const int16_t *in, // (i) Speech signal
                             int16_t nrSamples); // (i) number of samples

int32_t WebRtcAgc_CalculateGainTable(int32_t *gainTable, // Q16
                                     int16_t compressionGaindB, // Q0 (in dB)
                                     int16_t targetLevelDbfs,// Q0 (in dB)
                                     uint8_t limiterEnable,
                                     int16_t analogTarget);

#endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_MAIN_SOURCE_DIGITAL_AGC_H_
79 |
--------------------------------------------------------------------------------
/app/src/main/cpp/division_operations.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | /*
13 | * This file contains implementations of the divisions
14 | * WebRtcSpl_DivU32U16()
15 | * WebRtcSpl_DivW32W16()
16 | * WebRtcSpl_DivW32W16ResW16()
17 | * WebRtcSpl_DivResultInQ31()
18 | * WebRtcSpl_DivW32HiLow()
19 | *
20 | * The description header can be found in signal_processing_library.h
21 | *
22 | */
23 |
24 | #include "signal_processing_library.h"
25 |
uint32_t WebRtcSpl_DivU32U16(uint32_t num, uint16_t den)
{
    // Unsigned division with a zero guard: a zero denominator saturates the
    // result to the maximum 32-bit unsigned value.
    if (den == 0)
    {
        return (uint32_t)0xFFFFFFFF;
    }
    return num / den;
}
37 |
int32_t WebRtcSpl_DivW32W16(int32_t num, int16_t den)
{
    // Signed (truncating) division with a zero guard: a zero denominator
    // saturates the result to the maximum positive 32-bit value.
    if (den == 0)
    {
        return (int32_t)0x7FFFFFFF;
    }
    return num / den;
}
49 |
int16_t WebRtcSpl_DivW32W16ResW16(int32_t num, int16_t den)
{
    // Signed (truncating) division, result narrowed to 16 bits. A zero
    // denominator saturates the result to the maximum positive 16-bit value.
    if (den == 0)
    {
        return (int16_t)0x7FFF;
    }
    return (int16_t)(num / den);
}
61 |
int32_t WebRtcSpl_DivResultInQ31(int32_t num, int32_t den)
{
    // Bit-by-bit restoring division producing num/den in Q31.
    // NOTE(review): the remainder is doubled each step, so this presumably
    // assumes |num| < |den| to stay in 32-bit range — confirm at call sites.
    int32_t remainder = num;
    int32_t denominator = den;
    int32_t quotient = 0;
    int sign_changes = 0;
    int bit;

    if (num == 0)
        return 0;

    // Work on magnitudes; the sign is restored at the end.
    if (num < 0)
    {
        sign_changes++;
        remainder = -num;
    }
    if (den < 0)
    {
        sign_changes++;
        denominator = -den;
    }

    // Generate 31 quotient bits, most significant first.
    for (bit = 0; bit < 31; bit++)
    {
        quotient <<= 1;
        remainder <<= 1;
        if (remainder >= denominator)
        {
            remainder -= denominator;
            quotient++;
        }
    }

    // Exactly one negative operand makes the result negative.
    if (sign_changes == 1)
    {
        quotient = -quotient;
    }
    return quotient;
}
99 |
// Divides |num| by a denominator given in "hi/low" double-precision format
// (den_hi holds the upper 16 bits; den_low refines it — presumably
// den = den_hi*2^16 + den_low*2, matching the hi/low splits below; confirm
// against signal_processing_library.h). Returns num/den in Q31.
//
// Method: start from a Q14 approximation of 1/den using only den_hi, refine
// it with one Newton-Raphson step (1/den ≈ approx * (2 - den*approx)), then
// multiply |num| by the refined reciprocal using 16x16-bit partial products.
int32_t WebRtcSpl_DivW32HiLow(int32_t num, int16_t den_hi, int16_t den_low)
{
    int16_t approx, tmp_hi, tmp_low, num_hi, num_low;
    int32_t tmpW32;

    // Initial reciprocal estimate from the high word only.
    approx = (int16_t)WebRtcSpl_DivW32W16((int32_t)0x1FFFFFFF, den_hi);
    // result in Q14 (Note: 3FFFFFFF = 0.5 in Q30)

    // tmpW32 = 1/den = approx * (2.0 - den * approx) (in Q30)
    tmpW32 = (WEBRTC_SPL_MUL_16_16(den_hi, approx) << 1)
            + ((WEBRTC_SPL_MUL_16_16(den_low, approx) >> 15) << 1);
    // tmpW32 = den * approx

    tmpW32 = (int32_t)0x7fffffffL - tmpW32; // result in Q30 (tmpW32 = 2.0-(den*approx))

    // Store tmpW32 in hi and low format
    tmp_hi = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmpW32, 16);
    tmp_low = (int16_t)WEBRTC_SPL_RSHIFT_W32((tmpW32
            - WEBRTC_SPL_LSHIFT_W32((int32_t)tmp_hi, 16)), 1);

    // tmpW32 = 1/den in Q29
    tmpW32 = ((WEBRTC_SPL_MUL_16_16(tmp_hi, approx) + (WEBRTC_SPL_MUL_16_16(tmp_low, approx)
            >> 15)) << 1);

    // 1/den in hi and low format
    tmp_hi = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmpW32, 16);
    tmp_low = (int16_t)WEBRTC_SPL_RSHIFT_W32((tmpW32
            - WEBRTC_SPL_LSHIFT_W32((int32_t)tmp_hi, 16)), 1);

    // Store num in hi and low format
    num_hi = (int16_t)WEBRTC_SPL_RSHIFT_W32(num, 16);
    num_low = (int16_t)WEBRTC_SPL_RSHIFT_W32((num
            - WEBRTC_SPL_LSHIFT_W32((int32_t)num_hi, 16)), 1);

    // num * (1/den) by 32 bit multiplication (result in Q28)

    tmpW32 = (WEBRTC_SPL_MUL_16_16(num_hi, tmp_hi) + (WEBRTC_SPL_MUL_16_16(num_hi, tmp_low)
            >> 15) + (WEBRTC_SPL_MUL_16_16(num_low, tmp_hi) >> 15));

    // Put result in Q31 (convert from Q28)
    tmpW32 = WEBRTC_SPL_LSHIFT_W32(tmpW32, 3);

    return tmpW32;
}
144 |
--------------------------------------------------------------------------------
/app/src/main/cpp/dot_product_with_scale.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #include "signal_processing_library.h"
12 |
// Computes the dot product of |vector1| and |vector2| over |length|
// samples, right-shifting each 32-bit product by |scaling| before it is
// added to the accumulator. Returns the scaled sum.
int32_t WebRtcSpl_DotProductWithScale(const int16_t* vector1,
                                      const int16_t* vector2,
                                      int length,
                                      int scaling) {
  int32_t accumulator = 0;
  int n = 0;

  /* Handle four samples per iteration to improve performance. */
  while (n + 4 <= length) {
    accumulator += (vector1[n] * vector2[n]) >> scaling;
    accumulator += (vector1[n + 1] * vector2[n + 1]) >> scaling;
    accumulator += (vector1[n + 2] * vector2[n + 2]) >> scaling;
    accumulator += (vector1[n + 3] * vector2[n + 3]) >> scaling;
    n += 4;
  }

  /* Remaining 0-3 samples. */
  while (n < length) {
    accumulator += (vector1[n] * vector2[n]) >> scaling;
    n++;
  }

  return accumulator;
}
33 |
--------------------------------------------------------------------------------
/app/src/main/cpp/downsample_fast.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #include "signal_processing_library.h"
12 |
13 | // TODO(Bjornv): Change the function parameter order to WebRTC code style.
14 | // C version of WebRtcSpl_DownsampleFast() for generic platforms.
// TODO(Bjornv): Change the function parameter order to WebRTC code style.
// C version of WebRtcSpl_DownsampleFast() for generic platforms.
//
// FIR-filters |data_in| with the Q12 |coefficients| and decimates by keeping
// every |factor|-th filtered sample starting |delay| samples into the input,
// writing |data_out_length| saturated 16-bit samples to |data_out|.
//
// Returns 0 on success, -1 on inconsistent parameters (empty output/filter,
// not enough input, non-positive factor, or negative delay).
//
// NOTE(review): the inner loop reads data_in[i - j] down to index
// delay - (coefficients_length - 1); callers must pass
// delay >= coefficients_length - 1 to stay inside the buffer — confirm.
int WebRtcSpl_DownsampleFastC(const int16_t* data_in,
                              int data_in_length,
                              int16_t* data_out,
                              int data_out_length,
                              const int16_t* __restrict coefficients,
                              int coefficients_length,
                              int factor,
                              int delay) {
  int i = 0;
  int j = 0;
  int32_t out_s32 = 0;
  int endpos = delay + factor * (data_out_length - 1) + 1;

  // Return error if any of the running conditions doesn't meet.
  // A non-positive |factor| would keep |i| from ever reaching |endpos|
  // (infinite loop), and a negative |delay| would index before |data_in|.
  if (data_out_length <= 0 || coefficients_length <= 0
      || data_in_length < endpos || factor <= 0 || delay < 0) {
    return -1;
  }

  for (i = delay; i < endpos; i += factor) {
    out_s32 = 2048;  // Round value, 0.5 in Q12.

    // Accumulate the FIR taps in Q12.
    for (j = 0; j < coefficients_length; j++) {
      out_s32 += coefficients[j] * data_in[i - j];  // Q12.
    }

    out_s32 >>= 12;  // Q0.

    // Saturate and store the output.
    *data_out++ = WebRtcSpl_SatW32ToW16(out_s32);
  }

  return 0;
}
49 |
--------------------------------------------------------------------------------
/app/src/main/cpp/fft4g.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_UTILITY_FFT4G_H_

// In-place FFT routines; WebRtc_rdft operates on a real-valued sequence and
// WebRtc_cdft on a complex one.
// NOTE(review): the parameter meanings are not documented here and the
// implementation (fft4g.c) is not visible in this view — presumably
// (length, direction, data, work-area index table, twiddle factors) in the
// style of Ooura's fft4g package; confirm against fft4g.c before relying on
// this.
void WebRtc_rdft(int, int, float *, int *, float *);
void WebRtc_cdft(int, int, float *, int *, float *);

#endif
18 |
--------------------------------------------------------------------------------
/app/src/main/cpp/gain_control.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_AGC_INCLUDE_GAIN_CONTROL_H_
#define WEBRTC_MODULES_AUDIO_PROCESSING_AGC_INCLUDE_GAIN_CONTROL_H_

#include "typedefs.h"

// Errors
#define AGC_UNSPECIFIED_ERROR 18000
#define AGC_UNSUPPORTED_FUNCTION_ERROR 18001
#define AGC_UNINITIALIZED_ERROR 18002
#define AGC_NULL_POINTER_ERROR 18003
#define AGC_BAD_PARAMETER_ERROR 18004

// Warnings
#define AGC_BAD_PARAMETER_WARNING 18050

// AGC operating modes; see WebRtcAgc_Init() below for the full descriptions.
enum
{
    kAgcModeUnchanged,        // 0 - gain left unchanged
    kAgcModeAdaptiveAnalog,   // 1 - adaptive analog AGC (-3 dBOv)
    kAgcModeAdaptiveDigital,  // 2 - adaptive digital AGC (-3 dBOv)
    kAgcModeFixedDigital      // 3 - fixed digital gain (0 dB)
};

// Boolean values used for the config fields (e.g. limiterEnable).
enum
{
    kAgcFalse = 0,
    kAgcTrue
};

// Run-time configuration, read/written via WebRtcAgc_get_config() /
// WebRtcAgc_set_config().
typedef struct
{
    int16_t targetLevelDbfs; // default 3 (-3 dBOv)
    int16_t compressionGaindB; // default 9 dB
    uint8_t limiterEnable; // default kAgcTrue (on)
} WebRtcAgc_config_t;
46 |
47 | #if defined(__cplusplus)
48 | extern "C"
49 | {
50 | #endif
51 |
52 | /*
53 | * This function processes a 10/20ms frame of far-end speech to determine
54 | * if there is active speech. Far-end speech length can be either 10ms or
55 | * 20ms. The length of the input speech vector must be given in samples
56 | * (80/160 when FS=8000, and 160/320 when FS=16000 or FS=32000).
57 | *
58 | * Input:
59 | * - agcInst : AGC instance.
60 | * - inFar : Far-end input speech vector (10 or 20ms)
61 | * - samples : Number of samples in input vector
62 | *
63 | * Return value:
64 | * : 0 - Normal operation.
65 | * : -1 - Error
66 | */
67 | int WebRtcAgc_AddFarend(void* agcInst,
68 | const int16_t* inFar,
69 | int16_t samples);
70 |
71 | /*
72 | * This function processes a 10/20ms frame of microphone speech to determine
73 | * if there is active speech. Microphone speech length can be either 10ms or
74 | * 20ms. The length of the input speech vector must be given in samples
75 | * (80/160 when FS=8000, and 160/320 when FS=16000 or FS=32000). For very low
76 | * input levels, the input signal is increased in level by multiplying and
77 | * overwriting the samples in inMic[].
78 | *
79 | * This function should be called before any further processing of the
80 | * near-end microphone signal.
81 | *
82 | * Input:
83 | * - agcInst : AGC instance.
84 | * - inMic : Microphone input speech vector (10 or 20 ms) for
85 | * L band
86 | * - inMic_H : Microphone input speech vector (10 or 20 ms) for
87 | * H band
88 | * - samples : Number of samples in input vector
89 | *
90 | * Return value:
91 | * : 0 - Normal operation.
92 | * : -1 - Error
93 | */
94 | int WebRtcAgc_AddMic(void* agcInst,
95 | int16_t* inMic,
96 | int16_t* inMic_H,
97 | int16_t samples);
98 |
99 | /*
100 | * This function replaces the analog microphone with a virtual one.
101 | * It is a digital gain applied to the input signal and is used in the
102 | * agcAdaptiveDigital mode where no microphone level is adjustable.
103 | * Microphone speech length can be either 10ms or 20ms. The length of the
104 | * input speech vector must be given in samples (80/160 when FS=8000, and
105 | * 160/320 when FS=16000 or FS=32000).
106 | *
107 | * Input:
108 | * - agcInst : AGC instance.
109 | * - inMic : Microphone input speech vector for (10 or 20 ms)
110 | * L band
111 | * - inMic_H : Microphone input speech vector for (10 or 20 ms)
112 | * H band
113 | * - samples : Number of samples in input vector
114 | * - micLevelIn : Input level of microphone (static)
115 | *
116 | * Output:
117 | * - inMic : Microphone output after processing (L band)
118 | * - inMic_H : Microphone output after processing (H band)
119 | * - micLevelOut : Adjusted microphone level after processing
120 | *
121 | * Return value:
122 | * : 0 - Normal operation.
123 | * : -1 - Error
124 | */
125 | int WebRtcAgc_VirtualMic(void* agcInst,
126 | int16_t* inMic,
127 | int16_t* inMic_H,
128 | int16_t samples,
129 | int32_t micLevelIn,
130 | int32_t* micLevelOut);
131 |
132 | /*
133 | * This function processes a 10/20ms frame and adjusts (normalizes) the gain
134 | * both analog and digitally. The gain adjustments are done only during
135 | * active periods of speech. The input speech length can be either 10ms or
136 | * 20ms and the output is of the same length. The length of the speech
137 | * vectors must be given in samples (80/160 when FS=8000, and 160/320 when
138 | * FS=16000 or FS=32000). The echo parameter can be used to ensure the AGC will
139 | * not adjust upward in the presence of echo.
140 | *
141 | * This function should be called after processing the near-end microphone
142 | * signal, in any case after any echo cancellation.
143 | *
144 | * Input:
145 | * - agcInst : AGC instance
146 | * - inNear : Near-end input speech vector (10 or 20 ms) for
147 | * L band
148 | * - inNear_H : Near-end input speech vector (10 or 20 ms) for
149 | * H band
150 | * - samples : Number of samples in input/output vector
151 | * - inMicLevel : Current microphone volume level
152 | * - echo : Set to 0 if the signal passed to add_mic is
153 | * almost certainly free of echo; otherwise set
154 | * to 1. If you have no information regarding echo
155 | * set to 0.
156 | *
157 | * Output:
158 | * - outMicLevel : Adjusted microphone volume level
159 | * - out : Gain-adjusted near-end speech vector (L band)
160 | * : May be the same vector as the input.
161 | * - out_H : Gain-adjusted near-end speech vector (H band)
162 | * - saturationWarning : A returned value of 1 indicates a saturation event
163 | * has occurred and the volume cannot be further
164 | * reduced. Otherwise will be set to 0.
165 | *
166 | * Return value:
167 | * : 0 - Normal operation.
168 | * : -1 - Error
169 | */
170 | int WebRtcAgc_Process(void* agcInst,
171 | const int16_t* inNear,
172 | const int16_t* inNear_H,
173 | int16_t samples,
174 | int16_t* out,
175 | int16_t* out_H,
176 | int32_t inMicLevel,
177 | int32_t* outMicLevel,
178 | int16_t echo,
179 | uint8_t* saturationWarning);
180 |
181 | /*
182 | * This function sets the config parameters (targetLevelDbfs,
183 | * compressionGaindB and limiterEnable).
184 | *
185 | * Input:
186 | * - agcInst : AGC instance
187 | * - config : config struct
188 | *
189 | * Output:
190 | *
191 | * Return value:
192 | * : 0 - Normal operation.
193 | * : -1 - Error
194 | */
195 | int WebRtcAgc_set_config(void* agcInst, WebRtcAgc_config_t config);
196 |
197 | /*
198 | * This function returns the config parameters (targetLevelDbfs,
199 | * compressionGaindB and limiterEnable).
200 | *
201 | * Input:
202 | * - agcInst : AGC instance
203 | *
204 | * Output:
205 | * - config : config struct
206 | *
207 | * Return value:
208 | * : 0 - Normal operation.
209 | * : -1 - Error
210 | */
211 | int WebRtcAgc_get_config(void* agcInst, WebRtcAgc_config_t* config);
212 |
213 | /*
214 | * This function creates an AGC instance, which will contain the state
215 | * information for one (duplex) channel.
216 | *
 * Return value          : 0 - Ok (the created instance is returned
 *                             through *agcInst)
 *                        -1 - Error
219 | */
220 | int WebRtcAgc_Create(void **agcInst);
221 |
222 | /*
223 | * This function frees the AGC instance created at the beginning.
224 | *
225 | * Input:
226 | * - agcInst : AGC instance.
227 | *
228 | * Return value : 0 - Ok
229 | * -1 - Error
230 | */
231 | int WebRtcAgc_Free(void *agcInst);
232 |
233 | /*
234 | * This function initializes an AGC instance.
235 | *
236 | * Input:
237 | * - agcInst : AGC instance.
238 | * - minLevel : Minimum possible mic level
239 | * - maxLevel : Maximum possible mic level
240 | * - agcMode : 0 - Unchanged
241 | * : 1 - Adaptive Analog Automatic Gain Control -3dBOv
242 | * : 2 - Adaptive Digital Automatic Gain Control -3dBOv
243 | * : 3 - Fixed Digital Gain 0dB
244 | * - fs : Sampling frequency
245 | *
246 | * Return value : 0 - Ok
247 | * -1 - Error
248 | */
249 | int WebRtcAgc_Init(void *agcInst,
250 | int32_t minLevel,
251 | int32_t maxLevel,
252 | int16_t agcMode,
253 | uint32_t fs);
254 |
255 | #if defined(__cplusplus)
256 | }
257 | #endif
258 |
259 | #endif // WEBRTC_MODULES_AUDIO_PROCESSING_AGC_INCLUDE_GAIN_CONTROL_H_
260 |
--------------------------------------------------------------------------------
/app/src/main/cpp/noise_suppression.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
#include <stdlib.h>
#include <string.h>
13 |
14 | #include "signal_processing_library.h"
15 | #include "defines.h"
16 | #include "ns_core.h"
17 | #include "noise_suppression.h"
18 |
19 |
20 | int WebRtcNs_Create(NsHandle** NS_inst) {
21 | *NS_inst = (NsHandle*) malloc(sizeof(NSinst_t));
22 | if (*NS_inst != NULL) {
23 | (*(NSinst_t**)NS_inst)->initFlag = 0;
24 | return 0;
25 | } else {
26 | return -1;
27 | }
28 |
29 | }
30 |
// Releases an instance allocated by WebRtcNs_Create(). Always returns 0;
// passing NULL is a harmless no-op (free(NULL) is defined).
int WebRtcNs_Free(NsHandle* NS_inst) {
  free(NS_inst);
  return 0;
}
35 |
36 |
// Initializes the instance for sampling frequency |fs|; thin wrapper around
// WebRtcNs_InitCore() (see ns_core.h). Returns 0 on success, -1 on error.
int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs) {
  return WebRtcNs_InitCore((NSinst_t*) NS_inst, fs);
}
40 |
// Sets suppression aggressiveness (0: Mild, 1: Medium, 2: Aggressive); thin
// wrapper around WebRtcNs_set_policy_core(). Returns 0 on success, -1 on
// error.
int WebRtcNs_set_policy(NsHandle* NS_inst, int mode) {
  return WebRtcNs_set_policy_core((NSinst_t*) NS_inst, mode);
}
44 |
45 |
// Runs noise suppression on one 10 ms frame: |spframe|/|spframe_H| are the
// L-band and H-band inputs, |outframe|/|outframe_H| the corresponding
// outputs. Thin wrapper around WebRtcNs_ProcessCore(); returns 0 on
// success, -1 on error.
int WebRtcNs_Process(NsHandle* NS_inst, short* spframe, short* spframe_H,
                     short* outframe, short* outframe_H) {
  return WebRtcNs_ProcessCore(
      (NSinst_t*) NS_inst, spframe, spframe_H, outframe, outframe_H);
}
51 |
52 | float WebRtcNs_prior_speech_probability(NsHandle* handle) {
53 | NSinst_t* self = (NSinst_t*) handle;
54 | if (handle == NULL) {
55 | return -1;
56 | }
57 | if (self->initFlag == 0) {
58 | return -1;
59 | }
60 | return self->priorSpeechProb;
61 | }
62 |
--------------------------------------------------------------------------------
/app/src/main/cpp/noise_suppression.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
12 | #define WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
13 |
14 | #include "typedefs.h"
15 |
16 | typedef struct NsHandleT NsHandle;
17 |
18 | #ifdef __cplusplus
19 | extern "C" {
20 | #endif
21 |
22 | /*
23 | * This function creates an instance to the noise suppression structure
24 | *
25 | * Input:
26 | * - NS_inst : Pointer to noise suppression instance that should be
27 | * created
28 | *
29 | * Output:
30 | * - NS_inst : Pointer to created noise suppression instance
31 | *
32 | * Return value : 0 - Ok
33 | * -1 - Error
34 | */
35 | int WebRtcNs_Create(NsHandle** NS_inst);
36 |
37 |
38 | /*
39 | * This function frees the dynamic memory of a specified noise suppression
40 | * instance.
41 | *
42 | * Input:
43 | * - NS_inst : Pointer to NS instance that should be freed
44 | *
45 | * Return value : 0 - Ok
46 | * -1 - Error
47 | */
48 | int WebRtcNs_Free(NsHandle* NS_inst);
49 |
50 |
51 | /*
52 | * This function initializes a NS instance and has to be called before any other
53 | * processing is made.
54 | *
55 | * Input:
56 | * - NS_inst : Instance that should be initialized
57 | * - fs : sampling frequency
58 | *
59 | * Output:
60 | * - NS_inst : Initialized instance
61 | *
62 | * Return value : 0 - Ok
63 | * -1 - Error
64 | */
65 | int WebRtcNs_Init(NsHandle* NS_inst, uint32_t fs);
66 |
67 | /*
68 | * This changes the aggressiveness of the noise suppression method.
69 | *
70 | * Input:
71 | * - NS_inst : Noise suppression instance.
72 | * - mode : 0: Mild, 1: Medium , 2: Aggressive
73 | *
74 | * Output:
75 | * - NS_inst : Updated instance.
76 | *
77 | * Return value : 0 - Ok
78 | * -1 - Error
79 | */
80 | int WebRtcNs_set_policy(NsHandle* NS_inst, int mode);
81 |
82 |
83 | /*
 * This function does Noise Suppression for the inserted speech frame. The
85 | * input and output signals should always be 10ms (80 or 160 samples).
86 | *
87 | * Input
88 | * - NS_inst : Noise suppression instance.
89 | * - spframe : Pointer to speech frame buffer for L band
90 | * - spframe_H : Pointer to speech frame buffer for H band
91 | * - fs : sampling frequency
92 | *
93 | * Output:
94 | * - NS_inst : Updated NS instance
95 | * - outframe : Pointer to output frame for L band
96 | * - outframe_H : Pointer to output frame for H band
97 | *
98 | * Return value : 0 - OK
99 | * -1 - Error
100 | */
101 | int WebRtcNs_Process(NsHandle* NS_inst,
102 | short* spframe,
103 | short* spframe_H,
104 | short* outframe,
105 | short* outframe_H);
106 |
107 | /* Returns the internally used prior speech probability of the current frame.
108 | * There is a frequency bin based one as well, with which this should not be
109 | * confused.
110 | *
111 | * Input
112 | * - handle : Noise suppression instance.
113 | *
114 | * Return value : Prior speech probability in interval [0.0, 1.0].
115 | * -1 - NULL pointer or uninitialized instance.
116 | */
117 | float WebRtcNs_prior_speech_probability(NsHandle* handle);
118 |
119 | #ifdef __cplusplus
120 | }
121 | #endif
122 |
123 | #endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_INCLUDE_NOISE_SUPPRESSION_H_
124 |
--------------------------------------------------------------------------------
/app/src/main/cpp/ns_core.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
12 | #define WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
13 |
14 | #include "defines.h"
15 |
// Tuning parameters for the speech/noise feature-extraction stage (LRT,
// spectral flatness and spectral difference histograms) used by the prior
// speech-probability model in ns_core.c.
typedef struct NSParaExtract_t_ {

  //bin size of histogram
  float binSizeLrt;
  float binSizeSpecFlat;
  float binSizeSpecDiff;
  //range of histogram over which lrt threshold is computed
  float rangeAvgHistLrt;
  //scale parameters: multiply dominant peaks of the histograms by scale factor to obtain
  //thresholds for prior model
  float factor1ModelPars; //for lrt and spectral difference
  float factor2ModelPars; //for spectral_flatness: used when noise is flatter than speech
  //peak limit for spectral flatness (varies between 0 and 1)
  float thresPosSpecFlat;
  //limit on spacing of two highest peaks in histogram: spacing determined by bin size
  float limitPeakSpacingSpecFlat;
  float limitPeakSpacingSpecDiff;
  //limit on relevance of second peak:
  float limitPeakWeightsSpecFlat;
  float limitPeakWeightsSpecDiff;
  //limit on fluctuation of lrt feature
  float thresFluctLrt;
  //limit on the max and min values for the feature thresholds
  float maxLrt;
  float minLrt;
  float maxSpecFlat;
  float minSpecFlat;
  float maxSpecDiff;
  float minSpecDiff;
  //criteria of weight of histogram peak to accept/reject feature
  int thresWeightSpecFlat;
  int thresWeightSpecDiff;

} NSParaExtract_t;
50 |
// Full per-instance state of the noise suppression core. Allocated by
// WebRtcNs_Create() and set up by WebRtcNs_InitCore(); treated as opaque
// NsHandle by the public API in noise_suppression.h.
typedef struct NSinst_t_ {

  // framing/FFT geometry (presumably derived from fs in WebRtcNs_InitCore()
  // — confirm in ns_core.c)
  uint32_t fs;
  int blockLen;
  int blockLen10ms;
  int windShift;
  int outLen;
  int anaLen;
  int magnLen;
  int aggrMode;
  const float* window;
  float dataBuf[ANAL_BLOCKL_MAX];
  float syntBuf[ANAL_BLOCKL_MAX];
  float outBuf[3 * BLOCKL_MAX];

  int initFlag;  // nonzero once WebRtcNs_InitCore() has run
  // parameters for quantile noise estimation
  float density[SIMULT* HALF_ANAL_BLOCKL];
  float lquantile[SIMULT* HALF_ANAL_BLOCKL];
  float quantile[HALF_ANAL_BLOCKL];
  int counter[SIMULT];
  int updates;
  // parameters for Wiener filter
  float smooth[HALF_ANAL_BLOCKL];
  float overdrive;
  float denoiseBound;
  int gainmap;
  // fft work arrays.
  int ip[IP_LENGTH];
  float wfft[W_LENGTH];

  // parameters for new method: some not needed, will reduce/cleanup later
  int32_t blockInd; //frame index counter
  int modelUpdatePars[4]; //parameters for updating or estimating
  // thresholds/weights for prior model
  float priorModelPars[7]; //parameters for prior model
  float noisePrev[HALF_ANAL_BLOCKL]; //noise spectrum from previous frame
  float magnPrev[HALF_ANAL_BLOCKL]; //magnitude spectrum of previous frame
  float logLrtTimeAvg[HALF_ANAL_BLOCKL]; //log lrt factor with time-smoothing
  float priorSpeechProb; //prior speech/noise probability
  float featureData[7]; //data for features
  float magnAvgPause[HALF_ANAL_BLOCKL]; //conservative noise spectrum estimate
  float signalEnergy; //energy of magn
  float sumMagn; //sum of magn
  float whiteNoiseLevel; //initial noise estimate
  float initMagnEst[HALF_ANAL_BLOCKL]; //initial magnitude spectrum estimate
  float pinkNoiseNumerator; //pink noise parameter: numerator
  float pinkNoiseExp; //pink noise parameter: power of freq
  NSParaExtract_t featureExtractionParams; //parameters for feature extraction
  //histograms for parameter estimation
  int histLrt[HIST_PAR_EST];
  int histSpecFlat[HIST_PAR_EST];
  int histSpecDiff[HIST_PAR_EST];
  //quantities for high band estimate
  float speechProbHB[HALF_ANAL_BLOCKL]; //final speech/noise prob: prior + LRT
  float dataBufHB[ANAL_BLOCKL_MAX]; //buffering data for HB

} NSinst_t;
109 |
110 |
111 | #ifdef __cplusplus
112 | extern "C" {
113 | #endif
114 |
115 | /****************************************************************************
116 | * WebRtcNs_InitCore(...)
117 | *
118 | * This function initializes a noise suppression instance
119 | *
120 | * Input:
121 | * - inst : Instance that should be initialized
122 | * - fs : Sampling frequency
123 | *
124 | * Output:
125 | * - inst : Initialized instance
126 | *
127 | * Return value : 0 - Ok
128 | * -1 - Error
129 | */
130 | int WebRtcNs_InitCore(NSinst_t* inst, uint32_t fs);
131 |
132 | /****************************************************************************
133 | * WebRtcNs_set_policy_core(...)
134 | *
135 | * This changes the aggressiveness of the noise suppression method.
136 | *
137 | * Input:
138 | * - inst : Instance that should be initialized
139 | * - mode : 0: Mild (6 dB), 1: Medium (10 dB), 2: Aggressive (15 dB)
140 | *
141 | * Output:
142 | * - NS_inst : Initialized instance
143 | *
144 | * Return value : 0 - Ok
145 | * -1 - Error
146 | */
147 | int WebRtcNs_set_policy_core(NSinst_t* inst, int mode);
148 |
149 | /****************************************************************************
150 | * WebRtcNs_ProcessCore
151 | *
152 | * Do noise suppression.
153 | *
154 | * Input:
155 | * - inst : Instance that should be initialized
156 | * - inFrameLow : Input speech frame for lower band
157 | * - inFrameHigh : Input speech frame for higher band
158 | *
159 | * Output:
160 | * - inst : Updated instance
161 | * - outFrameLow : Output speech frame for lower band
162 | * - outFrameHigh : Output speech frame for higher band
163 | *
164 | * Return value : 0 - OK
165 | * -1 - Error
166 | */
167 |
168 |
169 | int WebRtcNs_ProcessCore(NSinst_t* inst,
170 | short* inFrameLow,
171 | short* inFrameHigh,
172 | short* outFrameLow,
173 | short* outFrameHigh);
174 |
175 |
176 | #ifdef __cplusplus
177 | }
178 | #endif
179 | #endif // WEBRTC_MODULES_AUDIO_PROCESSING_NS_MAIN_SOURCE_NS_CORE_H_
180 |
--------------------------------------------------------------------------------
/app/src/main/cpp/real_fft.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #ifndef WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
12 | #define WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
13 |
14 | #include "typedefs.h"
15 |
// For ComplexFFT(), the maximum fft order is 10;
// for OpenMax FFT in ARM, it is 12;
// WebRTC APM uses orders of only 7 and 8.
enum {kMaxFFTOrder = 10};

// Opaque FFT state; created/freed through the function pointers below.
struct RealFFT;

#ifdef __cplusplus
extern "C" {
#endif

// Function-pointer types for the real-FFT entry points. Matching generic-C
// (*C) and, under WEBRTC_DETECT_ARM_NEON/WEBRTC_ARCH_ARM_NEON, NEON (*Neon)
// implementations are declared further down in this header.
typedef struct RealFFT* (*CreateRealFFT)(int order);
typedef void (*FreeRealFFT)(struct RealFFT* self);
typedef int (*RealForwardFFT)(struct RealFFT* self,
                              const int16_t* real_data_in,
                              int16_t* complex_data_out);
typedef int (*RealInverseFFT)(struct RealFFT* self,
                              const int16_t* complex_data_in,
                              int16_t* real_data_out);

// Dispatch pointers — call through these rather than a specific
// implementation. NOTE(review): where they are assigned is not visible in
// this header; presumably in the corresponding .c file — confirm.
extern CreateRealFFT WebRtcSpl_CreateRealFFT;
extern FreeRealFFT WebRtcSpl_FreeRealFFT;
extern RealForwardFFT WebRtcSpl_RealForwardFFT;
extern RealInverseFFT WebRtcSpl_RealInverseFFT;
40 |
41 | struct RealFFT* WebRtcSpl_CreateRealFFTC(int order);
42 | void WebRtcSpl_FreeRealFFTC(struct RealFFT* self);
43 |
44 | #if (defined WEBRTC_DETECT_ARM_NEON) || (defined WEBRTC_ARCH_ARM_NEON)
45 | struct RealFFT* WebRtcSpl_CreateRealFFTNeon(int order);
46 | void WebRtcSpl_FreeRealFFTNeon(struct RealFFT* self);
47 | #endif
48 |
49 | // Compute an FFT for a real-valued signal of length of 2^order,
// where 1 < order <= kMaxFFTOrder. Transform length is determined by the
51 | // specification structure, which must be initialized prior to calling the FFT
52 | // function with WebRtcSpl_CreateRealFFT().
53 | // The relationship between the input and output sequences can
54 | // be expressed in terms of the DFT, i.e.:
55 | // x[n] = (2^(-scalefactor)/N) . SUM[k=0,...,N-1] X[k].e^(jnk.2.pi/N)
56 | // n=0,1,2,...N-1
57 | // N=2^order.
58 | // The conjugate-symmetric output sequence is represented using a CCS vector,
59 | // which is of length N+2, and is organized as follows:
60 | // Index: 0 1 2 3 4 5 . . . N-2 N-1 N N+1
61 | // Component: R0 0 R1 I1 R2 I2 . . . R[N/2-1] I[N/2-1] R[N/2] 0
62 | // where R[n] and I[n], respectively, denote the real and imaginary components
63 | // for FFT bin 'n'. Bins are numbered from 0 to N/2, where N is the FFT length.
64 | // Bin index 0 corresponds to the DC component, and bin index N/2 corresponds to
65 | // the foldover frequency.
66 | //
67 | // Input Arguments:
68 | // self - pointer to preallocated and initialized FFT specification structure.
69 | // real_data_in - the input signal. For an ARM Neon platform, it must be
70 | // aligned on a 32-byte boundary.
71 | //
72 | // Output Arguments:
73 | // complex_data_out - the output complex signal with (2^order + 2) 16-bit
74 | // elements. For an ARM Neon platform, it must be different
75 | // from real_data_in, and aligned on a 32-byte boundary.
76 | //
77 | // Return Value:
78 | // 0 - FFT calculation is successful.
79 | // -1 - Error with bad arguments (NULL pointers).
80 | int WebRtcSpl_RealForwardFFTC(struct RealFFT* self,
81 | const int16_t* real_data_in,
82 | int16_t* complex_data_out);
83 |
84 | #if (defined WEBRTC_DETECT_ARM_NEON) || (defined WEBRTC_ARCH_ARM_NEON)
85 | int WebRtcSpl_RealForwardFFTNeon(struct RealFFT* self,
86 | const int16_t* real_data_in,
87 | int16_t* complex_data_out);
88 | #endif
89 |
90 | // Compute the inverse FFT for a conjugate-symmetric input sequence of length of
// 2^order, where 1 < order <= kMaxFFTOrder. Transform length is determined by
92 | // the specification structure, which must be initialized prior to calling the
93 | // FFT function with WebRtcSpl_CreateRealFFT().
94 | // For a transform of length M, the input sequence is represented using a packed
95 | // CCS vector of length M+2, which is explained in the comments for
96 | // WebRtcSpl_RealForwardFFTC above.
97 | //
98 | // Input Arguments:
99 | // self - pointer to preallocated and initialized FFT specification structure.
100 | // complex_data_in - the input complex signal with (2^order + 2) 16-bit
101 | // elements. For an ARM Neon platform, it must be aligned on
102 | // a 32-byte boundary.
103 | //
104 | // Output Arguments:
105 | // real_data_out - the output real signal. For an ARM Neon platform, it must
106 | // be different to complex_data_in, and aligned on a 32-byte
107 | // boundary.
108 | //
109 | // Return Value:
110 | // 0 or a positive number - a value that the elements in the |real_data_out|
111 | // should be shifted left with in order to get
112 | // correct physical values.
113 | // -1 - Error with bad arguments (NULL pointers).
114 | int WebRtcSpl_RealInverseFFTC(struct RealFFT* self,
115 | const int16_t* complex_data_in,
116 | int16_t* real_data_out);
117 |
118 | #if (defined WEBRTC_DETECT_ARM_NEON) || (defined WEBRTC_ARCH_ARM_NEON)
119 | int WebRtcSpl_RealInverseFFTNeon(struct RealFFT* self,
120 | const int16_t* complex_data_in,
121 | int16_t* real_data_out);
122 | #endif
123 |
124 | #ifdef __cplusplus
125 | }
126 | #endif
127 |
128 | #endif // WEBRTC_COMMON_AUDIO_SIGNAL_PROCESSING_INCLUDE_REAL_FFT_H_
129 |
--------------------------------------------------------------------------------
/app/src/main/cpp/resample_by_2.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | /*
13 | * This file contains the resampling by two functions.
14 | * The description header can be found in signal_processing_library.h
15 | *
16 | */
17 |
18 | #include "signal_processing_library.h"
19 |
20 | #ifdef WEBRTC_ARCH_ARM_V7
21 |
// Allpass filter coefficients (ARMv7 path).
// Entries consumed via MUL_ACCUM_2 (smmla, which takes the top 32 bits of a
// 64-bit product) are pre-shifted left by 15; entries consumed via
// MUL_ACCUM_1 (smlawb) stay in the low 16 bits.
static const uint32_t kResampleAllpass1[3] = {3284, 24441, 49528 << 15};
static const uint32_t kResampleAllpass2[3] =
    {12199, 37471 << 15, 60255 << 15};
26 |
// Multiply two 32-bit values and accumulate to another input value.
// Return: state + ((diff * tbl_value) >> 16)
//
// SMLAWB multiplies the 32-bit |diff| by the bottom signed halfword of
// |tbl_value| and adds the top 32 bits of the 48-bit product to |state|.
static __inline int32_t MUL_ACCUM_1(int32_t tbl_value,
                                    int32_t diff,
                                    int32_t state) {
  int32_t result;
  __asm __volatile ("smlawb %0, %1, %2, %3": "=r"(result): "r"(diff),
                                             "r"(tbl_value), "r"(state));
  return result;
}
38 |
// Multiply two 32-bit values and accumulate to another input value.
// Return: Return: state + (((diff << 1) * tbl_value) >> 32)
//
// The reason to introduce this function is that, in case we can't use smlawb
// instruction (in MUL_ACCUM_1) due to input value range, we can still use
// smmla to save some cycles.
//
// SMMLA keeps the most significant 32 bits of the 64-bit product; the
// caller pre-scales |diff| by one bit to compensate.
static __inline int32_t MUL_ACCUM_2(int32_t tbl_value,
                                    int32_t diff,
                                    int32_t state) {
  int32_t result;
  __asm __volatile ("smmla %0, %1, %2, %3": "=r"(result): "r"(diff << 1),
                                            "r"(tbl_value), "r"(state));
  return result;
}
54 |
55 | #else
56 |
// Allpass filter coefficients in Q16 (generic C path).
static const uint16_t kResampleAllpass1[3] = {3284, 24441, 49528};
static const uint16_t kResampleAllpass2[3] = {12199, 37471, 60255};

// Multiply a 32-bit value with a 16-bit value and accumulate to another input:
// both MUL_ACCUM variants map to the same generic macro in the C path.
#define MUL_ACCUM_1(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
#define MUL_ACCUM_2(a, b, c) WEBRTC_SPL_SCALEDIFF32(a, b, c)
64 |
65 | #endif // WEBRTC_ARCH_ARM_V7
66 |
67 |
68 | // decimator
69 | #if !defined(MIPS32_LE)
70 | void WebRtcSpl_DownsampleBy2(const int16_t* in, int16_t len,
71 | int16_t* out, int32_t* filtState) {
72 | int32_t tmp1, tmp2, diff, in32, out32;
73 | int16_t i;
74 |
75 | register int32_t state0 = filtState[0];
76 | register int32_t state1 = filtState[1];
77 | register int32_t state2 = filtState[2];
78 | register int32_t state3 = filtState[3];
79 | register int32_t state4 = filtState[4];
80 | register int32_t state5 = filtState[5];
81 | register int32_t state6 = filtState[6];
82 | register int32_t state7 = filtState[7];
83 |
84 | for (i = (len >> 1); i > 0; i--) {
85 | // lower allpass filter
86 | in32 = (int32_t)(*in++) << 10;
87 | diff = in32 - state1;
88 | tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state0);
89 | state0 = in32;
90 | diff = tmp1 - state2;
91 | tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state1);
92 | state1 = tmp1;
93 | diff = tmp2 - state3;
94 | state3 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state2);
95 | state2 = tmp2;
96 |
97 | // upper allpass filter
98 | in32 = (int32_t)(*in++) << 10;
99 | diff = in32 - state5;
100 | tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state4);
101 | state4 = in32;
102 | diff = tmp1 - state6;
103 | tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state5);
104 | state5 = tmp1;
105 | diff = tmp2 - state7;
106 | state7 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state6);
107 | state6 = tmp2;
108 |
109 | // add two allpass outputs, divide by two and round
110 | out32 = (state3 + state7 + 1024) >> 11;
111 |
112 | // limit amplitude to prevent wrap-around, and write to output array
113 | *out++ = WebRtcSpl_SatW32ToW16(out32);
114 | }
115 |
116 | filtState[0] = state0;
117 | filtState[1] = state1;
118 | filtState[2] = state2;
119 | filtState[3] = state3;
120 | filtState[4] = state4;
121 | filtState[5] = state5;
122 | filtState[6] = state6;
123 | filtState[7] = state7;
124 | }
125 | #endif // #if defined(MIPS32_LE)
126 |
127 |
128 | void WebRtcSpl_UpsampleBy2(const int16_t* in, int16_t len,
129 | int16_t* out, int32_t* filtState) {
130 | int32_t tmp1, tmp2, diff, in32, out32;
131 | int16_t i;
132 |
133 | register int32_t state0 = filtState[0];
134 | register int32_t state1 = filtState[1];
135 | register int32_t state2 = filtState[2];
136 | register int32_t state3 = filtState[3];
137 | register int32_t state4 = filtState[4];
138 | register int32_t state5 = filtState[5];
139 | register int32_t state6 = filtState[6];
140 | register int32_t state7 = filtState[7];
141 |
142 | for (i = len; i > 0; i--) {
143 | // lower allpass filter
144 | in32 = (int32_t)(*in++) << 10;
145 | diff = in32 - state1;
146 | tmp1 = MUL_ACCUM_1(kResampleAllpass1[0], diff, state0);
147 | state0 = in32;
148 | diff = tmp1 - state2;
149 | tmp2 = MUL_ACCUM_1(kResampleAllpass1[1], diff, state1);
150 | state1 = tmp1;
151 | diff = tmp2 - state3;
152 | state3 = MUL_ACCUM_2(kResampleAllpass1[2], diff, state2);
153 | state2 = tmp2;
154 |
155 | // round; limit amplitude to prevent wrap-around; write to output array
156 | out32 = (state3 + 512) >> 10;
157 | *out++ = WebRtcSpl_SatW32ToW16(out32);
158 |
159 | // upper allpass filter
160 | diff = in32 - state5;
161 | tmp1 = MUL_ACCUM_1(kResampleAllpass2[0], diff, state4);
162 | state4 = in32;
163 | diff = tmp1 - state6;
164 | tmp2 = MUL_ACCUM_2(kResampleAllpass2[1], diff, state5);
165 | state5 = tmp1;
166 | diff = tmp2 - state7;
167 | state7 = MUL_ACCUM_2(kResampleAllpass2[2], diff, state6);
168 | state6 = tmp2;
169 |
170 | // round; limit amplitude to prevent wrap-around; write to output array
171 | out32 = (state7 + 512) >> 10;
172 | *out++ = WebRtcSpl_SatW32ToW16(out32);
173 | }
174 |
175 | filtState[0] = state0;
176 | filtState[1] = state1;
177 | filtState[2] = state2;
178 | filtState[3] = state3;
179 | filtState[4] = state4;
180 | filtState[5] = state5;
181 | filtState[6] = state6;
182 | filtState[7] = state7;
183 | }
184 |
--------------------------------------------------------------------------------
/app/src/main/cpp/spl_inl.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | // This header file includes the inline functions in
13 | // the fix point signal processing library.
14 |
15 | #ifndef WEBRTC_SPL_SPL_INL_H_
16 | #define WEBRTC_SPL_SPL_INL_H_
17 |
18 | #ifdef WEBRTC_ARCH_ARM_V7
19 | #include "spl_inl_armv7.h"
20 | #else
21 |
22 | #if defined(MIPS32_LE)
23 | #include "spl_inl_mips.h"
24 | #endif
25 |
26 | #if !defined(MIPS_DSP_R1_LE)
// Saturates a 32-bit value to the signed 16-bit range [-32768, 32767].
static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
  if (value32 > 32767) {
    return 32767;
  }
  if (value32 < -32768) {
    return -32768;
  }
  return (int16_t) value32;
}
37 |
// Adds two 16-bit values with saturation to the 16-bit range.
static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
  const int32_t sum = (int32_t) a + (int32_t) b;  // widened: cannot overflow
  return WebRtcSpl_SatW32ToW16(sum);
}
41 |
// Subtracts two 16-bit values with saturation to the 16-bit range.
static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
  const int32_t diff = (int32_t) var1 - (int32_t) var2;  // widened
  return WebRtcSpl_SatW32ToW16(diff);
}
45 | #endif // #if !defined(MIPS_DSP_R1_LE)
46 |
47 | #if !defined(MIPS32_LE)
// Returns the number of bits required to represent n, i.e. the position of
// the highest set bit plus one (0 for n == 0).
static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
  int bits = 0;

  // Binary search: shift the remaining significant part down step by step.
  if (n >> 16) { bits = 16; n >>= 16; }
  if (n >> 8)  { bits += 8; n >>= 8; }
  if (n >> 4)  { bits += 4; n >>= 4; }
  if (n >> 2)  { bits += 2; n >>= 2; }
  if (n >> 1)  { bits += 1; n >>= 1; }

  return bits + (int)(n & 1);
}
64 |
// Returns the number of left shifts needed to normalize a 32-bit signed
// value so that the first bit differing from the sign ends up in bit 30.
// Returns 0 for a == 0 and 31 for a == -1.
static __inline int WebRtcSpl_NormW32(int32_t a) {
  uint32_t v;
  int zeros;

  if (a == 0) {
    return 0;
  }
  // Fold negative values onto their bit-complement so one scan handles both.
  v = (uint32_t)((a < 0) ? ~a : a);
  if (v == 0) {
    return 31;  // a == -1: every bit is redundant sign.
  }
  zeros = 30;
  while (v >>= 1) {
    zeros--;
  }
  return zeros;
}
87 |
// Returns the number of leading zero bits of a (0 for a == 0).
static __inline int WebRtcSpl_NormU32(uint32_t a) {
  int zeros = 0;

  if (a == 0) {
    return 0;
  }
  while (!(a & 0x80000000u)) {
    a <<= 1;
    zeros++;
  }
  return zeros;
}
105 |
// 16-bit counterpart of WebRtcSpl_NormW32: number of left shifts needed to
// bring the first non-sign bit into bit 14.  Returns 0 for a == 0 and 15
// for a == -1.
static __inline int WebRtcSpl_NormW16(int16_t a) {
  uint16_t v;
  int zeros;

  if (a == 0) {
    return 0;
  }
  // Fold negative values onto their bit-complement so one scan handles both.
  v = (uint16_t)((a < 0) ? ~a : a);
  if (v == 0) {
    return 15;  // a == -1: every bit is redundant sign.
  }
  zeros = 14;
  while (v >>= 1) {
    zeros--;
  }
  return zeros;
}
127 |
// Multiplies two 16-bit values and adds a 32-bit accumulator: a * b + c.
static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
  const int32_t product = (int32_t) a * (int32_t) b;
  return product + c;
}
131 | #endif // #if !defined(MIPS32_LE)
132 |
133 | #endif // WEBRTC_ARCH_ARM_V7
134 |
135 | // The following functions have no optimized versions.
136 | // TODO(kma): Consider saturating add/sub instructions in X86 platform.
137 | #if !defined(MIPS_DSP_R1_LE)
// Adds two 32-bit values with saturation to [INT32_MIN, INT32_MAX].
//
// The raw sum is formed in unsigned arithmetic: the original relied on
// signed wrap-around, which is undefined behavior in C, whereas unsigned
// wrap-around is well defined and produces the same bit pattern.
static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
  int32_t l_sum = (int32_t)((uint32_t)l_var1 + (uint32_t)l_var2);

  if (l_var1 < 0) {  // Check for underflow.
    if ((l_var2 < 0) && (l_sum >= 0)) {
      l_sum = (int32_t)0x80000000;
    }
  } else {  // Check for overflow.
    if ((l_var2 > 0) && (l_sum < 0)) {
      l_sum = (int32_t)0x7FFFFFFF;
    }
  }

  return l_sum;
}
156 |
// Subtracts two 32-bit values with saturation to [INT32_MIN, INT32_MAX].
//
// The raw difference is formed in unsigned arithmetic: the original relied
// on signed wrap-around, which is undefined behavior in C, whereas unsigned
// wrap-around is well defined and produces the same bit pattern.
static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
  int32_t l_diff = (int32_t)((uint32_t)l_var1 - (uint32_t)l_var2);

  if (l_var1 < 0) {  // Check for underflow.
    if ((l_var2 > 0) && (l_diff > 0)) {
      l_diff = (int32_t)0x80000000;
    }
  } else {  // Check for overflow.
    if ((l_var2 < 0) && (l_diff < 0)) {
      l_diff = (int32_t)0x7FFFFFFF;
    }
  }

  return l_diff;
}
175 | #endif // #if !defined(MIPS_DSP_R1_LE)
176 |
177 | #endif // WEBRTC_SPL_SPL_INL_H_
178 |
--------------------------------------------------------------------------------
/app/src/main/cpp/spl_inl_armv7.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | /* This header file includes the inline functions for ARM processors in
13 | * the fix point signal processing library.
14 | */
15 |
16 | #ifndef WEBRTC_SPL_SPL_INL_ARMV7_H_
17 | #define WEBRTC_SPL_SPL_INL_ARMV7_H_
18 |
19 | /* TODO(kma): Replace some assembly code with GCC intrinsics
20 | * (e.g. __builtin_clz).
21 | */
22 |
#include <stdint.h>  /* NOTE(review): header name was missing from this line; int16_t/int32_t are used below — confirm against upstream (may have been a project typedefs header). */
24 |
/* This function produces result that is not bit exact with that by the generic
 * C version in some cases, although the former is at least as accurate as the
 * later.
 */
// Computes (a * b) >> 16 with SMULWB: multiplies 32-bit |b| by the bottom
// signed halfword of |a| and keeps the top 32 bits of the 48-bit product.
static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a, int32_t b) {
  int32_t tmp = 0;
  __asm __volatile ("smulwb %0, %1, %2":"=r"(tmp):"r"(b), "r"(a));
  return tmp;
}
34 |
/* This function produces result that is not bit exact with that by the generic
 * C version in some cases, although the former is at least as accurate as the
 * later.
 */
// Packs |a| (high halfword) and |b| (low halfword) into one 32-bit word with
// PKHBT, then returns the rounded top 32 bits of its 64-bit product with |c|
// (SMMULR), i.e. (((a << 16) | (b & 0xFFFF)) * c) >> 32, rounded.
static __inline int32_t WEBRTC_SPL_MUL_32_32_RSFT32(int16_t a,
                                                    int16_t b,
                                                    int32_t c) {
  int32_t tmp = 0;
  __asm __volatile (
    "pkhbt %[tmp], %[b], %[a], lsl #16\n\t"
    "smmulr %[tmp], %[tmp], %[c]\n\t"
    :[tmp]"+r"(tmp)
    :[a]"r"(a),
     [b]"r"(b),
     [c]"r"(c)
  );
  return tmp;
}
53 |
// Returns the rounded top 32 bits of the 64-bit product a * b (SMMULR).
static __inline int32_t WEBRTC_SPL_MUL_32_32_RSFT32BI(int32_t a, int32_t b) {
  int32_t tmp = 0;
  __asm volatile ("smmulr %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
  return tmp;
}
59 |
// Signed 16x16 -> 32 multiply of the bottom halfwords (SMULBB).
static __inline int32_t WEBRTC_SPL_MUL_16_16(int16_t a, int16_t b) {
  int32_t tmp = 0;
  __asm __volatile ("smulbb %0, %1, %2":"=r"(tmp):"r"(a), "r"(b));
  return tmp;
}
65 |
// TODO(kma): add unit test.
// Returns a * b + c via SMLABB (16x16 multiply-accumulate).
static __inline int32_t WebRtc_MulAccumW16(int16_t a, int16_t b, int32_t c) {
  int32_t tmp = 0;
  __asm __volatile ("smlabb %0, %1, %2, %3":"=r"(tmp):"r"(a), "r"(b), "r"(c));
  return tmp;
}
72 |
// Saturating 16-bit add via QADD16; only the bottom halfword of the
// parallel result is returned.
static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
  int32_t s_sum = 0;

  __asm __volatile ("qadd16 %0, %1, %2":"=r"(s_sum):"r"(a), "r"(b));

  return (int16_t) s_sum;
}
80 |
81 | /* TODO(kma): find the cause of unittest errors by the next two functions:
82 | * http://code.google.com/p/webrtc/issues/detail?id=740.
83 | */
84 | #if 0
85 | static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
86 | int32_t l_sum = 0;
87 |
88 | __asm __volatile ("qadd %0, %1, %2":"=r"(l_sum):"r"(l_var1), "r"(l_var2));
89 |
90 | return l_sum;
91 | }
92 |
93 | static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
94 | int32_t l_sub = 0;
95 |
96 | __asm __volatile ("qsub %0, %1, %2":"=r"(l_sub):"r"(l_var1), "r"(l_var2));
97 |
98 | return l_sub;
99 | }
100 | #endif
101 |
// Saturating 16-bit subtract via QSUB16; only the bottom halfword of the
// parallel result is returned.
static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
  int32_t s_sub = 0;

  __asm __volatile ("qsub16 %0, %1, %2":"=r"(s_sub):"r"(var1), "r"(var2));

  return (int16_t)s_sub;
}
109 |
// Bits required to represent n, computed as 32 - CLZ(n).
// ARM CLZ of 0 is defined as 32, so n == 0 yields 0.
static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
  int32_t tmp = 0;

  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(n));

  return (int16_t)(32 - tmp);
}
117 |
// Left shifts needed to normalize a signed 32-bit value (first non-sign bit
// into bit 30): CLZ of the folded magnitude minus 1; 0 for a == 0.
static __inline int WebRtcSpl_NormW32(int32_t a) {
  int32_t tmp = 0;

  if (a == 0) {
    return 0;
  }
  else if (a < 0) {
    a ^= 0xFFFFFFFF;  // Bit-complement so one CLZ handles negatives too.
  }

  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a));

  return tmp - 1;
}
132 |
// Leading-zero count of an unsigned 32-bit value; 0 for a == 0.
static __inline int WebRtcSpl_NormU32(uint32_t a) {
  int tmp = 0;

  if (a == 0) return 0;

  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a));

  return tmp;
}
142 |
// 16-bit counterpart of WebRtcSpl_NormW32: CLZ of the (promoted) folded
// magnitude minus 17, placing the first non-sign bit at halfword bit 14.
static __inline int WebRtcSpl_NormW16(int16_t a) {
  int32_t tmp = 0;

  if (a == 0) {
    return 0;
  }
  else if (a < 0) {
    a ^= 0xFFFFFFFF;  // Bit-complement; truncated back to 16 bits.
  }

  __asm __volatile ("clz %0, %1":"=r"(tmp):"r"(a));

  return tmp - 17;
}
157 |
// TODO(kma): add unit test.
// Saturates a 32-bit value to 16 bits with the SSAT instruction.
static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
  int32_t out = 0;

  __asm __volatile ("ssat %0, #16, %1" : "=r"(out) : "r"(value32));

  return (int16_t)out;
}
166 |
167 | #endif // WEBRTC_SPL_SPL_INL_ARMV7_H_
168 |
--------------------------------------------------------------------------------
/app/src/main/cpp/spl_inl_mips.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | // This header file includes the inline functions in
13 | // the fix point signal processing library.
14 |
15 | #ifndef WEBRTC_SPL_SPL_INL_MIPS_H_
16 | #define WEBRTC_SPL_SPL_INL_MIPS_H_
17 |
#include <stdint.h>  /* NOTE(review): header name was missing from this line; int16_t/int32_t are used below — confirm against upstream (may have been a project typedefs header). */
19 |
// Signed 16x16 -> 32 multiply; both operands are first sign-extended to
// 16 bits (SEH on MIPS32R2, otherwise a shift-left/shift-right pair).
static __inline int32_t WEBRTC_SPL_MUL_16_16(int32_t a,
                                             int32_t b) {
  int32_t value32 = 0;
  int32_t a1 = 0, b1 = 0;

  __asm __volatile(
#if defined(MIPS32_R2_LE)
    "seh %[a1], %[a] \n\t"
    "seh %[b1], %[b] \n\t"
#else
    "sll %[a1], %[a], 16 \n\t"
    "sll %[b1], %[b], 16 \n\t"
    "sra %[a1], %[a1], 16 \n\t"
    "sra %[b1], %[b1], 16 \n\t"
#endif
    "mul %[value32], %[a1], %[b1] \n\t"
    : [value32] "=r" (value32), [a1] "=&r" (a1), [b1] "=&r" (b1)
    : [a] "r" (a), [b] "r" (b)
    : "hi", "lo"
  );
  return value32;
}
42 |
// Computes (a * b) >> 16: the high halfword of |b| contributes a full
// product, the low halfword contributes a rounded, right-shifted partial
// product.
static __inline int32_t WEBRTC_SPL_MUL_16_32_RSFT16(int16_t a,
                                                    int32_t b) {
  int32_t value32 = 0, b1 = 0, b2 = 0;
  int32_t a1 = 0;

  __asm __volatile(
#if defined(MIPS32_R2_LE)
    "seh %[a1], %[a] \n\t"
#else
    "sll %[a1], %[a], 16 \n\t"
    "sra %[a1], %[a1], 16 \n\t"
#endif
    "andi %[b2], %[b], 0xFFFF \n\t"
    "sra %[b1], %[b], 16 \n\t"
    "sra %[b2], %[b2], 1 \n\t"
    "mul %[value32], %[a1], %[b1] \n\t"
    "mul %[b2], %[a1], %[b2] \n\t"
    "addiu %[b2], %[b2], 0x4000 \n\t"
    "sra %[b2], %[b2], 15 \n\t"
    "addu %[value32], %[value32], %[b2] \n\t"
    : [value32] "=&r" (value32), [b1] "=&r" (b1), [b2] "=&r" (b2),
      [a1] "=&r" (a1)
    : [a] "r" (a), [b] "r" (b)
    : "hi", "lo"
  );
  return value32;
}
70 |
// Approximates (a * b) >> 32 from two 16x32 partial products.
// The high-halfword contribution is added only when |a| lies outside
// [0, 32767], i.e. when bits 16..31 of |a| are significant.
static __inline int32_t WEBRTC_SPL_MUL_32_32_RSFT32BI(int32_t a,
                                                      int32_t b) {
  int32_t tmp = 0;

  if ((32767 < a) || (a < 0))
    tmp = WEBRTC_SPL_MUL_16_32_RSFT16(((int16_t)(a >> 16)), b);
  tmp += WEBRTC_SPL_MUL_16_32_RSFT16(((int16_t)((a & 0x0000FFFF) >> 1)),
                                     b) >> 15;

  return tmp;
}
82 |
// Treats (a:b) as one 32-bit word (|a| high halfword, |b| low halfword) and
// computes its 64-bit product with |c| shifted right by 32, via four 16-bit
// partial products with rounding on the low-half terms.
static __inline int32_t WEBRTC_SPL_MUL_32_32_RSFT32(int16_t a,
                                                    int16_t b,
                                                    int32_t c) {
  int32_t tmp1 = 0, tmp2 = 0, tmp3 = 0, tmp4 = 0;

  __asm __volatile(
    "sra %[tmp1], %[c], 16 \n\t"
    "andi %[tmp2], %[c], 0xFFFF \n\t"
#if defined(MIPS32_R2_LE)
    "seh %[a], %[a] \n\t"
    "seh %[b], %[b] \n\t"
#else
    "sll %[a], %[a], 16 \n\t"
    "sra %[a], %[a], 16 \n\t"
    "sll %[b], %[b], 16 \n\t"
    "sra %[b], %[b], 16 \n\t"
#endif
    "sra %[tmp2], %[tmp2], 1 \n\t"
    "mul %[tmp3], %[a], %[tmp2] \n\t"
    "mul %[tmp4], %[b], %[tmp2] \n\t"
    "mul %[tmp2], %[a], %[tmp1] \n\t"
    "mul %[tmp1], %[b], %[tmp1] \n\t"
#if defined(MIPS_DSP_R1_LE)
    "shra_r.w %[tmp3], %[tmp3], 15 \n\t"
    "shra_r.w %[tmp4], %[tmp4], 15 \n\t"
#else
    "addiu %[tmp3], %[tmp3], 0x4000 \n\t"
    "sra %[tmp3], %[tmp3], 15 \n\t"
    "addiu %[tmp4], %[tmp4], 0x4000 \n\t"
    "sra %[tmp4], %[tmp4], 15 \n\t"
#endif
    "addu %[tmp3], %[tmp3], %[tmp2] \n\t"
    "addu %[tmp4], %[tmp4], %[tmp1] \n\t"
    "sra %[tmp4], %[tmp4], 16 \n\t"
    "addu %[tmp1], %[tmp3], %[tmp4] \n\t"
    : [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2),
      [tmp3] "=&r" (tmp3), [tmp4] "=&r" (tmp4),
      [a] "+r" (a), [b] "+r" (b)
    : [c] "r" (c)
    : "hi", "lo"
  );
  return tmp1;
}
126 |
127 | #if defined(MIPS_DSP_R1_LE)
// Saturates a 32-bit value to 16 bits: SHLL_S.W saturates the value shifted
// left by 16, then an arithmetic shift right restores the magnitude.
static __inline int16_t WebRtcSpl_SatW32ToW16(int32_t value32) {
  __asm __volatile(
    "shll_s.w %[value32], %[value32], 16 \n\t"
    "sra %[value32], %[value32], 16 \n\t"
    : [value32] "+r" (value32)
    :
  );
  int16_t out16 = (int16_t)value32;
  return out16;
}
138 |
// Saturating 16-bit add via ADDQ_S.PH; only the bottom halfword of the
// parallel result is returned.
static __inline int16_t WebRtcSpl_AddSatW16(int16_t a, int16_t b) {
  int32_t value32 = 0;

  __asm __volatile(
    "addq_s.ph %[value32], %[a], %[b] \n\t"
    : [value32] "=r" (value32)
    : [a] "r" (a), [b] "r" (b)
  );
  return (int16_t)value32;
}
149 |
// Saturating 32-bit add via ADDQ_S.W.
static __inline int32_t WebRtcSpl_AddSatW32(int32_t l_var1, int32_t l_var2) {
  int32_t l_sum;

  __asm __volatile(
    "addq_s.w %[l_sum], %[l_var1], %[l_var2] \n\t"
    : [l_sum] "=r" (l_sum)
    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2)
  );

  return l_sum;
}
161 |
// Saturating 16-bit subtract via SUBQ_S.PH; only the bottom halfword of the
// parallel result is returned.
static __inline int16_t WebRtcSpl_SubSatW16(int16_t var1, int16_t var2) {
  int32_t value32;

  __asm __volatile(
    "subq_s.ph %[value32], %[var1], %[var2] \n\t"
    : [value32] "=r" (value32)
    : [var1] "r" (var1), [var2] "r" (var2)
  );

  return (int16_t)value32;
}
173 |
// Saturating 32-bit subtract via SUBQ_S.W.
static __inline int32_t WebRtcSpl_SubSatW32(int32_t l_var1, int32_t l_var2) {
  int32_t l_diff;

  __asm __volatile(
    "subq_s.w %[l_diff], %[l_var1], %[l_var2] \n\t"
    : [l_diff] "=r" (l_diff)
    : [l_var1] "r" (l_var1), [l_var2] "r" (l_var2)
  );

  return l_diff;
}
185 | #endif
186 |
// Bits required to represent n, computed as 32 - CLZ(n).
// MIPS CLZ of 0 is 32, so n == 0 yields 0.
static __inline int16_t WebRtcSpl_GetSizeInBits(uint32_t n) {
  int bits = 0;
  int i32 = 32;

  __asm __volatile(
    "clz %[bits], %[n] \n\t"
    "subu %[bits], %[i32], %[bits] \n\t"
    : [bits] "=&r" (bits)
    : [n] "r" (n), [i32] "r" (i32)
  );

  return bits;
}
200 |
// Left shifts needed to normalize a signed 32-bit value: for non-zero |a|,
// XORs |a| with its sign mask (a >> 31) to fold negatives, then CLZ - 1;
// returns 0 for a == 0.
static __inline int WebRtcSpl_NormW32(int32_t a) {
  int zeros = 0;

  __asm __volatile(
    ".set push \n\t"
    ".set noreorder \n\t"
    "bnez %[a], 1f \n\t"
    " sra %[zeros], %[a], 31 \n\t"
    "b 2f \n\t"
    " move %[zeros], $zero \n\t"
    "1: \n\t"
    "xor %[zeros], %[a], %[zeros] \n\t"
    "clz %[zeros], %[zeros] \n\t"
    "addiu %[zeros], %[zeros], -1 \n\t"
    "2: \n\t"
    ".set pop \n\t"
    : [zeros]"=&r"(zeros)
    : [a] "r" (a)
  );

  return zeros;
}
223 |
// Leading-zero count of an unsigned 32-bit value; the & 0x1f maps the
// CLZ(0) == 32 case to 0.
static __inline int WebRtcSpl_NormU32(uint32_t a) {
  int zeros = 0;

  __asm __volatile(
    "clz %[zeros], %[a] \n\t"
    : [zeros] "=r" (zeros)
    : [a] "r" (a)
  );

  return (zeros & 0x1f);
}
235 |
// 16-bit counterpart of WebRtcSpl_NormW32: runs the same fold-and-CLZ
// sequence on the value pre-shifted into the top halfword (a << 16).
static __inline int WebRtcSpl_NormW16(int16_t a) {
  int zeros = 0;
  int a0 = a << 16;

  __asm __volatile(
    ".set push \n\t"
    ".set noreorder \n\t"
    "bnez %[a0], 1f \n\t"
    " sra %[zeros], %[a0], 31 \n\t"
    "b 2f \n\t"
    " move %[zeros], $zero \n\t"
    "1: \n\t"
    "xor %[zeros], %[a0], %[zeros] \n\t"
    "clz %[zeros], %[zeros] \n\t"
    "addiu %[zeros], %[zeros], -1 \n\t"
    "2: \n\t"
    ".set pop \n\t"
    : [zeros]"=&r"(zeros)
    : [a0] "r" (a0)
  );

  return zeros;
}
259 |
// Returns a * b + c; both multiplicands are first sign-extended to 16 bits
// (SEH on MIPS32R2, otherwise a shift pair).
static __inline int32_t WebRtc_MulAccumW16(int16_t a,
                                           int16_t b,
                                           int32_t c) {
  int32_t res = 0, c1 = 0;
  __asm __volatile(
#if defined(MIPS32_R2_LE)
    "seh %[a], %[a] \n\t"
    "seh %[b], %[b] \n\t"
#else
    "sll %[a], %[a], 16 \n\t"
    "sll %[b], %[b], 16 \n\t"
    "sra %[a], %[a], 16 \n\t"
    "sra %[b], %[b], 16 \n\t"
#endif
    "mul %[res], %[a], %[b] \n\t"
    "addu %[c1], %[c], %[res] \n\t"
    : [c1] "=r" (c1), [res] "=&r" (res)
    : [a] "r" (a), [b] "r" (b), [c] "r" (c)
    : "hi", "lo"
  );
  return (c1);
}
282 |
283 | #endif // WEBRTC_SPL_SPL_INL_MIPS_H_
284 |
--------------------------------------------------------------------------------
/app/src/main/cpp/spl_sqrt.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 |
12 | /*
13 | * This file contains the function WebRtcSpl_Sqrt().
14 | * The description header can be found in signal_processing_library.h
15 | *
16 | */
17 |
18 | #include "signal_processing_library.h"
19 |
int32_t WebRtcSpl_SqrtLocal(int32_t in);

// Core series evaluation for WebRtcSpl_Sqrt(): evaluates the six-term
// Taylor expansion of sqrt() on the normalized input and returns the value
// of t (see the block comment below) plus a rounding bit.
int32_t WebRtcSpl_SqrtLocal(int32_t in)
{

    int16_t x_half, t16;
    int32_t A, B, x2;

    /* The following block performs:
     y=in/2
     x=y-2^30
     x_half=x/2^31
     t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4)
         + 0.875*((x_half)^5)
     */

    B = in;

    B = WEBRTC_SPL_RSHIFT_W32(B, 1); // B = in/2
    B = B - ((int32_t)0x40000000); // B = in/2 - 1/2
    x_half = (int16_t)WEBRTC_SPL_RSHIFT_W32(B, 16);// x_half = x/2 = (in-1)/2
    B = B + ((int32_t)0x40000000); // B = 1 + x/2
    B = B + ((int32_t)0x40000000); // Add 0.5 twice (since 1.0 does not exist in Q31)

    x2 = ((int32_t)x_half) * ((int32_t)x_half) * 2; // A = (x/2)^2
    A = -x2; // A = -(x/2)^2
    B = B + (A >> 1); // B = 1 + x/2 - 0.5*(x/2)^2

    A = WEBRTC_SPL_RSHIFT_W32(A, 16);
    A = A * A * 2; // A = (x/2)^4
    t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16);
    B = B + WEBRTC_SPL_MUL_16_16(-20480, t16) * 2; // B = B - 0.625*A
    // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4

    t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16);
    A = WEBRTC_SPL_MUL_16_16(x_half, t16) * 2; // A = (x/2)^5
    t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16);
    B = B + WEBRTC_SPL_MUL_16_16(28672, t16) * 2; // B = B + 0.875*A
    // After this, B = 1 + x/2 - 0.5*(x/2)^2 - 0.625*(x/2)^4 + 0.875*(x/2)^5

    t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(x2, 16);
    A = WEBRTC_SPL_MUL_16_16(x_half, t16) * 2; // A = x/2^3

    B = B + (A >> 1); // B = B + 0.5*A
    // After this, B = 1 + x/2 - 0.5*(x/2)^2 + 0.5*(x/2)^3 - 0.625*(x/2)^4 + 0.875*(x/2)^5

    B = B + ((int32_t)32768); // Round off bit

    return B;
}
70 |
// Returns the integer square root of |value| (sqrt(0) == 0).
// NOTE(review): negative inputs are not explicitly rejected here; the code
// normalizes and takes an absolute value internally — confirm that callers
// only pass non-negative values.
int32_t WebRtcSpl_Sqrt(int32_t value)
{
    /*
     Algorithm:

     Six term Taylor Series is used here to compute the square root of a number
     y^0.5 = (1+x)^0.5 where x = y-1
     = 1+(x/2)-0.5*((x/2)^2+0.5*((x/2)^3-0.625*((x/2)^4+0.875*((x/2)^5)
     0.5 <= x < 1

     Example of how the algorithm works, with ut=sqrt(in), and
     with in=73632 and ut=271 (even shift value case):

     in=73632
     y= in/131072
     x=y-1
     t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5)
     ut=t*(1/sqrt(2))*512

     or:

     in=73632
     in2=73632*2^14
     y= in2/2^31
     x=y-1
     t = 1 + (x/2) - 0.5*((x/2)^2) + 0.5*((x/2)^3) - 0.625*((x/2)^4) + 0.875*((x/2)^5)
     ut=t*(1/sqrt(2))
     ut2=ut*2^9

     which gives:

     in  = 73632
     in2 = 1206386688
     y   = 0.56176757812500
     x   = -0.43823242187500
     t   = 0.74973506527313
     ut  = 0.53014274874797
     ut2 = 2.714330873589594e+002

     or:

     in=73632
     in2=73632*2^14
     y=in2/2
     x=y-2^30
     x_half=x/2^31
     t = 1 + (x_half) - 0.5*((x_half)^2) + 0.5*((x_half)^3) - 0.625*((x_half)^4)
         + 0.875*((x_half)^5)
     ut=t*(1/sqrt(2))
     ut2=ut*2^9

     which gives:

     in  = 73632
     in2 = 1206386688
     y   = 603193344
     x   = -470548480
     x_half =  -0.21911621093750
     t   = 0.74973506527313
     ut  = 0.53014274874797
     ut2 = 2.714330873589594e+002

     */

    int16_t x_norm, nshift, t16, sh;
    int32_t A;

    int16_t k_sqrt_2 = 23170; // 1/sqrt2 (==5a82)

    A = value;

    if (A == 0)
        return (int32_t)0; // sqrt(0) = 0

    sh = WebRtcSpl_NormW32(A); // # shifts to normalize A
    A = WEBRTC_SPL_LSHIFT_W32(A, sh); // Normalize A
    if (A < (WEBRTC_SPL_WORD32_MAX - 32767))
    {
        A = A + ((int32_t)32768); // Round off bit
    } else
    {
        A = WEBRTC_SPL_WORD32_MAX;
    }

    x_norm = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // x_norm = AH

    nshift = WEBRTC_SPL_RSHIFT_W16(sh, 1); // nshift = sh>>1
    nshift = -nshift; // Negate the power for later de-normalization

    A = (int32_t)WEBRTC_SPL_LSHIFT_W32((int32_t)x_norm, 16);
    A = WEBRTC_SPL_ABS_W32(A); // A = abs(x_norm<<16)
    A = WebRtcSpl_SqrtLocal(A); // A = sqrt(A)

    // (-2 * nshift) == sh only when sh is even; odd shifts need an extra
    // 1/sqrt(2) factor folded into the result.
    if ((-2 * nshift) == sh)
    { // Even shift value case

        t16 = (int16_t)WEBRTC_SPL_RSHIFT_W32(A, 16); // t16 = AH

        A = WEBRTC_SPL_MUL_16_16(k_sqrt_2, t16) * 2; // A = 1/sqrt(2)*t16
        A = A + ((int32_t)32768); // Round off
        A = A & ((int32_t)0x7fff0000); // Round off

        A = WEBRTC_SPL_RSHIFT_W32(A, 15); // A = A>>16

    } else
    {
        A = WEBRTC_SPL_RSHIFT_W32(A, 16); // A = A>>16
    }

    A = A & ((int32_t)0x0000ffff);
    A = (int32_t)WEBRTC_SPL_SHIFT_W32(A, nshift); // De-normalize the result

    return A;
}
185 |
--------------------------------------------------------------------------------
/app/src/main/cpp/splitting_filter.c:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | /*
12 | * This file contains the splitting filter functions.
13 | *
14 | */
15 |
16 | #include "signal_processing_library.h"
17 |
// Number of samples in a low/high-band frame.
enum
{
    kBandFrameLength = 160
};

// QMF filter coefficients in Q16, consumed three at a time by the all-pass
// cascades in WebRtcSpl_AllPassQMF() below.
static const uint16_t WebRtcSpl_kAllPassFilter1[3] = {6418, 36982, 57261};
static const uint16_t WebRtcSpl_kAllPassFilter2[3] = {21333, 49062, 63010};
27 |
28 | ///////////////////////////////////////////////////////////////////////////////////////////////
29 | // WebRtcSpl_AllPassQMF(...)
30 | //
31 | // Allpass filter used by the analysis and synthesis parts of the QMF filter.
32 | //
33 | // Input:
34 | // - in_data : Input data sequence (Q10)
35 | // - data_length : Length of data sequence (>2)
36 | // - filter_coefficients : Filter coefficients (length 3, Q16)
37 | //
38 | // Input & Output:
39 | // - filter_state : Filter state (length 6, Q10).
40 | //
41 | // Output:
42 | // - out_data : Output data sequence (Q10), length equal to
43 | // |data_length|
44 | //
45 |
46 | void WebRtcSpl_AllPassQMF(int32_t* in_data, int16_t data_length,
47 |                           int32_t* out_data, const uint16_t* filter_coefficients,
48 |                           int32_t* filter_state)
49 | {
50 |     // The procedure is to filter the input with three first order all pass filters
51 |     // (cascade operations).
52 |     //
53 |     //         a_3 + q^-1    a_2 + q^-1    a_1 + q^-1
54 |     // y[n] =  -----------   -----------   -----------   x[n]
55 |     //         1 + a_3q^-1   1 + a_2q^-1   1 + a_1q^-1
56 |     //
57 |     // The input vector |filter_coefficients| includes these three filter coefficients.
58 |     // The filter state contains the in_data state, in_data[-1], followed by
59 |     // the out_data state, out_data[-1]. This is repeated for each cascade.
60 |     // The first cascade filter will filter the |in_data| and store the output in
61 |     // |out_data|. The second will the take the |out_data| as input and make an
62 |     // intermediate storage in |in_data|, to save memory. The third, and final, cascade
63 |     // filter operation takes the |in_data| (which is the output from the previous cascade
64 |     // filter) and store the output in |out_data|.
65 |     // Note that the input vector values are changed during the process.
66 |     int16_t k;
67 |     int32_t diff;
68 |     // First all-pass cascade; filter from in_data to out_data.
69 |
70 |     // Let y_i[n] indicate the output of cascade filter i (with filter coefficient a_i) at
71 |     // vector position n. Then the final output will be y[n] = y_3[n]
72 |
73 |     // First loop, use the states stored in memory.
74 |     // "diff" should be safe from wrap around since max values are 2^25
75 |     diff = WEBRTC_SPL_SUB_SAT_W32(in_data[0], filter_state[1]); // = (x[0] - y_1[-1])
76 |     // y_1[0] =  x[-1] + a_1 * (x[0] - y_1[-1])
77 |     out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, filter_state[0]);
78 |
79 |     // For the remaining loops, use previous values.
80 |     for (k = 1; k < data_length; k++)
81 |     {
82 |         diff = WEBRTC_SPL_SUB_SAT_W32(in_data[k], out_data[k - 1]); // = (x[n] - y_1[n-1])
83 |         // y_1[n] =  x[n-1] + a_1 * (x[n] - y_1[n-1])
84 |         out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[0], diff, in_data[k - 1]);
85 |     }
86 |
87 |     // Update states.
88 |     filter_state[0] = in_data[data_length - 1]; // x[N-1], becomes x[-1] next time
89 |     filter_state[1] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time
90 |
91 |     // Second all-pass cascade; filter from out_data to in_data.
92 |     diff = WEBRTC_SPL_SUB_SAT_W32(out_data[0], filter_state[3]); // = (y_1[0] - y_2[-1])
93 |     // y_2[0] =  y_1[-1] + a_2 * (y_1[0] - y_2[-1])
94 |     in_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, filter_state[2]);
95 |     for (k = 1; k < data_length; k++)
96 |     {
97 |         diff = WEBRTC_SPL_SUB_SAT_W32(out_data[k], in_data[k - 1]); // =(y_1[n] - y_2[n-1])
98 |         // y_2[n] =  y_1[n-1] + a_2 * (y_1[n] - y_2[n-1])
99 |         in_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[1], diff, out_data[k-1]);
100 |     }
101 |
102 |     filter_state[2] = out_data[data_length - 1]; // y_1[N-1], becomes y_1[-1] next time
103 |     filter_state[3] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time
104 |
105 |     // Third all-pass cascade; filter from in_data to out_data.
106 |     diff = WEBRTC_SPL_SUB_SAT_W32(in_data[0], filter_state[5]); // = (y_2[0] - y[-1])
107 |     // y[0] =  y_2[-1] + a_3 * (y_2[0] - y[-1])
108 |     out_data[0] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, filter_state[4]);
109 |     for (k = 1; k < data_length; k++)
110 |     {
111 |         diff = WEBRTC_SPL_SUB_SAT_W32(in_data[k], out_data[k - 1]); // = (y_2[n] - y[n-1])
112 |         // y[n] =  y_2[n-1] + a_3 * (y_2[n] - y[n-1])
113 |         out_data[k] = WEBRTC_SPL_SCALEDIFF32(filter_coefficients[2], diff, in_data[k-1]);
114 |     }
115 |     filter_state[4] = in_data[data_length - 1]; // y_2[N-1], becomes y_2[-1] next time
116 |     filter_state[5] = out_data[data_length - 1]; // y[N-1], becomes y[-1] next time
117 | }
118 |
119 | void WebRtcSpl_AnalysisQMF(const int16_t* in_data, int16_t* low_band,
120 |                            int16_t* high_band, int32_t* filter_state1,
121 |                            int32_t* filter_state2)
122 | {
123 |     int16_t i;
124 |     int16_t k;
125 |     int32_t tmp;
126 |     int32_t half_in1[kBandFrameLength];  // odd input samples, Q10
127 |     int32_t half_in2[kBandFrameLength];  // even input samples, Q10
128 |     int32_t filter1[kBandFrameLength];
129 |     int32_t filter2[kBandFrameLength];
130 |
131 |     // Split even and odd samples. Also shift them to Q10.
132 |     for (i = 0, k = 0; i < kBandFrameLength; i++, k += 2)
133 |     {
134 |         half_in2[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k], 10);  // even sample
135 |         half_in1[i] = WEBRTC_SPL_LSHIFT_W32((int32_t)in_data[k + 1], 10);  // odd sample
136 |     }
137 |
138 |     // All pass filter even and odd samples, independently.
139 |     WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter1,
140 |                          filter_state1);
141 |     WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter2,
142 |                          filter_state2);
143 |
144 |     // Take the sum and difference of filtered version of odd and even
145 |     // branches to get upper & lower band.
146 |     for (i = 0; i < kBandFrameLength; i++)
147 |     {
148 |         tmp = filter1[i] + filter2[i] + 1024;  // +1024 rounds the >>11 below
149 |         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);  // (sum)/2, Q10 back to Q0
150 |         low_band[i] = WebRtcSpl_SatW32ToW16(tmp);
151 |
152 |         tmp = filter1[i] - filter2[i] + 1024;  // +1024 rounds the >>11 below
153 |         tmp = WEBRTC_SPL_RSHIFT_W32(tmp, 11);  // (diff)/2, Q10 back to Q0
154 |         high_band[i] = WebRtcSpl_SatW32ToW16(tmp);
155 |     }
156 | }
157 |
158 | void WebRtcSpl_SynthesisQMF(const int16_t* low_band, const int16_t* high_band,
159 |                             int16_t* out_data, int32_t* filter_state1,
160 |                             int32_t* filter_state2)
161 | {
162 |     int32_t tmp;
163 |     int32_t half_in1[kBandFrameLength];  // sum channel (low + high), Q10
164 |     int32_t half_in2[kBandFrameLength];  // difference channel (low - high), Q10
165 |     int32_t filter1[kBandFrameLength];
166 |     int32_t filter2[kBandFrameLength];
167 |     int16_t i;
168 |     int16_t k;
169 |
170 |     // Obtain the sum and difference channels out of upper and lower-band channels.
171 |     // Also shift to Q10 domain.
172 |     for (i = 0; i < kBandFrameLength; i++)
173 |     {
174 |         tmp = (int32_t)low_band[i] + (int32_t)high_band[i];
175 |         half_in1[i] = WEBRTC_SPL_LSHIFT_W32(tmp, 10);
176 |         tmp = (int32_t)low_band[i] - (int32_t)high_band[i];
177 |         half_in2[i] = WEBRTC_SPL_LSHIFT_W32(tmp, 10);
178 |     }
179 |
180 |     // all-pass filter the sum and difference channels
181 |     WebRtcSpl_AllPassQMF(half_in1, kBandFrameLength, filter1, WebRtcSpl_kAllPassFilter2,
182 |                          filter_state1);
183 |     WebRtcSpl_AllPassQMF(half_in2, kBandFrameLength, filter2, WebRtcSpl_kAllPassFilter1,
184 |                          filter_state2);
185 |
186 |     // The filtered signals are even and odd samples of the output. Combine
187 |     // them. The signals are Q10 should shift them back to Q0 and take care of
188 |     // saturation.
189 |     for (i = 0, k = 0; i < kBandFrameLength; i++)
190 |     {
191 |         tmp = WEBRTC_SPL_RSHIFT_W32(filter2[i] + 512, 10);  // +512 rounds the Q10 -> Q0 shift; even output sample
192 |         out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
193 |
194 |         tmp = WEBRTC_SPL_RSHIFT_W32(filter1[i] + 512, 10);  // odd output sample
195 |         out_data[k++] = WebRtcSpl_SatW32ToW16(tmp);
196 |     }
197 |
198 | }
199 |
--------------------------------------------------------------------------------
/app/src/main/cpp/typedefs.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | // This file contains platform-specific typedefs and defines.
12 | // Much of it is derived from Chromium's build/build_config.h.
13 |
14 | #ifndef WEBRTC_TYPEDEFS_H_
15 | #define WEBRTC_TYPEDEFS_H_
16 |
17 | // For access to standard POSIXish features, use WEBRTC_POSIX instead of a
18 | // more specific macro.
19 | #if defined(WEBRTC_MAC) || defined(WEBRTC_LINUX) || \
20 | defined(WEBRTC_ANDROID)
21 | #define WEBRTC_POSIX
22 | #endif
23 |
24 | // Processor architecture detection. For more info on what's defined, see:
25 | // http://msdn.microsoft.com/en-us/library/b0084kay.aspx
26 | // http://www.agner.org/optimize/calling_conventions.pdf
27 | // or with gcc, run: "echo | gcc -E -dM -"
28 | #if defined(_M_X64) || defined(__x86_64__)
29 | #define WEBRTC_ARCH_X86_FAMILY
30 | #define WEBRTC_ARCH_X86_64
31 | #define WEBRTC_ARCH_64_BITS
32 | #define WEBRTC_ARCH_LITTLE_ENDIAN
33 | #elif defined(_M_IX86) || defined(__i386__)
34 | #define WEBRTC_ARCH_X86_FAMILY
35 | #define WEBRTC_ARCH_X86
36 | #define WEBRTC_ARCH_32_BITS
37 | #define WEBRTC_ARCH_LITTLE_ENDIAN
38 | #elif defined(__ARMEL__)
39 | // TODO(ajm): We'd prefer to control platform defines here, but this is
40 | // currently provided by the Android makefiles. Commented to avoid duplicate
41 | // definition warnings.
42 | //#define WEBRTC_ARCH_ARM
43 | // TODO(ajm): Chromium uses the following two defines. Should we switch?
44 | //#define WEBRTC_ARCH_ARM_FAMILY
45 | //#define WEBRTC_ARCH_ARMEL
46 | #define WEBRTC_ARCH_32_BITS
47 | #define WEBRTC_ARCH_LITTLE_ENDIAN
48 | #elif defined(__aarch64__)
49 | #define WEBRTC_ARCH_64_BITS
50 | #define WEBRTC_ARCH_LITTLE_ENDIAN
51 | #define WEBRTC_LITTLE_ENDIAN
52 | #elif defined(__MIPSEL__)
53 | #define WEBRTC_ARCH_32_BITS
54 | #define WEBRTC_ARCH_LITTLE_ENDIAN
55 | #else
56 | #error Please add support for your architecture in typedefs.h
57 | #endif
58 |
59 | #if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN))
60 | #error Define either WEBRTC_ARCH_LITTLE_ENDIAN or WEBRTC_ARCH_BIG_ENDIAN
61 | #endif
62 |
63 | #if defined(__SSE2__) || defined(_MSC_VER)
64 | #define WEBRTC_USE_SSE2
65 | #endif
66 |
67 | #if !defined(_MSC_VER)
68 | #include <stdint.h>  // header name restored: the bare "#include" was invalid; the #else branch below confirms stdint.h is intended
69 | #else
70 | // Define C99 equivalent types, since MSVC doesn't provide stdint.h.
71 | typedef signed char int8_t;
72 | typedef signed short int16_t;
73 | typedef signed int int32_t;
74 | typedef __int64 int64_t;
75 | typedef unsigned char uint8_t;
76 | typedef unsigned short uint16_t;
77 | typedef unsigned int uint32_t;
78 | typedef unsigned __int64 uint64_t;
79 | #endif
80 |
81 | // Borrowed from Chromium's base/compiler_specific.h.
82 | // Annotate a virtual method indicating it must be overriding a virtual
83 | // method in the parent class.
84 | // Use like:
85 | // virtual void foo() OVERRIDE;
86 | #if defined(_MSC_VER)
87 | #define OVERRIDE override
88 | #elif defined(__clang__)
89 | // Clang defaults to C++03 and warns about using override. Squelch that.
90 | // Intentionally no push/pop here so all users of OVERRIDE ignore the warning
91 | // too. This is like passing -Wno-c++11-extensions, except that GCC won't die
92 | // (because it won't see this pragma).
93 | #pragma clang diagnostic ignored "-Wc++11-extensions"
94 | #define OVERRIDE override
95 | #elif defined(__GNUC__) && __cplusplus >= 201103 && \
96 | (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40700
97 | // GCC 4.7 supports explicit virtual overrides when C++11 support is enabled.
98 | #define OVERRIDE override
99 | #else
100 | #define OVERRIDE
101 | #endif
102 |
103 | // Annotate a function indicating the caller must examine the return value.
104 | // Use like:
105 | // int foo() WARN_UNUSED_RESULT;
106 | // TODO(ajm): Hack to avoid multiple definitions until the base/ of webrtc and
107 | // libjingle are merged.
108 | #if !defined(WARN_UNUSED_RESULT)
109 | #if defined(__GNUC__)
110 | #define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
111 | #else
112 | #define WARN_UNUSED_RESULT
113 | #endif
114 | #endif // WARN_UNUSED_RESULT
115 |
116 | #endif // WEBRTC_TYPEDEFS_H_
117 |
--------------------------------------------------------------------------------
/app/src/main/cpp/web_rtc.cpp:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include "_android_log_print.h"
4 | #include "noise_suppression.h"
5 | #include "analog_agc.h"
6 |
7 |
8 | #ifdef __cplusplus
9 | extern "C" {
10 | #endif
11 |
12 | //音量增益
13 | void *agcHandle = NULL;
14 |
15 |
16 | //JNIEXPORT void JNICALL
17 | //Java_com_webrtc_jni_WebRtcUtils_webRtcAgcProcess(JNIEnv *env, jclass type, jshortArray srcData_,
18 | // jshortArray desData_, jint srcLen) {
19 | //
20 | // jshort *srcData = env->GetShortArrayElements(srcData_, NULL);
21 | // jshort *desData = env->GetShortArrayElements(desData_, NULL);
22 | //
23 | // jsize src_len = env->GetArrayLength(srcData_);
24 | // int frameSize = 160;
25 | //
26 | // int micLevelIn = 0;
27 | // int micLevelOut = 0;
28 | //// LOGD("src_len=== %d", src_len);
29 | // int16_t echo = 1; //增益放大是否考虑回声影响
30 | // uint8_t saturationWarning;
31 | //
32 | // int iFrame;
33 | // int nFrame = src_len / frameSize; //帧数
34 | // int leftLen = src_len % frameSize; //最后一帧的大小
35 | // int onceLen = frameSize;
36 | // nFrame = (leftLen > 0) ? nFrame + 1 : nFrame;
37 | //
38 | // short *agcIn = (short *) malloc(frameSize * sizeof(short));
39 | // short *agcOut = (short *) malloc(frameSize * sizeof(short));
40 | //
41 | // for (iFrame = 0; iFrame < nFrame; iFrame++) {
42 | //
43 | // if (iFrame == nFrame - 1 && leftLen != 0) {
44 | // onceLen = leftLen;
45 | // }
46 | //// LOGE("WebRtcAgc_Process onceLen ==%d", onceLen);
47 | // memcpy(agcIn, srcData + iFrame * frameSize, onceLen * sizeof(short));
48 | //
49 | // int state = WebRtcAgc_Process(agcHandle, agcIn, NULL, 160, agcOut, NULL,
50 | // micLevelIn, &micLevelOut, echo, &saturationWarning);
51 | // if (state != 0) {
52 | // LOGE("WebRtcAgc_Process error");
53 | // break;
54 | // }
55 | // if (saturationWarning != 0) {
56 | // LOGE("[AgcProc]: saturationWarning occured");
57 | // }
58 | // memcpy(desData + iFrame * frameSize, agcOut + iFrame * frameSize, onceLen * sizeof(short));
59 | // micLevelIn = micLevelOut;
60 | // }
61 | //
62 | // free(agcIn);
63 | // free(agcOut);
64 | // env->ReleaseShortArrayElements(srcData_, srcData, 0);
65 | // env->ReleaseShortArrayElements(desData_, desData, 0);
66 | //}
67 |
68 | JNIEXPORT void JNICALL
69 | Java_com_webrtc_jni_WebRtcUtils_webRtcAgcProcess(JNIEnv *env, jclass type, jshortArray srcData_,
70 |                                                  jshortArray desData_, jint srcLen) {
71 |     jshort *srcData = env->GetShortArrayElements(srcData_, NULL);
72 |     jshort *desData = env->GetShortArrayElements(desData_, NULL);
73 |
74 |     jsize src_len = env->GetArrayLength(srcData_);  // length taken from the array itself; the srcLen parameter is unused
75 |
76 |     int micLevelIn = 0;
77 |     int micLevelOut = 0;
78 | //    LOGD("src_len=== %d", src_len);
79 |     int16_t echo = 1; // whether gain amplification should account for echo (flag forwarded to WebRtcAgc_Process)
80 |     uint8_t saturationWarning;
81 |
82 |     short shBufferIn[160] = {0};
83 |     short shBufferOut[160] = {0};
84 |
85 |     // NOTE(review): i indexes shorts yet the step is sizeof(short) * 80 == 160, i.e. one
86 |     // 160-sample frame per iteration; looks like a bytes/samples mix-up that happens to
87 |     // pair with the 160-sample buffers below — confirm the intended frame size.
88 |     for (int i = 0; i < src_len; i += sizeof(short) * 80) {
89 |         if (src_len - i >= sizeof(short) * 80) {
90 |
91 |             memcpy(shBufferIn, (srcData + i), 160 * sizeof(short));
92 |
93 |             if (0 != WebRtcAgc_Process(agcHandle, shBufferIn, NULL, 160, shBufferOut, NULL, micLevelIn, &micLevelOut, echo, &saturationWarning)) {
94 |                 LOGE("WebRtcUtils_webRtcAgcProcess err! \n");
95 |             } else {
96 |                 memcpy(desData + i, shBufferOut, 160 * sizeof(short));
97 |                 LOGD("WebRtcUtils_webRtcAgcProcess success");
98 |                 micLevelIn = micLevelOut;  // feed the suggested mic level into the next frame
99 |             }
100 |         }
101 |     }
102 |
103 |     env->ReleaseShortArrayElements(srcData_, srcData, 0);
104 |     env->ReleaseShortArrayElements(desData_, desData, 0);
105 | }
103 |
104 | JNIEXPORT void JNICALL
105 | Java_com_webrtc_jni_WebRtcUtils_webRtcAgcProcess32k(JNIEnv *env, jclass type, jshortArray srcData_,
106 |                                                     jshortArray desData_, jint srcLen) {
107 |
108 |     jshort *srcData = env->GetShortArrayElements(srcData_, NULL);
109 |     jshort *desData = env->GetShortArrayElements(desData_, NULL);
110 |
111 |     jsize src_len = env->GetArrayLength(srcData_);  // length taken from the array itself; the srcLen parameter is unused
112 |
113 |     int micLevelIn = 0;
114 |     int micLevelOut = 0;
115 | //    LOGD("src_len=== %d", src_len);
116 |     int16_t echo = 1; // whether gain amplification should account for echo (flag forwarded to WebRtcAgc_Process)
117 |     uint8_t saturationWarning;
118 |
119 |     short shBufferIn[320] = {0};
120 |     short shBufferOut[320] = {0};
121 |
122 |
123 |     // NOTE(review): the QMF filter states are re-zeroed on every JNI call, so
124 |     // the band-split state does not persist across calls — may cause audible
125 |     // discontinuities at call boundaries; confirm intended.
126 |     int filter_state1[6], filter_state2[6];
127 |     memset(filter_state1, 0, sizeof(filter_state1));
128 |     memset(filter_state2, 0, sizeof(filter_state2));
129 |
130 |     int Synthesis_state1[6], Synthesis_state12[6];
131 |     memset(Synthesis_state1, 0, sizeof(Synthesis_state1));
132 |     memset(Synthesis_state12, 0, sizeof(Synthesis_state12));
133 |
134 |     // Step is sizeof(short) * 160 == 320 shorts: one 10 ms frame at 32 kHz per iteration.
135 |     for (int i = 0; i < src_len; i += sizeof(short) * 160) {
136 |         if (src_len - i >= sizeof(short) * 160) {
137 |
138 |             short shInL[160], shInH[160];
139 |             short shOutL[160] = {0}, shOutH[160] = {0};
140 |
141 |             memcpy(shBufferIn, (srcData + i), 320 * sizeof(short));
142 |             // Split the frame into low- and high-frequency bands before processing.
143 |             WebRtcSpl_AnalysisQMF(shBufferIn, shInL, shInH, filter_state1, filter_state2);
144 |
145 |             if (0 != WebRtcAgc_Process(agcHandle, shInL, shInH, 160, shOutL, shOutH, micLevelIn, &micLevelOut, echo, &saturationWarning)) {
146 |                 LOGE("WebRtcUtils_webRtcAgcProcess32k err! \n");
147 |             } else {
148 |                 // Recombine the two processed bands into a full-rate frame.
149 |                 WebRtcSpl_SynthesisQMF(shOutL, shOutH, shBufferOut, Synthesis_state1, Synthesis_state12);
150 |
151 |                 memcpy(desData + i, shBufferOut, 320 * sizeof(short));
152 |                 LOGD("WebRtcUtils_webRtcAgcProcess32k success");
153 |                 micLevelIn = micLevelOut;  // feed the suggested mic level into the next frame
154 |             }
155 |         }
156 |     }
157 |
158 |     env->ReleaseShortArrayElements(srcData_, srcData, 0);
159 |     env->ReleaseShortArrayElements(desData_, desData, 0);
160 | }
157 |
158 | JNIEXPORT void JNICALL
159 | Java_com_webrtc_jni_WebRtcUtils_webRtcAgcFree(JNIEnv *env, jclass type) {
160 |     // Release the AGC instance created by webRtcAgcInit; safe to call when no
161 |     // instance exists (no-op).
162 |     if (agcHandle != NULL) {
163 |         // Renamed the local from `free` — that name shadowed the C library free().
164 |         int result = WebRtcAgc_Free(agcHandle);
165 |         if (result == -1) {
166 |             LOGE("WebRtcAgc_Free error");
167 |         }
168 |         agcHandle = NULL;  // prevent double-free on a repeated call
169 |     }
170 | }
168 |
169 | JNIEXPORT void JNICALL
170 | Java_com_webrtc_jni_WebRtcUtils_webRtcAgcInit(JNIEnv *env, jclass type, jlong minVolume, jlong maxVolume,
171 |                                               jlong freq) {
172 |     // Create the AGC instance and configure it in fixed-digital mode. Any
173 |     // failure is logged and initialization stops at that step.
174 |     if (WebRtcAgc_Create(&agcHandle) != 0) {
175 |         LOGE("WebRtcAgc_Create error");
176 |         return;
177 |     }
178 |
179 |     int16_t agcMode = kAgcModeFixedDigital;
180 |     if (WebRtcAgc_Init(agcHandle, (int32_t) minVolume, (int32_t) maxVolume, agcMode,
181 |                        (uint32_t) freq) != 0) {
182 |         LOGE("WebRtcAgc_Init error");
183 |         return;
184 |     }
185 |
186 |     // Gain configuration applied after a successful init.
187 |     WebRtcAgc_config_t agcConfig;
188 |     agcConfig.compressionGaindB = 23;
189 |     agcConfig.limiterEnable = 1;
190 |     agcConfig.targetLevelDbfs = 3;
191 |     if (WebRtcAgc_set_config(agcHandle, agcConfig) == -1) {
192 |         LOGE("WebRtcAgc_set_config error");
193 |     }
194 | }
194 |
195 | // Noise-suppression handle; created in webRtcNsInit, released in webRtcNsFree.
196 | NsHandle *pNs_inst = NULL;
197 |
198 | JNIEXPORT jshortArray JNICALL
199 | Java_com_webrtc_jni_WebRtcUtils_webRtcNsProcess(JNIEnv *env, jclass type, jint freq, jint len, jshortArray proData_) {
200 |
201 |     jshort *proData = env->GetShortArrayElements(proData_, NULL);
202 |     int dataLen = env->GetArrayLength(proData_);
203 | //    LOGD("webRtcNsProcess dataLen=== %d", dataLen);
204 |     int size = freq / 100;  // one 10 ms frame: 80 samples @ 8 kHz, 160 @ 16 kHz
205 |
206 |     if (pNs_inst) {
207 |         // WebRtcNs_Process consumes 80- or 160-sample frames here. Reject any
208 |         // other size: the previous code copied `size` samples into a fixed
209 |         // 160-sample stack buffer, overflowing it for e.g. freq == 32000
210 |         // (size == 320). The two duplicated branches are also merged.
211 |         if (size == 80 || size == 160) {
212 |             for (int i = 0; i < dataLen; i += size) {
213 |                 if (dataLen - i >= size) {
214 |                     short shBufferIn[160] = {0};
215 |                     short shBufferOut[160] = {0};
216 |                     memcpy(shBufferIn, (char*)(proData + i), size * sizeof(short));
217 |                     if (0 != WebRtcNs_Process(pNs_inst, shBufferIn, NULL, shBufferOut, NULL)) {
218 |                         LOGE("Noise_Suppression WebRtcNs_Process err! \n");
219 |                     }
220 |                     // As before, the (possibly unmodified) output buffer is copied
221 |                     // back even when processing reported an error.
222 |                     memcpy(proData + i, shBufferOut, size * sizeof(short));
223 |                     LOGD("Noise_Suppression WebRtcNs_Process success");
224 |                 }
225 |             }
226 |         } else {
227 |             LOGE("Noise_Suppression WebRtcNs_Process err! \n");
228 |         }
229 |     } else {
230 |         LOGD("pNs_inst null==");
231 |     }
232 |
233 |     env->ReleaseShortArrayElements(proData_, proData, 0);
234 |
235 |     return proData_;
236 | }
238 |
239 | JNIEXPORT jshortArray JNICALL
240 | Java_com_webrtc_jni_WebRtcUtils_webRtcNsProcess32k(JNIEnv *env, jclass type, jint len, jshortArray proData_) {
241 |
242 |     jshort *proData = env->GetShortArrayElements(proData_, NULL);
243 |     int dataLen = env->GetArrayLength(proData_);
244 |
245 |     if (pNs_inst) {
246 |         short shBufferIn[320] = {0};
247 |         short shBufferOut[320] = {0};
248 |
249 |
250 |         // NOTE(review): the QMF filter states are re-zeroed on every JNI call,
251 |         // so band-split state does not persist across calls — confirm intended.
252 |         int filter_state1[6], filter_state2[6];
253 |         memset(filter_state1, 0, sizeof(filter_state1));
254 |         memset(filter_state2, 0, sizeof(filter_state2));
255 |
256 |         int Synthesis_state1[6], Synthesis_state12[6];
257 |         memset(Synthesis_state1, 0, sizeof(Synthesis_state1));
258 |         memset(Synthesis_state12, 0, sizeof(Synthesis_state12));
259 |
260 |         // Step is sizeof(short) * 160 == 320 shorts: one 10 ms frame at 32 kHz.
261 |         for (int i = 0; i < dataLen; i += sizeof(short) * 160) {
262 |             if (dataLen - i >= sizeof(short) * 160) {
263 |
264 |                 short shInL[160], shInH[160];
265 |                 short shOutL[160] = {0}, shOutH[160] = {0};
266 |
267 |                 memcpy(shBufferIn, (proData + i), 320 * sizeof(short));
268 |                 // Split the frame into low- and high-frequency bands before processing.
269 |                 WebRtcSpl_AnalysisQMF(shBufferIn, shInL, shInH, filter_state1, filter_state2);
270 |
271 |                 if (0 != WebRtcNs_Process(pNs_inst, shInL, shInH, shOutL, shOutH)) {
272 |                     LOGE("Noise_Suppression WebRtcNs_Process err! \n");
273 |                 } else {
274 |                     // Recombine the two processed bands into a full-rate frame.
275 |                     WebRtcSpl_SynthesisQMF(shOutL, shOutH, shBufferOut, Synthesis_state1, Synthesis_state12);
276 |
277 |                     memcpy(proData + i, shBufferOut, 320 * sizeof(short));
278 |                     LOGD("Noise_Suppression WebRtcNs_Process");
279 |                 }
280 |             }
281 |         }
282 |     } else {
283 |         LOGD("pNs_inst null==");
284 |     }
285 |
286 |     env->ReleaseShortArrayElements(proData_, proData, 0);
287 |
288 |     return proData_;
289 | }
287 |
288 | JNIEXPORT jint JNICALL
289 | Java_com_webrtc_jni_WebRtcUtils_webRtcNsFree(JNIEnv *env, jclass type) {
290 |     // Release the NS instance. Returns WebRtcNs_Free's status, or -1 when no
291 |     // instance exists.
292 |     if (!pNs_inst) {
293 |         return -1;
294 |     }
295 |     int status = WebRtcNs_Free(pNs_inst);
296 |     pNs_inst = NULL;
297 |     LOGD("Noise_Suppression webRtcNsFree");
298 |     return status;
299 | }
298 |
299 | JNIEXPORT void JNICALL
300 | Java_com_webrtc_jni_WebRtcUtils_webRtcNsInit(JNIEnv *env, jclass type, jint freq) {
301 |
302 |     // Create the noise-suppression instance.
303 |     int status = WebRtcNs_Create(&pNs_inst);
304 |     LOGD("WebRtcNs_Create ==");
305 |     if (status != 0) {
306 |         LOGD("WebRtcNs_Create fail");
307 |         return;
308 |     }
309 |     // Initialize with the sample rate (8k / 16k / 32k) and a moderate policy.
310 |     WebRtcNs_Init(pNs_inst, freq);
311 |     WebRtcNs_set_policy(pNs_inst, 2);
312 | }
313 |
314 | #ifdef __cplusplus
315 | }
316 | #endif
--------------------------------------------------------------------------------
/app/src/main/java/com/webrtc/WebRtcActivity.java:
--------------------------------------------------------------------------------
1 | package com.webrtc;
2 |
3 | import android.Manifest.permission;
4 | import android.content.pm.PackageManager;
5 | import android.content.res.AssetManager;
6 | import android.media.AudioFormat;
7 | import android.media.AudioManager;
8 | import android.media.AudioRecord;
9 | import android.media.AudioTrack;
10 | import android.os.Build.VERSION;
11 | import android.os.Build.VERSION_CODES;
12 | import android.os.Bundle;
13 | import android.os.Environment;
14 | import android.support.annotation.NonNull;
15 | import android.support.annotation.Nullable;
16 | import android.support.v4.app.ActivityCompat;
17 | import android.support.v7.app.AppCompatActivity;
18 | import android.text.TextUtils;
19 | import android.util.Log;
20 | import android.view.View;
21 | import android.view.View.OnClickListener;
22 | import android.widget.Button;
23 | import android.widget.RadioGroup;
24 | import android.widget.RadioGroup.OnCheckedChangeListener;
25 | import android.widget.Toast;
26 |
27 | import com.webrtc.jni.WebRtcUtils;
28 |
29 | import java.io.File;
30 | import java.io.FileInputStream;
31 | import java.io.FileOutputStream;
32 | import java.io.IOException;
33 | import java.io.InputStream;
34 | import java.nio.ByteBuffer;
35 | import java.nio.ByteOrder;
36 | import java.util.concurrent.ExecutorService;
37 | import java.util.concurrent.Executors;
38 |
39 | /**
40 | * Created by shiwenshui 2018/4/20 17:54
41 | */
42 | public class WebRtcActivity extends AppCompatActivity {
43 |
44 |     private static final int SAMPLERATE_32K = 32000;
45 |     private static final int SAMPLERATE_16K = 16000;
46 |     private static final int SAMPLERATE_8K = 8000;
47 |
48 |     private static final String AUDIO_FILE_AST_8K = "record/recorded_audio.pcm";
49 |     private static final String AUDIO_FILE_AST_16k = "record/recorded_audio_16k.pcm";
50 |     private static final String AUDIO_FILE_AST_32k = "record/recorded_audio_32k.pcm";
51 | //    private static final String AUDIO_FILE_AST_32k = "record/test_32k.pcm";
52 |
53 |     /**
54 |      * Path of the original (unprocessed) PCM audio file on external storage.
55 |      */
56 |     private static final String AUDIO_FILE_PATH_8k = Environment.getExternalStorageDirectory().getPath() +
57 |             "/recorded_audio.pcm";
58 |     private static final String AUDIO_FILE_PATH_16k = Environment.getExternalStorageDirectory().getPath() +
59 |             "/recorded_audio_16k.pcm";
60 |     private static final String AUDIO_FILE_PATH_32K = Environment.getExternalStorageDirectory().getPath() +
61 |             "/recorded_audio_32k.pcm";
62 | //    private static final String AUDIO_FILE_PATH_32K = Environment.getExternalStorageDirectory().getPath() +
63 | //            "/test_32k.pcm";
64 |
65 |     /**
66 |      * Path of the processed (NS + AGC) PCM audio file.
67 |      */
68 |     private static final String AUDIO_PROCESS_FILE_PATH_8k = Environment.getExternalStorageDirectory().getPath() +
69 |             "/recorded_audio_process.pcm";
70 |     private static final String AUDIO_PROCESS_FILE_PATH_16k = Environment.getExternalStorageDirectory().getPath() +
71 |             "/recorded_audio_process_16k.pcm";
72 |     private static final String AUDIO_PROCESS_FILE_PATH_32k = Environment.getExternalStorageDirectory().getPath() +
73 |             "/recorded_audio_process_32k.pcm";
74 | //    private static final String AUDIO_PROCESS_FILE_PATH_32k = Environment.getExternalStorageDirectory().getPath
75 | //            () +
76 | //            "/test_process_32k.pcm";
77 |
78 |     private boolean isInitialized;  // asset copied to AUDIO_FILE_PATH and ready to play/process
79 |     private int mMinBufferSize;
80 |     private AudioTrack mAudioTrack;
81 |     private File mFile;  // source PCM file
82 |     private File mProcessFile;  // processed PCM file
83 |
84 |     private String AUDIO_FILE_PATH;
85 |     private String AUDIO_PROCESS_FILE_PATH;
86 |     private String srcPath;  // asset path of the currently selected sample
87 |
88 |     private boolean process32KData;  // true when the 32 kHz source is selected
89 |
90 |     private int mSampleRate;
91 |     private ExecutorService mThreadExecutor;
92 |
93 |     @Override
94 |     protected void onCreate(@Nullable Bundle savedInstanceState) {
95 |         super.onCreate(savedInstanceState);
96 |         setContentView(R.layout.activity_main);
97 |
98 |         mThreadExecutor = Executors.newScheduledThreadPool(3);
99 |         mSampleRate = SAMPLERATE_8K;  // 8 kHz source is the initial selection
100 |
101 |         initAudioRecord();
102 |         AUDIO_FILE_PATH = AUDIO_FILE_PATH_8k;
103 |         AUDIO_PROCESS_FILE_PATH = AUDIO_PROCESS_FILE_PATH_8k;
104 |         srcPath = AUDIO_FILE_AST_8K;
105 |
106 |         if (VERSION.SDK_INT >= VERSION_CODES.M && PackageManager.PERMISSION_GRANTED != ActivityCompat.checkSelfPermission(getApplicationContext(), permission.WRITE_EXTERNAL_STORAGE)) {  // runtime permission needed on M+; initAudio() runs from onRequestPermissionsResult instead
107 |             requestPermissions(new String[]{permission.WRITE_EXTERNAL_STORAGE}, 1000);
108 |         } else {
109 |             initAudio();
110 |         }
111 |         setup();
112 |     }
113 |
114 |     @Override
115 |     public void onRequestPermissionsResult(int requestCode, @NonNull String[] permissions,
116 |                                            @NonNull int[] grantResults) {
117 |         // Request code 1000 == WRITE_EXTERNAL_STORAGE (see onCreate). Kick off
118 |         // the audio-file setup as soon as any grant in the result succeeded.
119 |         if (requestCode == 1000) {
120 |             for (int idx = 0; idx < grantResults.length; idx++) {
121 |                 if (grantResults[idx] == PackageManager.PERMISSION_GRANTED) {
122 |                     initAudio();
123 |                     break;
124 |                 }
125 |             }
126 |         }
127 |         super.onRequestPermissionsResult(requestCode, permissions, grantResults);
128 |
129 |     }
128 |
129 |     private void initAudio() {
130 |         // Copies the bundled PCM asset (srcPath) to AUDIO_FILE_PATH on external
131 |         // storage, once, on a background thread; sets isInitialized on success.
132 |         if (TextUtils.isEmpty(srcPath) || TextUtils.isEmpty(AUDIO_FILE_PATH) || TextUtils.isEmpty(AUDIO_PROCESS_FILE_PATH)) {
133 |             return;
134 |         }
135 |         Log.e("sws", "srcPath==" + srcPath);
136 |         Log.e("sws", "AUDIO_PROCESS_FILE_PATH==" + AUDIO_PROCESS_FILE_PATH);
137 |         Log.e("sws", "AUDIO_FILE_PATH==" + AUDIO_FILE_PATH);
138 |
139 |         mProcessFile = new File(AUDIO_PROCESS_FILE_PATH);
140 |
141 |         mFile = new File(AUDIO_FILE_PATH);
142 |
143 |         if (!mFile.exists() || mFile.length() <= 0) {
144 |             Log.e("sws", " init file-----------");
145 |             mThreadExecutor.execute(new Runnable() {
146 |                 @Override
147 |                 public void run() {
148 |                     AssetManager assets = getAssets();
149 |                     // Fix: the streams were previously leaked when an IOException
150 |                     // occurred mid-copy; close them in finally.
151 |                     InputStream inputStream = null;
152 |                     FileOutputStream fileOutputStream = null;
153 |                     try {
154 |                         inputStream = assets.open(srcPath);
155 |                         fileOutputStream = new FileOutputStream(mFile);
156 |                         byte[] buf = new byte[1024 * 1024];
157 |                         int len;
158 |                         while ((len = inputStream.read(buf)) != -1) {
159 |                             fileOutputStream.write(buf, 0, len);
160 |                         }
161 |                         isInitialized = true;
162 |                         Log.e("sws", " init file end-----------");
163 |                     } catch (IOException e) {
164 |                         e.printStackTrace();
165 |                     } finally {
166 |                         if (inputStream != null) {
167 |                             try {
168 |                                 inputStream.close();
169 |                             } catch (IOException ignored) {
170 |                             }
171 |                         }
172 |                         if (fileOutputStream != null) {
173 |                             try {
174 |                                 fileOutputStream.close();
175 |                             } catch (IOException ignored) {
176 |                             }
177 |                         }
178 |                     }
179 |                 }
180 |             });
181 |         } else {
182 |             Log.e("sws", "-----------");
183 |             isInitialized = true;
184 |         }
185 |     }
169 |
170 |     private void initAudioRecord() {
171 |         stopPlay();  // stop any ongoing playback first (stopPlay defined later in this class)
172 |         mMinBufferSize = AudioRecord.getMinBufferSize(mSampleRate, AudioFormat.CHANNEL_IN_MONO,
173 |                 AudioFormat.ENCODING_PCM_16BIT);
174 |         // NOTE(review): the track is only created when null, so a later
175 |         // mSampleRate change does not rebuild it unless stopPlay() (defined
176 |         // elsewhere) clears mAudioTrack — confirm.
177 |         if (mAudioTrack == null) {
178 |             mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, mSampleRate, AudioFormat.CHANNEL_OUT_MONO,
179 |                     AudioFormat.ENCODING_PCM_16BIT, mMinBufferSize, AudioTrack.MODE_STREAM);
180 |         }
181 |     }
179 |
180 |     private int selectId = -1;  // currently selected radio-button id; -1 = none yet
181 |
182 |     private void setup() {
183 |         selectId = R.id.rb_8k;  // matches the 8 kHz defaults applied in onCreate
184 |         RadioGroup radioGroup = findViewById(R.id.rg);
185 |         int checkedRadioButtonId = radioGroup.getCheckedRadioButtonId();
186 |         switchDataSrc(checkedRadioButtonId);
187 |         radioGroup.setOnCheckedChangeListener(new OnCheckedChangeListener() {
188 |             @Override
189 |             public void onCheckedChanged(RadioGroup group, int checkedId) {
190 |
191 |                 switchDataSrc(checkedId);
192 |             }
193 |         });
194 |
195 |         Button playingBtn = findViewById(R.id.playing);
196 |         Button playingAgcNsBtn = findViewById(R.id.playing_process);
197 |         Button agcNsProcessBtn = findViewById(R.id.agc_ns);
198 |
199 |         // Play the original, unprocessed file.
200 |         playingBtn.setOnClickListener(new OnClickListener() {
201 |             @Override
202 |             public void onClick(View v) {
203 |                 if (!isInitialized || !mFile.exists()) {
204 |                     Toast.makeText(WebRtcActivity.this, "文件读写失败", Toast.LENGTH_SHORT).show();
205 |                     return;
206 |                 }
207 |                 playing(false);
208 |             }
209 |         });
210 |
211 |         // Play the processed (NS + AGC) file.
212 |         playingAgcNsBtn.setOnClickListener(new OnClickListener() {
213 |             @Override
214 |             public void onClick(View v) {
215 |                 if (!isInitialized && !mProcessFile.exists() || mProcessFile.length() <= 0) {
216 |                     Log.e("sws", "isInitialized ==" + isInitialized + ": mProcessFile==" + mProcessFile.exists());
217 |                     Toast.makeText(WebRtcActivity.this, "文件读写失败", Toast.LENGTH_SHORT).show();
218 |                     return;
219 |                 }
220 |                 playing(true);
221 |             }
222 |         });
223 |
224 |         // Run the NS + AGC processing chain over the source file.
225 |         agcNsProcessBtn.setOnClickListener(new OnClickListener() {
226 |             @Override
227 |             public void onClick(View v) {
228 |                 if (!isInitialized || !mFile.exists()) {
229 |                     Toast.makeText(WebRtcActivity.this, "文件读写失败", Toast.LENGTH_SHORT).show();
230 |                     return;
231 |                 }
232 |
233 |                 process();
234 |             }
235 |         });
236 |     }
234 |
235 |     private void switchDataSrc(int rbId) {
236 |         // Switch source file / sample rate to match the selected radio button;
237 |         // no-op when the selection did not change.
238 |         if (selectId == rbId) {
239 |             return;
240 |         }
241 |         isInitialized = false;
242 |         selectId = rbId;
243 |         process32KData = false;
244 |         switch (rbId) {
245 |             case R.id.rb_8k:
246 |                 mSampleRate = SAMPLERATE_8K;
247 |                 AUDIO_FILE_PATH = AUDIO_FILE_PATH_8k;
248 |                 AUDIO_PROCESS_FILE_PATH = AUDIO_PROCESS_FILE_PATH_8k;
249 |                 srcPath = AUDIO_FILE_AST_8K;
250 |
251 |                 initAudioRecord();
252 |                 initAudio();
253 |                 break;
254 |             case R.id.rb_16k:
255 |                 mSampleRate = SAMPLERATE_16K;
256 |                 AUDIO_FILE_PATH = AUDIO_FILE_PATH_16k;
257 |                 AUDIO_PROCESS_FILE_PATH = AUDIO_PROCESS_FILE_PATH_16k;
258 |                 srcPath = AUDIO_FILE_AST_16k;
259 |
260 |                 initAudioRecord();
261 |                 initAudio();
262 |
263 |                 break;
264 |             case R.id.rb_32k:
265 |                 process32KData = true;  // only the 32 kHz path uses the QMF split/merge JNI calls
266 |                 mSampleRate = SAMPLERATE_32K;
267 |                 AUDIO_FILE_PATH = AUDIO_FILE_PATH_32K;
268 |                 AUDIO_PROCESS_FILE_PATH = AUDIO_PROCESS_FILE_PATH_32k;
269 |                 srcPath = AUDIO_FILE_AST_32k;
270 |                 initAudioRecord();
271 |                 initAudio();
272 |                 break;
273 |         }
274 |     }
273 |
274 |     private boolean isProcessing;  // guards against starting process() twice concurrently
275 |
276 |     private void process() {
277 |         // Runs NS then AGC over the source file on a background thread and
278 |         // writes the result to AUDIO_PROCESS_FILE_PATH.
279 |         if (isProcessing) {
280 |             return;
281 |         }
282 |         isProcessing = true;
283 |         mThreadExecutor.execute(new Runnable() {
284 |             @Override
285 |             public void run() {
286 |                 WebRtcUtils.webRtcAgcInit(0, 255, mSampleRate);
287 |                 WebRtcUtils.webRtcNsInit(mSampleRate);
288 |
289 |                 Log.e("sws", "====mSampleRate=" + mSampleRate + ": process32KData=" + process32KData);
290 |                 FileInputStream ins = null;
291 |                 FileOutputStream out = null;
292 |                 try {
293 |                     File inFile = mFile;
294 |                     ins = new FileInputStream(inFile);
295 |                     File outFile = new File(AUDIO_PROCESS_FILE_PATH);
296 |                     out = new FileOutputStream(outFile);
297 |
298 |                     byte[] buf;
299 |                     if (process32KData) {
300 |                         //TODO
301 |                         /*
302 |                          * Observed in testing: at the 32 kHz sample rate, a smaller data
303 |                          * buf can leave a buzzing artifact after gain is applied.
304 |                          */
305 |                         buf = new byte[640 * 40];
306 |                     } else {
307 |                         buf = new byte[320];
308 |                     }
309 |                     // NOTE(review): a short final read leaves stale bytes from the
310 |                     // previous iteration in buf and processes them again — confirm
311 |                     // acceptable for the trailing frame.
312 |                     while (ins.read(buf) != -1) {
313 |                         short[] shortData = new short[buf.length >> 1];
314 |
315 |                         short[] processData = new short[buf.length >> 1];
316 |
317 |                         ByteBuffer.wrap(buf).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shortData);
318 |
319 |                         if (process32KData) {
320 | //                            short[] nsProcessData =shortData;
321 |                             short[] nsProcessData = WebRtcUtils.webRtcNsProcess32k(shortData.length, shortData);
322 |                             WebRtcUtils.webRtcAgcProcess32k(nsProcessData, processData, nsProcessData.length);
323 |                             out.write(shortsToBytes(processData));
324 |                         } else {
325 |                             short[] nsProcessData;
326 |                             if (selectId == R.id.rb_16k) {
327 |                                 nsProcessData = WebRtcUtils.webRtcNsProcess(mSampleRate, shortData.length, shortData);
328 |                                 WebRtcUtils.webRtcAgcProcess(nsProcessData, processData, shortData.length);
329 |                                 out.write(shortsToBytes(processData));
330 |
331 |                             } else if (selectId == R.id.rb_8k) {
332 |
333 |                                 nsProcessData = WebRtcUtils.webRtcNsProcess(mSampleRate, shortData.length, shortData);
334 |                                 WebRtcUtils.webRtcAgcProcess(nsProcessData, processData, nsProcessData.length);
335 |                                 out.write(shortsToBytes(processData));
336 |                             }
337 |                         }
338 |
339 |                     }
340 |                     runOnUiThread(new Runnable() {
341 |                         @Override
342 |                         public void run() {
343 |                             Toast.makeText(getApplicationContext(), "处理完成", Toast.LENGTH_LONG).show();
344 |                         }
345 |                     });
346 |                 } catch (Exception e) {
347 |                     e.printStackTrace();
348 |                 } finally {
349 |                     // Native handles are always released, success or failure.
350 |                     isProcessing = false;
351 |                     WebRtcUtils.webRtcNsFree();
352 |                     WebRtcUtils.webRtcAgcFree();
353 |                     if (out != null) {
354 |                         try {
355 |                             out.close();
356 |                         } catch (IOException e) {
357 |                             e.printStackTrace();
358 |                         }
359 |                     }
360 |                     if (ins != null) {
361 |                         try {
362 |                             ins.close();
363 |                         } catch (IOException e) {
364 |                             e.printStackTrace();
365 |                         }
366 |                     }
367 |                 }
368 |                 Log.e("sws", "ns end======");
369 |             }
370 |         });
371 |
372 |     }
367 |
368 | private byte[] shortsToBytes(short[] data) {
369 | byte[] buffer = new byte[data.length * 2];
370 | int shortIndex, byteIndex;
371 | shortIndex = byteIndex = 0;
372 | for (; shortIndex != data.length; ) {
373 | buffer[byteIndex] = (byte) (data[shortIndex] & 0x00FF);
374 | buffer[byteIndex + 1] = (byte) ((data[shortIndex] & 0xFF00) >> 8);
375 | ++shortIndex;
376 | byteIndex += 2;
377 | }
378 | return buffer;
379 | }
380 |
381 | private boolean isPlaying;
382 |
/**
 * Streams either the raw recording (mFile) or the processed file (mProcessFile)
 * into mAudioTrack on a background thread.
 *
 * @param isPlayingProcess true to play the processed file, false for the raw recording
 */
private void playing(final boolean isPlayingProcess) {
    // Restart semantics: if playback is already running, stop it first so the
    // streaming loop below sees isPlaying == false and exits.
    if (isPlaying) {
        isPlaying = false;
        mAudioTrack.stop();
    }
    mThreadExecutor.execute(new Runnable() {
        @Override
        public void run() {
            InputStream ins = null;
            try {
                File file = mFile;
                if (isPlayingProcess) {
                    file = mProcessFile;
                }

                isPlaying = true;

                ins = new FileInputStream(file);
                mAudioTrack.play();
                int sampleRate = mAudioTrack.getSampleRate();
                // NOTE(review): label says "audioFormat" but the value logged is
                // the sample rate — confirm intent before relying on this log.
                Log.e("sws", "audioFormat ==" + sampleRate);

                // Push PCM chunks until EOF, the track goes away, or isPlaying is
                // cleared (e.g. by stopPlay() or a restart request).
                byte[] buf = new byte[mMinBufferSize];
                int len;
                while ((len = ins.read(buf)) != -1 && mAudioTrack != null && isPlaying) {
                    mAudioTrack.write(buf, 0, len);
                }

                // Natural end of file: stop the track if nobody stopped us first.
                if (isPlaying) {
                    isPlaying = false;
                    if (mAudioTrack != null) {
                        mAudioTrack.stop();
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                isPlaying = false;
                if (ins != null) {
                    try {
                        ins.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    });
}
432 |
@Override
protected void onDestroy() {
    super.onDestroy();
    // Release the AudioTrack, then abort any in-flight process()/playing() job.
    stopPlay();
    mThreadExecutor.shutdownNow();
    mThreadExecutor = null;
}
440 |
/** Stops playback and releases the AudioTrack; safe to call repeatedly. */
private void stopPlay() {
    // Clearing the flag makes the background streaming loop in playing() exit.
    isPlaying = false;
    if (mAudioTrack != null) {
        mAudioTrack.release();
        mAudioTrack = null;
    }
}
448 | }
449 |
--------------------------------------------------------------------------------
/app/src/main/java/com/webrtc/jni/WebRtcUtils.java:
--------------------------------------------------------------------------------
1 | package com.webrtc.jni;
2 |
/**
 * JNI bindings for the bundled WebRTC noise-suppression (NS) and automatic gain
 * control (AGC) modules (native side presumably in app/src/main/cpp/web_rtc.cpp
 * — confirm against the CMake target).
 *
 * Call the matching *Init method before any *Process method, and the matching
 * *Free method when processing is finished.
 *
 * Created by shiwenshui 2018/4/20 17:40
 */
public class WebRtcUtils {

    static {
        // Loads libWRtcAudio.so built by the app module's native build.
        System.loadLibrary("WRtcAudio");
    }

    // ---- Noise suppression ----
    public static native void webRtcNsInit(int freq);
    public static native short[] webRtcNsProcess(int sampleRate, int len, short[] proData);
    public static native short[] webRtcNsProcess32k(int len, short[] proData);
    public static native int webRtcNsFree();

    // ---- Automatic gain control ----
    public static native void webRtcAgcInit(long minVolume, long maxVolume, long freq);
    public static native void webRtcAgcProcess(short[] srcData, short[] desData, int srcLen);
    public static native void webRtcAgcProcess32k(short[] srcData, short[] desData, int srcLen);
    public static native void webRtcAgcFree();
}
22 |
--------------------------------------------------------------------------------
/app/src/main/res/drawable-v24/ic_launcher_foreground.xml:
--------------------------------------------------------------------------------
1 |
7 |
12 |
13 |
19 |
22 |
25 |
26 |
27 |
28 |
34 |
35 |
--------------------------------------------------------------------------------
/app/src/main/res/drawable/ic_launcher_background.xml:
--------------------------------------------------------------------------------
1 |
2 |
8 |
11 |
16 |
21 |
26 |
31 |
36 |
41 |
46 |
51 |
56 |
61 |
66 |
71 |
76 |
81 |
86 |
91 |
96 |
101 |
106 |
111 |
116 |
121 |
126 |
131 |
136 |
141 |
146 |
151 |
156 |
161 |
166 |
171 |
172 |
--------------------------------------------------------------------------------
/app/src/main/res/layout/activity_main.xml:
--------------------------------------------------------------------------------
1 |
2 |
8 |
9 |
14 |
15 |
21 |
22 |
28 |
29 |
35 |
41 |
46 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-hdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-hdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-hdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-hdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-mdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-mdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-mdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-mdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png
--------------------------------------------------------------------------------
/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png
--------------------------------------------------------------------------------
/app/src/main/res/values/colors.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | #3F51B5
4 | #303F9F
5 | #FF4081
6 |
7 |
--------------------------------------------------------------------------------
/app/src/main/res/values/strings.xml:
--------------------------------------------------------------------------------
1 |
2 | WebRtcAgcNs
3 |
4 |
--------------------------------------------------------------------------------
/app/src/main/res/values/styles.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/app/src/test/java/com/webrtc/ExampleUnitTest.java:
--------------------------------------------------------------------------------
1 | package com.webrtc;
2 |
3 | import org.junit.Test;
4 |
5 | import static org.junit.Assert.*;
6 |
/**
 * Example local unit test, which will execute on the development machine (host).
 *
 * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
 */
public class ExampleUnitTest {
    // Template sanity check that the JVM unit-test harness runs at all.
    @Test
    public void addition_isCorrect() {
        assertEquals(4, 2 + 2);
    }
}
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
// Top-level build file where you can add configuration options common to all sub-projects/modules.

buildscript {

    repositories {
        google()
        // NOTE(review): jcenter() is sunset/read-only; prefer mavenCentral() when upgrading.
        jcenter()
    }
    dependencies {
        // Android Gradle plugin used to build the :app module.
        classpath 'com.android.tools.build:gradle:3.1.1'


        // NOTE: Do not place your application dependencies here; they belong
        // in the individual module build.gradle files
    }
}

// Repositories for module (application) dependencies.
allprojects {
    repositories {
        google()
        jcenter()
    }
}

// `gradlew clean` removes the root build output directory.
task clean(type: Delete) {
    delete rootProject.buildDir
}
28 |
--------------------------------------------------------------------------------
/gradle.properties:
--------------------------------------------------------------------------------
1 | # Project-wide Gradle settings.
2 | # IDE (e.g. Android Studio) users:
3 | # Gradle settings configured through the IDE *will override*
4 | # any settings specified in this file.
5 | # For more details on how to configure your build environment visit
6 | # http://www.gradle.org/docs/current/userguide/build_environment.html
7 | # Specifies the JVM arguments used for the daemon process.
8 | # The setting is particularly useful for tweaking memory settings.
9 | org.gradle.jvmargs=-Xmx1536m
10 | # When configured, Gradle will run in incubating parallel mode.
11 | # This option should only be used with decoupled projects. More details, visit
12 | # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects
13 | # org.gradle.parallel=true
14 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sws1011/webrtc/001b33e1697f1b981646db16c9b15370fa8a9637/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | #Fri Apr 20 17:37:35 CST 2018
2 | distributionBase=GRADLE_USER_HOME
3 | distributionPath=wrapper/dists
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 | distributionUrl=https\://services.gradle.org/distributions/gradle-4.4-all.zip
7 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | ##############################################################################
4 | ##
5 | ## Gradle start up script for UN*X
6 | ##
7 | ##############################################################################
8 |
9 | # Attempt to set APP_HOME
10 | # Resolve links: $0 may be a link
11 | PRG="$0"
12 | # Need this for relative symlinks.
13 | while [ -h "$PRG" ] ; do
14 | ls=`ls -ld "$PRG"`
15 | link=`expr "$ls" : '.*-> \(.*\)$'`
16 | if expr "$link" : '/.*' > /dev/null; then
17 | PRG="$link"
18 | else
19 | PRG=`dirname "$PRG"`"/$link"
20 | fi
21 | done
22 | SAVED="`pwd`"
23 | cd "`dirname \"$PRG\"`/" >/dev/null
24 | APP_HOME="`pwd -P`"
25 | cd "$SAVED" >/dev/null
26 |
27 | APP_NAME="Gradle"
28 | APP_BASE_NAME=`basename "$0"`
29 |
30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
31 | DEFAULT_JVM_OPTS=""
32 |
33 | # Use the maximum available, or set MAX_FD != -1 to use that value.
34 | MAX_FD="maximum"
35 |
# Print a warning message without terminating the script.
warn () {
    echo "$*"
}
39 |
# Print an error message (padded with blank lines) and abort with status 1.
die () {
    echo
    echo "$*"
    echo
    exit 1
}
46 |
47 | # OS specific support (must be 'true' or 'false').
48 | cygwin=false
49 | msys=false
50 | darwin=false
51 | nonstop=false
52 | case "`uname`" in
53 | CYGWIN* )
54 | cygwin=true
55 | ;;
56 | Darwin* )
57 | darwin=true
58 | ;;
59 | MINGW* )
60 | msys=true
61 | ;;
62 | NONSTOP* )
63 | nonstop=true
64 | ;;
65 | esac
66 |
67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
68 |
69 | # Determine the Java command to use to start the JVM.
70 | if [ -n "$JAVA_HOME" ] ; then
71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
72 | # IBM's JDK on AIX uses strange locations for the executables
73 | JAVACMD="$JAVA_HOME/jre/sh/java"
74 | else
75 | JAVACMD="$JAVA_HOME/bin/java"
76 | fi
77 | if [ ! -x "$JAVACMD" ] ; then
78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
79 |
80 | Please set the JAVA_HOME variable in your environment to match the
81 | location of your Java installation."
82 | fi
83 | else
84 | JAVACMD="java"
85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
86 |
87 | Please set the JAVA_HOME variable in your environment to match the
88 | location of your Java installation."
89 | fi
90 |
91 | # Increase the maximum file descriptors if we can.
92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
93 | MAX_FD_LIMIT=`ulimit -H -n`
94 | if [ $? -eq 0 ] ; then
95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
96 | MAX_FD="$MAX_FD_LIMIT"
97 | fi
98 | ulimit -n $MAX_FD
99 | if [ $? -ne 0 ] ; then
100 | warn "Could not set maximum file descriptor limit: $MAX_FD"
101 | fi
102 | else
103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
104 | fi
105 | fi
106 |
107 | # For Darwin, add options to specify how the application appears in the dock
108 | if $darwin; then
109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
110 | fi
111 |
112 | # For Cygwin, switch paths to Windows format before running java
113 | if $cygwin ; then
114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"`
115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
116 | JAVACMD=`cygpath --unix "$JAVACMD"`
117 |
118 | # We build the pattern for arguments to be converted via cygpath
119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
120 | SEP=""
121 | for dir in $ROOTDIRSRAW ; do
122 | ROOTDIRS="$ROOTDIRS$SEP$dir"
123 | SEP="|"
124 | done
125 | OURCYGPATTERN="(^($ROOTDIRS))"
126 | # Add a user-defined pattern to the cygpath arguments
127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then
128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
129 | fi
130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
131 | i=0
132 | for arg in "$@" ; do
133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option
135 |
136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition
137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
138 | else
139 | eval `echo args$i`="\"$arg\""
140 | fi
141 | i=$((i+1))
142 | done
143 | case $i in
144 | (0) set -- ;;
145 | (1) set -- "$args0" ;;
146 | (2) set -- "$args0" "$args1" ;;
147 | (3) set -- "$args0" "$args1" "$args2" ;;
148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
151 | (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
154 | esac
155 | fi
156 |
# Escape application args: print each argument single-quoted (embedded quotes
# escaped) with a trailing " \", so the list can be rebuilt via `eval set --`.
save () {
    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
    echo " "
}
162 | APP_ARGS=$(save "$@")
163 |
164 | # Collect all arguments for the java command, following the shell quoting and substitution rules
165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
166 |
167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
169 | cd "$(dirname "$0")"
170 | fi
171 |
172 | exec "$JAVACMD" "$@"
173 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @if "%DEBUG%" == "" @echo off
2 | @rem ##########################################################################
3 | @rem
4 | @rem Gradle startup script for Windows
5 | @rem
6 | @rem ##########################################################################
7 |
8 | @rem Set local scope for the variables with windows NT shell
9 | if "%OS%"=="Windows_NT" setlocal
10 |
11 | set DIRNAME=%~dp0
12 | if "%DIRNAME%" == "" set DIRNAME=.
13 | set APP_BASE_NAME=%~n0
14 | set APP_HOME=%DIRNAME%
15 |
16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
17 | set DEFAULT_JVM_OPTS=
18 |
19 | @rem Find java.exe
20 | if defined JAVA_HOME goto findJavaFromJavaHome
21 |
22 | set JAVA_EXE=java.exe
23 | %JAVA_EXE% -version >NUL 2>&1
24 | if "%ERRORLEVEL%" == "0" goto init
25 |
26 | echo.
27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
28 | echo.
29 | echo Please set the JAVA_HOME variable in your environment to match the
30 | echo location of your Java installation.
31 |
32 | goto fail
33 |
34 | :findJavaFromJavaHome
35 | set JAVA_HOME=%JAVA_HOME:"=%
36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
37 |
38 | if exist "%JAVA_EXE%" goto init
39 |
40 | echo.
41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
42 | echo.
43 | echo Please set the JAVA_HOME variable in your environment to match the
44 | echo location of your Java installation.
45 |
46 | goto fail
47 |
48 | :init
49 | @rem Get command-line arguments, handling Windows variants
50 |
51 | if not "%OS%" == "Windows_NT" goto win9xME_args
52 |
53 | :win9xME_args
54 | @rem Slurp the command line arguments.
55 | set CMD_LINE_ARGS=
56 | set _SKIP=2
57 |
58 | :win9xME_args_slurp
59 | if "x%~1" == "x" goto execute
60 |
61 | set CMD_LINE_ARGS=%*
62 |
63 | :execute
64 | @rem Setup the command line
65 |
66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
67 |
68 | @rem Execute Gradle
69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
70 |
71 | :end
72 | @rem End local scope for the variables with windows NT shell
73 | if "%ERRORLEVEL%"=="0" goto mainEnd
74 |
75 | :fail
76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
77 | rem the _cmd.exe /c_ return code!
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
79 | exit /b 1
80 |
81 | :mainEnd
82 | if "%OS%"=="Windows_NT" endlocal
83 |
84 | :omega
85 |
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
// Modules that make up this Gradle build.
include ':app'
2 |
--------------------------------------------------------------------------------