├── Data
│   ├── 001A_16k.pcm
│   ├── 001B_16k.wav
│   ├── delaytest44100.pcm
│   └── delaytest48000.pcm
├── LICENSE
├── LICENSE.webrtc
├── PATENTS.webrtc
├── README.md
├── README.webrtc
├── Visual Studio Project Files
│   ├── WebRTC_VoiceEngine.sln
│   ├── WebRTC_VoiceEngine.vcxproj
│   ├── WebRTC_VoiceEngine.vcxproj.filters
│   └── WebRTC_VoiceEngine.vcxproj.user
├── demo_main.cpp
├── my_level_indicator.cc
├── wasapi.cpp
├── wasapi.h
├── webrtc_voe.h
├── webrtc_voe_impl.cpp
├── webrtc_voe_impl.h
└── webrtc_volume_control_impl.cpp
/Data/001A_16k.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/garyyu/WebRTC_VoiceEngine/799414fd6af85abe1da4071deedf207c392f4b5a/Data/001A_16k.pcm
--------------------------------------------------------------------------------
/Data/001B_16k.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/garyyu/WebRTC_VoiceEngine/799414fd6af85abe1da4071deedf207c392f4b5a/Data/001B_16k.wav
--------------------------------------------------------------------------------
/Data/delaytest44100.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/garyyu/WebRTC_VoiceEngine/799414fd6af85abe1da4071deedf207c392f4b5a/Data/delaytest44100.pcm
--------------------------------------------------------------------------------
/Data/delaytest48000.pcm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/garyyu/WebRTC_VoiceEngine/799414fd6af85abe1da4071deedf207c392f4b5a/Data/delaytest48000.pcm
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013, Gary Yu. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in
12 | the documentation and/or other materials provided with the
13 | distribution.
14 |
15 | * Neither the name of Google nor the names of its contributors may
16 | be used to endorse or promote products derived from this software
17 | without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/LICENSE.webrtc:
--------------------------------------------------------------------------------
1 | Copyright (c) 2011, The WebRTC project authors. All rights reserved.
2 |
3 | Redistribution and use in source and binary forms, with or without
4 | modification, are permitted provided that the following conditions are
5 | met:
6 |
7 | * Redistributions of source code must retain the above copyright
8 | notice, this list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright
11 | notice, this list of conditions and the following disclaimer in
12 | the documentation and/or other materials provided with the
13 | distribution.
14 |
15 | * Neither the name of Google nor the names of its contributors may
16 | be used to endorse or promote products derived from this software
17 | without specific prior written permission.
18 |
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/PATENTS.webrtc:
--------------------------------------------------------------------------------
1 | Additional IP Rights Grant (Patents)
2 |
3 | "This implementation" means the copyrightable works distributed by
4 | Google as part of the WebRTC code package.
5 |
6 | Google hereby grants to you a perpetual, worldwide, non-exclusive,
7 | no-charge, irrevocable (except as stated in this section) patent
8 | license to make, have made, use, offer to sell, sell, import,
9 | transfer, and otherwise run, modify and propagate the contents of this
10 | implementation of the WebRTC code package, where such license applies
11 | only to those patent claims, both currently owned by Google and
12 | acquired in the future, licensable by Google that are necessarily
13 | infringed by this implementation of the WebRTC code package. This
14 | grant does not include claims that would be infringed only as a
15 | consequence of further modification of this implementation. If you or
16 | your agent or exclusive licensee institute or order or agree to the
17 | institution of patent litigation against any entity (including a
18 | cross-claim or counterclaim in a lawsuit) alleging that this
19 | implementation of the WebRTC code package or any code incorporated
20 | within this implementation of the WebRTC code package constitutes
21 | direct or contributory patent infringement, or inducement of patent
22 | infringement, then any patent rights granted to you under this License
23 | for this implementation of the WebRTC code package shall terminate as
24 | of the date such litigation is filed.
25 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Name: WebRTC_VoiceEngine
2 | ========================
3 | URL: https://github.com/garyyu/WebRTC_VoiceEngine
4 | License: BSD
5 | License File: LICENSE
6 | For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
7 |
8 | Description:
9 | This WebRTC VoiceEngine includes Acoustic Echo Cancellation (AEC), Noise Suppression (NS), VAD (Voice Active Detection) and so on.
10 | The purpose of this project is to make use of Google WebRTC OpenSource project, to provide a simple wrapper API for the voice engine part of WebRTC.
11 | So far, this project is built in Win7 with Visual Studio 2010.
12 |
13 | By default, this project is built as a DLL.
14 | If want to build demo_main.cpp as standalone demo, you can modify the Project Property:
15 | (1) Linker->Input: ignore default library "libcmt";
16 | (2) General->Configuration Type: change "DLL" to "exe"; and "Use of MFC" to "Static Library".
17 | And modify "webrtc_voe.h" to comment this line:
18 | //#define _WEBRTC_API_EXPORTS // For DLL Building.
19 | Both Release Build and Debug Build are OK.
20 |
21 | This project depends on WebRTC project, which I put on my folder "C:\Users\garyyu\Work\trunk". If you put "trunk" on different folder, please modify the external library and header files path in Visual Studio IDE. About how to download and build this WebRTC project, please read the following documents:
22 | http://www.webrtc.org/reference/getting-started
23 | http://www.webrtc.org/reference/getting-started/prerequisite-sw
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/README.webrtc:
--------------------------------------------------------------------------------
1 | Name: WebRTC
2 | URL: http://www.webrtc.org
3 | Version: 90
4 | License: BSD
5 | License File: LICENSE
6 |
7 | Description:
8 | WebRTC provides real time voice and video processing
9 | functionality to enable the implementation of
10 | PeerConnection/MediaStream.
11 |
12 | Third party code used in this project is described
13 | in the file LICENSE_THIRD_PARTY.
14 |
--------------------------------------------------------------------------------
/Visual Studio Project Files/WebRTC_VoiceEngine.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 11.00
3 | # Visual Studio 2010
4 | Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "WebRTC_VoiceEngine", "WebRTC_VoiceEngine.vcxproj", "{81E50AE9-A005-4B5B-B3FB-8496668FAEE4}"
5 | EndProject
6 | Global
7 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
8 | Debug|Win32 = Debug|Win32
9 | Release|Win32 = Release|Win32
10 | EndGlobalSection
11 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
12 | {81E50AE9-A005-4B5B-B3FB-8496668FAEE4}.Debug|Win32.ActiveCfg = Debug|Win32
13 | {81E50AE9-A005-4B5B-B3FB-8496668FAEE4}.Debug|Win32.Build.0 = Debug|Win32
14 | {81E50AE9-A005-4B5B-B3FB-8496668FAEE4}.Release|Win32.ActiveCfg = Release|Win32
15 | {81E50AE9-A005-4B5B-B3FB-8496668FAEE4}.Release|Win32.Build.0 = Release|Win32
16 | EndGlobalSection
17 | GlobalSection(SolutionProperties) = preSolution
18 | HideSolutionNode = FALSE
19 | EndGlobalSection
20 | EndGlobal
21 |
--------------------------------------------------------------------------------
/Visual Studio Project Files/WebRTC_VoiceEngine.vcxproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Debug
6 | Win32
7 |
8 |
9 | Release
10 | Win32
11 |
12 |
13 |
14 | {81E50AE9-A005-4B5B-B3FB-8496668FAEE4}
15 | Win32Proj
16 | WebRTC_VoiceEngine
17 |
18 |
19 |
20 | Application
21 | true
22 | Unicode
23 | Static
24 |
25 |
26 | DynamicLibrary
27 | false
28 | true
29 | Unicode
30 | false
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 | true
44 |
45 |
46 | false
47 |
48 |
49 |
50 |
51 |
52 | Level3
53 | Disabled
54 | WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)
55 | $(WEBRTC_TRUNK_DIR);$(WEBRTC_TRUNK_DIR)\webrtc;$(WEBRTC_TRUNK_DIR)\webrtc\voice_engine\include;$(WEBRTC_TRUNK_DIR)\webrtc\video_engine\include;$(WEBRTC_TRUNK_DIR)\webrtc\test\channel_transport\include;$(WEBRTC_TRUNK_DIR)\webrtc\test\channel_transport;$(WEBRTC_TRUNK_DIR)\webrtc\voice_engine\include\mock;$(WEBRTC_TRUNK_DIR)\webrtc\system_wrappers\interface;%(AdditionalIncludeDirectories)
56 |
57 |
58 | Console
59 | true
60 | $(WEBRTC_TRUNK_DIR)\build\Debug;$(WEBRTC_TRUNK_DIR)\build\Debug\lib;
61 | libyuv.lib;CNG.lib;G711.lib;G722.lib;NetEq.lib;NetEq4.lib;NetEq4TestTools.lib;NetEqTestTools.lib;PCM16B.lib;acm2.lib;audio_coding_module.lib;audio_conference_mixer.lib;audio_device.lib;audio_processing.lib;audio_processing_sse2.lib;audioproc_debug_proto.lib;audioproc_unittest_proto.lib;bitrate_controller.lib;channel_transport.lib;command_line_parser.lib;common_audio.lib;common_audio_sse2.lib;common_video.lib;crnspr.lib;crnss.lib;desktop_capture.lib;desktop_capture_differ_sse2.lib;directshow_baseclasses.lib;expat.lib;frame_editing_lib.lib;frame_generator.lib;genperf_libs.lib;iLBC.lib;iSAC.lib;iSACFix.lib;icui18n.lib;icuuc.lib;jsoncpp.lib;libjingle.lib;libjingle_media.lib;libjingle_p2p.lib;libjingle_peerconnection.lib;libjingle_sound.lib;libjingle_xmpphelp.lib;libjpeg.lib;libsrtp.lib;libtest.lib;libvietest.lib;libvpx.lib;libvpx_asm_offsets_vp8.lib;libvpx_intrinsics_mmx.lib;libvpx_intrinsics_sse2.lib;libvpx_intrinsics_ssse3.lib;media_file.lib;metrics.lib;neteq_unittest_tools.lib;nss_static.lib;opus.lib;paced_sender.lib;protobuf_full_do_not_use.lib;protobuf_lite.lib;rbe_components.lib;remote_bitrate_estimator.lib;rtp_rtcp.lib;sqlite3.lib;system_wrappers.lib;video_capture_module.lib;video_codecs_test_framework.lib;video_coding_utility.lib;video_engine_core.lib;video_processing.lib;video_processing_sse2.lib;video_quality_analysis.lib;video_render_module.lib;voice_engine.lib;webrtc.lib;webrtc_i420.lib;webrtc_opus.lib;webrtc_test_common.lib;webrtc_utility.lib;webrtc_video_coding.lib;webrtc_vp8.lib;winmm.lib;msdmo.lib;dmoguids.lib;wmcodecdspuuid.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)
62 | msvcprtd;
63 |
64 |
65 |
66 |
67 | Level3
68 |
69 |
70 | MaxSpeed
71 | true
72 | true
73 | WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
74 | $(WEBRTC_TRUNK_DIR);$(WEBRTC_TRUNK_DIR)\webrtc;$(WEBRTC_TRUNK_DIR)\webrtc\voice_engine\include;$(WEBRTC_TRUNK_DIR)\webrtc\video_engine\include;$(WEBRTC_TRUNK_DIR)\webrtc\test\channel_transport\include;$(WEBRTC_TRUNK_DIR)\webrtc\test\channel_transport;$(WEBRTC_TRUNK_DIR)\webrtc\voice_engine\include\mock;$(WEBRTC_TRUNK_DIR)\webrtc\system_wrappers\interface;%(AdditionalIncludeDirectories)
75 |
76 |
77 | Console
78 | true
79 | true
80 | true
81 | $(WEBRTC_TRUNK_DIR)\build\Release;$(WEBRTC_TRUNK_DIR)\build\Release\lib;
82 | libyuv.lib;CNG.lib;G711.lib;G722.lib;NetEq.lib;NetEq4.lib;NetEq4TestTools.lib;NetEqTestTools.lib;PCM16B.lib;acm2.lib;audio_coding_module.lib;audio_conference_mixer.lib;audio_device.lib;audio_processing.lib;audio_processing_sse2.lib;audioproc_debug_proto.lib;audioproc_unittest_proto.lib;bitrate_controller.lib;channel_transport.lib;command_line_parser.lib;common_audio.lib;common_audio_sse2.lib;common_video.lib;crnspr.lib;crnss.lib;desktop_capture.lib;desktop_capture_differ_sse2.lib;directshow_baseclasses.lib;expat.lib;frame_editing_lib.lib;frame_generator.lib;genperf_libs.lib;iLBC.lib;iSAC.lib;iSACFix.lib;icui18n.lib;icuuc.lib;jsoncpp.lib;libjingle.lib;libjingle_media.lib;libjingle_p2p.lib;libjingle_peerconnection.lib;libjingle_sound.lib;libjingle_xmpphelp.lib;libjpeg.lib;libsrtp.lib;libtest.lib;libvietest.lib;libvpx.lib;libvpx_asm_offsets_vp8.lib;libvpx_intrinsics_mmx.lib;libvpx_intrinsics_sse2.lib;libvpx_intrinsics_ssse3.lib;media_file.lib;metrics.lib;neteq_unittest_tools.lib;nss_static.lib;opus.lib;paced_sender.lib;protobuf_full_do_not_use.lib;protobuf_lite.lib;rbe_components.lib;remote_bitrate_estimator.lib;rtp_rtcp.lib;sqlite3.lib;system_wrappers.lib;video_capture_module.lib;video_codecs_test_framework.lib;video_coding_utility.lib;video_engine_core.lib;video_processing.lib;video_processing_sse2.lib;video_quality_analysis.lib;video_render_module.lib;voice_engine.lib;webrtc.lib;webrtc_i420.lib;webrtc_opus.lib;webrtc_test_common.lib;webrtc_utility.lib;webrtc_video_coding.lib;webrtc_vp8.lib;Winmm.lib;dmoguids.lib;wmcodecdspuuid.lib;strmiids.lib;msdmo.lib;%(AdditionalDependencies)
83 | libcmt;msvcprt
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
--------------------------------------------------------------------------------
/Visual Studio Project Files/WebRTC_VoiceEngine.vcxproj.filters:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {4FC737F1-C7A5-4376-A066-2A32D752A2FF}
6 | cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx
7 |
8 |
9 | {93995380-89BD-4b04-88EB-625FBE52EBFB}
10 | h;hpp;hxx;hm;inl;inc;xsd
11 |
12 |
13 | {67DA6AB6-F800-4c08-8B7A-83BB121AAD01}
14 | rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms
15 |
16 |
17 |
18 |
19 | Source Files
20 |
21 |
22 | Source Files
23 |
24 |
25 | Source Files
26 |
27 |
28 | Source Files
29 |
30 |
31 | Source Files
32 |
33 |
34 |
35 |
36 | Header Files
37 |
38 |
39 | Header Files
40 |
41 |
42 | Header Files
43 |
44 |
45 |
--------------------------------------------------------------------------------
/Visual Studio Project Files/WebRTC_VoiceEngine.vcxproj.user:
--------------------------------------------------------------------------------
1 |
2 |
3 |
--------------------------------------------------------------------------------
/demo_main.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 Gary Yu. All Rights Reserved.
3 | *
4 | * URL: https://github.com/garyyu/WebRTC_VoiceEngine
5 | *
6 | * Use of this source code is governed by a New BSD license which can be found in the LICENSE file
7 | * in the root of the source tree. Refer to README.md.
8 | * For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
9 | */
10 |
11 | /*
12 | * This file contains the Usage Demo of this WebRTC AEC.
13 | *
14 | */
15 |
16 | #include
17 | #include
18 | #include
19 | #include
20 | #include
21 | #include
22 | #include "webrtc_voe.h"
23 |
24 | #include "wasapi.h"
25 |
26 | #if !defined(_WEBRTC_API_EXPORTS)
27 |
28 | using namespace std;
29 |
30 | FILE *rfile = NULL;
31 | FILE *wfile = NULL;
32 |
33 | void PrintOptions()
34 | {
35 | cout << "WebRTC AEC Demo" <> i;
57 | while (i != 0)
58 | {
59 | switch(i)
60 | {
61 | case 1: RealTimeDelayTest();break;
62 | case 2: RealTimeDialogTest();break;
63 | case 3: LocalFileTest();break;
64 | default:break;
65 | }
66 |
67 | PrintOptions();
68 | cin >> i;
69 | }
70 |
71 | return 0;
72 | }
73 |
74 | void LocalFileTest()
75 | {
76 | unsigned i = 0;
77 | size_t read_size = 0;
78 | unsigned total_size= 0;
79 |
80 | webrtc_ec * echo = NULL;
81 | unsigned clock_rate = 16000; //16kHz
82 | unsigned samples_per_frame = 16*10; //16kHz*10ms
83 | unsigned system_delay = 0;
84 |
85 | int16_t * file_buffer = NULL;
86 | int16_t * farend_buffer = NULL;
87 | int16_t * nearend_buffer = NULL;
88 |
89 | char strFileIn [128] = "stero_in.pcm"; //Left Channel: Farend Signal, Right Channel: Nearend Input Signal (with echo)
90 | char strFileOut[128] = "stero_out.pcm"; //Left Channel: same as input, Right Channel: Nearend Output after AEC.
91 |
92 | cout << " Please give input file name:";
93 | cin >> strFileIn;
94 |
95 | fopen_s(&rfile, strFileIn, "rb");
96 | if (rfile == NULL){
97 | printf("file %s open fail.\n", strFileIn);
98 | return;
99 | }
100 | fopen_s(&wfile, strFileOut,"wb");
101 | assert(wfile!=NULL);
102 |
103 | cout << " Sound Card Clock Rate (kHz)?:";
104 | cin >> clock_rate;
105 | if ((clock_rate!=8) && (clock_rate!=16) && (clock_rate!=32) && (clock_rate!=48))
106 | {
107 | printf("Not Supported for your %d kHz.\n", clock_rate);
108 | fclose(rfile); rfile=NULL;
109 | fclose(wfile); wfile=NULL;
110 | return;
111 | }
112 | samples_per_frame = clock_rate*10; //10ms sample numbers
113 | clock_rate *= 1000; // kHz -> Hz
114 |
115 | cout << " System Delay (sound card buffer & application playout buffer) (ms)?:";
116 | cin >> system_delay;
117 | if (system_delay>320){ //To Be check
118 | printf("Not Supported for your system delay %d (ms).\n", system_delay);
119 | fclose(rfile); rfile=NULL;
120 | fclose(wfile); wfile=NULL;
121 | return;
122 | }
123 |
124 | if (0 != webrtc_aec_create(
125 | clock_rate,
126 | 1, // channel_count
127 | samples_per_frame, // clock_rate(kHz)*10(ms)
128 | system_delay, // system_delay (ms)
129 | 0, // options,
130 | (void**)&echo ) )
131 | {
132 | printf("%s:%d-Error on webrtc_aec_create()!\n", __FILE__, __LINE__);
133 | fclose(rfile);
134 | fclose(wfile);
135 | return;
136 | }
137 |
138 | file_buffer = (int16_t *)malloc( samples_per_frame * 2 * 2 ); //2 Bytes/Sample, 2 Channels: Left for Farend, Right for Nearend.
139 | assert( file_buffer != NULL );
140 | farend_buffer = (int16_t *)malloc( samples_per_frame * 2 ); //2 Bytes/Sample
141 | nearend_buffer = (int16_t *)malloc( samples_per_frame * 2 ); //2 Bytes/Sample
142 | assert( farend_buffer != NULL );
143 | assert( nearend_buffer!= NULL );
144 |
145 | while(1)
146 | {
147 | read_size = fread(file_buffer, sizeof(int16_t), 2*samples_per_frame, rfile);
148 | total_size += read_size;
149 | if (read_size != (2*samples_per_frame)){
150 | printf("File End. Total %d Bytes.\n", total_size<<1);
151 | break;
152 | }
153 |
154 | // Split into Farend and Nearend Signals
155 | for (i=0; i> autotest;
248 | if ((autotest[0]=='Y') || (autotest[0]=='y'))
249 | {
250 | printf("OK. Automatic Test Running now...\n");
251 | strcpy_s( strFilePlay, "001A_16k.pcm" );
252 | clock_rate = 16;
253 | system_delay = 80;
254 | bAutoTest = true;
255 | }
256 |
257 | if (bAutoTest==false){
258 | cout << " Please give input file name:";
259 | cin >> strFilePlay;
260 | }
261 |
262 | fopen_s(&rfile, strFilePlay, "rb");
263 | if (rfile == NULL){
264 | printf("file %s open fail.\n", strFilePlay);
265 | CloseAudio();
266 | return;
267 | }
268 |
269 | if (bAutoTest==false){
270 | cout << " Input File Clock Sample Rate (kHz)?:";
271 | cin >> clock_rate;
272 | }
273 | if ((clock_rate!=8) && (clock_rate!=16))
274 | {
275 | printf("Not Supported for your %d kHz. This AEC library only support 8kHz/16kHz\n", clock_rate);
276 | fclose(rfile); rfile=NULL;
277 | fclose(wfile); wfile=NULL;
278 | CloseAudio();
279 | return;
280 | }
281 | clock_rate *= 1000;
282 | samples_per_frame = clock_rate/100; //10ms sample numbers
283 |
284 | fopen_s(&wfile, strFileRecord,"wb");
285 | assert(wfile!=NULL);
286 |
287 | if (bAutoTest==false){
288 | cout << " System Delay (sound card buffer & application playout buffer) (ms)?:";
289 | cin >> system_delay;
290 | }
291 | if (system_delay>320){ //To Be check
292 | printf("Not Supported for your system delay %d (ms).\n", system_delay);
293 | fclose(rfile); rfile=NULL;
294 | fclose(wfile); wfile=NULL;
295 | CloseAudio();
296 | return;
297 | }
298 |
299 | printf("Two Resamplers are created between %d(Hz) and %d(Hz).\n", clock_rate, sndcard_clock_rate);
300 | if ( sndcard_clock_rate==44100 )
301 | {
302 | //WebRTC Resampler only support 44k!
303 | webrtc_resampler_create( clock_rate, 44000, &codec2sndcard_Resampler );
304 | webrtc_resampler_create( 44000, clock_rate, &sndcard2codec_Resampler );
305 | }
306 | else
307 | {
308 | webrtc_resampler_create( clock_rate, sndcard_clock_rate, &codec2sndcard_Resampler );
309 | webrtc_resampler_create( sndcard_clock_rate, clock_rate, &sndcard2codec_Resampler );
310 | }
311 |
312 | resampler_buffer = (int16_t *)malloc( sndcard_samples_per_frame * sizeof(int16_t) );
313 | assert(resampler_buffer != NULL);
314 |
315 | if (0 != webrtc_aec_create(
316 | clock_rate,
317 | 1, // channel_count
318 | samples_per_frame, // clock_rate(kHz)*10(ms)
319 | system_delay, // system_delay (ms)
320 | 0, // options,
321 | (void**)&echo ) )
322 | {
323 | printf("%s:%d-Error on webrtc_aec_create()!\n", __FILE__, __LINE__);
324 | fclose(rfile);
325 | fclose(wfile);
326 | CloseAudio();
327 | webrtc_resampler_destroy(codec2sndcard_Resampler);
328 | webrtc_resampler_destroy(sndcard2codec_Resampler);
329 | return;
330 | }
331 |
332 | infile_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) ); //Mono.
333 | assert( infile_buffer != NULL );
334 | outfile_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) *2 );//Stero. Put input to the Left Channel, and put Captured voice to the Right Channel.
335 | assert( outfile_buffer != NULL );
336 | farend_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) );
337 | nearend_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) );
338 | assert( farend_buffer != NULL );
339 | assert( nearend_buffer!= NULL );
340 |
341 | pCaptureData = (float*)malloc(m_pCaptureBuffer->m_iFrameSize_10ms * sizeof(float) * 2);
342 | assert(pCaptureData!=NULL);
343 | pRenderData = (float*)malloc(m_pRenderBuffer ->m_iFrameSize_10ms * sizeof(float) * 2);
344 | assert(pRenderData!=NULL);
345 |
346 | StartAudio();
347 | while(1)
348 | {
349 | Sleep(10);
350 |
351 | // Capture One Frame
352 | do{
353 | result = m_pCaptureBuffer->GetData(pCaptureData);
354 | if (result)
355 | {
356 | /*---- Each time when Captured one Frame, we need Playout one Frame to sync it. ----*/
357 |
358 | // Simulate Receiving one Packet and Playout
359 | {
360 | read_size = fread(farend_buffer, sizeof(int16_t), samples_per_frame, rfile);
361 | total_size += read_size;
362 | if (read_size != samples_per_frame){
363 | goto Exit;
364 | }
365 |
366 | // Resample from Codec to Soundcard
367 | int outLen = 0;
368 | webrtc_resampler_process(codec2sndcard_Resampler,
369 | farend_buffer,
370 | samples_per_frame,
371 | resampler_buffer,
372 | sndcard_samples_per_frame, outLen
373 | );
374 | //assert(outLen == sndcard_samples_per_frame);
375 | if ( sndcard_clock_rate==44100 ){
376 | // Special for 44.1kHz. Here temporary copy the last sample.
377 | resampler_buffer[outLen] = resampler_buffer[outLen-1];
378 | }
379 |
380 | // Playout One Frame
381 | {
382 | //Convert Mono Sound into Stero.
383 | for (i=0; iPutData( pRenderData, sndcard_samples_per_frame );
388 | }
389 | }
390 |
391 | // Convert SoundCard Captured Floating Format to INT16 Format
392 | for (i=0; iGetReadIndex(), m_pCaptureBuffer->GetWriteIndex(),
436 | m_pRenderBuffer ->GetReadIndex(), m_pRenderBuffer ->GetWriteIndex(),
437 | elapse_time);
438 |
439 | if (_kbhit()){ //Quit on Any Key Press
440 | break;
441 | }
442 | }
443 |
444 | Exit:
445 | CloseAudio();
446 | webrtc_resampler_destroy(codec2sndcard_Resampler);
447 | webrtc_resampler_destroy(sndcard2codec_Resampler);
448 |
449 | printf("\n");
450 | printf("Lost Frame Counter for Capturing: %d\n", m_pCaptureBuffer->GetLostFrmCount());
451 | printf("Lost Frame Counter for Rendering: %d\n", m_pRenderBuffer ->GetLostFrmCount());
452 |
453 | webrtc_aec_destroy( echo );
454 |
455 | if (rfile) { fclose(rfile); rfile=NULL; }
456 | if (wfile) { fclose(wfile); wfile=NULL; }
457 |
458 | if (infile_buffer ) free( infile_buffer );
459 | if (outfile_buffer) free( outfile_buffer);
460 | if (farend_buffer ) free( farend_buffer );
461 | if (nearend_buffer) free( nearend_buffer);
462 | if (pCaptureData ) free( pCaptureData );
463 | if (pRenderData ) free( pRenderData );
464 |
465 | return;
466 | }
467 |
468 |
469 | void RealTimeDelayTest()
470 | {
471 | unsigned elapse_time = 0;
472 | unsigned i = 0;
473 | size_t read_size = 0;
474 | unsigned total_size= 0;
475 |
476 | webrtc_ec * echo = NULL;
477 | unsigned clock_rate ;
478 | unsigned samples_per_frame;
479 | unsigned system_delay = 0;
480 |
481 | int16_t * file_buffer = NULL;
482 | int16_t * farend_buffer = NULL;
483 | int16_t * nearend_buffer = NULL;
484 |
485 | float * pCaptureData = NULL;
486 | float * pRenderData = NULL;
487 | bool result;
488 |
489 | char strFilePlay [128] = "delaytest.pcm"; //Mono, Which will be used as the playout sound.
490 | char strFileRecord[128] = "recorded.pcm"; //Stero. Left Channel: same contents as Play file, Right Channel: Recorded Sound
491 |
492 | InitAudioCaptureRender(clock_rate);
493 | printf("Sound Card IAudioClient Clock Rate = %d Hz\n", clock_rate);
494 | samples_per_frame = clock_rate/100; //10ms sample numbers
495 |
496 | sprintf_s(strFilePlay, "delaytest%d.pcm", clock_rate);
497 | fopen_s(&rfile, strFilePlay, "rb");
498 | if (rfile == NULL){
499 | printf("file %s open fail.\n", strFilePlay);
500 | CloseAudio();
501 | return;
502 | }
503 |
504 | fopen_s(&wfile, strFileRecord,"wb");
505 | assert(wfile!=NULL);
506 |
507 | file_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) * 2);//Stero.
508 | assert( file_buffer != NULL );
509 | farend_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) );
510 | nearend_buffer = (int16_t *)malloc( samples_per_frame * sizeof(int16_t) );
511 | assert( farend_buffer != NULL );
512 | assert( nearend_buffer!= NULL );
513 |
514 | pCaptureData = (float*)malloc(m_pCaptureBuffer->m_iFrameSize_10ms * sizeof(float) * 2);
515 | assert(pCaptureData!=NULL);
516 | pRenderData = (float*)malloc(m_pRenderBuffer ->m_iFrameSize_10ms * sizeof(float) * 2);
517 | assert(pRenderData!=NULL);
518 |
519 | StartAudio();
520 | while(1)
521 | {
522 | Sleep(10);
523 |
524 | // Capture One Frame
525 | do{
526 | result = m_pCaptureBuffer->GetData(pCaptureData);
527 | if (result)
528 | {
529 | /*---- Each time when Captured one Frame, we need Playout one Frame to sync it. ----*/
530 |
531 | // Simulate Receiving one Packet and Playout
532 | {
533 | read_size = fread(farend_buffer, sizeof(int16_t), samples_per_frame, rfile);
534 | total_size += read_size;
535 | if (read_size != samples_per_frame){
536 | goto Exit;
537 | }
538 |
539 | // Playout One Frame
540 | {
541 | //Convert Mono Sound into Stero.
542 | for (i=0; iPutData( pRenderData, samples_per_frame );
547 | }
548 | }
549 |
550 | // Save Playout & Captured Data in Same File for Checking the Delay
551 | for (i=0; iGetReadIndex(), m_pCaptureBuffer->GetWriteIndex(),
564 | m_pRenderBuffer ->GetReadIndex(), m_pRenderBuffer ->GetWriteIndex(),
565 | elapse_time);
566 |
567 | if (_kbhit()){ //Quit on Any Key Press
568 | break;
569 | }
570 | }
571 |
572 | Exit:
573 | CloseAudio();
574 |
575 | printf("Lost Frame Counter for Capturing: %d\n", m_pCaptureBuffer->GetLostFrmCount());
576 | printf("Lost Frame Counter for Rendering: %d\n", m_pRenderBuffer ->GetLostFrmCount());
577 |
578 | if (rfile) { fclose(rfile); rfile=NULL; }
579 | if (wfile) { fclose(wfile); wfile=NULL; }
580 |
581 | if (file_buffer ) free(file_buffer);
582 | if (farend_buffer ) free(farend_buffer);
583 | if (nearend_buffer) free(nearend_buffer);
584 | if (pCaptureData ) free(pCaptureData);
585 | if (pRenderData ) free(pRenderData );
586 |
587 | return;
588 | }
589 |
590 | #endif //_WEBRTC_API_EXPORTS
--------------------------------------------------------------------------------
/my_level_indicator.cc:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 | *
4 | * Use of this source code is governed by a BSD-style license
5 | * that can be found in the LICENSE file in the root of the source
6 | * tree. An additional intellectual property rights grant can be found
7 | * in the file PATENTS. All contributing project authors may
8 | * be found in the AUTHORS file in the root of the source tree.
9 | */
10 |
11 | #include "webrtc/typedefs.h"
12 | #include "webrtc_voe.h"
13 | #include
14 |
15 | // Number of bars on the indicator.
16 | // Note that the number of elements is specified because we are indexing it
17 | // in the range of 0-32
18 | const int8_t permutation[33] =
19 | {0,1,2,3,4,4,5,5,5,5,6,6,6,6,6,7,7,7,7,8,8,8,9,9,9,9,9,9,9,9,9,9,9};
20 |
21 | MyAudioLevel::MyAudioLevel() :
22 | //_critSect(*CriticalSectionWrapper::CreateCriticalSection()),
23 | _absMax(0),
24 | _count(0),
25 | _currentLevel(0),
26 | _currentLevelFullRange(0) {
27 | }
28 |
29 | MyAudioLevel::~MyAudioLevel() {
30 | //delete &_critSect;
31 | }
32 |
33 | void MyAudioLevel::Clear()
34 | {
35 | //CriticalSectionScoped cs(&_critSect);
36 | _absMax = 0;
37 | _count = 0;
38 | _currentLevel = 0;
39 | _currentLevelFullRange = 0;
40 | }
41 |
42 |
43 | // Maximum absolute value of word16 vector. C version for generic platforms.
44 | int16_t MyAudioLevel::My_WebRtcSpl_MaxAbsValueW16C(const int16_t* vector, int length) {
45 | int i = 0, absolute = 0, maximum = 0;
46 |
47 | if (vector == NULL || length <= 0) {
48 | return -1;
49 | }
50 |
51 | for (i = 0; i < length; i++) {
52 | absolute = (int)vector[i];
53 | if (absolute<0 ) absolute = -absolute;
54 |
55 | if (absolute > maximum) {
56 | maximum = absolute;
57 | }
58 | }
59 |
60 | // Guard the case for abs(-32768).
61 | if (maximum > 32767) {
62 | maximum = 32767;
63 | }
64 |
65 | return (int16_t)maximum;
66 | }
67 |
// Fold one audio frame into the running level statistics.
// Tracks the peak absolute sample (_absMax), and roughly 10 times per second
// (every kUpdateFrequency+1 calls) publishes it as _currentLevelFullRange and
// as a 0-9 bar level (_currentLevel) via the permutation table, then decays
// the peak. Interleaved stereo works too since only the peak sample matters.
void MyAudioLevel::ComputeLevel(const int16_t* audioFrame, int length)
{
    int16_t absValue(0);

    // Check speech level (works for 2 channels as well)
    absValue = My_WebRtcSpl_MaxAbsValueW16C(
        audioFrame,
        length);

    // Protect member access using a lock since this method is called on a
    // dedicated audio thread in the RecordedDataIsAvailable() callback.
    //CriticalSectionScoped cs(&_critSect);

    if (absValue > _absMax)
        _absMax = absValue;

    // Update level approximately 10 times per second
    if (_count++ == kUpdateFrequency)
    {
        _currentLevelFullRange = _absMax;

        _count = 0;

        // Highest value for a int16_t is 0x7fff = 32767.
        // Divide by 1000 to get into the range 0-32, which is the index
        // range of the permutation vector.
        int32_t position = _absMax/1000;

        // Make it less likely that the bar stays at position 0, i.e. only if
        // the peak is in the range 0-250 (instead of 0-1000).
        if ((position == 0) && (_absMax > 250))
        {
            position = 1;
        }
        _currentLevel = permutation[position];

        // Decay the absolute maximum (divide by 4)
        _absMax >>= 2;
    }
}
108 |
109 | int8_t MyAudioLevel::Level() const
110 | {
111 | //CriticalSectionScoped cs(&_critSect);
112 | return _currentLevel;
113 | }
114 |
115 | int16_t MyAudioLevel::Count() const
116 | {
117 | //CriticalSectionScoped cs(&_critSect);
118 | return _count;
119 | }
120 |
121 | int16_t MyAudioLevel::LevelFullRange() const
122 | {
123 | //CriticalSectionScoped cs(&_critSect);
124 | return _currentLevelFullRange;
125 | }
126 |
127 |
--------------------------------------------------------------------------------
/wasapi.cpp:
--------------------------------------------------------------------------------
1 |
2 | #include
3 | #include
4 | #include
5 |
6 | #include
7 | #include
8 | #include
9 | #include
10 | #include
11 | #include
12 |
13 | #include "wasapi.h"
14 |
// Jump to the function-local "Exit:" label when an HRESULT indicates failure.
// A negative HRESULT is exactly what FAILED() tests for; success codes are >= 0.
#define EXIT_ON_ERROR(res)  { if(res<0) { printf("wasapi.cpp:%d EXIT_ON_ERROR()\n", __LINE__); goto Exit; } }

// Idempotent cleanup helpers: each releases/frees its target and NULLs the
// pointer so a second invocation is harmless.
#define SAFE_DELETE(p)       { if(p) { delete (p);     (p)=NULL; } }
#define SAFE_DELETE_ARRAY(p) { if(p) { delete[] (p);   (p)=NULL; } }
#define SAFE_FREE(p)         { if(p) { free (p);       (p)=NULL; } }
#define SAFE_RELEASE(p)      { if(p) { (p)->Release(); (p)=NULL; } }
21 |
22 |
#define RFTIMES_PER_MILLISEC 10000 //// REFERENCE_TIME (100 ns) Time Units per Millisecond.

// WASAPI objects for the default communications capture (mic) and render
// (speaker) endpoints. All are created in InitAudioService()/
// InitEndpointDevice() and released in CloseAudio().
static IAudioClient * m_pAudioClient_Capture = NULL;
static IAudioClient * m_pAudioClient_Render = NULL;
static IAudioCaptureClient * m_pCaptureClient = NULL;
static IAudioRenderClient * m_pRenderClient = NULL;
static IMMDevice * m_pCaptureEndpointDevice= NULL;
static IMMDevice * m_pRenderEndpointDevice = NULL;

// Set once the corresponding IAudioClient::Start() has succeeded; used to
// suppress timeout log spam before the streams are running.
static bool m_fAudioCaptureStarted = false;
static bool m_fAudioRenderStarted = false;

// m_hAudio*Event: signaled by WASAPI when a device buffer is ready.
// m_hStop*ThreadEvent: asks the corresponding worker thread to exit.
// m_h*ThreadStopedEvent: the worker thread's acknowledgement, waited on in CloseAudio().
HANDLE m_hAudioCaptureEvent ;
HANDLE m_hAudioRenderEvent ;
HANDLE m_hStopCaptureThreadEvent;
HANDLE m_hStopRenderThreadEvent ;
static HANDLE m_hCaptureThreadStopedEvent ;
static HANDLE m_hRenderThreadStopedEvent ;

// Device buffer sizes, in WASAPI frames, as reported by GetBufferSize().
static UINT32 m_CaptureBufferFrameCount = 0;
static UINT32 m_RenderBufferFrameCount = 0;

// Packet rings between the WASAPI worker threads and the rest of the app.
IAudioCircleBuffer * m_pCaptureBuffer = NULL;
IAudioCircleBuffer * m_pRenderBuffer = NULL;
47 |
48 |
49 | /**********************************************************************************************
50 | * Circle Buffer for Cature and Render *
51 | **********************************************************************************************/
52 |
// Ring geometry, in floats. One grid slot holds one stereo packet at the
// highest supported rate: 2 channels * 48 samples/ms * 20 ms = 1920 floats.
#define MAX_LEN_CIRCLE 8
#define GRID_SIZE (2*48*20) //Suppose maximum 48kHz @ Stereo @ 20ms
#define CIRCLE_BUFF_SIZE (GRID_SIZE*MAX_LEN_CIRCLE)
56 |
57 |
58 | IAudioCircleBuffer::IAudioCircleBuffer()
59 | {
60 | iReadPos = 0;
61 | iWritePos = 0;
62 | iLostFrmCount = 0;
63 |
64 | m_pIAudioCircleBuffer = NULL;
65 | m_pIAudioCircleBuffer = (float*)malloc(CIRCLE_BUFF_SIZE * sizeof(float));
66 | assert(m_pIAudioCircleBuffer!=NULL);
67 | memset(m_pIAudioCircleBuffer, 0, CIRCLE_BUFF_SIZE * sizeof(float));
68 | }
69 |
70 | IAudioCircleBuffer::~IAudioCircleBuffer()
71 | {
72 | if (m_pIAudioCircleBuffer){
73 | free(m_pIAudioCircleBuffer);
74 | m_pIAudioCircleBuffer = NULL;
75 | }
76 | }
77 |
78 |
79 | unsigned IAudioCircleBuffer::GetLostFrmCount(void)
80 | {
81 | return iLostFrmCount;
82 | }
83 |
84 | unsigned IAudioCircleBuffer::GetReadIndex(void)
85 | {
86 | return iReadPos/GRID_SIZE;
87 | }
88 |
89 | unsigned IAudioCircleBuffer::GetWriteIndex(void)
90 | {
91 | return iWritePos/GRID_SIZE;
92 | }
93 |
94 | bool IAudioCircleBuffer::IsDataAvailable()
95 | {
96 | if (iReadPos != iWritePos)
97 | return true;
98 | else
99 | return false;
100 | }
101 |
102 | bool IAudioCircleBuffer::GetData(void *pReadTo)
103 | {
104 | float * pBuffer = NULL;
105 | if (iReadPos != iWritePos){
106 | pBuffer = &m_pIAudioCircleBuffer[iReadPos];
107 | memcpy( pReadTo, pBuffer, m_iFrameSize_10ms*sizeof(float)*2 ); // Stero
108 | iReadPos += GRID_SIZE;
109 | if (iReadPos >= CIRCLE_BUFF_SIZE){
110 | iReadPos = 0;
111 | }
112 | return true;
113 | }
114 | else
115 | return false;
116 | }
117 |
118 | void IAudioCircleBuffer::PutData(void *pData, UINT32 iNumFramesToRead)
119 | {
120 | float * pBuffer = NULL;
121 | unsigned iNextWritePos = 0;
122 | iNextWritePos = iWritePos + GRID_SIZE;
123 | if (iNextWritePos >= CIRCLE_BUFF_SIZE){
124 | iNextWritePos = 0;
125 | }
126 |
127 | if (iNextWritePos == iReadPos){
128 | //Buffer full, Throw this frame and count it.
129 | iLostFrmCount++;
130 | //printf("\nIAudioCircleBuffer::PutData() R/W=%d/%d. iLostFrmCount=%d\n", GetReadIndex(), GetWriteIndex(), iLostFrmCount);
131 | }
132 | else{
133 | pBuffer = &m_pIAudioCircleBuffer[iWritePos];
134 | assert(iNumFramesToRead <= GRID_SIZE);
135 |
136 | memcpy( pBuffer, pData, iNumFramesToRead*sizeof(float)*2 ); // Stero
137 | iWritePos = iNextWritePos;
138 | }
139 |
140 | return;
141 | }
142 |
143 | /**********************************************************************************************
144 | * IAudioClient Interface *
145 | **********************************************************************************************/
146 |
147 | #define DisplayWasapiError(C,D) _DisplayWasapiError(__FILE__, __LINE__, C, D)
148 | static void _DisplayWasapiError(const char* filename, int linenum, HRESULT res, char * method)
149 | {
150 | char *text = 0;
151 | switch(res){
152 | case S_OK: return;
153 | case E_POINTER :text ="E_POINTER"; break;
154 | case E_INVALIDARG :text ="E_INVALIDARG"; break;
155 | case AUDCLNT_E_NOT_INITIALIZED :text ="AUDCLNT_E_NOT_INITIALIZED"; break;
156 | case AUDCLNT_E_ALREADY_INITIALIZED :text ="AUDCLNT_E_ALREADY_INITIALIZED"; break;
157 | case AUDCLNT_E_WRONG_ENDPOINT_TYPE :text ="AUDCLNT_E_WRONG_ENDPOINT_TYPE"; break;
158 | case AUDCLNT_E_DEVICE_INVALIDATED :text ="AUDCLNT_E_DEVICE_INVALIDATED"; break;
159 | case AUDCLNT_E_NOT_STOPPED :text ="AUDCLNT_E_NOT_STOPPED"; break;
160 | case AUDCLNT_E_BUFFER_TOO_LARGE :text ="AUDCLNT_E_BUFFER_TOO_LARGE"; break;
161 | case AUDCLNT_E_OUT_OF_ORDER :text ="AUDCLNT_E_OUT_OF_ORDER"; break;
162 | case AUDCLNT_E_UNSUPPORTED_FORMAT :text ="AUDCLNT_E_UNSUPPORTED_FORMAT"; break;
163 | case AUDCLNT_E_INVALID_SIZE :text ="AUDCLNT_E_INVALID_SIZE"; break;
164 | case AUDCLNT_E_DEVICE_IN_USE :text ="AUDCLNT_E_DEVICE_IN_USE"; break;
165 | case AUDCLNT_E_BUFFER_OPERATION_PENDING :text ="AUDCLNT_E_BUFFER_OPERATION_PENDING"; break;
166 | case AUDCLNT_E_THREAD_NOT_REGISTERED :text ="AUDCLNT_E_THREAD_NOT_REGISTERED"; break;
167 | case AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED :text ="AUDCLNT_E_EXCLUSIVE_MODE_NOT_ALLOWED"; break;
168 | case AUDCLNT_E_ENDPOINT_CREATE_FAILED :text ="AUDCLNT_E_ENDPOINT_CREATE_FAILED"; break;
169 | case AUDCLNT_E_SERVICE_NOT_RUNNING :text ="AUDCLNT_E_SERVICE_NOT_RUNNING"; break;
170 | case AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED :text ="AUDCLNT_E_EVENTHANDLE_NOT_EXPECTED"; break;
171 | case AUDCLNT_E_EXCLUSIVE_MODE_ONLY :text ="AUDCLNT_E_EXCLUSIVE_MODE_ONLY"; break;
172 | case AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL :text ="AUDCLNT_E_BUFDURATION_PERIOD_NOT_EQUAL"; break;
173 | case AUDCLNT_E_EVENTHANDLE_NOT_SET :text ="AUDCLNT_E_EVENTHANDLE_NOT_SET"; break;
174 | case AUDCLNT_E_INCORRECT_BUFFER_SIZE :text ="AUDCLNT_E_INCORRECT_BUFFER_SIZE"; break;
175 | case AUDCLNT_E_BUFFER_SIZE_ERROR :text ="AUDCLNT_E_BUFFER_SIZE_ERROR"; break;
176 | case AUDCLNT_E_CPUUSAGE_EXCEEDED :text ="AUDCLNT_E_CPUUSAGE_EXCEEDED"; break;
177 | case AUDCLNT_S_BUFFER_EMPTY :text ="AUDCLNT_S_BUFFER_EMPTY"; break;
178 | case AUDCLNT_S_THREAD_ALREADY_REGISTERED :text ="AUDCLNT_S_THREAD_ALREADY_REGISTERED"; break;
179 | case AUDCLNT_S_POSITION_STALLED :text ="AUDCLNT_S_POSITION_STALLED"; break;
180 | default:
181 | text =" Unkown Error!";
182 | break;
183 |
184 | }
185 | printf("%s:%d-ERROR in %s / WASAPI ERROR HRESULT: %d : %s\n", filename, linenum, method, res, text);
186 | }
187 |
188 | static int CreateThreadNotification ();
189 |
190 |
// COM class/interface IDs resolved at compile time via the MSVC-specific
// __uuidof operator (this file is Windows/MSVC-only).
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
const IID IID_IAudioClient = __uuidof(IAudioClient);
const IID IID_IAudioCaptureClient = __uuidof(IAudioCaptureClient);
195 |
// Resolve the system default "communications" capture and render endpoints
// into m_pCaptureEndpointDevice / m_pRenderEndpointDevice.
// Also initializes COM for this thread (CoInitializeEx, multithreaded).
// Returns non-zero on success, 0 on failure.
static int InitEndpointDevice(void){
    HRESULT hr = S_OK;
    UINT count = 0;
    bool isFound = false;

    //IMMDeviceCollection * pCollection = NULL;
    //IPropertyStore * pProps = NULL;
    IMMDeviceEnumerator * pEnumerator = NULL;

    //printf("InitEndpointDevice() func enter.\n" );

    // Initialize COM, then create the device enumerator.
    hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
    EXIT_ON_ERROR(hr);

    hr = CoCreateInstance(
        CLSID_MMDeviceEnumerator, NULL,
        CLSCTX_ALL, IID_IMMDeviceEnumerator,
        (void**)&pEnumerator);
    EXIT_ON_ERROR(hr);

    // eCommunications: the device the user picked for voice calls, which may
    // differ from the default "console"/"multimedia" device.
    hr = pEnumerator->GetDefaultAudioEndpoint(eCapture, eCommunications, &m_pCaptureEndpointDevice);
    EXIT_ON_ERROR(hr);

    hr = pEnumerator->GetDefaultAudioEndpoint(eRender, eCommunications, &m_pRenderEndpointDevice);
    EXIT_ON_ERROR(hr);

Exit :
    if (FAILED(hr)) {
        DisplayWasapiError(hr, "InitEndpointDevice");
    }
    //SAFE_RELEASE(pProps);
    SAFE_RELEASE(pEnumerator);
    //SAFE_RELEASE(pCollection);
    return SUCCEEDED(hr);
}
232 |
233 |
234 | static int InitAudioService(unsigned &nSampleRate)
235 | {
236 | HRESULT hr = S_OK;
237 | WAVEFORMATEX *pwfx = NULL;
238 |
239 | if (!InitEndpointDevice())
240 | return 0;
241 |
242 | /**********************************************************************************************
243 | * Audio Capture Part *
244 | **********************************************************************************************/
245 |
246 | hr = m_pCaptureEndpointDevice->Activate(
247 | IID_IAudioClient, CLSCTX_ALL,
248 | NULL, (void**)&m_pAudioClient_Capture);
249 | EXIT_ON_ERROR(hr);
250 | if (!m_pAudioClient_Capture ){
251 | printf("%s:%d-IAudioClient Activate Failure!\n", __FILE__, __LINE__);
252 | return 0;
253 | }
254 |
255 | hr = m_pAudioClient_Capture->GetMixFormat(&pwfx);
256 | EXIT_ON_ERROR(hr);
257 |
258 | {
259 | printf("\nCapture Device MixFormat:\n");
260 | printf("MixFormat: wFormatTag=%d\n" , pwfx->wFormatTag);
261 | printf("MixFormat: nChannels=%d\n" , pwfx->nChannels);
262 | printf("MixFormat: nSamplesPerSec=%d\n" , pwfx->nSamplesPerSec);
263 | printf("MixFormat: nAvgBytesPerSec=%d\n", pwfx->nAvgBytesPerSec);
264 | printf("MixFormat: nBlockAlign=%d\n" , pwfx->nBlockAlign);
265 | printf("MixFormat: wBitsPerSample=%d\n" , pwfx->wBitsPerSample);
266 | printf("MixFormat: cbSize=%d\n" , pwfx->cbSize);
267 | assert(pwfx->nChannels==2);
268 | }
269 | m_pCaptureBuffer->m_iFrameSize_10ms = pwfx->nSamplesPerSec / 100;
270 |
271 | nSampleRate = pwfx->nSamplesPerSec;
272 |
273 | // Create a stream with the our format
274 | hr = m_pAudioClient_Capture->Initialize(AUDCLNT_SHAREMODE_SHARED,
275 | AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
276 | 20 * RFTIMES_PER_MILLISEC, // buffer duration: 10ms
277 | 20 * RFTIMES_PER_MILLISEC, // periodicity
278 | pwfx, // wave format
279 | NULL); // session GUID //FD_WIZARD
280 | EXIT_ON_ERROR(hr);
281 |
282 | // Get the actual size of the allocated buffer.
283 | hr = m_pAudioClient_Capture->GetBufferSize(&m_CaptureBufferFrameCount);
284 | EXIT_ON_ERROR(hr);
285 | printf("InitAudioService(): actual allocated capture buffer size: %d\n", m_CaptureBufferFrameCount);
286 |
287 | hr = m_pAudioClient_Capture->SetEventHandle( m_hAudioCaptureEvent );
288 | EXIT_ON_ERROR(hr);
289 |
290 | // Get the capture client
291 | hr = m_pAudioClient_Capture->GetService(__uuidof (IAudioCaptureClient), (void**)&m_pCaptureClient );
292 | EXIT_ON_ERROR(hr);
293 |
294 | /**********************************************************************************************
295 | * Audio Render Part *
296 | **********************************************************************************************/
297 |
298 | hr = m_pRenderEndpointDevice->Activate(
299 | IID_IAudioClient, CLSCTX_ALL,
300 | NULL, (void**)&m_pAudioClient_Render);
301 | EXIT_ON_ERROR(hr);
302 | if (!m_pAudioClient_Render ){
303 | printf("%s:%d-IAudioClient Activate Failure!\n", __FILE__, __LINE__);
304 | return 0;
305 | }
306 |
307 | hr = m_pAudioClient_Render->GetMixFormat(&pwfx);
308 | EXIT_ON_ERROR(hr);
309 |
310 | {
311 | printf("\nRender Device MixFormat:\n");
312 | printf("MixFormat: wFormatTag=%d\n" , pwfx->wFormatTag);
313 | printf("MixFormat: nChannels=%d\n" , pwfx->nChannels);
314 | printf("MixFormat: nSamplesPerSec=%d\n" , pwfx->nSamplesPerSec);
315 | printf("MixFormat: nAvgBytesPerSec=%d\n", pwfx->nAvgBytesPerSec);
316 | printf("MixFormat: nBlockAlign=%d\n" , pwfx->nBlockAlign);
317 | printf("MixFormat: wBitsPerSample=%d\n" , pwfx->wBitsPerSample);
318 | printf("MixFormat: cbSize=%d\n" , pwfx->cbSize);
319 | assert(pwfx->nChannels==2);
320 | }
321 | m_pRenderBuffer->m_iFrameSize_10ms = pwfx->nSamplesPerSec / 100;
322 |
323 | // Create a stream
324 | hr = m_pAudioClient_Render->Initialize(AUDCLNT_SHAREMODE_SHARED,
325 | AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
326 | 20 * RFTIMES_PER_MILLISEC, // buffer duration: 10ms
327 | 20 * RFTIMES_PER_MILLISEC, // periodicity
328 | pwfx, // wave format
329 | NULL); // session GUID //FD_WIZARD
330 | EXIT_ON_ERROR(hr);
331 |
332 | // Get the actual size of the allocated buffer.
333 | hr = m_pAudioClient_Render->GetBufferSize(&m_RenderBufferFrameCount);
334 | EXIT_ON_ERROR(hr);
335 | printf("InitAudioService(): actual allocated render buffer size: %d\n", m_RenderBufferFrameCount);
336 |
337 | hr = m_pAudioClient_Render->SetEventHandle( m_hAudioRenderEvent );
338 | EXIT_ON_ERROR(hr);
339 |
340 | // Get the render Client
341 | hr = m_pAudioClient_Render->GetService(__uuidof (IAudioRenderClient), (void**)&m_pRenderClient );
342 | EXIT_ON_ERROR(hr);
343 |
344 | /**********************************************************************************************
345 | * Start Audio Capturing & Rendering in the same time *
346 | **********************************************************************************************/
347 |
348 | CreateThreadNotification();
349 |
350 | // Before Starting Rendering, Make sure Render Buffer is filled for the 1st time with silence.
351 | {
352 | BYTE *pData = NULL;
353 |
354 | // Grab all the available space in the shared buffer.
355 | hr = m_pRenderClient->GetBuffer(m_RenderBufferFrameCount, &pData);
356 | EXIT_ON_ERROR(hr)
357 |
358 | hr = m_pRenderClient->ReleaseBuffer(m_RenderBufferFrameCount, AUDCLNT_BUFFERFLAGS_SILENT);
359 | EXIT_ON_ERROR(hr)
360 | }
361 |
362 | Exit :
363 | if(FAILED(hr)){
364 | DisplayWasapiError(hr, "InitAudioService()");
365 | }
366 |
367 | return SUCCEEDED(hr);
368 | }
369 |
// Start both WASAPI streams (render first, then capture) and flip the
// "started" flags that gate timeout logging in the worker threads.
// Returns non-zero on success, 0 on failure.
int StartAudio()
{
    HRESULT hr = S_OK;

    // Start up the render stream.
    printf("%s:%d-start the audio rendering\n", __FILE__, __LINE__ );
    hr = m_pAudioClient_Render->Start();
    EXIT_ON_ERROR(hr);
    m_fAudioRenderStarted = true;

    // Start up the capture stream.
    printf("%s:%d-start the audio capturing\n", __FILE__, __LINE__ );
    hr = m_pAudioClient_Capture->Start();
    EXIT_ON_ERROR(hr);
    m_fAudioCaptureStarted = true;

Exit :
    if(FAILED(hr)){
        DisplayWasapiError(hr, "StartAudio()");
    }

    return SUCCEEDED(hr);
}
393 |
394 |
// Drain every pending capture packet from the device into m_pCaptureBuffer.
// Called from the capture worker thread each time WASAPI signals
// m_hAudioCaptureEvent. Packets flagged AUDCLNT_BUFFERFLAGS_SILENT are
// currently dropped (nothing is queued for them) -- see the TODO.
static void ProcessCaptureStream()
{
    HRESULT hr;

    UINT32 numFramesAvailable = 0;
    UINT32 packetLength = 0;
    BYTE *pData = NULL;
    DWORD flags = 0;

    hr = m_pCaptureClient->GetNextPacketSize(&packetLength);
    EXIT_ON_ERROR(hr)

    // Loop until the device has no more complete packets queued.
    while (packetLength != 0)
    {
        // Get the available data in the shared buffer.
        hr = m_pCaptureClient->GetBuffer(
            &pData,
            &numFramesAvailable,
            &flags, NULL, NULL);
        EXIT_ON_ERROR(hr)

        if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
        {
            pData = NULL; // Tell CopyData to write silence.
            //TODO: currently a silent packet is simply skipped instead of
            //      being queued as zeros; downstream sees a missing packet.
        }

        // Copy the available capture data into the ring buffer.
        if (pData){
            // Event-driven shared mode delivers one period (10 ms) per event.
            assert(numFramesAvailable == m_pCaptureBuffer->m_iFrameSize_10ms);
            m_pCaptureBuffer->PutData( pData, numFramesAvailable);
            //printf("IAudioCircleBuffer::PutData() R/W=%d/%d\n", m_pCaptureBuffer->GetReadIndex(), m_pCaptureBuffer->GetWriteIndex());
        }

        // Always release, even for silent packets, or the stream stalls.
        hr = m_pCaptureClient->ReleaseBuffer(numFramesAvailable);
        EXIT_ON_ERROR(hr)

        hr = m_pCaptureClient->GetNextPacketSize(&packetLength);
        EXIT_ON_ERROR(hr)
    }

Exit :
    if(FAILED(hr)){
        DisplayWasapiError(hr, "ProcessCaptureStream");
    }

    return;
}
443 |
444 | static void ProcessRenderStream()
445 | {
446 | HRESULT hr;
447 |
448 | UINT32 numFramesAvailable;
449 | UINT32 numFramesPadding;
450 | BYTE *pData;
451 | float * pRenderData = NULL;
452 | bool result;
453 |
454 | if (!m_pRenderBuffer->IsDataAvailable()) // Wait data to playout but no available data! Possible reasons: Packet Lost, Jitter.
455 | {
456 | //Force a silence packet instead
457 | pRenderData = (float*)malloc( m_pRenderBuffer->m_iFrameSize_10ms * sizeof(float) * 2 ); //Stero
458 | assert(pRenderData!=NULL);
459 | memset(pRenderData, 0, m_pRenderBuffer->m_iFrameSize_10ms * sizeof(float) * 2 );
460 | m_pRenderBuffer->PutData( pRenderData, m_pRenderBuffer->m_iFrameSize_10ms );
461 | free(pRenderData);
462 | pRenderData = NULL;
463 | }
464 |
465 | while( m_pRenderBuffer->IsDataAvailable() )
466 | {
467 | // See how much buffer space is available.
468 | hr = m_pAudioClient_Render->GetCurrentPadding(&numFramesPadding);
469 | EXIT_ON_ERROR(hr)
470 |
471 | numFramesAvailable = m_RenderBufferFrameCount - numFramesPadding;
472 | if (numFramesAvailable >= m_pRenderBuffer->m_iFrameSize_10ms)
473 | {
474 | // Grab all the available space in the shared buffer.
475 | hr = m_pRenderClient->GetBuffer(m_pRenderBuffer->m_iFrameSize_10ms, &pData);
476 | EXIT_ON_ERROR(hr)
477 |
478 | // Copy the available render data to the audio client buffer.
479 | if (pData){
480 | result = m_pRenderBuffer->GetData(pData);
481 | assert(result == true);
482 | }
483 |
484 | hr = m_pRenderClient->ReleaseBuffer(m_pRenderBuffer->m_iFrameSize_10ms, 0);
485 | EXIT_ON_ERROR(hr)
486 | }
487 | else{
488 | break;
489 | }
490 | }
491 |
492 | Exit :
493 | if(FAILED(hr)){
494 | DisplayWasapiError(hr, "ProcessRenderStream");
495 | }
496 |
497 | return;
498 | }
499 |
// Capture worker thread. Loops until m_hStopCaptureThreadEvent is signaled:
// each WASAPI buffer-ready event triggers ProcessCaptureStream(). On exit it
// stops the capture stream and signals m_hCaptureThreadStopedEvent so
// CloseAudio() can finish its bounded wait.
DWORD WINAPI CaptureNotificationProc( LPVOID lpParam )
{
    bool bDone = false ;
    // NOTE(review): hTask is unused -- presumably a leftover from an MMCSS
    // (AvSetMmThreadCharacteristics) registration that was removed.
    HANDLE hTask = NULL;
    DWORD waitResult;

    while(!bDone)
    {
        // Check whether shutdown has been requested (non-blocking poll).
        if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hStopCaptureThreadEvent,0) )
        {
            printf("m_hStopCaptureThreadEvent is signaled\n") ;
            bDone = true;
            break;
        }

        //---- Capture Event ----/
        waitResult = WaitForSingleObject(m_hAudioCaptureEvent, 60); // 60ms Timeout
        if (WAIT_OBJECT_0==waitResult) // Signaled
        {
            ProcessCaptureStream();
        }
        else if (WAIT_TIMEOUT==waitResult) // Timeout
        {
            //TODO:
            // Only worth reporting once the stream has actually started.
            if ( m_fAudioCaptureStarted ){
                printf("\nNotificationProc(): m_hAudioCaptureEvent Wait Timeout!\n");
            }
        }
        else{
            printf("Wait Failure on Audio Capturing Signal !\n");
            bDone = true;
            break;
        }
    } //end of while

    m_pAudioClient_Capture->Stop();

    printf("End of Capture Notification Thread\n") ;
    if (!SetEvent( m_hCaptureThreadStopedEvent )){
        printf("%s:%d-SetEvent() Failure! m_hCaptureThreadStopedEvent can't send.\n", __FILE__, __LINE__);
    }

    return 0;
}
545 |
// Render worker thread, mirror image of CaptureNotificationProc(): loops
// until m_hStopRenderThreadEvent is signaled, feeding the device on every
// buffer-ready event, then stops the stream and acknowledges via
// m_hRenderThreadStopedEvent.
DWORD WINAPI RenderNotificationProc( LPVOID lpParam )
{
    bool bDone = false ;
    // NOTE(review): hTask is unused -- presumably a leftover from an MMCSS
    // registration that was removed.
    HANDLE hTask = NULL;
    DWORD waitResult;

    while(!bDone)
    {
        // Check whether shutdown has been requested (non-blocking poll).
        if ( WAIT_OBJECT_0 == WaitForSingleObject(m_hStopRenderThreadEvent,0) )
        {
            printf("m_hStopRenderThreadEvent is signaled\n") ;
            bDone = true;
            break;
        }

        //---- Render Event ----/
        waitResult = WaitForSingleObject(m_hAudioRenderEvent, 60); // 60ms Timeout
        if (WAIT_OBJECT_0==waitResult) // Signaled
        {
            ProcessRenderStream();
        }
        else if (WAIT_TIMEOUT==waitResult) // Timeout
        {
            //TODO:
            // Only worth reporting once the stream has actually started.
            if ( m_fAudioRenderStarted ){
                printf("\nNotificationProc(): m_hAudioRenderEvent Wait Timeout!\n");
            }
        }
        else{
            printf("Wait Failure on Audio Rendering Signal !\n");
            bDone = true;
            // NOTE(review): unlike the capture thread there is no "break"
            // here; harmless, since bDone ends the loop on the next test.
        }
    } //end of while

    m_pAudioClient_Render ->Stop();

    printf("End of Render Notification Thread\n") ;
    if (!SetEvent( m_hRenderThreadStopedEvent )){
        printf("%s:%d-SetEvent() Failure! m_hRenderThreadStopedEvent can't send.\n", __FILE__, __LINE__);
    }

    return 0;
}
590 |
591 |
592 | static int CreateThreadNotification ()
593 | {
594 | HANDLE hThread;
595 |
596 | /*---- Rendering Thread ----*/
597 |
598 | hThread = CreateThread( NULL, 0,
599 | RenderNotificationProc, NULL, 0, NULL);
600 | assert( hThread != NULL);
601 |
602 | if ( !SetThreadPriority( hThread , THREAD_PRIORITY_TIME_CRITICAL) ){
603 | printf("%s:%d-SetThreadPriority() failure.\n", __FILE__, __LINE__);
604 | }
605 |
606 | /*---- Capturing Thread ----*/
607 |
608 | hThread = CreateThread( NULL, 0,
609 | CaptureNotificationProc, NULL, 0, NULL);
610 | assert( hThread != NULL);
611 |
612 | if ( !SetThreadPriority( hThread , THREAD_PRIORITY_TIME_CRITICAL) ){
613 | printf("%s:%d-SetThreadPriority() failure.\n", __FILE__, __LINE__);
614 | }
615 |
616 | return 1;
617 | }
618 |
619 |
620 | int InitAudioCaptureRender(unsigned &nSampleRate)
621 | {
622 | HRESULT hr = S_OK;
623 |
624 | // Initialize Circle Buffer.
625 | m_pCaptureBuffer = new IAudioCircleBuffer;
626 | m_pRenderBuffer = new IAudioCircleBuffer;
627 | assert(m_pCaptureBuffer);
628 | assert(m_pRenderBuffer);
629 |
630 | m_hAudioCaptureEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
631 | assert(m_hAudioCaptureEvent != NULL);
632 |
633 | m_hAudioRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
634 | assert(m_hAudioRenderEvent != NULL);
635 |
636 | m_hStopCaptureThreadEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
637 | assert(m_hStopCaptureThreadEvent != NULL);
638 |
639 | m_hStopRenderThreadEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
640 | assert(m_hStopRenderThreadEvent != NULL);
641 |
642 | m_hCaptureThreadStopedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
643 | assert(m_hCaptureThreadStopedEvent != NULL);
644 |
645 | m_hRenderThreadStopedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
646 | assert(m_hRenderThreadStopedEvent != NULL);
647 |
648 | hr = InitAudioService(nSampleRate);
649 | EXIT_ON_ERROR(hr);
650 |
651 | Exit :
652 | if(FAILED(hr)){
653 | DisplayWasapiError(hr, "InitAudioCapture");
654 | }
655 |
656 | return SUCCEEDED(hr);
657 | }
658 |
659 | int CloseAudio()
660 | {
661 | DWORD waitResult;
662 | if (!SetEvent(m_hStopCaptureThreadEvent)){
663 | printf("SetEvent() Failure! Thread Stop Event can't Send.\n");
664 | }
665 | if (!SetEvent(m_hStopRenderThreadEvent)){
666 | printf("SetEvent() Failure! Thread Stop Event can't Send.\n");
667 | }
668 | printf("\n");
669 |
670 | waitResult =WaitForSingleObject(m_hCaptureThreadStopedEvent, 1000); // 1 second Timeout
671 | if (WAIT_OBJECT_0!=waitResult) // Signaled
672 | {
673 | if (WAIT_TIMEOUT==waitResult) // Timeout
674 | printf("CloseAudio(): Wait timeout for Thread exit.\n");
675 | else
676 | printf("CloseAudio(): Wait Error for Thread exit.\n");
677 | }
678 |
679 | waitResult =WaitForSingleObject(m_hRenderThreadStopedEvent, 1000); // 1 second Timeout
680 | if (WAIT_OBJECT_0!=waitResult) // Signaled
681 | {
682 | if (WAIT_TIMEOUT==waitResult) // Timeout
683 | printf("CloseAudio(): Wait timeout for Thread exit.\n");
684 | else
685 | printf("CloseAudio(): Wait Error for Thread exit.\n");
686 | }
687 |
688 | SAFE_RELEASE(m_pCaptureEndpointDevice);
689 | SAFE_RELEASE(m_pRenderEndpointDevice );
690 | SAFE_RELEASE(m_pAudioClient_Capture);
691 | SAFE_RELEASE(m_pAudioClient_Render );
692 | SAFE_RELEASE(m_pCaptureClient);
693 | SAFE_RELEASE(m_pRenderClient );
694 |
695 | if ( m_hStopCaptureThreadEvent ) CloseHandle(m_hStopCaptureThreadEvent);
696 | if ( m_hStopRenderThreadEvent ) CloseHandle(m_hStopRenderThreadEvent);
697 | if ( m_hAudioCaptureEvent ) CloseHandle(m_hAudioCaptureEvent);
698 | if ( m_hAudioRenderEvent ) CloseHandle(m_hAudioRenderEvent);
699 | if ( m_hCaptureThreadStopedEvent ) CloseHandle(m_hCaptureThreadStopedEvent);
700 | if ( m_hRenderThreadStopedEvent ) CloseHandle(m_hRenderThreadStopedEvent);
701 |
702 | if (m_pCaptureBuffer) delete m_pCaptureBuffer;
703 | if (m_pRenderBuffer ) delete m_pRenderBuffer;
704 |
705 | return 0;
706 | }
707 |
--------------------------------------------------------------------------------
/wasapi.h:
--------------------------------------------------------------------------------
1 |
2 | #ifndef _WASAPI_H
3 | #define _WASAPI_H
4 |
5 | int InitAudioCaptureRender(unsigned &nSampleRate);
6 | int StartAudio();
7 | int CloseAudio();
8 |
// Ring buffer of fixed-size audio packets (interleaved stereo float), used
// between the WASAPI worker threads and the application.
// NOTE(review): there is no internal locking; it appears to rely on a single
// reader thread and a single writer thread per instance -- confirm.
class IAudioCircleBuffer{
public:
	IAudioCircleBuffer();
	~IAudioCircleBuffer();

	// Number of packets dropped because the ring was full.
	unsigned GetLostFrmCount(void);
	// Read/write cursors expressed as slot indices.
	unsigned GetReadIndex(void);
	unsigned GetWriteIndex(void);

	// Pop one packet into pReadTo (returns false when empty) /
	// push one packet of iNumFramesToRead stereo frames (drops when full).
	bool GetData(void *pReadTo);
	void PutData(void *pData, UINT32 iNumFramesToRead);

	bool IsDataAvailable();

private:
	float *m_pIAudioCircleBuffer;   // backing store, CIRCLE_BUFF_SIZE floats
	unsigned iReadPos ;             // read cursor, in floats
	unsigned iWritePos;             // write cursor, in floats
	unsigned iLostFrmCount ;        // packets dropped on overflow

public:
	UINT32 m_iFrameSize_10ms; //Unit is Frame of IAudioClient. One 'Frame' means nBlockAlign Bytes.
};
32 |
33 | extern IAudioCircleBuffer * m_pCaptureBuffer;
34 | extern IAudioCircleBuffer * m_pRenderBuffer ;
35 |
36 |
37 | #endif //_WASAPI_H
--------------------------------------------------------------------------------
/webrtc_voe.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 Gary Yu. All Rights Reserved.
3 | *
4 | * URL: https://github.com/garyyu/WebRTC_VoiceEngine
5 | *
6 | * Use of this source code is governed by a New BSD license which can be found in the LICENSE file
7 | * in the root of the source tree. Refer to README.md.
8 | * For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
9 | */
10 |
11 | /*
12 | * This file contains the Wrapper of WebRTC Voice Engine.
13 | * You just need this header file , lib file , and dll file in the root folder.
14 | *
15 | */
16 |
17 | #ifndef __WRTC_VOICE_ENGINE_H__
18 | #define __WRTC_VOICE_ENGINE_H__
19 |
20 | #define _WEBRTC_API_EXPORTS // For DLL Building.
21 | #define _WEBRTC_FOR_PC
22 |
23 | #if defined(_WEBRTC_API_EXPORTS)
24 | #define WEBRTC_API __declspec(dllexport)
25 | #elif defined(_WEBRTC_API_IMPORTS)
26 | #define WEBRTC_API __declspec(dllimport)
27 | #else
28 | #define WEBRTC_API
29 | #endif
30 |
31 |
32 | #define VOE_TRUE 1
33 | #define VOE_FALSE 0
34 |
35 | typedef short int16_t;
36 |
// State bundle for the WebRTC echo-control wrapper. The void* members keep
// the concrete WebRTC types (AEC, noise suppressor, high-pass filter state)
// out of this public header.
typedef struct webrtc_ec
{
    void* AEC_inst;              // acoustic echo canceller instance
    void* NS_inst;               // noise-suppression instance
    void* HP_FilterState;        // high-pass filter state
    unsigned samples_per_frame;  // samples per processing frame
    unsigned echo_tail;          // echo tail -- presumably ms, matching webrtc_aec_create(tail_ms); confirm
    unsigned echo_skew;          // capture/render skew compensation -- TODO confirm unit
    unsigned clock_rate;         // sample rate in Hz
    unsigned blockLen10ms;       // samples in one 10 ms block at clock_rate
    int16_t* tmp_frame;          // scratch buffer
    int16_t* tmp_frame2;         // scratch buffer
} webrtc_ec;
50 |
51 |
// One AEC metric reported four ways: instantaneous value plus its
// average/max/min over the measurement window.
typedef struct {
    int instant;
    int average;
    int max;
    int min;
} MyAecLevel;
58 |
// Echo-canceller quality metrics (filled by webrtc_aec_get_metrics):
// residual echo return loss, echo return loss, ERL enhancement, and the
// A-NLP (non-linear processing) level.
typedef struct {
    MyAecLevel rerl;
    MyAecLevel erl;
    MyAecLevel erle;
    MyAecLevel aNlp;
} MyAecMetrics;
65 |
66 |
67 | #if defined(_WEBRTC_FOR_PC)
68 | #define WEBRTC_AEC_USE_MOBILE 0
69 | #else
70 | #define WEBRTC_AEC_USE_MOBILE 1
71 | #endif
72 |
73 | /************************************************************************/
74 | /* Volume Control API */
75 | /************************************************************************/
76 |
// DLL-exported wrapper around the WebRTC VoiceEngine volume-control API.
// The engine objects are stored as void* so this header needs no WebRTC
// includes; the real types are noted beside each member.
class WEBRTC_API WebRTCVolumeCtlImpl
{
private:
	void* m_voe;           //VoiceEngine*
	void* m_base;          //VoEBase*
	void* volume_control;  //VoEVolumeControl*
	//void* m_apm;		   //VoEAudioProcessing*
	//void* m_hardware;	   //VoEHardware*

public:

	WebRTCVolumeCtlImpl();
	~WebRTCVolumeCtlImpl();

	// Create/destroy the underlying VoiceEngine objects. Call
	// webrtc_voe_init() before any volume call, deinit when done.
	int  webrtc_voe_init();
	void webrtc_voe_deinit();

	/*--- Microphone Level Control. Valid range is [0,255]. ---*/
	int SetMicVolume(unsigned int level);
	int GetMicVolume(unsigned int &level);

	/*--- Speaker Level Control. Valid range is [0,255]. ---*/
	int SetSpkVolume(unsigned int volume);
	int GetSpkVolume(unsigned int &volume);
};
102 |
103 |
// DLL-exported audio level meter (trimmed copy of WebRTC's AudioLevel).
// Feed PCM frames to ComputeLevel(); read the 0-9 bar level via Level() or
// the raw 0-32767 peak via LevelFullRange(). Not internally synchronized.
class WEBRTC_API MyAudioLevel
{
public:
	MyAudioLevel();
	~MyAudioLevel();

	signed char  Level() const;           // bar level, 0-9
	signed short Count() const;           // frames since last publication
	signed short LevelFullRange() const;  // last published peak, 0-32767
	void Clear();                         // reset all state

	// Fold one frame of |length| samples into the statistics.
	void ComputeLevel(const signed short* audioFrame, int length);

private:
	// Level is published every kUpdateFrequency+1 calls (~10x/second).
	enum { kUpdateFrequency = 10};

	signed short _absMax;                 // decaying peak tracker
	signed short _count;                  // calls since last publication
	signed char  _currentLevel;           // published bar level
	signed short _currentLevelFullRange;  // published full-range peak

	signed short My_WebRtcSpl_MaxAbsValueW16C(const signed short* vector, int length);

};
128 |
129 |
130 | /************************************************************************/
131 | /* Main AEC API */
132 | /************************************************************************/
133 |
134 | extern "C" int WEBRTC_API webrtc_aec_create(
135 | unsigned clock_rate,
136 | unsigned channel_count,
137 | unsigned samples_per_frame,
138 | unsigned tail_ms,
139 | unsigned options,
140 | void **p_echo );
141 |
142 | extern "C" int WEBRTC_API webrtc_aec_destroy(void *state );
143 |
144 | extern "C" void WEBRTC_API webrtc_aec_reset(void *state );
145 | extern "C" int WEBRTC_API webrtc_aec_cancel_echo(void *state,
146 | int16_t *rec_frm,
147 | const int16_t *play_frm,
148 | unsigned framing,
149 | unsigned options,
150 | void *reserved );
151 |
152 | extern "C" int WEBRTC_API webrtc_aec_get_metrics(
153 | void *state,
154 | void *_aec_metrics );
155 |
156 | extern "C" int WEBRTC_API webrtc_aec_get_delay_metrics(
157 | void *state,
158 | int* median,
159 | int* std );
160 |
/************************************************************************/
/*                          Main Resampler API                          */
/************************************************************************/

// Create a synchronous resampler converting inFreq -> outFreq (Hz).
// *p_resampler receives the opaque instance. Returns 0 on success.
extern "C" int WEBRTC_API webrtc_resampler_create(
    int inFreq,
    int outFreq,
    void **p_resampler
    );

// Delete a resampler created above. Returns 0, or -1 when state is NULL.
extern "C" int WEBRTC_API webrtc_resampler_destroy(void *state );

// Re-configure an existing resampler for a new rate pair.
extern "C" int WEBRTC_API webrtc_resampler_reset( void *state, int inFreq, int outFreq );

// Convert lengthIn samples from samplesIn into samplesOut (capacity maxLen).
// outLen receives the number of output samples produced.
extern "C" int WEBRTC_API webrtc_resampler_process(void *state,
    const int16_t* samplesIn,
    int lengthIn,
    int16_t* samplesOut,
    int maxLen, int &outLen
    );
180 |
/************************************************************************/
/*          Voice processing configure: (AEC, NS, AGC, VAD)             */
/************************************************************************/

/*
** VAD aggressiveness mode for the given channel (`b` enables/disables):
**   0: lowest aggressiveness
**   1, 2: intermediate settings, increasing aggressiveness
**   3: highest aggressiveness
*/
int WebRTCVoe_SetVADStatus(int channelsid, bool b, int mode = 1);

/*
** AGC mode:
**   0: previously set mode
**   1: platform default
**   2: adaptive mode for use when analog volume control exists (e.g. for PC softphone)
**   3: scaling takes place in the digital domain (e.g. for conference servers and embedded devices)
**   4: can be used on embedded devices where the capture signal level is predictable
*/
int WebRTCVoe_SetAgcStatus(bool b, int mode = 1);

/*
** EC (echo control) mode:
**   0: previously set mode
**   1: platform default
**   2: conferencing default (aggressive AEC)
**   3: Acoustic Echo Cancellation
**   4: AEC mobile
*/
int WebRTCVoe_SetEcStatus(bool b, int mode = 3);

/*
** NS (noise suppression) mode:
**   0: previously set mode
**   1: platform default
**   2: conferencing default
**   3: lowest suppression
**   4: moderate suppression
**   5: high suppression
**   6: highest suppression
*/
int WebRTCVoe_SetNsStatus(bool b, int mode = 4);

// Getters mirroring the setters above: report enabled state and active mode.
int WebRTCVoe_GetVADStatus(int channelsid, bool &b, int &mode);
int WebRTCVoe_GetAgcStatus(bool &b, int &mode);
int WebRTCVoe_GetEcStatus(bool &b, int &mode) ;
int WebRTCVoe_GetNsStatus(bool &b, int &mode) ;
229 |
230 |
231 |
232 | #endif //__WRTC_VOICE_ENGINE_H__
--------------------------------------------------------------------------------
/webrtc_voe_impl.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 Gary Yu. All Rights Reserved.
3 | *
4 | * URL: https://github.com/garyyu/WebRTC_VoiceEngine
5 | *
6 | * Use of this source code is governed by a New BSD license which can be found in the LICENSE file
7 | * in the root of the source tree. Refer to README.md.
8 | * For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
9 | */
10 |
11 | /*
12 | * This file contains the Wrapper of WebRTC Voice Engine.
13 | *
14 | */
15 |
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "webrtc_voe_impl.h"
#include "common_audio/resampler/include/resampler.h"
#include "webrtc/modules/audio_processing/splitting_filter.h"
20 |
21 |
22 | #ifndef WEBRTC_AEC_AGGRESSIVENESS
23 | #define WEBRTC_AEC_AGGRESSIVENESS kAecNlpAggressive //kAecNlpConservative, kAecNlpModerate, kAecNlpAggressive
24 | #endif
25 |
26 | #ifndef WEBRTC_USE_NS
27 | #define WEBRTC_USE_NS VOE_FALSE //True: Enable NS, False: Disable NS.
28 | #endif
29 |
30 | #ifndef WEBRTC_NS_POLICY
31 | #define WEBRTC_NS_POLICY 2 //0: Mild, 1: Medium , 2: Aggressive
32 | #endif
33 |
34 | #if WEBRTC_AEC_USE_MOBILE == 1
35 | #include
36 | #define W_WebRtcAec_Create WebRtcAecm_Create
37 | #define W_WebRtcAec_Free WebRtcAecm_Free
38 | #define W_WebRtcAec_get_error_code WebRtcAecm_get_error_code
39 | #define W_WebRtcAec_Init(INST, CR) WebRtcAecm_Init(INST, CR)
40 | #define W_WebRtcAec_BufferFarend WebRtcAecm_BufferFarend
41 | #else
42 | #include
43 | #define W_WebRtcAec_Create WebRtcAec_Create
44 | #define W_WebRtcAec_Free WebRtcAec_Free
45 | #define W_WebRtcAec_get_error_code WebRtcAec_get_error_code
46 | #define W_WebRtcAec_Init(INST, CR) WebRtcAec_Init(INST, CR, CR)
47 | #define W_WebRtcAec_BufferFarend WebRtcAec_BufferFarend
48 | #endif
49 |
50 | #include
51 |
#ifdef _DEBUG
// Debug builds: after a VoE call that stores its result in `res`, print the
// call site and the engine's last error when res != 0. Relies on `res` and
// `base` being in scope at the expansion site.
#define VALIDATE \
	if (res != 0){ \
	printf("##%s(%i) ERROR:\n",__FILE__, __LINE__); \
	printf("  %s error, code = %i\n",__FUNCTION__, base->LastError()); \
	}

#else
// Release builds: validation compiles away to nothing.
#define VALIDATE
#endif

// Free a heap pointer and null it so a repeated free is harmless.
#define SAFE_FREE(p) { if (p) { free(p); p=NULL; } }

// Runtime noise-suppression switch, seeded from the compile-time default.
bool webrtc_use_ns = WEBRTC_USE_NS;
66 |
67 | //-----------------------------------------------------------------------------------//
68 |
69 | #define print_webrtc_aec_error(c,d) _print_webrtc_aec_error(__FILE__, __LINE__, c, d)
70 | static void _print_webrtc_aec_error(const char* filename, int linenum, const char* tag, void *AEC_inst) {
71 | unsigned status = W_WebRtcAec_get_error_code(AEC_inst);
72 | printf("%s:%d-WebRTC AEC ERROR (%s) %d\n", filename, linenum, tag, status);
73 | }
74 |
75 | //-----------------------------------------------------------------------------------//
76 |
77 | /*
78 | enum {
79 | kSamplesPer8kHzChannel = 80,
80 | kSamplesPer16kHzChannel = 160,
81 | kSamplesPer32kHzChannel = 320
82 | };
83 |
84 | struct MixedAudioChannel {
85 | MixedAudioChannel() {
86 | memset(data, 0, sizeof(data));
87 | }
88 |
89 | int16_t data[kSamplesPer32kHzChannel];
90 | };
91 |
92 | struct SplitAudioChannel {
93 | SplitAudioChannel() {
94 | memset(low_pass_data, 0, sizeof(low_pass_data));
95 | memset(high_pass_data, 0, sizeof(high_pass_data));
96 | memset(analysis_filter_state1, 0, sizeof(analysis_filter_state1));
97 | memset(analysis_filter_state2, 0, sizeof(analysis_filter_state2));
98 | memset(synthesis_filter_state1, 0, sizeof(synthesis_filter_state1));
99 | memset(synthesis_filter_state2, 0, sizeof(synthesis_filter_state2));
100 | }
101 |
102 | int16_t low_pass_data[kSamplesPer16kHzChannel];
103 | int16_t high_pass_data[kSamplesPer16kHzChannel];
104 |
105 | int32_t analysis_filter_state1[6];
106 | int32_t analysis_filter_state2[6];
107 | int32_t synthesis_filter_state1[6];
108 | int32_t synthesis_filter_state2[6];
109 | };
110 | */
111 |
112 | //-----------------------------------------------------------------------------------//
113 | //--- High Pass Filter ---//
114 |
// Biquad coefficients {b0, b1, b2, -a1, -a2} for the 8 kHz sample rate.
// Fixed-point; the filter below rounds in Q12 — see Filter().
const int16_t kFilterCoefficients8kHz[5] =
    {3798, -7596, 3798, 7807, -3733};

// Coefficient set used for all other sample rates.
const int16_t kFilterCoefficients[5] =
    {4012, -8024, 4012, 8002, -3913};

// Per-stream high-pass filter state: output history y (kept as high/low
// precision halves), input history x, and the active coefficient set ba.
struct FilterState {
  int16_t y[4];
  int16_t x[2];
  const int16_t* ba;
};
126 |
127 | static int InitializeFilter(FilterState* hpf, int sample_rate_hz) {
128 | assert(hpf != NULL);
129 |
130 | if (sample_rate_hz == 8000) {
131 | hpf->ba = kFilterCoefficients8kHz;
132 | } else {
133 | hpf->ba = kFilterCoefficients;
134 | }
135 |
136 | WebRtcSpl_MemSetW16(hpf->x, 0, 2);
137 | WebRtcSpl_MemSetW16(hpf->y, 0, 4);
138 |
139 | return 0;
140 | }
141 |
142 |
143 | static int Filter(FilterState* hpf, int16_t* data, int length) {
144 | assert(hpf != NULL);
145 |
146 | int32_t tmp_int32 = 0;
147 | int16_t* y = hpf->y;
148 | int16_t* x = hpf->x;
149 | const int16_t* ba = hpf->ba;
150 |
151 | for (int i = 0; i < length; i++) {
152 | // y[i] = b[0] * x[i] + b[1] * x[i-1] + b[2] * x[i-2]
153 | // + -a[1] * y[i-1] + -a[2] * y[i-2];
154 |
155 | tmp_int32 =
156 | WEBRTC_SPL_MUL_16_16(y[1], ba[3]); // -a[1] * y[i-1] (low part)
157 | tmp_int32 +=
158 | WEBRTC_SPL_MUL_16_16(y[3], ba[4]); // -a[2] * y[i-2] (low part)
159 | tmp_int32 = (tmp_int32 >> 15);
160 | tmp_int32 +=
161 | WEBRTC_SPL_MUL_16_16(y[0], ba[3]); // -a[1] * y[i-1] (high part)
162 | tmp_int32 +=
163 | WEBRTC_SPL_MUL_16_16(y[2], ba[4]); // -a[2] * y[i-2] (high part)
164 | tmp_int32 = (tmp_int32 << 1);
165 |
166 | tmp_int32 += WEBRTC_SPL_MUL_16_16(data[i], ba[0]); // b[0]*x[0]
167 | tmp_int32 += WEBRTC_SPL_MUL_16_16(x[0], ba[1]); // b[1]*x[i-1]
168 | tmp_int32 += WEBRTC_SPL_MUL_16_16(x[1], ba[2]); // b[2]*x[i-2]
169 |
170 | // Update state (input part)
171 | x[1] = x[0];
172 | x[0] = data[i];
173 |
174 | // Update state (filtered part)
175 | y[2] = y[0];
176 | y[3] = y[1];
177 | y[0] = static_cast(tmp_int32 >> 13);
178 | y[1] = static_cast((tmp_int32 -
179 | WEBRTC_SPL_LSHIFT_W32(static_cast(y[0]), 13)) << 2);
180 |
181 | // Rounding in Q12, i.e. add 2^11
182 | tmp_int32 += 2048;
183 |
184 | // Saturate (to 2^27) so that the HP filtered signal does not overflow
185 | tmp_int32 = WEBRTC_SPL_SAT(static_cast(134217727),
186 | tmp_int32,
187 | static_cast(-134217728));
188 |
189 | // Convert back to Q0 and use rounding
190 | data[i] = (int16_t)WEBRTC_SPL_RSHIFT_W32(tmp_int32, 12);
191 |
192 | }
193 |
194 | return 0;
195 | }
196 |
197 | //-----------------------------------------------------------------------------------//
198 |
199 | /*
200 | * Create the AEC.
201 | */
202 | extern "C" int WEBRTC_API webrtc_aec_create(
203 | unsigned clock_rate,
204 | unsigned channel_count,
205 | unsigned samples_per_frame,
206 | unsigned tail_ms,
207 | unsigned options,
208 | void **p_echo )
209 | {
210 | webrtc_ec *echo;
211 | //int sampling_rate;
212 | int status;
213 |
214 | *p_echo = NULL;
215 |
216 | echo = (webrtc_ec *) malloc(sizeof(webrtc_ec));
217 | assert(echo != NULL);
218 | memset(echo, 0, sizeof(webrtc_ec));
219 |
220 | // Alloc memory
221 | status = W_WebRtcAec_Create(&echo->AEC_inst);
222 | if(status){
223 | return -1; //No Memory
224 | }
225 | printf("%s:%d-Create webRTC AEC with clock rate %d\n", __FILE__, __LINE__, clock_rate);
226 |
227 | // Init
228 | status = W_WebRtcAec_Init(echo->AEC_inst,
229 | clock_rate);
230 |
231 | if(status != 0) {
232 | if (echo->AEC_inst) {
233 | print_webrtc_aec_error("Init", echo->AEC_inst);
234 | W_WebRtcAec_Free(echo->AEC_inst);
235 | echo->AEC_inst = NULL;
236 | }
237 | return -2; //Init Failure
238 | }
239 |
240 | // Set configuration -- sample code for future use
241 | #if WEBRTC_AEC_USE_MOBILE == 1
242 | AecmConfig aecm_config;
243 | aecm_config.cngMode = AecmTrue;
244 | aecm_config.echoMode = 4;
245 |
246 | status = WebRtcAecm_set_config(echo->AEC_inst, aecm_config);
247 | if(status != 0) {
248 | print_webrtc_aec_error("Init config", echo->AEC_inst);
249 | WebRtcAec_Free(echo->AEC_inst);
250 | echo->AEC_inst = NULL;
251 | return -1; // API Failure
252 | }
253 | #else
254 | AecConfig aec_config;
255 | aec_config.nlpMode = WEBRTC_AEC_AGGRESSIVENESS;
256 | aec_config.skewMode = kAecTrue;
257 | aec_config.metricsMode = kAecTrue;
258 | aec_config.delay_logging = kAecTrue;
259 |
260 | status = WebRtcAec_set_config(echo->AEC_inst, aec_config);
261 | if(status != 0) {
262 | print_webrtc_aec_error("Init config", echo->AEC_inst);
263 | WebRtcAec_Free(echo->AEC_inst);
264 | echo->AEC_inst = NULL;
265 | return -1; // API Failure
266 | }
267 | #endif
268 |
269 | if (webrtc_use_ns == VOE_TRUE){
270 | status = WebRtcNs_Create((NsHandle **)&echo->NS_inst);
271 | if(status != 0) {
272 | return -1; // No Memory
273 | }
274 |
275 | status = WebRtcNs_Init((NsHandle *)echo->NS_inst, clock_rate);
276 | if(status != 0) {
277 | if(echo->AEC_inst){
278 | W_WebRtcAec_Free(echo->AEC_inst);
279 | echo->AEC_inst = NULL;
280 | }
281 |
282 | if (echo->NS_inst) {
283 | printf("%s:%d-Could not initialize noise suppressor", __FILE__, __LINE__);
284 | WebRtcNs_Free((NsHandle *)echo->NS_inst);
285 | echo->NS_inst = NULL;
286 | }
287 | return -1; //Init Failure
288 | }
289 |
290 | status = WebRtcNs_set_policy((NsHandle *)echo->NS_inst, WEBRTC_NS_POLICY);
291 | if (status != 0) {
292 | printf("%s:%d-Could not set noise suppressor policy", __FILE__, __LINE__);
293 | }
294 | }else{
295 | echo->NS_inst = NULL;
296 | }
297 |
298 | // Initialize High Pass Filter
299 | echo->HP_FilterState = (FilterState*)malloc(sizeof(FilterState));
300 | InitializeFilter((FilterState*)(echo->HP_FilterState), clock_rate);
301 |
302 | echo->samples_per_frame = samples_per_frame;
303 | echo->echo_tail = tail_ms;
304 | echo->echo_skew = 0;
305 | echo->clock_rate = clock_rate;
306 | echo->blockLen10ms = (10 * channel_count * clock_rate / 1000);
307 |
308 | /* Create temporary frames for echo cancellation */
309 | echo->tmp_frame = (int16_t*) malloc(sizeof(int16_t)*MAX_FRAMING);
310 | assert(echo->tmp_frame != NULL);
311 | echo->tmp_frame2 = (int16_t*) malloc(sizeof(int16_t)*MAX_FRAMING);
312 | assert(echo->tmp_frame2 != NULL);
313 |
314 | /* Done */
315 | *p_echo = echo;
316 | return 0;
317 | }
318 |
319 |
320 | /*
321 | * Destroy AEC
322 | */
323 | extern "C" int WEBRTC_API webrtc_aec_destroy(void *state )
324 | {
325 | webrtc_ec *echo = (webrtc_ec*) state;
326 | assert(echo);
327 |
328 | if (echo->AEC_inst) {
329 | W_WebRtcAec_Free(echo->AEC_inst);
330 | echo->AEC_inst = NULL;
331 | }
332 | if (echo->NS_inst) {
333 | WebRtcNs_Free((NsHandle *)echo->NS_inst);
334 | echo->NS_inst = NULL;
335 | }
336 | SAFE_FREE(echo->tmp_frame );
337 | SAFE_FREE(echo->tmp_frame2);
338 | SAFE_FREE(echo->HP_FilterState);
339 |
340 | SAFE_FREE(echo);
341 |
342 | return 0;
343 | }
344 |
345 |
346 | /*
347 | * Reset AEC
348 | */
349 | extern "C" void WEBRTC_API webrtc_aec_reset(void *state )
350 | {
351 | webrtc_ec *echo = (webrtc_ec*) state;
352 | assert(echo != NULL);
353 | int status;
354 | /* re-initialize the EC */
355 | status = W_WebRtcAec_Init(echo->AEC_inst, echo->clock_rate);
356 | if(status != 0) {
357 | print_webrtc_aec_error("re-Init", echo->AEC_inst);
358 | return;
359 | } else {
360 |
361 | #if WEBRTC_AEC_USE_MOBILE == 1
362 | AecmConfig aecm_config;
363 | aecm_config.cngMode = AecmTrue;
364 | aecm_config.echoMode = 4;
365 |
366 | status = WebRtcAecm_set_config(echo->AEC_inst, aecm_config);
367 | if(status != 0) {
368 | print_webrtc_aec_error("re-Init config", echo->AEC_inst);
369 | return;
370 | }
371 | #else
372 | AecConfig aec_config;
373 | aec_config.nlpMode = WEBRTC_AEC_AGGRESSIVENESS;
374 | aec_config.skewMode = kAecTrue;
375 | aec_config.metricsMode = kAecTrue;
376 | aec_config.delay_logging = kAecTrue;
377 |
378 | status = WebRtcAec_set_config(echo->AEC_inst, aec_config);
379 | if(status != 0) {
380 | print_webrtc_aec_error("re-Init config", echo->AEC_inst);
381 | return;
382 | }
383 | #endif
384 | }
385 | printf("%s:%d-WebRTC AEC reset succeeded", __FILE__, __LINE__);
386 | }
387 |
388 |
389 | /*
390 | * Perform echo cancellation.
391 | */
392 | extern "C" int WEBRTC_API webrtc_aec_cancel_echo( void *state,
393 | int16_t *rec_frm,
394 | const int16_t *play_frm,
395 | unsigned framing,
396 | unsigned options,
397 | void *reserved )
398 | {
399 | webrtc_ec *echo = (webrtc_ec*) state;
400 | int status;
401 | unsigned i; //, tail_factor;
402 |
403 | /* Sanity checks */
404 | assert(echo && rec_frm && play_frm && options==0 && reserved==NULL);
405 | if ((echo==NULL) || (rec_frm==NULL) || (play_frm==NULL) || (framing>MAX_FRAMING)){
406 | return -1;
407 | }
408 | echo->samples_per_frame = framing; //for better flexibility, framing can be changed dynamically.
409 |
410 | //tail_factor = echo->samples_per_frame / echo->blockLen10ms;
411 | for(i=0; i < echo->samples_per_frame; i+= echo->blockLen10ms) {
412 |
413 | /* Feed farend buffer */
414 | status = W_WebRtcAec_BufferFarend(echo->AEC_inst, &play_frm[i], echo->blockLen10ms);
415 | if(status != 0) {
416 | print_webrtc_aec_error("buffer farend", echo->AEC_inst);
417 | return -1;
418 | }
419 |
420 | /* high pass filter */
421 | Filter((FilterState*)(echo->HP_FilterState), &rec_frm[i], echo->blockLen10ms);
422 |
423 | /* Process echo cancellation */
424 | #if WEBRTC_AEC_USE_MOBILE == 1
425 | status = WebRtcAecm_Process(echo->AEC_inst,
426 | (WebRtc_Word16 *) (&rec_frm[i]),
427 | (WebRtc_Word16 *) (&echo->tmp_frame[i]),
428 | echo->blockLen10ms,
429 | echo->echo_tail / tail_factor);
430 | #else
431 | status = WebRtcAec_Process(echo->AEC_inst,
432 | (WebRtc_Word16 *) (&rec_frm[i]),
433 | NULL,
434 | (WebRtc_Word16 *) (&echo->tmp_frame[i]),
435 | NULL,
436 | echo->blockLen10ms,
437 | (options==0?echo->echo_tail:options), // echo->echo_tail / tail_factor,
438 | echo->echo_skew);
439 | #endif
440 | if(status != 0){
441 | print_webrtc_aec_error("Process echo", echo->AEC_inst);
442 | return -1;
443 | }
444 |
445 | if(echo->NS_inst){
446 | /* Noise suppression */
447 | status = WebRtcNs_Process((NsHandle *)echo->NS_inst,
448 | (WebRtc_Word16 *) (&echo->tmp_frame[i]),
449 | NULL,
450 | (WebRtc_Word16 *) (&echo->tmp_frame2[i]),
451 | NULL);
452 | if (status != 0) {
453 | printf("%s:%d-Error suppressing noise", __FILE__, __LINE__);
454 | return -1;
455 | }
456 | }
457 | }
458 |
459 |
460 | /* Copy temporary buffer back to original rec_frm */
461 | memcpy(rec_frm,
462 | (echo->NS_inst)?(echo->tmp_frame2):(echo->tmp_frame),
463 | (echo->samples_per_frame)<<1);
464 |
465 | return 0;
466 | }
467 |
468 |
469 | /*
470 | * Inquiry echo cancellation metrics.
471 | */
472 | extern "C" int WEBRTC_API webrtc_aec_get_metrics( void *state, void *_aec_metrics )
473 | {
474 | webrtc_ec *echo = (webrtc_ec*) state;
475 |
476 | /* Sanity checks */
477 | assert(echo && _aec_metrics);
478 | if ((echo==NULL) || (_aec_metrics==NULL)){
479 | return -1;
480 | }
481 |
482 | if (0 == WebRtcAec_GetMetrics(echo->AEC_inst, (AecMetrics*)_aec_metrics) ){
483 | return 0;
484 | }
485 | else{
486 | return -1;
487 | }
488 | }
489 |
490 | /*
491 | * Inquiry echo cancellation delay metrics.
492 | */
493 | extern "C" int WEBRTC_API webrtc_aec_get_delay_metrics( void *state, int* median, int* std )
494 | {
495 | webrtc_ec *echo = (webrtc_ec*) state;
496 |
497 | /* Sanity checks */
498 | assert(echo && median && std);
499 | if ((echo==NULL) || (median==NULL) || (std==NULL)){
500 | return -1;
501 | }
502 |
503 | if (0 == WebRtcAec_GetDelayMetrics(echo->AEC_inst, median, std) ){
504 | return 0;
505 | }
506 | else{
507 | return -1;
508 | }
509 | }
510 |
511 | /**********************************************************************************************
512 | * WebRTC Resampler API *
513 | **********************************************************************************************/
514 |
515 | extern "C" int WEBRTC_API webrtc_resampler_create(
516 | int inFreq,
517 | int outFreq,
518 | void **p_resampler
519 | )
520 | {
521 | Resampler * p_objResampler = NULL;
522 |
523 | p_objResampler = new Resampler(inFreq, outFreq, kResamplerSynchronous);
524 | assert(p_objResampler!=NULL);
525 |
526 | *p_resampler = p_objResampler;
527 | return 0;
528 | }
529 |
530 | extern "C" int WEBRTC_API webrtc_resampler_destroy( void *state )
531 | {
532 | Resampler * p_objResampler = (Resampler *)state;
533 | if (p_objResampler!=NULL){
534 | delete p_objResampler;
535 | return 0;
536 | }
537 | else
538 | return -1;
539 | }
540 |
541 | extern "C" int WEBRTC_API webrtc_resampler_reset(void *state, int inFreq, int outFreq)
542 | {
543 | Resampler * p_objResampler = (Resampler *)state;
544 | if (p_objResampler!=NULL){
545 | p_objResampler->Reset(inFreq, outFreq, kResamplerSynchronous);
546 | return 0;
547 | }
548 | else
549 | return -1;
550 | }
551 |
552 | extern "C" int WEBRTC_API webrtc_resampler_process(void *state,
553 | const int16_t* samplesIn,
554 | int lengthIn,
555 | int16_t* samplesOut,
556 | int maxLen, int &outLen
557 | )
558 | {
559 | int iRetVal = 0;
560 | Resampler * p_objResampler = (Resampler *)state;
561 | if (p_objResampler!=NULL){
562 | iRetVal = p_objResampler->Push(samplesIn, lengthIn, samplesOut, maxLen, outLen);
563 | return iRetVal;
564 | }
565 | else
566 | return -1;
567 |
568 | }
569 |
570 |
--------------------------------------------------------------------------------
/webrtc_voe_impl.h:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 Gary Yu. All Rights Reserved.
3 | *
4 | * URL: https://github.com/garyyu/WebRTC_VoiceEngine
5 | *
6 | * Use of this source code is governed by a New BSD license which can be found in the LICENSE file
7 | * in the root of the source tree. Refer to README.md.
8 | * For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
9 | */
10 |
11 | /*
12 | * This file contains the Wrapper of WebRTC Voice Engine.
13 | *
14 | */
15 |
16 |
17 | #ifndef __WRTC_VOICE_ENGINE_IMPL_H__
18 | #define __WRTC_VOICE_ENGINE_IMPL_H__
19 |
20 |
21 | #include
22 | #include
23 |
24 | #include "webrtc_voe.h"
25 |
26 | #include "common_types.h"
27 | #include "voe_errors.h"
28 | #include "voe_base.h"
29 | #include "voe_codec.h"
30 | #include "voe_volume_control.h"
31 | #include "voe_audio_processing.h"
32 | #include "voe_file.h"
33 | #include "voe_hardware.h"
34 | #include "voe_network.h"
35 | #include "engine_configurations.h"
36 | #include "voe_neteq_stats.h"
37 | #include "voe_external_media.h"
38 | #include "voe_encryption.h"
39 | #include "voe_rtp_rtcp.h"
40 | #include "voe_video_sync.h"
41 | #include "channel_transport.h"
42 |
// NOTE(review): `using namespace` at header scope leaks these names into
// every includer; consider scoping or removing if this header spreads.
using namespace webrtc;
using namespace test;

// Back-compat aliases for the legacy WebRtc_* fixed-width integer names,
// mapped onto the <stdint.h> types.
typedef int8_t WebRtc_Word8;
typedef int16_t WebRtc_Word16;
typedef int32_t WebRtc_Word32;
typedef int64_t WebRtc_Word64;
typedef uint8_t WebRtc_UWord8;
typedef uint16_t WebRtc_UWord16;
typedef uint32_t WebRtc_UWord32;
typedef uint64_t WebRtc_UWord64;

// Upper bound on samples per processed frame.
#define MAX_FRAMING 960 // 60ms@16kHz = 960
56 |
57 | /*
58 | class WebRTCVoiceEngineImpl:public WebRTCVoiceEngine
59 | {
60 | private:
61 | VoiceEngine* m_voe;
62 | VoEBase* base;
63 | VoECodec* codec;
64 |
65 | public:
66 |
67 | WebRTCVoiceEngineImpl();
68 | virtual ~WebRTCVoiceEngineImpl();
69 |
70 | //Voice Processing Configurations
71 | virtual int WebRTCVoe_SetVADStatus(int channelsid, bool b, int mode);
72 | virtual int WebRTCVoe_SetAgcStatus(bool b, int mode);
73 | virtual int WebRTCVoe_SetEcStatus(bool b, int mode);
74 | virtual int WebRTCVoe_SetNsStatus(bool b, int mode);
75 | virtual int WebRTCVoe_GetVADStatus(int channelsid, bool &b, int &mode);
76 | virtual int WebRTCVoe_GetAgcStatus(bool &b, int &mode);
77 | virtual int WebRTCVoe_GetEcStatus(bool &b, int &mode);
78 | virtual int WebRTCVoe_GetNsStatus(bool &b, int &mode);
79 |
80 | };
81 | */
82 |
83 | #endif //__WRTC_VOICE_ENGINE_IMPL_H__
--------------------------------------------------------------------------------
/webrtc_volume_control_impl.cpp:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright (c) 2013 Gary Yu. All Rights Reserved.
3 | *
4 | * URL: https://github.com/garyyu/WebRTC_VoiceEngine
5 | *
6 | * Use of this source code is governed by a New BSD license which can be found in the LICENSE file
7 | * in the root of the source tree. Refer to README.md.
8 | * For WebRTC License & Patents information, please read files LICENSE.webrtc and PATENTS.webrtc.
9 | */
10 |
11 | /*
12 | * This file contains the Wrapper of WebRTC Voice Engine.
13 | *
14 | */
15 |
16 | #include
17 | #include "webrtc_voe_impl.h"
18 | #include "trace.h"
19 |
20 | // VoiceEngine* m_voe;
21 | // VoEBase* m_base;
22 | // VoEVolumeControl* volume_control;
23 | // VoEAudioProcessing* m_apm;
24 | // VoEHardware* m_hardware;
25 |
26 | #define kMyWebRTCTraceId 111
27 |
28 | WebRTCVolumeCtlImpl::WebRTCVolumeCtlImpl():
29 | m_voe(NULL),
30 | m_base(NULL),
31 | //m_apm(NULL),
32 | volume_control(NULL)
33 | //m_hardware(NULL)
34 | {
35 | return;
36 | }
37 |
38 |
39 | WebRTCVolumeCtlImpl::~WebRTCVolumeCtlImpl()
40 | {
41 | webrtc_voe_deinit();
42 |
43 | return;
44 | }
45 |
46 |
// Create the VoiceEngine and acquire the base and volume-control interfaces,
// then configure tracing and initialize the engine.
// Returns 0 on success; 1 when any interface could not be acquired;
// otherwise the result of VoEBase::Init().
// NOTE(review): on the `return 1` path the interfaces acquired so far are
// not released — confirm whether webrtc_voe_deinit() should run here.
int WebRTCVolumeCtlImpl::webrtc_voe_init()
{
    m_voe = webrtc::VoiceEngine::Create();
    m_base = webrtc::VoEBase::GetInterface( (webrtc::VoiceEngine*)m_voe );
    //m_apm = webrtc::VoEAudioProcessing::GetInterface((webrtc::VoiceEngine*)m_voe);
    volume_control = webrtc::VoEVolumeControl::GetInterface( (webrtc::VoiceEngine*)m_voe );
    //m_hardware = webrtc::VoEHardware::GetInterface((webrtc::VoiceEngine*)m_voe);

    //CHECK(m_voe != NULL, "Voice engine instance failed to be created");
    //CHECK(m_base != NULL, "Failed to acquire base interface");
    //CHECK(volume_control != NULL, "Failed to acquire volume interface");

    if ((m_voe == NULL) || (m_base==NULL) || (volume_control==NULL) /*|| (m_apm==NULL) || (m_hardware==NULL)*/){
        return 1;
    }

#if defined(_DEBUG)
    // Debug builds: log everything to a fixed trace file.
    webrtc::VoiceEngine::SetTraceFile("C:\\TEMP\\WebRTC-Trace.log", true);
    webrtc::VoiceEngine::SetTraceFilter(kTraceAll);
#else
    //Turn Off WebRTC Traces
    webrtc::VoiceEngine::SetTraceFilter(kTraceNone);
#endif

    return ((webrtc::VoEBase*)m_base)->Init();
}
73 |
74 |
75 | void WebRTCVolumeCtlImpl::webrtc_voe_deinit()
76 | {
77 | if (m_base){
78 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start terminate VoEBase.");
79 | ((webrtc::VoEBase*)m_base)->Terminate();
80 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoEBase terminated.");
81 | }
82 | else{
83 | return;
84 | }
85 |
86 | if (m_base){
87 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start release VoEBase.");
88 | ((webrtc::VoEBase*)m_base)->Release();
89 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoEBase released.");
90 | m_base = NULL;
91 | }
92 |
93 | if (volume_control){
94 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start release VoEVolumeControl.");
95 | ((webrtc::VoEVolumeControl*)volume_control)->Release();
96 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoEVolumeControl released.");
97 | volume_control = NULL;
98 | }
99 |
100 | /*
101 | if (m_apm){
102 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start release VoEAudioProcessing.");
103 | ((webrtc::VoEAudioProcessing*)m_apm)->Release();
104 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoEAudioProcessing released.");
105 | m_apm = NULL;
106 | }
107 |
108 | if (m_hardware){
109 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start release VoEHardware.");
110 | ((webrtc::VoEHardware*)m_hardware)->Release();
111 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoEHardware released.");
112 | m_hardware = NULL;
113 | }*/
114 |
115 | webrtc::VoiceEngine* voe = (webrtc::VoiceEngine*)m_voe;
116 | if (m_voe){
117 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): start Delete VoiceEngine.");
118 | webrtc::VoiceEngine::Delete(voe);
119 | WEBRTC_TRACE(kTraceApiCall, kTraceVoice, kMyWebRTCTraceId, "webrtc_voe_deinit(): VoiceEngine Deleted.");
120 | m_voe = NULL;
121 | }
122 |
123 | return;
124 | }
125 |
126 | /*--- Microphone Level Control. Valid range is [0,255]. ---*/
127 | int WebRTCVolumeCtlImpl::SetMicVolume(unsigned int level)
128 | {
129 | if (volume_control==NULL)
130 | return 1;
131 |
132 | // Set to 0 first in case the mic is above 100%.
133 | ((webrtc::VoEVolumeControl*)volume_control)->SetMicVolume(0);
134 |
135 | if (((webrtc::VoEVolumeControl*)volume_control)->SetMicVolume(level) != 0) {
136 | //failure
137 | return 1;
138 | }
139 |
140 | return 0;
141 | }
142 |
143 | /*--- Microphone Level Control. Valid range is [0,255]. ---*/
144 | int WebRTCVolumeCtlImpl::GetMicVolume(unsigned int &level)
145 | {
146 | if (volume_control==NULL)
147 | return 1;
148 |
149 | level = 0;
150 | if (((webrtc::VoEVolumeControl*)volume_control)->GetMicVolume(level) != 0) {
151 | //failure
152 | return 1;
153 | }
154 |
155 | return 0;
156 | }
157 |
158 | /*--- Speaker Level Control. Valid range is [0,255]. ---*/
159 | int WebRTCVolumeCtlImpl::SetSpkVolume(unsigned int volume)
160 | {
161 | if (volume_control==NULL)
162 | return 1;
163 |
164 | if (((webrtc::VoEVolumeControl*)volume_control)->SetSpeakerVolume(volume) != 0) {
165 | //failure
166 | return 1;
167 | }
168 |
169 | return 0;
170 | }
171 |
172 | /*--- Speaker Level Control. Valid range is [0,255]. ---*/
173 | int WebRTCVolumeCtlImpl::GetSpkVolume(unsigned int &volume)
174 | {
175 | if (volume_control==NULL)
176 | return 1;
177 |
178 | volume = 0;
179 | if (((webrtc::VoEVolumeControl*)volume_control)->GetSpeakerVolume(volume) != 0) {
180 | //failure
181 | return 1;
182 | }
183 |
184 | return 0;
185 | }
186 |
187 |
188 |
189 |
190 |
191 |
--------------------------------------------------------------------------------