/*
 * Copyright 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android/log.h>
#include <assert.h>
#include <jni.h>
#include <malloc.h>
#include <math.h>
#include <sys/types.h>

// for native audio
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include <SLES/OpenSLES_AndroidConfiguration.h>

#include "sync_clock.h"

// logging
#define APPNAME "WALT"

// engine interfaces
static SLObjectItf engineObject = NULL;
static SLEngineItf engineEngine = NULL;

// output mix interfaces
static SLObjectItf outputMixObject = NULL;

// buffer queue player interfaces
static SLObjectItf bqPlayerObject = NULL;
static SLPlayItf bqPlayerPlay = NULL;
static SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue = NULL;

// recorder interfaces
static SLObjectItf recorderObject = NULL;
static SLRecordItf recorderRecord;
static SLAndroidSimpleBufferQueueItf recorderBufferQueue;
static volatile int bqPlayerRecorderBusy = 0;

static unsigned int recorder_frames;
static short* recorderBuffer;
static unsigned recorderSize = 0;

static unsigned int framesPerBuffer;

#define CHANNELS 1  // 1 for mono, 2 for stereo

// Each short represents a 16-bit audio sample
static short* beepBuffer = NULL;
static short* silenceBuffer = NULL;
static unsigned int bufferSizeInBytes = 0;

#define MAXIMUM_AMPLITUDE_VALUE 32767

// how many times to play the wave table (so we can actually hear it)
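// Illustrative duration (figures assumed, not taken from this file): with 240-frame
// buffers at 48 kHz each buffer is 5 ms, so 10 buffers give roughly 50 ms of tone.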
#define BUFFERS_TO_PLAY 10

static unsigned buffersRemaining = 0;
static short warmedUp = 0;


// Timestamps
// te - enqueue time
// tc - callback time
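// te_play - when the first beep buffer was enqueued (playTone() on the cold path,
//           bqPlayerCallback() on the warm path)
// te_rec  - when the empty capture buffer was enqueued in startRecording()
// tc_rec  - when bqRecorderCallback() fired, i.e. the capture buffer had been filled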
int64_t te_play = 0, te_rec = 0, tc_rec = 0;

/**
 * Create wave tables for audio out.
 */
void createWaveTables() {
    bufferSizeInBytes = framesPerBuffer * sizeof(*beepBuffer);
    silenceBuffer = malloc(bufferSizeInBytes);
    beepBuffer = malloc(bufferSizeInBytes);

    __android_log_print(ANDROID_LOG_VERBOSE,
                        APPNAME,
                        "Creating wave tables, 1 channel. Frames: %i Buffer size (bytes): %i",
                        framesPerBuffer,
                        bufferSizeInBytes);

    unsigned int i;
    for (i = 0; i < framesPerBuffer; i++) {
        silenceBuffer[i] = 0;
        beepBuffer[i] = ((i & 2) - 1) * MAXIMUM_AMPLITUDE_VALUE;
        // This fills a buffer that looks like [min, min, max, max, min, min...]
        // which is a square wave at 1/4 the frequency of the sampling rate;
        // for 48kHz sampling this is a 12kHz pitch, still well audible.
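        // Worked example: i = 0,1 -> min, i = 2,3 -> max, i = 4,5 -> min, ...
        // so the period is 4 frames (hence fs / 4).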
    }
}

// this callback handler is called every time a buffer finishes playing
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, __attribute__((unused)) void *context)
{
    if (bq == NULL) {
        __android_log_print(ANDROID_LOG_ERROR, APPNAME, "buffer queue is null");
    }
    assert(bq == bqPlayerBufferQueue);
    assert(NULL == context);

    if (buffersRemaining > 0) {  // continue playing tone
        if (buffersRemaining == BUFFERS_TO_PLAY && warmedUp) {
            // Enqueue the first non-silent buffer and save the timestamp.
            // For a cold test the first Enqueue happens in playTone() rather than here.
            te_play = uptimeMicros();
        }
        buffersRemaining--;

        SLresult result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, beepBuffer,
                                                          bufferSizeInBytes);
        (void)result;
        assert(SL_RESULT_SUCCESS == result);
    } else if (warmedUp) {  // stop tone but keep playing silence
        SLresult result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, silenceBuffer,
                                                          bufferSizeInBytes);
        assert(SL_RESULT_SUCCESS == result);
        (void)result;
    } else {  // stop playing completely
        SLresult result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_STOPPED);
        assert(SL_RESULT_SUCCESS == result);
        (void)result;

        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Done playing tone");
    }
}

jlong Java_org_chromium_latency_walt_AudioTest_playTone(__attribute__((unused)) JNIEnv* env,
                                                        __attribute__((unused)) jclass clazz) {

    int64_t t_start = uptimeMicros();
    te_play = 0;

    SLresult result;

    if (!warmedUp) {
        result = (*bqPlayerBufferQueue)->Clear(bqPlayerBufferQueue);
        assert(SL_RESULT_SUCCESS == result);
        (void)result;

        // Enqueue first buffer
        te_play = uptimeMicros();
        result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, beepBuffer,
                                                 bufferSizeInBytes);
        assert(SL_RESULT_SUCCESS == result);
        (void)result;

        result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
        assert(SL_RESULT_SUCCESS == result);
        (void)result;

        int dt_state = uptimeMicros() - t_start;
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,
                            "playTone() changed state to playing dt=%d us", dt_state);
        // TODO: this block takes a long time (~13 ms on Nexus 7); investigate and decide
        // how to measure.
    }

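    // Warm path: the player is already looping on silenceBuffer, so the next
    // bqPlayerCallback() swaps in beepBuffer and records te_play there.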
    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Playing tone");
    buffersRemaining = BUFFERS_TO_PLAY;

    return (jlong) t_start;
}


// create the engine and output mix objects
void Java_org_chromium_latency_walt_AudioTest_createEngine(__attribute__((unused)) JNIEnv* env,
                                                           __attribute__((unused)) jclass clazz)
{
    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Creating audio engine");

    SLresult result;

    // create engine
    result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the engine
    result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the engine interface, which is needed in order to create other objects
    result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // create the output mix
    result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, NULL, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the output mix
    result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
}

void Java_org_chromium_latency_walt_AudioTest_destroyEngine(__attribute__((unused)) JNIEnv *env,
                                                            __attribute__((unused)) jclass clazz)
{
    if (bqPlayerObject != NULL) {
        (*bqPlayerObject)->Destroy(bqPlayerObject);
        bqPlayerObject = NULL;
    }

    if (outputMixObject != NULL) {
        (*outputMixObject)->Destroy(outputMixObject);
        outputMixObject = NULL;
    }

    if (engineObject != NULL) {
        (*engineObject)->Destroy(engineObject);
        engineObject = NULL;
    }
}

// create buffer queue audio player
void Java_org_chromium_latency_walt_AudioTest_createBufferQueueAudioPlayer(
        __attribute__((unused)) JNIEnv* env,
        __attribute__((unused)) jclass clazz,
        jint optimalFrameRate,
        jint optimalFramesPerBuffer)
{
    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,
                        "Creating audio player with frame rate %d and frames per buffer %d",
                        optimalFrameRate, optimalFramesPerBuffer);

    framesPerBuffer = optimalFramesPerBuffer;
    createWaveTables();

    SLresult result;

    // configure the audio source (supply data through a buffer queue in PCM format)
    SLDataLocator_AndroidSimpleBufferQueue locator_bufferqueue_source;
    SLDataFormat_PCM format_pcm;
    SLDataSource audio_source;

    // source location
    locator_bufferqueue_source.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
    locator_bufferqueue_source.numBuffers = 1;

    // source format
    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = 1;

    // Note: despite the name, samplesPerSec is really *frames* per second;
    // with 2 channels there are 2 samples per frame.
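    // OpenSL ES expresses samplesPerSec in milliHertz (e.g. SL_SAMPLINGRATE_48 is 48000000),
    // hence the * 1000 below.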
    format_pcm.samplesPerSec = (SLuint32) optimalFrameRate * 1000;
    format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
    format_pcm.containerSize = 16;
    format_pcm.channelMask = SL_SPEAKER_FRONT_CENTER;
    format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;

    audio_source.pLocator = &locator_bufferqueue_source;
    audio_source.pFormat = &format_pcm;

    // configure the output: an output mix sink
    SLDataLocator_OutputMix locator_output_mix;
    SLDataSink audio_sink;

    locator_output_mix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
    locator_output_mix.outputMix = outputMixObject;

    audio_sink.pLocator = &locator_output_mix;
    audio_sink.pFormat = NULL;

    // create the audio player
    // Note: requesting additional output interfaces here would route the audio through the
    // normal path rather than the fast (low-latency) path.
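    // The fast path also requires using the device's native sample rate and buffer size, which
    // is presumably why optimalFrameRate / optimalFramesPerBuffer are passed in from Java
    // (typically taken from AudioManager PROPERTY_OUTPUT_SAMPLE_RATE /
    // PROPERTY_OUTPUT_FRAMES_PER_BUFFER).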
    const SLInterfaceID interface_ids[2] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_VOLUME };
    const SLboolean interfaces_required[2] = { SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };

    result = (*engineEngine)->CreateAudioPlayer(
        engineEngine,
        &bqPlayerObject,
        &audio_source,
        &audio_sink,
        2,  // number of interfaces
        interface_ids,
        interfaces_required
    );

    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // realize the player
    result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the play interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface
    result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
                                             &bqPlayerBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // register callback on the buffer queue
    result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
}

void Java_org_chromium_latency_walt_AudioTest_startWarmTest(__attribute__((unused)) JNIEnv* env,
                                                            __attribute__((unused)) jclass clazz) {
    SLresult result;

    result = (*bqPlayerBufferQueue)->Clear(bqPlayerBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // enqueue some silence
    result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, silenceBuffer, bufferSizeInBytes);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // set the player's state to playing
    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    warmedUp = 1;
}

void Java_org_chromium_latency_walt_AudioTest_stopTests(__attribute__((unused)) JNIEnv *env,
                                                        __attribute__((unused)) jclass clazz) {
    SLresult result;

    result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_STOPPED);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    warmedUp = 0;
}

// this callback handler is called every time a buffer finishes recording
void bqRecorderCallback(__attribute__((unused)) SLAndroidSimpleBufferQueueItf bq,
                        __attribute__((unused)) void *context)
{
    tc_rec = uptimeMicros();
    assert(bq == recorderBufferQueue);
    assert(NULL == context);

    // For streaming recording we would call Enqueue here to hand the recorder the next buffer
    // to fill, but this is a one-time buffer, so we stop recording instead.
    SLresult result;
    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
    if (SL_RESULT_SUCCESS == result) {
        recorderSize = recorder_frames * sizeof(short);
    }
    bqPlayerRecorderBusy = 0;

    //// TODO: Use small buffers and re-enqueue each time
    // result = (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recorderBuffer,
    //                                          recorder_frames * sizeof(short));
    // assert(SL_RESULT_SUCCESS == result);
}

// create audio recorder
jboolean Java_org_chromium_latency_walt_AudioTest_createAudioRecorder(
        __attribute__((unused)) JNIEnv* env,
        __attribute__((unused)) jclass clazz,
        jint optimalFrameRate,
        jint framesToRecord)
{
    SLresult result;

    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,
                        "Creating audio recorder with frame rate %d and frames to record %d",
                        optimalFrameRate, framesToRecord);
    // Allocate buffer
    recorder_frames = framesToRecord;
    recorderBuffer = malloc(sizeof(*recorderBuffer) * recorder_frames);

    // configure audio source
    SLDataLocator_IODevice loc_dev = {
        SL_DATALOCATOR_IODEVICE,
        SL_IODEVICE_AUDIOINPUT,
        SL_DEFAULTDEVICEID_AUDIOINPUT,
        NULL
    };
    SLDataSource audioSrc = {&loc_dev, NULL};

    // configure audio sink
    SLDataLocator_AndroidSimpleBufferQueue loc_bq;
    loc_bq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
    loc_bq.numBuffers = 2;

    // sink format (PCM written into the buffer queue)
    SLDataFormat_PCM format_pcm;
    format_pcm.formatType = SL_DATAFORMAT_PCM;
    format_pcm.numChannels = CHANNELS;
    // Note: despite the name, samplesPerSec is really *frames* per second;
    // with 2 channels there are 2 samples per frame.
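    // As above, samplesPerSec is in milliHertz, hence the * 1000.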
    format_pcm.samplesPerSec = (SLuint32) optimalFrameRate * 1000;
    format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
    format_pcm.containerSize = 16;
    format_pcm.channelMask = SL_SPEAKER_FRONT_CENTER;
    format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;

    SLDataSink audioSnk = {&loc_bq, &format_pcm};

    // create audio recorder
    // (requires the RECORD_AUDIO permission)
    const SLInterfaceID id[2] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                 SL_IID_ANDROIDCONFIGURATION};
    const SLboolean req[2] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
    result = (*engineEngine)->CreateAudioRecorder(engineEngine,
                                                  &recorderObject,
                                                  &audioSrc,
                                                  &audioSnk,
                                                  sizeof(id)/sizeof(id[0]),
                                                  id, req);

    // Configure the voice recognition preset which has no
    // signal processing for lower latency.
    SLAndroidConfigurationItf inputConfig;
    result = (*recorderObject)->GetInterface(recorderObject,
                                             SL_IID_ANDROIDCONFIGURATION,
                                             &inputConfig);
    if (SL_RESULT_SUCCESS == result) {
        SLuint32 presetValue = SL_ANDROID_RECORDING_PRESET_VOICE_RECOGNITION;
        (*inputConfig)->SetConfiguration(inputConfig,
                                         SL_ANDROID_KEY_RECORDING_PRESET,
                                         &presetValue,
                                         sizeof(SLuint32));
    }

    // realize the audio recorder
    result = (*recorderObject)->Realize(recorderObject, SL_BOOLEAN_FALSE);
    if (SL_RESULT_SUCCESS != result) {
        return JNI_FALSE;
    }

    // get the record interface
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_RECORD, &recorderRecord);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // get the buffer queue interface
    result = (*recorderObject)->GetInterface(recorderObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
                                             &recorderBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // register callback on the buffer queue
    result = (*recorderBufferQueue)->RegisterCallback(recorderBufferQueue, bqRecorderCallback,
                                                      NULL);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,
                        "Audio recorder created, buffer size: %d frames",
                        recorder_frames);

    return JNI_TRUE;
}


// set the recording state for the audio recorder
void Java_org_chromium_latency_walt_AudioTest_startRecording(__attribute__((unused)) JNIEnv* env,
                                                             __attribute__((unused)) jclass clazz)
{
    SLresult result;

    if (bqPlayerRecorderBusy) {
        return;
    }
    // in case already recording, stop recording and clear buffer queue
    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_STOPPED);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
    result = (*recorderBufferQueue)->Clear(recorderBufferQueue);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // the buffer is not valid for playback yet
    recorderSize = 0;

    // enqueue an empty buffer to be filled by the recorder
    // (for streaming recording, we would enqueue at least 2 empty buffers to start things off)
    te_rec = uptimeMicros();  // TODO: investigate if it's better to time after SetRecordState
    tc_rec = 0;
    result = (*recorderBufferQueue)->Enqueue(recorderBufferQueue, recorderBuffer,
                                             recorder_frames * sizeof(short));
    // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
    // which for this code example would indicate a programming error
    assert(SL_RESULT_SUCCESS == result);
    (void)result;

    // start recording
    result = (*recorderRecord)->SetRecordState(recorderRecord, SL_RECORDSTATE_RECORDING);
    assert(SL_RESULT_SUCCESS == result);
    (void)result;
    bqPlayerRecorderBusy = 1;
}

jshortArray Java_org_chromium_latency_walt_AudioTest_getRecordedWave(
        JNIEnv *env,
        __attribute__((unused)) jclass cls)
{
    jshortArray result;
    result = (*env)->NewShortArray(env, recorder_frames);
    if (result == NULL) {
        return NULL;  /* out of memory error thrown */
    }
    (*env)->SetShortArrayRegion(env, result, 0, recorder_frames, recorderBuffer);
    return result;
}

jlong Java_org_chromium_latency_walt_AudioTest_getTcRec(__attribute__((unused)) JNIEnv *env,
                                                        __attribute__((unused)) jclass cls) {
    return (jlong) tc_rec;
}

jlong Java_org_chromium_latency_walt_AudioTest_getTeRec(__attribute__((unused)) JNIEnv *env,
                                                        __attribute__((unused)) jclass cls) {
    return (jlong) te_rec;
}

jlong Java_org_chromium_latency_walt_AudioTest_getTePlay(__attribute__((unused)) JNIEnv *env,
                                                         __attribute__((unused)) jclass cls) {
    return (jlong) te_play;
}