1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include <AudioToolbox/AudioServices.h>  // AudioSession
12 
13 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
14 
15 #include "webrtc/system_wrappers/interface/thread_wrapper.h"
16 #include "webrtc/system_wrappers/interface/trace.h"
17 
18 namespace webrtc {
19 AudioDeviceIPhone::AudioDeviceIPhone(const int32_t id)
20     :
21     _ptrAudioBuffer(NULL),
22     _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
23     _captureWorkerThread(NULL),
24     _captureWorkerThreadId(0),
25     _id(id),
26     _auVoiceProcessing(NULL),
27     _initialized(false),
28     _isShutDown(false),
29     _recording(false),
30     _playing(false),
31     _recIsInitialized(false),
32     _playIsInitialized(false),
33     _recordingDeviceIsSpecified(false),
34     _playoutDeviceIsSpecified(false),
35     _micIsInitialized(false),
36     _speakerIsInitialized(false),
37     _AGC(false),
38     _adbSampFreq(0),
39     _recordingDelay(0),
40     _playoutDelay(0),
41     _playoutDelayMeasurementCounter(9999),
42     _recordingDelayHWAndOS(0),
43     _recordingDelayMeasurementCounter(9999),
44     _playWarning(0),
45     _playError(0),
46     _recWarning(0),
47     _recError(0),
48     _playoutBufferUsed(0),
49     _recordingCurrentSeq(0),
50     _recordingBufferTotalSize(0) {
51     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
52                  "%s created", __FUNCTION__);
53 
54     memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
55     memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
56     memset(_recordingLength, 0, sizeof(_recordingLength));
57     memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
58 }
59 
60 AudioDeviceIPhone::~AudioDeviceIPhone() {
61     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
62                  "%s destroyed", __FUNCTION__);
63 
64     Terminate();
65 
66     delete &_critSect;
67 }
68 
69 
70 // ============================================================================
71 //                                     API
72 // ============================================================================
73 
74 void AudioDeviceIPhone::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
75     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
76                  "%s", __FUNCTION__);
77 
78     CriticalSectionScoped lock(&_critSect);
79 
80     _ptrAudioBuffer = audioBuffer;
81 
82     // inform the AudioBuffer about default settings for this implementation
83     _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
84     _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
85     _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
86     _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
87 }
88 
89 int32_t AudioDeviceIPhone::ActiveAudioLayer(
90     AudioDeviceModule::AudioLayer& audioLayer) const {
91     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
92                  "%s", __FUNCTION__);
93     audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
94     return 0;
95 }
96 
97 int32_t AudioDeviceIPhone::Init() {
98     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
99                  "%s", __FUNCTION__);
100 
101     CriticalSectionScoped lock(&_critSect);
102 
103     if (_initialized) {
104         return 0;
105     }
106 
107     _isShutDown = false;
108 
109     // Create and start capture thread
110     if (_captureWorkerThread == NULL) {
111         _captureWorkerThread
112             = ThreadWrapper::CreateThread(RunCapture, this, kRealtimePriority,
113                                           "CaptureWorkerThread");
114 
115         if (_captureWorkerThread == NULL) {
116             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice,
117                          _id, "CreateThread() error");
118             return -1;
119         }
120 
121         unsigned int threadID(0);
122         bool res = _captureWorkerThread->Start(threadID);
123         _captureWorkerThreadId = static_cast<uint32_t>(threadID);
124         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
125                      _id, "CaptureWorkerThread started (res=%d)", res);
126     } else {
127         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
128                      _id, "Thread already created");
129     }
130     _playWarning = 0;
131     _playError = 0;
132     _recWarning = 0;
133     _recError = 0;
134 
135     _initialized = true;
136 
137     return 0;
138 }
139 
140 int32_t AudioDeviceIPhone::Terminate() {
141     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
142                  "%s", __FUNCTION__);
143 
144     if (!_initialized) {
145         return 0;
146     }
147 
148 
149     // Stop capture thread
150     if (_captureWorkerThread != NULL) {
151         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
152                      _id, "Stopping CaptureWorkerThread");
153         bool res = _captureWorkerThread->Stop();
154         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
155                      _id, "CaptureWorkerThread stopped (res=%d)", res);
156         delete _captureWorkerThread;
157         _captureWorkerThread = NULL;
158     }
159 
160     // Shut down Audio Unit
161     ShutdownPlayOrRecord();
162 
163     _isShutDown = true;
164     _initialized = false;
165     _speakerIsInitialized = false;
166     _micIsInitialized = false;
167     _playoutDeviceIsSpecified = false;
168     _recordingDeviceIsSpecified = false;
169     return 0;
170 }
171 
172 bool AudioDeviceIPhone::Initialized() const {
173     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
174                  "%s", __FUNCTION__);
175     return (_initialized);
176 }
177 
178 int32_t AudioDeviceIPhone::InitSpeaker() {
179     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
180                  "%s", __FUNCTION__);
181 
182     CriticalSectionScoped lock(&_critSect);
183 
184     if (!_initialized) {
185         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
186                      _id, "  Not initialized");
187         return -1;
188     }
189 
190     if (_playing) {
191         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
192                      _id, "  Cannot init speaker when playing");
193         return -1;
194     }
195 
196     if (!_playoutDeviceIsSpecified) {
197         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
198                      _id, "  Playout device is not specified");
199         return -1;
200     }
201 
202     // Do nothing
203     _speakerIsInitialized = true;
204 
205     return 0;
206 }
207 
208 int32_t AudioDeviceIPhone::InitMicrophone() {
209     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
210                  "%s", __FUNCTION__);
211 
212     CriticalSectionScoped lock(&_critSect);
213 
214     if (!_initialized) {
215         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
216                      _id, "  Not initialized");
217         return -1;
218     }
219 
220     if (_recording) {
221         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
222                      _id, "  Cannot init mic when recording");
223         return -1;
224     }
225 
226     if (!_recordingDeviceIsSpecified) {
227         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
228                      _id, "  Recording device is not specified");
229         return -1;
230     }
231 
232     // Do nothing
233 
234     _micIsInitialized = true;
235 
236     return 0;
237 }
238 
239 bool AudioDeviceIPhone::SpeakerIsInitialized() const {
240     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
241                  "%s", __FUNCTION__);
242     return _speakerIsInitialized;
243 }
244 
245 bool AudioDeviceIPhone::MicrophoneIsInitialized() const {
246     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
247                  "%s", __FUNCTION__);
248     return _micIsInitialized;
249 }
250 
251 int32_t AudioDeviceIPhone::SpeakerVolumeIsAvailable(bool& available) {
252     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
253                  "%s", __FUNCTION__);
254 
255     available = false;  // Speaker volume not supported on iOS
256 
257     return 0;
258 }
259 
260 int32_t AudioDeviceIPhone::SetSpeakerVolume(uint32_t volume) {
261     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
262                  "AudioDeviceIPhone::SetSpeakerVolume(volume=%u)", volume);
263 
264     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
265                  "  API call not supported on this platform");
266     return -1;
267 }
268 
269 int32_t AudioDeviceIPhone::SpeakerVolume(uint32_t& volume) const {
270     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
271                  "%s", __FUNCTION__);
272 
273     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
274                  "  API call not supported on this platform");
275     return -1;
276 }
277 
278 int32_t
279     AudioDeviceIPhone::SetWaveOutVolume(uint16_t volumeLeft,
280                                         uint16_t volumeRight) {
281     WEBRTC_TRACE(
282         kTraceModuleCall,
283         kTraceAudioDevice,
284         _id,
285         "AudioDeviceIPhone::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
286         volumeLeft, volumeRight);
287 
288     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
289                  "  API call not supported on this platform");
290 
291     return -1;
292 }
293 
294 int32_t
295 AudioDeviceIPhone::WaveOutVolume(uint16_t& /*volumeLeft*/,
296                                  uint16_t& /*volumeRight*/) const {
297     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
298                  "%s", __FUNCTION__);
299 
300     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
301                  "  API call not supported on this platform");
302     return -1;
303 }
304 
305 int32_t
306     AudioDeviceIPhone::MaxSpeakerVolume(uint32_t& maxVolume) const {
307     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
308                  "%s", __FUNCTION__);
309 
310     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
311                  "  API call not supported on this platform");
312     return -1;
313 }
314 
315 int32_t AudioDeviceIPhone::MinSpeakerVolume(
316     uint32_t& minVolume) const {
317     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
318                  "%s", __FUNCTION__);
319 
320     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
321                  "  API call not supported on this platform");
322     return -1;
323 }
324 
325 int32_t
326     AudioDeviceIPhone::SpeakerVolumeStepSize(uint16_t& stepSize) const {
327     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
328                  "%s", __FUNCTION__);
329 
330     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
331                  "  API call not supported on this platform");
332     return -1;
333 }
334 
335 int32_t AudioDeviceIPhone::SpeakerMuteIsAvailable(bool& available) {
336     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
337                  "%s", __FUNCTION__);
338 
339     available = false;  // Speaker mute not supported on iOS
340 
341     return 0;
342 }
343 
344 int32_t AudioDeviceIPhone::SetSpeakerMute(bool enable) {
345     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
346                  "%s", __FUNCTION__);
347 
348     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
349                  "  API call not supported on this platform");
350     return -1;
351 }
352 
353 int32_t AudioDeviceIPhone::SpeakerMute(bool& enabled) const {
354     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
355                  "%s", __FUNCTION__);
356 
357     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
358                  "  API call not supported on this platform");
359     return -1;
360 }
361 
362 int32_t AudioDeviceIPhone::MicrophoneMuteIsAvailable(bool& available) {
363     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
364                  "%s", __FUNCTION__);
365 
366     available = false;  // Mic mute not supported on iOS
367 
368     return 0;
369 }
370 
371 int32_t AudioDeviceIPhone::SetMicrophoneMute(bool enable) {
372     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
373                  "%s", __FUNCTION__);
374 
375     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
376                  "  API call not supported on this platform");
377     return -1;
378 }
379 
380 int32_t AudioDeviceIPhone::MicrophoneMute(bool& enabled) const {
381     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
382                  "%s", __FUNCTION__);
383 
384     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
385                  "  API call not supported on this platform");
386     return -1;
387 }
388 
389 int32_t AudioDeviceIPhone::MicrophoneBoostIsAvailable(bool& available) {
390     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
391                  "%s", __FUNCTION__);
392 
393     available = false;  // Mic boost not supported on iOS
394 
395     return 0;
396 }
397 
398 int32_t AudioDeviceIPhone::SetMicrophoneBoost(bool enable) {
399     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
400                  "AudioDeviceIPhone::SetMicrophoneBoost(enable=%u)", enable);
401 
402     if (!_micIsInitialized) {
403         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
404                      "  Microphone not initialized");
405         return -1;
406     }
407 
408     if (enable) {
409         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
410                      "  SetMicrophoneBoost cannot be enabled on this platform");
411         return -1;
412     }
413 
414     return 0;
415 }
416 
417 int32_t AudioDeviceIPhone::MicrophoneBoost(bool& enabled) const {
418     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
419                  "%s", __FUNCTION__);
420     if (!_micIsInitialized) {
421         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
422                      "  Microphone not initialized");
423         return -1;
424     }
425 
426     enabled = false;
427 
428     return 0;
429 }
430 
431 int32_t AudioDeviceIPhone::StereoRecordingIsAvailable(bool& available) {
432     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
433                  "%s", __FUNCTION__);
434 
435     available = false;  // Stereo recording not supported on iOS
436 
437     return 0;
438 }
439 
440 int32_t AudioDeviceIPhone::SetStereoRecording(bool enable) {
441     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
442                  "AudioDeviceIPhone::SetStereoRecording(enable=%u)", enable);
443 
444     if (enable) {
445         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
446                      " Stereo recording is not supported on this platform");
447         return -1;
448     }
449     return 0;
450 }
451 
452 int32_t AudioDeviceIPhone::StereoRecording(bool& enabled) const {
453     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
454                  "%s", __FUNCTION__);
455 
456     enabled = false;
457     return 0;
458 }
459 
460 int32_t AudioDeviceIPhone::StereoPlayoutIsAvailable(bool& available) {
461     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
462                  "%s", __FUNCTION__);
463 
464     available = false;  // Stereo playout not supported on iOS
465 
466     return 0;
467 }
468 
469 int32_t AudioDeviceIPhone::SetStereoPlayout(bool enable) {
470     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
471                  "AudioDeviceIPhone::SetStereoPlayout(enable=%u)", enable);
472 
473     if (enable) {
474         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
475                      " Stereo playout is not supported on this platform");
476         return -1;
477     }
478     return 0;
479 }
480 
481 int32_t AudioDeviceIPhone::StereoPlayout(bool& enabled) const {
482     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
483                  "%s", __FUNCTION__);
484 
485     enabled = false;
486     return 0;
487 }
488 
489 int32_t AudioDeviceIPhone::SetAGC(bool enable) {
490     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
491                  "AudioDeviceIPhone::SetAGC(enable=%d)", enable);
492 
493     _AGC = enable;
494 
495     return 0;
496 }
497 
498 bool AudioDeviceIPhone::AGC() const {
499     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
500                  "%s", __FUNCTION__);
501 
502     return _AGC;
503 }
504 
505 int32_t AudioDeviceIPhone::MicrophoneVolumeIsAvailable(bool& available) {
506     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
507                  "%s", __FUNCTION__);
508 
509     available = false;  // Mic volume not supported on IOS
510 
511     return 0;
512 }
513 
514 int32_t AudioDeviceIPhone::SetMicrophoneVolume(uint32_t volume) {
515     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
516                  "AudioDeviceIPhone::SetMicrophoneVolume(volume=%u)", volume);
517 
518     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
519                  "  API call not supported on this platform");
520     return -1;
521 }
522 
523 int32_t
524     AudioDeviceIPhone::MicrophoneVolume(uint32_t& volume) const {
525     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
526                  "%s", __FUNCTION__);
527 
528     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
529                  "  API call not supported on this platform");
530     return -1;
531 }
532 
533 int32_t
534     AudioDeviceIPhone::MaxMicrophoneVolume(uint32_t& maxVolume) const {
535     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
536                  "%s", __FUNCTION__);
537 
538     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
539                  "  API call not supported on this platform");
540     return -1;
541 }
542 
543 int32_t
544     AudioDeviceIPhone::MinMicrophoneVolume(uint32_t& minVolume) const {
545     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
546                  "%s", __FUNCTION__);
547 
548     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
549                  "  API call not supported on this platform");
550     return -1;
551 }
552 
553 int32_t
554     AudioDeviceIPhone::MicrophoneVolumeStepSize(
555                                             uint16_t& stepSize) const {
556     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
557                  "%s", __FUNCTION__);
558 
559     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
560                  "  API call not supported on this platform");
561     return -1;
562 }
563 
564 int16_t AudioDeviceIPhone::PlayoutDevices() {
565     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
566                  "%s", __FUNCTION__);
567 
568     return (int16_t)1;
569 }
570 
571 int32_t AudioDeviceIPhone::SetPlayoutDevice(uint16_t index) {
572     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
573                  "AudioDeviceIPhone::SetPlayoutDevice(index=%u)", index);
574 
575     if (_playIsInitialized) {
576         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
577                      "  Playout already initialized");
578         return -1;
579     }
580 
581     if (index != 0) {
582         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
583                      "  SetPlayoutDevice invalid index");
584         return -1;
585     }
586     _playoutDeviceIsSpecified = true;
587 
588     return 0;
589 }
590 
591 int32_t
592     AudioDeviceIPhone::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
593     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
594                  "WindowsDeviceType not supported");
595     return -1;
596 }
597 
598 int32_t
599     AudioDeviceIPhone::PlayoutDeviceName(uint16_t index,
600                                          char name[kAdmMaxDeviceNameSize],
601                                          char guid[kAdmMaxGuidSize]) {
602     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
603                  "AudioDeviceIPhone::PlayoutDeviceName(index=%u)", index);
604 
605     if (index != 0) {
606         return -1;
607     }
608     // return empty strings
609     memset(name, 0, kAdmMaxDeviceNameSize);
610     if (guid != NULL) {
611         memset(guid, 0, kAdmMaxGuidSize);
612     }
613 
614     return 0;
615 }
616 
617 int32_t
618     AudioDeviceIPhone::RecordingDeviceName(uint16_t index,
619                                            char name[kAdmMaxDeviceNameSize],
620                                            char guid[kAdmMaxGuidSize]) {
621     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
622                  "AudioDeviceIPhone::RecordingDeviceName(index=%u)", index);
623 
624     if (index != 0) {
625         return -1;
626     }
627     // return empty strings
628     memset(name, 0, kAdmMaxDeviceNameSize);
629     if (guid != NULL) {
630         memset(guid, 0, kAdmMaxGuidSize);
631     }
632 
633     return 0;
634 }
635 
636 int16_t AudioDeviceIPhone::RecordingDevices() {
637     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
638 
639     return (int16_t)1;
640 }
641 
642 int32_t AudioDeviceIPhone::SetRecordingDevice(uint16_t index) {
643     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
644                  "AudioDeviceIPhone::SetRecordingDevice(index=%u)", index);
645 
646     if (_recIsInitialized) {
647         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
648                      "  Recording already initialized");
649         return -1;
650     }
651 
652     if (index != 0) {
653         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
654                      "  SetRecordingDevice invalid index");
655         return -1;
656     }
657 
658     _recordingDeviceIsSpecified = true;
659 
660     return 0;
661 }
662 
663 int32_t
664     AudioDeviceIPhone::SetRecordingDevice(
665                                         AudioDeviceModule::WindowsDeviceType) {
666     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
667                  "WindowsDeviceType not supported");
668     return -1;
669 }
670 
671 // ----------------------------------------------------------------------------
672 //  SetLoudspeakerStatus
673 //
674 //  Overrides the receiver playout route to speaker instead. See
675 //  kAudioSessionProperty_OverrideCategoryDefaultToSpeaker in CoreAudio
676 //  documentation.
677 // ----------------------------------------------------------------------------
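//  Illustrative usage (a sketch added for clarity, not part of the original
//  file): assuming a hypothetical, already-initialized AudioDeviceIPhone
//  instance named `audio_device`, the override could be toggled and queried
//  roughly like this:
//
//      audio_device->SetLoudspeakerStatus(true);      // route playout to speaker
//      bool speaker_on = false;
//      audio_device->GetLoudspeakerStatus(speaker_on);  // expect true on success
//
//  `audio_device` and `speaker_on` are names invented for this example only.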
678 
679 int32_t AudioDeviceIPhone::SetLoudspeakerStatus(bool enable) {
680     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
681                  "AudioDeviceIPhone::SetLoudspeakerStatus(enable=%d)", enable);
682 
683     UInt32 doChangeDefaultRoute = enable ? 1 : 0;
684     OSStatus err = AudioSessionSetProperty(
685         kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
686         sizeof(doChangeDefaultRoute), &doChangeDefaultRoute);
687 
688     if (err != noErr) {
689         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
690             "Error changing default output route " \
691             "(only available on iOS 3.1 or later)");
692         return -1;
693     }
694 
695     return 0;
696 }
697 
698 int32_t AudioDeviceIPhone::GetLoudspeakerStatus(bool &enabled) const {
699     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
700                  "AudioDeviceIPhone::GetLoudspeakerStatus(enabled=?)");
701 
702     UInt32 route(0);
703     UInt32 size = sizeof(route);
704     OSStatus err = AudioSessionGetProperty(
705         kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
706         &size, &route);
707     if (err != noErr) {
708         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
709             "Error reading default output route " \
710             "(only available on iOS 3.1 or later)");
711         return -1;
712     }
713 
714     enabled = route == 1 ? true : false;
715 
716     return 0;
717 }
718 
719 int32_t AudioDeviceIPhone::PlayoutIsAvailable(bool& available) {
720     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
721 
722     available = false;
723 
724     // Try to initialize the playout side
725     int32_t res = InitPlayout();
726 
727     // Cancel effect of initialization
728     StopPlayout();
729 
730     if (res != -1) {
731         available = true;
732     }
733 
734     return 0;
735 }
736 
737 int32_t AudioDeviceIPhone::RecordingIsAvailable(bool& available) {
738     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
739 
740     available = false;
741 
742     // Try to initialize the recording side
743     int32_t res = InitRecording();
744 
745     // Cancel effect of initialization
746     StopRecording();
747 
748     if (res != -1) {
749         available = true;
750     }
751 
752     return 0;
753 }
754 
755 int32_t AudioDeviceIPhone::InitPlayout() {
756     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
757 
758     CriticalSectionScoped lock(&_critSect);
759 
760     if (!_initialized) {
761         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "  Not initialized");
762         return -1;
763     }
764 
765     if (_playing) {
766         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
767                      "  Playout already started");
768         return -1;
769     }
770 
771     if (_playIsInitialized) {
772         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
773                      "  Playout already initialized");
774         return 0;
775     }
776 
777     if (!_playoutDeviceIsSpecified) {
778         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
779                      "  Playout device is not specified");
780         return -1;
781     }
782 
783     // Initialize the speaker
784     if (InitSpeaker() == -1) {
785         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
786                      "  InitSpeaker() failed");
787     }
788 
789     _playIsInitialized = true;
790 
791     if (!_recIsInitialized) {
792         // Audio init
793         if (InitPlayOrRecord() == -1) {
794             // todo: Handle error
795             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
796                          "  InitPlayOrRecord() failed");
797         }
798     } else {
799         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
800         "  Recording already initialized - InitPlayOrRecord() not called");
801     }
802 
803     return 0;
804 }
805 
806 bool AudioDeviceIPhone::PlayoutIsInitialized() const {
807     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
808     return (_playIsInitialized);
809 }
810 
811 int32_t AudioDeviceIPhone::InitRecording() {
812     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
813 
814     CriticalSectionScoped lock(&_critSect);
815 
816     if (!_initialized) {
817         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
818                      "  Not initialized");
819         return -1;
820     }
821 
822     if (_recording) {
823         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
824                      "  Recording already started");
825         return -1;
826     }
827 
828     if (_recIsInitialized) {
829         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
830                      "  Recording already initialized");
831         return 0;
832     }
833 
834     if (!_recordingDeviceIsSpecified) {
835         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
836                      "  Recording device is not specified");
837         return -1;
838     }
839 
840     // Initialize the microphone
841     if (InitMicrophone() == -1) {
842         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
843                      "  InitMicrophone() failed");
844     }
845 
846     _recIsInitialized = true;
847 
848     if (!_playIsInitialized) {
849         // Audio init
850         if (InitPlayOrRecord() == -1) {
851             // todo: Handle error
852             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
853                          "  InitPlayOrRecord() failed");
854         }
855     } else {
856         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
857                      "  Playout already initialized - InitPlayOrRecord() " \
858                      "not called");
859     }
860 
861     return 0;
862 }
863 
864 bool AudioDeviceIPhone::RecordingIsInitialized() const {
865     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
866     return (_recIsInitialized);
867 }
868 
869 int32_t AudioDeviceIPhone::StartRecording() {
870     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
871 
872     CriticalSectionScoped lock(&_critSect);
873 
874     if (!_recIsInitialized) {
875         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
876                      "  Recording not initialized");
877         return -1;
878     }
879 
880     if (_recording) {
881         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
882                      "  Recording already started");
883         return 0;
884     }
885 
886     // Reset recording buffer
887     memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
888     memset(_recordingLength, 0, sizeof(_recordingLength));
889     memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
890     _recordingCurrentSeq = 0;
891     _recordingBufferTotalSize = 0;
892     _recordingDelay = 0;
893     _recordingDelayHWAndOS = 0;
894     // Make sure first call to update delay function will update delay
895     _recordingDelayMeasurementCounter = 9999;
896     _recWarning = 0;
897     _recError = 0;
898 
899     if (!_playing) {
900         // Start Audio Unit
901         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
902                      "  Starting Audio Unit");
903         OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
904         if (0 != result) {
905             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
906                          "  Error starting Audio Unit (result=%d)", result);
907             return -1;
908         }
909     }
910 
911     _recording = true;
912 
913     return 0;
914 }
915 
916 int32_t AudioDeviceIPhone::StopRecording() {
917     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
918 
919     CriticalSectionScoped lock(&_critSect);
920 
921     if (!_recIsInitialized) {
922         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
923                      "  Recording is not initialized");
924         return 0;
925     }
926 
927     _recording = false;
928 
929     if (!_playing) {
930         // Both playout and recording have stopped, shut down the device
931         ShutdownPlayOrRecord();
932     }
933 
934     _recIsInitialized = false;
935     _micIsInitialized = false;
936 
937     return 0;
938 }
939 
940 bool AudioDeviceIPhone::Recording() const {
941     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
942     return (_recording);
943 }
944 
945 int32_t AudioDeviceIPhone::StartPlayout() {
946     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
947 
948     // This lock is (among other things) needed to avoid concurrency issues
949     // with capture thread
950     // shutting down Audio Unit
951     CriticalSectionScoped lock(&_critSect);
952 
953     if (!_playIsInitialized) {
954         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
955                      "  Playout not initialized");
956         return -1;
957     }
958 
959     if (_playing) {
960         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
961                      "  Playing already started");
962         return 0;
963     }
964 
965     // Reset playout buffer
966     memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
967     _playoutBufferUsed = 0;
968     _playoutDelay = 0;
969     // Make sure first call to update delay function will update delay
970     _playoutDelayMeasurementCounter = 9999;
971     _playWarning = 0;
972     _playError = 0;
973 
974     if (!_recording) {
975         // Start Audio Unit
976         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
977                      "  Starting Audio Unit");
978         OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
979         if (0 != result) {
980             WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
981                          "  Error starting Audio Unit (result=%d)", result);
982             return -1;
983         }
984     }
985 
986     _playing = true;
987 
988     return 0;
989 }
990 
991 int32_t AudioDeviceIPhone::StopPlayout() {
992     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
993 
994     CriticalSectionScoped lock(&_critSect);
995 
996     if (!_playIsInitialized) {
997         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
998                      "  Playout is not initialized");
999         return 0;
1000     }
1001 
1002     _playing = false;
1003 
1004     if (!_recording) {
1005         // Both playout and recording have stopped, shut down the device
1006         ShutdownPlayOrRecord();
1007     }
1008 
1009     _playIsInitialized = false;
1010     _speakerIsInitialized = false;
1011 
1012     return 0;
1013 }
1014 
1015 bool AudioDeviceIPhone::Playing() const {
1016     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1017                  "%s", __FUNCTION__);
1018     return (_playing);
1019 }
1020 
1021 // ----------------------------------------------------------------------------
1022 //  ResetAudioDevice
1023 //
1024 //  Disable playout and recording, signal to capture thread to shutdown,
1025 //  and set enable states after shutdown to same as current.
1026 //  In capture thread audio device will be shutdown, then started again.
1027 // ----------------------------------------------------------------------------
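//  Illustrative usage (a sketch added for clarity, not part of the original
//  file): a caller that wants to recover from an audio glitch or route change
//  might invoke the reset like this:
//
//      if (audio_device->ResetAudioDevice() != 0) {
//          // handle failure; on success the previous playout/recording
//          // states are restored internally by the call itself
//      }
//
//  `audio_device` is a hypothetical AudioDeviceIPhone* used only here.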
1028 int32_t AudioDeviceIPhone::ResetAudioDevice() {
1029     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1030 
1031     CriticalSectionScoped lock(&_critSect);
1032 
1033     if (!_playIsInitialized && !_recIsInitialized) {
1034         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1035                      "  Playout or recording not initialized, doing nothing");
1036         return 0;  // Nothing to reset
1037     }
1038 
1039     // Store the states we have before stopping to restart below
1040     bool initPlay = _playIsInitialized;
1041     bool play = _playing;
1042     bool initRec = _recIsInitialized;
1043     bool rec = _recording;
1044 
1045     int res(0);
1046 
1047     // Stop playout and recording
1048     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1049                  "  Stopping playout and recording");
1050     res += StopPlayout();
1051     res += StopRecording();
1052 
1053     // Restart
1054     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1055                  "  Restarting playout and recording (%d, %d, %d, %d)",
1056                  initPlay, play, initRec, rec);
1057     if (initPlay) res += InitPlayout();
1058     if (initRec)  res += InitRecording();
1059     if (play)     res += StartPlayout();
1060     if (rec)      res += StartRecording();
1061 
1062     if (0 != res) {
1063         // Logging is done in init/start/stop calls above
1064         return -1;
1065     }
1066 
1067     return 0;
1068 }
1069 
1070 int32_t AudioDeviceIPhone::PlayoutDelay(uint16_t& delayMS) const {
1071     delayMS = _playoutDelay;
1072     return 0;
1073 }
1074 
1075 int32_t AudioDeviceIPhone::RecordingDelay(uint16_t& delayMS) const {
1076     delayMS = _recordingDelay;
1077     return 0;
1078 }
1079 
1080 int32_t
1081     AudioDeviceIPhone::SetPlayoutBuffer(
1082                                     const AudioDeviceModule::BufferType type,
1083                                     uint16_t sizeMS) {
1084     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1085                  "AudioDeviceIPhone::SetPlayoutBuffer(type=%u, sizeMS=%u)",
1086                  type, sizeMS);
1087 
1088     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1089                  "  API call not supported on this platform");
1090     return -1;
1091 }
1092 
1093 int32_t
1094     AudioDeviceIPhone::PlayoutBuffer(AudioDeviceModule::BufferType& type,
1095                                      uint16_t& sizeMS) const {
1096     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1097 
1098     type = AudioDeviceModule::kAdaptiveBufferSize;
1099 
1100     sizeMS = _playoutDelay;
1101 
1102     return 0;
1103 }
1104 
1105 int32_t AudioDeviceIPhone::CPULoad(uint16_t& /*load*/) const {
1106     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1107 
1108     WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1109                  "  API call not supported on this platform");
1110     return -1;
1111 }
1112 
1113 bool AudioDeviceIPhone::PlayoutWarning() const {
1114     return (_playWarning > 0);
1115 }
1116 
1117 bool AudioDeviceIPhone::PlayoutError() const {
1118     return (_playError > 0);
1119 }
1120 
1121 bool AudioDeviceIPhone::RecordingWarning() const {
1122     return (_recWarning > 0);
1123 }
1124 
1125 bool AudioDeviceIPhone::RecordingError() const {
1126     return (_recError > 0);
1127 }
1128 
1129 void AudioDeviceIPhone::ClearPlayoutWarning() {
1130     _playWarning = 0;
1131 }
1132 
1133 void AudioDeviceIPhone::ClearPlayoutError() {
1134     _playError = 0;
1135 }
1136 
1137 void AudioDeviceIPhone::ClearRecordingWarning() {
1138     _recWarning = 0;
1139 }
1140 
1141 void AudioDeviceIPhone::ClearRecordingError() {
1142     _recError = 0;
1143 }
1144 
1145 // ============================================================================
1146 //                                 Private Methods
1147 // ============================================================================
1148 
1149 int32_t AudioDeviceIPhone::InitPlayOrRecord() {
1150     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1151 
1152     OSStatus result = -1;
1153 
1154     // Check if already initialized
1155     if (NULL != _auVoiceProcessing) {
1156         // We have already been initialized and the audio unit has been
1157         // created; check that it still exists.
1158         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1159                      "  Already initialized");
1160         // todo: Call AudioUnitReset() here and empty all buffers?
1161         return 0;
1162     }
1163 
1164     // Create Voice Processing Audio Unit
1165     AudioComponentDescription desc;
1166     AudioComponent comp;
1167 
1168     desc.componentType = kAudioUnitType_Output;
1169     desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
1170     desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1171     desc.componentFlags = 0;
1172     desc.componentFlagsMask = 0;
1173 
1174     comp = AudioComponentFindNext(NULL, &desc);
1175     if (NULL == comp) {
1176         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1177                      "  Could not find audio component for Audio Unit");
1178         return -1;
1179     }
1180 
1181     result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
1182     if (0 != result) {
1183         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1184                      "  Could not create Audio Unit instance (result=%d)",
1185                      result);
1186         return -1;
1187     }
1188 
1189     // Set preferred hardware sample rate to 16 kHz
1190     Float64 sampleRate(16000.0);
1191     result = AudioSessionSetProperty(
1192                          kAudioSessionProperty_PreferredHardwareSampleRate,
1193                          sizeof(sampleRate), &sampleRate);
1194     if (0 != result) {
1195         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1196                      "Could not set preferred sample rate (result=%d)", result);
1197     }
1198 
1199     uint32_t voiceChat = kAudioSessionMode_VoiceChat;
1200     AudioSessionSetProperty(kAudioSessionProperty_Mode,
1201                             sizeof(voiceChat), &voiceChat);
1202 
1203     //////////////////////
1204     // Setup Voice Processing Audio Unit
1205 
1206     // Note: For the Voice Processing AU, element 0 is the output bus and
1207     //       element 1 is the input bus; for global scope the element is
1208     //       irrelevant (always use element 0).
1209 
1210     // Enable IO on both elements
1211 
1212     // todo: Below we just log and continue upon error. We might want
1213     //       to close AU and return error for some cases.
1214     // todo: Log info about setup.
1215 
1216     UInt32 enableIO = 1;
1217     result = AudioUnitSetProperty(_auVoiceProcessing,
1218                                   kAudioOutputUnitProperty_EnableIO,
1219                                   kAudioUnitScope_Input,
1220                                   1,  // input bus
1221                                   &enableIO,
1222                                   sizeof(enableIO));
1223     if (0 != result) {
1224         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1225                      "  Could not enable IO on input (result=%d)", result);
1226     }
1227 
1228     result = AudioUnitSetProperty(_auVoiceProcessing,
1229                                   kAudioOutputUnitProperty_EnableIO,
1230                                   kAudioUnitScope_Output,
1231                                   0,   // output bus
1232                                   &enableIO,
1233                                   sizeof(enableIO));
1234     if (0 != result) {
1235         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1236                      "  Could not enable IO on output (result=%d)", result);
1237     }
1238 
1239     // Disable AU buffer allocation for the recorder, we allocate our own
1240     UInt32 flag = 0;
1241     result = AudioUnitSetProperty(
1242         _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
1243         kAudioUnitScope_Output,  1, &flag, sizeof(flag));
1244     if (0 != result) {
1245         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1246                      "  Could not disable AU buffer allocation (result=%d)",
1247                      result);
1248         // Should work anyway
1249     }
1250 
1251     // Set recording callback
1252     AURenderCallbackStruct auCbS;
1253     memset(&auCbS, 0, sizeof(auCbS));
1254     auCbS.inputProc = RecordProcess;
1255     auCbS.inputProcRefCon = this;
1256     result = AudioUnitSetProperty(_auVoiceProcessing,
1257                                   kAudioOutputUnitProperty_SetInputCallback,
1258                                   kAudioUnitScope_Global, 1,
1259                                   &auCbS, sizeof(auCbS));
1260     if (0 != result) {
1261         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1262             "  Could not set record callback for Audio Unit (result=%d)",
1263             result);
1264     }
1265 
1266     // Set playout callback
1267     memset(&auCbS, 0, sizeof(auCbS));
1268     auCbS.inputProc = PlayoutProcess;
1269     auCbS.inputProcRefCon = this;
1270     result = AudioUnitSetProperty(_auVoiceProcessing,
1271                                   kAudioUnitProperty_SetRenderCallback,
1272                                   kAudioUnitScope_Global, 0,
1273                                   &auCbS, sizeof(auCbS));
1274     if (0 != result) {
1275         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1276             "  Could not set play callback for Audio Unit (result=%d)",
1277             result);
1278     }
1279 
1280     // Get stream format for out/0
1281     AudioStreamBasicDescription playoutDesc;
1282     UInt32 size = sizeof(playoutDesc);
1283     result = AudioUnitGetProperty(_auVoiceProcessing,
1284                                   kAudioUnitProperty_StreamFormat,
1285                                   kAudioUnitScope_Output, 0, &playoutDesc,
1286                                   &size);
1287     if (0 != result) {
1288         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1289             "  Could not get stream format Audio Unit out/0 (result=%d)",
1290             result);
1291     }
1292     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1293                  "  Audio Unit playout opened in sampling rate %f",
1294                  playoutDesc.mSampleRate);
1295 
1296     playoutDesc.mSampleRate = sampleRate;
1297 
1298     // Store the sampling frequency to use towards the Audio Device Buffer
1299     // todo: Add 48 kHz (increase buffer sizes). Other fs?
1300     if ((playoutDesc.mSampleRate > 44090.0)
1301         && (playoutDesc.mSampleRate < 44110.0)) {
1302         _adbSampFreq = 44100;
1303     } else if ((playoutDesc.mSampleRate > 15990.0)
1304                && (playoutDesc.mSampleRate < 16010.0)) {
1305         _adbSampFreq = 16000;
1306     } else if ((playoutDesc.mSampleRate > 7990.0)
1307                && (playoutDesc.mSampleRate < 8010.0)) {
1308         _adbSampFreq = 8000;
1309     } else {
1310         _adbSampFreq = 0;
1311         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1312             "  Audio Unit out/0 opened in unknown sampling rate (%f)",
1313             playoutDesc.mSampleRate);
1314         // todo: We should bail out here.
1315     }
1316 
1317     // Set the audio device buffer sampling rate,
1318     // we assume we get the same for play and record
1319     if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
1320         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1321             "  Could not set audio device buffer recording sampling rate (%d)",
1322             _adbSampFreq);
1323     }
1324 
1325     if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
1326         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1327             "  Could not set audio device buffer playout sampling rate (%d)",
1328             _adbSampFreq);
1329     }
1330 
1331     // Set stream format for in/0  (use same sampling frequency as for out/0)
1332     playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1333                                | kLinearPCMFormatFlagIsPacked
1334                                | kLinearPCMFormatFlagIsNonInterleaved;
1335     playoutDesc.mBytesPerPacket = 2;
1336     playoutDesc.mFramesPerPacket = 1;
1337     playoutDesc.mBytesPerFrame = 2;
1338     playoutDesc.mChannelsPerFrame = 1;
1339     playoutDesc.mBitsPerChannel = 16;
1340     result = AudioUnitSetProperty(_auVoiceProcessing,
1341                                   kAudioUnitProperty_StreamFormat,
1342                                   kAudioUnitScope_Input, 0, &playoutDesc, size);
1343     if (0 != result) {
1344         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1345             "  Could not set stream format Audio Unit in/0 (result=%d)",
1346             result);
1347     }
1348 
1349     // Get stream format for in/1
1350     AudioStreamBasicDescription recordingDesc;
1351     size = sizeof(recordingDesc);
1352     result = AudioUnitGetProperty(_auVoiceProcessing,
1353                                   kAudioUnitProperty_StreamFormat,
1354                                   kAudioUnitScope_Input, 1, &recordingDesc,
1355                                   &size);
1356     if (0 != result) {
1357         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1358             "  Could not get stream format Audio Unit in/1 (result=%d)",
1359             result);
1360     }
1361     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1362                  "  Audio Unit recording opened in sampling rate %f",
1363                  recordingDesc.mSampleRate);
1364 
1365     recordingDesc.mSampleRate = sampleRate;
1366 
1367     // Set stream format for out/1 (use same sampling frequency as for in/1)
1368     recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
1369                                  | kLinearPCMFormatFlagIsPacked
1370                                  | kLinearPCMFormatFlagIsNonInterleaved;
1371 
1372     recordingDesc.mBytesPerPacket = 2;
1373     recordingDesc.mFramesPerPacket = 1;
1374     recordingDesc.mBytesPerFrame = 2;
1375     recordingDesc.mChannelsPerFrame = 1;
1376     recordingDesc.mBitsPerChannel = 16;
1377     result = AudioUnitSetProperty(_auVoiceProcessing,
1378                                   kAudioUnitProperty_StreamFormat,
1379                                   kAudioUnitScope_Output, 1, &recordingDesc,
1380                                   size);
1381     if (0 != result) {
1382         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1383             "  Could not set stream format Audio Unit out/1 (result=%d)",
1384             result);
1385     }
1386 
1387     // Initialize here already to be able to get/set stream properties.
1388     result = AudioUnitInitialize(_auVoiceProcessing);
1389     if (0 != result) {
1390         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1391                      "  Could not init Audio Unit (result=%d)", result);
1392     }
1393 
1394     // Get hardware sample rate for logging (see if we get what we asked for)
1395     Float64 hardwareSampleRate = 0.0;
1396     size = sizeof(hardwareSampleRate);
1397     result = AudioSessionGetProperty(
1398         kAudioSessionProperty_CurrentHardwareSampleRate, &size,
1399         &hardwareSampleRate);
1400     if (0 != result) {
1401         WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1402             "  Could not get current HW sample rate (result=%d)", result);
1403     }
1404     WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1405                  "  Current HW sample rate is %f, ADB sample rate is %d",
1406                  hardwareSampleRate, _adbSampFreq);
1407 
1408     return 0;
1409 }
1410 
1411 int32_t AudioDeviceIPhone::ShutdownPlayOrRecord() {
1412     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1413 
1414     // Close and delete AU
1415     OSStatus result = -1;
1416     if (NULL != _auVoiceProcessing) {
1417         result = AudioOutputUnitStop(_auVoiceProcessing);
1418         if (0 != result) {
1419             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1420                 "  Error stopping Audio Unit (result=%d)", result);
1421         }
1422         result = AudioComponentInstanceDispose(_auVoiceProcessing);
1423         if (0 != result) {
1424             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1425                 "  Error disposing Audio Unit (result=%d)", result);
1426         }
1427         _auVoiceProcessing = NULL;
1428     }
1429 
1430     return 0;
1431 }
1432 
1433 // ============================================================================
1434 //                                  Thread Methods
1435 // ============================================================================
1436 
1437 OSStatus
1438     AudioDeviceIPhone::RecordProcess(void *inRefCon,
1439                                      AudioUnitRenderActionFlags *ioActionFlags,
1440                                      const AudioTimeStamp *inTimeStamp,
1441                                      UInt32 inBusNumber,
1442                                      UInt32 inNumberFrames,
1443                                      AudioBufferList *ioData) {
1444     AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);
1445 
1446     return ptrThis->RecordProcessImpl(ioActionFlags,
1447                                       inTimeStamp,
1448                                       inBusNumber,
1449                                       inNumberFrames);
1450 }
1451 
1452 
1453 OSStatus
1454     AudioDeviceIPhone::RecordProcessImpl(
1455                                     AudioUnitRenderActionFlags *ioActionFlags,
1456                                     const AudioTimeStamp *inTimeStamp,
1457                                     uint32_t inBusNumber,
1458                                     uint32_t inNumberFrames) {
1459     // Setup some basic stuff
1460     // Use temp buffer not to lock up recording buffer more than necessary
1461     // todo: Make dataTmp a member variable with static size that holds
1462     //       max possible frames?
1463     int16_t* dataTmp = new int16_t[inNumberFrames];
1464     memset(dataTmp, 0, 2*inNumberFrames);
1465 
1466     AudioBufferList abList;
1467     abList.mNumberBuffers = 1;
1468     abList.mBuffers[0].mData = dataTmp;
1469     abList.mBuffers[0].mDataByteSize = 2*inNumberFrames;  // 2 bytes/sample
1470     abList.mBuffers[0].mNumberChannels = 1;
1471 
1472     // Get data from mic
1473     OSStatus res = AudioUnitRender(_auVoiceProcessing,
1474                                    ioActionFlags, inTimeStamp,
1475                                    inBusNumber, inNumberFrames, &abList);
1476     if (res != 0) {
1477         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1478                      "  Error getting rec data, error = %d", res);
1479 
1480         if (_recWarning > 0) {
1481             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1482                          "  Pending rec warning exists");
1483         }
1484         _recWarning = 1;
1485 
1486         delete [] dataTmp;
1487         return 0;
1488     }
1489 
1490     if (_recording) {
1491         // Insert all data in temp buffer into recording buffers
1492         // There is zero or one buffer partially full at any given time,
1493         // all others are full or empty
1494         // Full means filled with noSamp10ms samples.
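        // Example (assuming _adbSampFreq == 16000, i.e. noSamp10ms == 160):
        // a 256-frame callback fills one buffer with 160 samples and starts
        // another with the remaining 96; the next callback tops up that
        // partially full buffer before touching any empty one.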

        const unsigned int noSamp10ms = _adbSampFreq / 100;
        unsigned int dataPos = 0;
        uint16_t bufPos = 0;
        int16_t insertPos = -1;
        unsigned int nCopy = 0;  // Number of samples to copy

        while (dataPos < inNumberFrames) {
            // Loop over all recording buffers or
            // until we find the partially full buffer
            // First choice is to insert into partially full buffer,
            // second choice is to insert into empty buffer
            bufPos = 0;
            insertPos = -1;
            nCopy = 0;
            while (bufPos < N_REC_BUFFERS) {
                if ((_recordingLength[bufPos] > 0)
                    && (_recordingLength[bufPos] < noSamp10ms)) {
                    // Found the partially full buffer
                    insertPos = static_cast<int16_t>(bufPos);
                    // Don't need to search more, quit loop
                    bufPos = N_REC_BUFFERS;
                } else if ((-1 == insertPos)
                           && (0 == _recordingLength[bufPos])) {
                    // Found an empty buffer
                    insertPos = static_cast<int16_t>(bufPos);
                }
                ++bufPos;
            }

            // Insert data into buffer
            if (insertPos > -1) {
                // We found a non-full buffer, copy data to it
                unsigned int dataToCopy = inNumberFrames - dataPos;
                unsigned int currentRecLen = _recordingLength[insertPos];
                unsigned int roomInBuffer = noSamp10ms - currentRecLen;
                nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);

                memcpy(&_recordingBuffer[insertPos][currentRecLen],
                       &dataTmp[dataPos], nCopy*sizeof(int16_t));
                if (0 == currentRecLen) {
                    _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
                    ++_recordingCurrentSeq;
                }
                _recordingBufferTotalSize += nCopy;
                // Has to be done last to avoid interrupt problems
                // between threads
                _recordingLength[insertPos] += nCopy;
                dataPos += nCopy;
            } else {
                // Didn't find a non-full buffer
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "  Could not insert into recording buffer");
                if (_recWarning > 0) {
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 "  Pending rec warning exists");
                }
                _recWarning = 1;
                dataPos = inNumberFrames;  // Don't try to insert more
            }
        }
    }

    delete [] dataTmp;

    return 0;
}

OSStatus
    AudioDeviceIPhone::PlayoutProcess(void *inRefCon,
                                      AudioUnitRenderActionFlags *ioActionFlags,
                                      const AudioTimeStamp *inTimeStamp,
                                      UInt32 inBusNumber,
                                      UInt32 inNumberFrames,
                                      AudioBufferList *ioData) {
    AudioDeviceIPhone* ptrThis = static_cast<AudioDeviceIPhone*>(inRefCon);

    return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
}

OSStatus
    AudioDeviceIPhone::PlayoutProcessImpl(uint32_t inNumberFrames,
                                          AudioBufferList *ioData) {
    // Setup some basic stuff
//    assert(sizeof(short) == 2); // Assumption for implementation

    int16_t* data =
        static_cast<int16_t*>(ioData->mBuffers[0].mData);
    unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
    unsigned int dataSize = dataSizeBytes/2;  // Number of samples
    if (dataSize != inNumberFrames) {  // Should always be the same
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     "dataSize (%u) != inNumberFrames (%u)",
                     dataSize, (unsigned int)inNumberFrames);
        if (_playWarning > 0) {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "  Pending play warning exists");
        }
        _playWarning = 1;
    }
    memset(data, 0, dataSizeBytes);  // Start with empty buffer


    // Get playout data from Audio Device Buffer

    if (_playing) {
        unsigned int noSamp10ms = _adbSampFreq / 100;
        // todo: Member variable and allocate when samp freq is determined
        int16_t* dataTmp = new int16_t[noSamp10ms];
        memset(dataTmp, 0, 2*noSamp10ms);
        unsigned int dataPos = 0;
        int noSamplesOut = 0;
        unsigned int nCopy = 0;

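        // The ADB hands out audio in 10 ms chunks (noSamp10ms samples) while
        // the audio unit may request a different number of frames per
        // callback, so the tail of a chunk that does not fit here is parked
        // in _playoutBuffer and consumed first on the next callback.
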
        // First insert data from playout buffer if any
        if (_playoutBufferUsed > 0) {
            nCopy = (dataSize < _playoutBufferUsed) ?
                    dataSize : _playoutBufferUsed;
            if (nCopy != _playoutBufferUsed) {
                // todo: If dataSize < _playoutBufferUsed
                //       (should normally never be)
                //       we must move the remaining data
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "nCopy (%u) != _playoutBufferUsed (%u)",
                             nCopy, _playoutBufferUsed);
                if (_playWarning > 0) {
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 "  Pending play warning exists");
                }
                _playWarning = 1;
            }
            memcpy(data, _playoutBuffer, 2*nCopy);
            dataPos = nCopy;
            memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
            _playoutBufferUsed = 0;
        }

        // Now get the rest from Audio Device Buffer
        while (dataPos < dataSize) {
            // Update playout delay
            UpdatePlayoutDelay();

            // Ask for new PCM data to be played out using the AudioDeviceBuffer
            noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);

            // Get data from Audio Device Buffer
            noSamplesOut =
                _ptrAudioBuffer->GetPlayoutData(
                    reinterpret_cast<int8_t*>(dataTmp));
            // Cast OK since only equality comparison
            if (noSamp10ms != (unsigned int)noSamplesOut) {
                // Should never happen
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                             "noSamp10ms (%u) != noSamplesOut (%d)",
                             noSamp10ms, noSamplesOut);

                if (_playWarning > 0) {
                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                                 "  Pending play warning exists");
                }
                _playWarning = 1;
            }

            // Insert as much as fits in data buffer
            nCopy = (dataSize-dataPos) > noSamp10ms ?
                    noSamp10ms : (dataSize-dataPos);
            memcpy(&data[dataPos], dataTmp, 2*nCopy);

            // Save rest in playout buffer if any
            if (nCopy < noSamp10ms) {
                memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
                _playoutBufferUsed = noSamp10ms - nCopy;
            }

            // Update loop/index counter, if we copied less than noSamp10ms
            // samples we shall quit loop anyway
            dataPos += noSamp10ms;
        }

        delete [] dataTmp;
    }

    return 0;
}

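// UpdatePlayoutDelay() is called from the playout path once per 10 ms chunk;
// the counter below limits the AudioSession/AudioUnit property queries to
// roughly once per second (every 100th call), since the values rarely change.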
void AudioDeviceIPhone::UpdatePlayoutDelay() {
    ++_playoutDelayMeasurementCounter;

    if (_playoutDelayMeasurementCounter >= 100) {
        // Update HW and OS delay every second, unlikely to change

        // Since this is eventually rounded to integral ms, add 0.5ms
        // here to get round-to-nearest-int behavior instead of
        // truncation.
        float totalDelaySeconds = 0.0005;

        // HW output latency
        Float32 f32(0);
        UInt32 size = sizeof(f32);
        OSStatus result = AudioSessionGetProperty(
            kAudioSessionProperty_CurrentHardwareOutputLatency, &size, &f32);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error HW latency (result=%d)", result);
        }
        assert(f32 >= 0);
        totalDelaySeconds += f32;

        // HW buffer duration
        f32 = 0;
        result = AudioSessionGetProperty(
            kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error HW buffer duration (result=%d)", result);
        }
        assert(f32 >= 0);
        totalDelaySeconds += f32;

        // AU latency
        Float64 f64(0);
        size = sizeof(f64);
        result = AudioUnitGetProperty(_auVoiceProcessing,
            kAudioUnitProperty_Latency, kAudioUnitScope_Global, 0, &f64, &size);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error AU latency (result=%d)", result);
        }
        assert(f64 >= 0);
        totalDelaySeconds += f64;

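        // Illustrative sum (actual values depend on device and session
        // settings): 0.0005 s rounding term + ~0.010 s HW output latency
        // + ~0.023 s IO buffer duration + ~0.003 s AU latency = ~0.0365 s,
        // i.e. about 36 ms after the conversion below.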
        // To ms
        _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);

        // Reset counter
        _playoutDelayMeasurementCounter = 0;
    }

    // todo: Add playout buffer?
}

void AudioDeviceIPhone::UpdateRecordingDelay() {
    ++_recordingDelayMeasurementCounter;

    if (_recordingDelayMeasurementCounter >= 100) {
        // Update HW and OS delay every second, unlikely to change

        // Since this is eventually rounded to integral ms, add 0.5ms
        // here to get round-to-nearest-int behavior instead of
        // truncation.
        float totalDelaySeconds = 0.0005;

        // HW input latency
        Float32 f32(0);
        UInt32 size = sizeof(f32);
        OSStatus result = AudioSessionGetProperty(
            kAudioSessionProperty_CurrentHardwareInputLatency, &size, &f32);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error HW latency (result=%d)", result);
        }
        assert(f32 >= 0);
        totalDelaySeconds += f32;

        // HW buffer duration
        f32 = 0;
        result = AudioSessionGetProperty(
            kAudioSessionProperty_CurrentHardwareIOBufferDuration, &size, &f32);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error HW buffer duration (result=%d)", result);
        }
        assert(f32 >= 0);
        totalDelaySeconds += f32;

        // AU latency
        Float64 f64(0);
        size = sizeof(f64);
        result = AudioUnitGetProperty(_auVoiceProcessing,
                                      kAudioUnitProperty_Latency,
                                      kAudioUnitScope_Global, 0, &f64, &size);
        if (0 != result) {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "error AU latency (result=%d)", result);
        }
        assert(f64 >= 0);
        totalDelaySeconds += f64;

        // To ms
        _recordingDelayHWAndOS =
            static_cast<uint32_t>(totalDelaySeconds * 1000);

        // Reset counter
        _recordingDelayMeasurementCounter = 0;
    }

    _recordingDelay = _recordingDelayHWAndOS;

    // ADB recording buffer size, updated on every call
    // Don't count the next 10 ms chunk about to be delivered,
    // then convert samples => ms
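    // Illustrative arithmetic (assuming _adbSampFreq == 16000, so
    // noSamp10ms == 160 and 16 samples per ms): with 480 samples queued,
    // (480 - 160) / 16 = 20 ms is added on top of the HW/OS delay.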
    const uint32_t noSamp10ms = _adbSampFreq / 100;
    if (_recordingBufferTotalSize > noSamp10ms) {
        _recordingDelay +=
            (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
    }
}

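// RunCapture() is the static entry point for _captureWorkerThread; returning
// true from CaptureWorkerThread() tells the thread wrapper to call it again,
// so each pass drains every full 10 ms buffer (oldest sequence number first)
// into the AudioDeviceBuffer and then sleeps briefly.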
bool AudioDeviceIPhone::RunCapture(void* ptrThis) {
    return static_cast<AudioDeviceIPhone*>(ptrThis)->CaptureWorkerThread();
}

bool AudioDeviceIPhone::CaptureWorkerThread() {
    if (_recording) {
        int bufPos = 0;
        unsigned int lowestSeq = 0;
        int lowestSeqBufPos = 0;
        bool foundBuf = true;
        const unsigned int noSamp10ms = _adbSampFreq / 100;

        while (foundBuf) {
            // Check if we have any buffer with data to insert
            // into the Audio Device Buffer,
            // and find the one with the lowest seq number
            foundBuf = false;
            for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
                if (noSamp10ms == _recordingLength[bufPos]) {
                    if (!foundBuf) {
                        lowestSeq = _recordingSeqNumber[bufPos];
                        lowestSeqBufPos = bufPos;
                        foundBuf = true;
                    } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
                        lowestSeq = _recordingSeqNumber[bufPos];
                        lowestSeqBufPos = bufPos;
                    }
                }
            }  // for

            // Insert data into the Audio Device Buffer if found any
            if (foundBuf) {
                // Update recording delay
                UpdateRecordingDelay();

                // Set the recorded buffer
                _ptrAudioBuffer->SetRecordedBuffer(
                    reinterpret_cast<int8_t*>(
                        _recordingBuffer[lowestSeqBufPos]),
                        _recordingLength[lowestSeqBufPos]);

                // Don't need to set the current mic level in ADB since we only
                // support digital AGC,
                // and besides we cannot get or set the IOS mic level anyway.

                // Set VQE info, use clockdrift == 0
                _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);

                // Deliver recorded samples at specified sample rate, mic level
                // etc. to the observer using callback
                _ptrAudioBuffer->DeliverRecordedData();

                // Make buffer available
                _recordingSeqNumber[lowestSeqBufPos] = 0;
                _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
                // Must be done last to avoid interrupt problems between threads
                _recordingLength[lowestSeqBufPos] = 0;
            }
        }  // while (foundBuf)
    }  // if (_recording)

    {
        // Normal case
        // Sleep thread (5ms) to let other threads get to work
        // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
        //       Device Buffer?
        timespec t;
        t.tv_sec = 0;
        t.tv_nsec = 5*1000*1000;
        nanosleep(&t, NULL);
    }

    return true;
}

}  // namespace webrtc