/*
 *  Copyright 2016 The WebRTC Project Authors. All rights reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
#define SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_

#include <AudioUnit/AudioUnit.h>

namespace webrtc {
namespace ios_adm {

class VoiceProcessingAudioUnitObserver {
 public:
  // Callback function called on a real-time priority I/O thread from the
  // audio unit. This method is used to signal that recorded audio is
  // available.
  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                         const AudioTimeStamp* time_stamp,
                                         UInt32 bus_number,
                                         UInt32 num_frames,
                                         AudioBufferList* io_data) = 0;

  // Callback function called on a real-time priority I/O thread from the
  // audio unit. This method is used to provide audio samples to the audio
  // unit.
  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
                                    const AudioTimeStamp* time_stamp,
                                    UInt32 bus_number,
                                    UInt32 num_frames,
                                    AudioBufferList* io_data) = 0;

 protected:
  ~VoiceProcessingAudioUnitObserver() {}
};
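
// A minimal observer sketch (illustrative only; RecordingClient, its members,
// and the buffer handling are assumptions, not part of this header). The
// recorded-data callback typically pulls the samples by calling Render() on
// the VoiceProcessingAudioUnit declared below, while the playout callback
// fills |io_data| with samples to play (silence in this sketch):
//
//   class RecordingClient : public VoiceProcessingAudioUnitObserver {
//    public:
//     explicit RecordingClient(VoiceProcessingAudioUnit* unit) : unit_(unit) {}
//
//     OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
//                                    const AudioTimeStamp* time_stamp,
//                                    UInt32 bus_number,
//                                    UInt32 num_frames,
//                                    AudioBufferList* io_data) override {
//       // Pull the recorded samples into a client-owned buffer list sized
//       // for |num_frames| samples (allocation omitted in this sketch).
//       return unit_->Render(flags, time_stamp, bus_number, num_frames,
//                            &record_buffer_list_);
//     }
//
//     OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags,
//                               const AudioTimeStamp* time_stamp,
//                               UInt32 bus_number,
//                               UInt32 num_frames,
//                               AudioBufferList* io_data) override {
//       // Provide playout samples; here every buffer is zero-filled.
//       for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) {
//         memset(io_data->mBuffers[i].mData, 0,
//                io_data->mBuffers[i].mDataByteSize);
//       }
//       return noErr;
//     }
//
//    private:
//     VoiceProcessingAudioUnit* unit_;
//     AudioBufferList record_buffer_list_;
//   };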

// Convenience class to abstract away the management of a Voice Processing
// I/O Audio Unit. The Voice Processing I/O unit has the same characteristics
// as the Remote I/O unit (it supports full-duplex, low-latency audio input
// and output) and adds AEC for two-way communication. It also adds AGC,
// adjustment of voice-processing quality, and muting, which makes it well
// suited for VoIP applications.
class VoiceProcessingAudioUnit {
 public:
  explicit VoiceProcessingAudioUnit(
      VoiceProcessingAudioUnitObserver* observer);
  ~VoiceProcessingAudioUnit();

  // TODO(tkchin): enum for state and state checking.
  enum State : int32_t {
    // Init() should be called.
    kInitRequired,
    // Audio unit created but not initialized.
    kUninitialized,
    // Initialized but not started. Equivalent to stopped.
    kInitialized,
    // Initialized and started.
    kStarted,
  };

  // Number of bytes per audio sample for 16-bit signed integer representation.
  static const UInt32 kBytesPerSample;

  // Initializes this class by creating the underlying audio unit instance.
  // Creates a Voice-Processing I/O unit and configures it for full-duplex
  // audio. The stream format is chosen to avoid internal resampling and to
  // match the 10 ms callback rate of WebRTC as well as possible.
  // Does not initialize the audio unit.
  bool Init();

  VoiceProcessingAudioUnit::State GetState() const;

  // Initializes the underlying audio unit with the given sample rate.
  bool Initialize(Float64 sample_rate);

  // Starts the underlying audio unit.
  bool Start();

  // Stops the underlying audio unit.
  bool Stop();

  // Uninitializes the underlying audio unit.
  bool Uninitialize();

  // Calls render on the underlying audio unit.
  OSStatus Render(AudioUnitRenderActionFlags* flags,
                  const AudioTimeStamp* time_stamp,
                  UInt32 output_bus_number,
                  UInt32 num_frames,
                  AudioBufferList* io_data);

 private:
  // The C API used to set callbacks requires static functions. When these are
  // called, they will invoke the relevant instance method by casting
  // in_ref_con to VoiceProcessingAudioUnit*.
  static OSStatus OnGetPlayoutData(void* in_ref_con,
                                   AudioUnitRenderActionFlags* flags,
                                   const AudioTimeStamp* time_stamp,
                                   UInt32 bus_number,
                                   UInt32 num_frames,
                                   AudioBufferList* io_data);
  static OSStatus OnDeliverRecordedData(void* in_ref_con,
                                        AudioUnitRenderActionFlags* flags,
                                        const AudioTimeStamp* time_stamp,
                                        UInt32 bus_number,
                                        UInt32 num_frames,
                                        AudioBufferList* io_data);

  // Notifies the observer that samples are needed for playback.
  OSStatus NotifyGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                const AudioTimeStamp* time_stamp,
                                UInt32 bus_number,
                                UInt32 num_frames,
                                AudioBufferList* io_data);
  // Notifies the observer that recorded samples are available for render.
  OSStatus NotifyDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                     const AudioTimeStamp* time_stamp,
                                     UInt32 bus_number,
                                     UInt32 num_frames,
                                     AudioBufferList* io_data);

  // Returns the predetermined format with a specific sample rate. See the
  // implementation file for details on the format.
  AudioStreamBasicDescription GetFormat(Float64 sample_rate) const;

  // Deletes the underlying audio unit.
  void DisposeAudioUnit();

  VoiceProcessingAudioUnitObserver* observer_;
  AudioUnit vpio_unit_;
  VoiceProcessingAudioUnit::State state_;
};

}  // namespace ios_adm
}  // namespace webrtc

#endif  // SDK_OBJC_NATIVE_SRC_AUDIO_VOICE_PROCESSING_AUDIO_UNIT_H_
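
// A minimal lifecycle sketch (illustrative only; `observer` is assumed to be
// a client-provided VoiceProcessingAudioUnitObserver implementation, and the
// 48 kHz sample rate is an assumption, not a requirement of this header):
//
//   webrtc::ios_adm::VoiceProcessingAudioUnit audio_unit(&observer);
//   if (audio_unit.Init() &&               // Creates the unit: kUninitialized.
//       audio_unit.Initialize(48000.0)) {  // Applies the format: kInitialized.
//     audio_unit.Start();                  // kStarted: callbacks now fire on
//                                          // the real-time I/O thread.
//     // ... two-way audio is running ...
//     audio_unit.Stop();                   // Back to kInitialized.
//     audio_unit.Uninitialize();           // Back to kUninitialized.
//   }
//   // Destroying |audio_unit| disposes of the underlying audio unit.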