# Developing Audio Call

During an audio call, audio output (playing the peer voice) and audio input (recording the local voice) are carried out simultaneously. You can use the AudioRenderer to implement audio output and the AudioCapturer to implement audio input.

Before starting or stopping using the audio call service, the application needs to check the [audio scene](audio-call-overview.md#audio-scene) and [ringer mode](audio-call-overview.md#ringer-mode) to adopt proper audio management and prompt policies.

The sample code below demonstrates the basic process of using the AudioRenderer and AudioCapturer to implement the audio call service, without the process of call data transmission. In actual development, the peer call data transmitted over the network needs to be decoded and played, and the sample code uses the process of reading an audio file instead; the local call data needs to be encoded and packed and then sent to the peer over the network, and the sample code uses the process of writing an audio file instead.

## Using AudioRenderer to Play the Peer Voice

This process is similar to the process of [using AudioRenderer to develop audio playback](using-audiorenderer-for-playback.md). The key differences lie in the **audioRendererInfo** parameter and audio data source. In the **audioRendererInfo** parameter used for audio calling, **content** must be set to **CONTENT_TYPE_SPEECH**, and **usage** must be set to **STREAM_USAGE_VOICE_COMMUNICATION**.

```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';

const TAG = 'VoiceCallDemoForAudioRenderer';
// The process is similar to the process of using AudioRenderer to develop audio playback. The key differences lie in the audioRendererInfo parameter and audio data source.
let context = getContext(this);
let renderModel: audio.AudioRenderer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate.
  channels: audio.AudioChannel.CHANNEL_2, // Channel.
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format.
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format.
}
let audioRendererInfo: audio.AudioRendererInfo = {
  // Parameters corresponding to the call scenario need to be used.
  content: audio.ContentType.CONTENT_TYPE_SPEECH, // Audio content type: speech.
  usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // Audio stream usage type: voice communication.
  rendererFlags: 0 // AudioRenderer flag. The default value is 0.
}
let audioRendererOptions: audio.AudioRendererOptions = {
  streamInfo: audioStreamInfo,
  rendererInfo: audioRendererInfo
}

// Create an AudioRenderer instance, and set the events to listen for.
audio.createAudioRenderer(audioRendererOptions, (err: BusinessError, renderer: audio.AudioRenderer) => { // Create an AudioRenderer instance.
  if (!err) {
    console.info(`${TAG}: creating AudioRenderer success`);
    renderModel = renderer;
    if (renderModel !== undefined) {
      renderModel.on('stateChange', (state: audio.AudioState) => { // Set the events to listen for. A callback is invoked when the AudioRenderer is switched to the specified state.
        if (state == 1) {
          console.info('audio renderer state is: STATE_PREPARED');
        }
        if (state == 2) {
          console.info('audio renderer state is: STATE_RUNNING');
        }
      });
      renderModel.on('markReach', 1000, (position: number) => { // Subscribe to the markReach event. A callback is triggered when the number of rendered frames reaches 1000.
        if (position == 1000) {
          console.info('ON Triggered successfully');
        }
      });
    }
  } else {
    console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
  }
});

// Start audio rendering.
async function start() {
  if (renderModel !== undefined) {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(renderModel.state.valueOf()) === -1) { // Rendering can be started only when the AudioRenderer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state.
      console.error(TAG + 'start failed');
      return;
    }
    await renderModel.start(); // Start rendering.
    const bufferSize: number = await renderModel.getBufferSize();
    // The process of reading audio file data is used as an example. In actual audio call development, audio data transmitted from the peer needs to be read.

    let path = context.filesDir;

    const filePath = path + '/voice_call_data.wav'; // Sandbox path. The actual path is /data/storage/el2/base/haps/entry/files/voice_call_data.wav.
    let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
    let stat = await fs.stat(filePath);
    let buf = new ArrayBuffer(bufferSize);
    let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
    class Option {
      offset: number = 0
      length: number = 0
    }
    for (let i = 0; i < len; i++) {
      let options: Option = {
        offset: i * bufferSize,
        length: bufferSize
      };
      let readsize = await fs.read(file.fd, buf, options);
      // buf indicates the audio data to be written to the buffer. Before calling AudioRenderer.write(), you can preprocess the audio data for personalized playback. The AudioRenderer reads the audio data written to the buffer for rendering.
      let writeSize: number = await renderModel.write(buf);
      if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) { // The rendering stops if the AudioRenderer is in the STATE_RELEASED state.
        fs.close(file);
        await renderModel.stop();
      }
      if (renderModel.state.valueOf() === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // The rendering stops if the file finishes reading.
          fs.close(file);
          await renderModel.stop();
        }
      }
    }
  }
}

// Pause the rendering.
async function pause() {
  if (renderModel !== undefined) {
    // Rendering can be paused only when the AudioRenderer is in the STATE_RUNNING state.
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await renderModel.pause(); // Pause rendering.
    if (renderModel.state.valueOf() === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
}

// Stop rendering.
async function stop() {
  if (renderModel !== undefined) {
    // Rendering can be stopped only when the AudioRenderer is in the STATE_RUNNING or STATE_PAUSED state.
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING && renderModel.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await renderModel.stop(); // Stop rendering.
    if (renderModel.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
}

// Release the instance.
async function release() {
  if (renderModel !== undefined) {
    // The AudioRenderer can be released only when it is not in the STATE_RELEASED state.
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await renderModel.release(); // Release the instance.
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
```

## Using AudioCapturer to Record the Local Voice

This process is similar to the process of [using AudioCapturer to develop audio recording](using-audiocapturer-for-recording.md). The key differences lie in the **audioCapturerInfo** parameter and audio data stream direction. In the **audioCapturerInfo** parameter used for audio calling, **source** must be set to **SOURCE_TYPE_VOICE_COMMUNICATION**.

```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';

let context = getContext(this);
const TAG = 'VoiceCallDemoForAudioCapturer';
// The process is similar to the process of using AudioCapturer to develop audio recording. The key differences lie in the audioCapturerInfo parameter and audio data stream direction.
let audioCapturer: audio.AudioCapturer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // Sampling rate.
  channels: audio.AudioChannel.CHANNEL_1, // Channel.
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sampling format.
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding format.
}
let audioCapturerInfo: audio.AudioCapturerInfo = {
  // Parameters corresponding to the call scenario need to be used.
  source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // Audio source type: voice communication.
  capturerFlags: 0 // AudioCapturer flag. The default value is 0.
184} 185let audioCapturerOptions: audio.AudioCapturerOptions = { 186 streamInfo: audioStreamInfo, 187 capturerInfo: audioCapturerInfo 188} 189 190// Create an AudioRenderer instance, and set the events to listen for. 191async function init() { 192 audio.createAudioCapturer(audioCapturerOptions, (err: BusinessError, capturer: audio.AudioCapturer) => { // Create an AudioCapturer instance. 193 if (err) { 194 console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`); 195 return; 196 } 197 console.info(`${TAG}: create AudioCapturer success`); 198 audioCapturer = capturer; 199 if (audioCapturer !== undefined) { 200 audioCapturer.on('markReach', 1000, (position: number) => { // Subscribe to the markReach event. A callback is triggered when the number of captured frames reaches 1000. 201 if (position === 1000) { 202 console.info('ON Triggered successfully'); 203 } 204 }); 205 audioCapturer.on('periodReach', 2000, (position: number) => { // Subscribe to the periodReach event. A callback is triggered when the number of captured frames reaches 2000. 206 if (position === 2000) { 207 console.info('ON Triggered successfully'); 208 } 209 }); 210 } 211 }); 212} 213 214// Start audio recording. 215async function start() { 216 if (audioCapturer !== undefined) { 217 let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED]; 218 if (stateGroup.indexOf(audioCapturer.state.valueOf()) === -1) { // Recording can be started only when the AudioCapturer is in the STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED state. 219 console.error(`${TAG}: start failed`); 220 return; 221 } 222 await audioCapturer.start(); // Start recording. 223 // The following describes how to write audio data to a file. In actual audio call development, the local audio data needs to be encoded and packed, and then sent to the peer through the network. 
    const path = context.filesDir + '/voice_call_data.wav'; // Path for storing the recorded audio file.
    let file = fs.openSync(path, 0o2 | 0o100); // Create the file if it does not exist.
    let fd = file.fd;
    let numBuffersToCapture = 150; // Write data for 150 times.
    let count = 0;
    class Options {
      offset: number = 0
      length: number = 0
    }
    while (numBuffersToCapture) {
      let bufferSize: number = await audioCapturer.getBufferSize();
      let buffer: ArrayBuffer = await audioCapturer.read(bufferSize, true);
      let options: Options = {
        offset: count * bufferSize,
        length: bufferSize
      };
      if (buffer === undefined) {
        console.error(`${TAG}: read buffer failed`);
      } else {
        let number = fs.writeSync(fd, buffer, options);
        console.info(`${TAG}: write date: ${number}`);
      }
      numBuffersToCapture--;
      count++;
    }
  }
}

// Stop recording.
async function stop() {
  if (audioCapturer !== undefined) {
    // The AudioCapturer can be stopped only when it is in STATE_RUNNING or STATE_PAUSED state.
    if (audioCapturer.state.valueOf() !== audio.AudioState.STATE_RUNNING && audioCapturer.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await audioCapturer.stop(); // Stop recording.
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
    }
  }
}

// Release the instance.
async function release() {
  if (audioCapturer !== undefined) {
    // The AudioCapturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state.
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED || audioCapturer.state.valueOf() === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await audioCapturer.release(); // Release the instance.
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
    }
  }
}
```