# Developing Audio Call

In an audio call scenario, audio output (playing the peer's voice) and audio input (recording the local voice) take place at the same time. The application can implement audio output with AudioRenderer and audio input with AudioCapturer; using the two together implements the audio call feature.

When an audio call starts and ends, the application can check the current [audio scene mode](audio-call-overview.md#音频场景模式) and [ringer mode](audio-call-overview.md#铃声模式) so that it can adopt appropriate audio management and prompt strategies, for example as sketched below.
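
The following is a minimal sketch of such a check, assuming API version 9 and the promise-style APIs of @ohos.multimedia.audio (the function name checkCallAudioStatus is illustrative):

```ts
import audio from '@ohos.multimedia.audio';

async function checkCallAudioStatus() {
  let audioManager = audio.getAudioManager();
  // Query the current audio scene (for example, AUDIO_SCENE_VOICE_CHAT during a VoIP call).
  let scene: audio.AudioScene = await audioManager.getAudioScene();
  console.info(`Current audio scene: ${scene}`);
  // Query the ringer mode through the volume group manager.
  let volumeManager = audioManager.getVolumeManager();
  let groupManager = await volumeManager.getVolumeGroupManager(audio.DEFAULT_VOLUME_GROUP_ID);
  let ringerMode: audio.AudioRingMode = await groupManager.getRingerMode();
  console.info(`Current ringer mode: ${ringerMode}`);
}
```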
The following code demonstrates the basic process of using AudioRenderer and AudioCapturer together to implement an audio call. It does not cover the transmission of the call data. In real development, the call data received from the peer over the network must be decoded and played; here, reading data from an audio file is used instead. Likewise, the call data recorded locally must be encoded, packetized, and sent to the peer over the network; here, writing the data to an audio file is used instead.

## Using AudioRenderer to Play the Voice of the Peer

This process is similar to [developing audio playback with AudioRenderer](using-audiorenderer-for-playback.md). The key differences lie in the audioRendererInfo parameter and the source of the audio data. In audioRendererInfo, set the content type to speech (CONTENT_TYPE_SPEECH) and the stream usage to voice communication (STREAM_USAGE_VOICE_COMMUNICATION).

```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';

const TAG = 'VoiceCallDemoForAudioRenderer';
// Similar to developing audio playback with AudioRenderer; the key differences are the audioRendererInfo parameter and the source of the audio data.
let context = getContext(this);
let renderModel: audio.AudioRenderer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // Sampling rate
  channels: audio.AudioChannel.CHANNEL_2, // Channels
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sample format
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding type
}
let audioRendererInfo: audio.AudioRendererInfo = {
  // Use the parameters appropriate for the call scenario
  content: audio.ContentType.CONTENT_TYPE_SPEECH, // Content type: speech
  usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // Stream usage: voice communication
  rendererFlags: 0 // Renderer flags: 0 by default
}
let audioRendererOptions: audio.AudioRendererOptions = {
  streamInfo: audioStreamInfo,
  rendererInfo: audioRendererInfo
}

// Initialization: create the instance and subscribe to events
audio.createAudioRenderer(audioRendererOptions, (err: BusinessError, renderer: audio.AudioRenderer) => { // Create an AudioRenderer instance
  if (!err) {
    console.info(`${TAG}: creating AudioRenderer success`);
    renderModel = renderer;
    if (renderModel !== undefined) {
      renderModel.on('stateChange', (state: audio.AudioState) => { // Subscribe to state changes; the callback is invoked when the renderer transitions to the given state
        if (state === audio.AudioState.STATE_PREPARED) {
          console.info('audio renderer state is: STATE_PREPARED');
        }
        if (state === audio.AudioState.STATE_RUNNING) {
          console.info('audio renderer state is: STATE_RUNNING');
        }
      });
      renderModel.on('markReach', 1000, (position: number) => { // Subscribe to the markReach event; the callback is invoked when 1000 frames have been rendered
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
    }
  } else {
    console.error(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
  }
});

// Start rendering
async function start() {
  if (renderModel !== undefined) {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(renderModel.state.valueOf()) === -1) { // Rendering can start only when the state is STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
    await renderModel.start(); // Start rendering
    const bufferSize: number = await renderModel.getBufferSize();
    // Reading an audio file is used here only as an example; in real audio call development, the audio data transmitted from the peer should be played instead
    let path = context.filesDir;
    const filePath = path + '/voice_call_data.wav'; // Sandbox path; the actual path is /data/storage/el2/base/haps/entry/files/voice_call_data.wav
    let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
    let stat = await fs.stat(filePath);
    let buf = new ArrayBuffer(bufferSize);
    let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1); // Number of buffers needed to read the whole file
    class Option {
      offset: number = 0
      length: number = 0
    }
    for (let i = 0; i < len; i++) {
      let options: Option = {
        offset: i * bufferSize,
        length: bufferSize
      };
      let readSize = await fs.read(file.fd, buf, options);
      // buf holds the audio data to be written to the buffer. You can preprocess the data before calling AudioRenderer.write() to customize playback. AudioRenderer reads the data written to the buffer and renders it.
      let writeSize: number = await renderModel.write(buf);
      if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) { // Stop rendering if the renderer has entered the STATE_RELEASED state
        fs.close(file);
        await renderModel.stop();
      }
      if (renderModel.state.valueOf() === audio.AudioState.STATE_RUNNING) {
        if (i === len - 1) { // Stop rendering when the audio file has been read to the end
          fs.close(file);
          await renderModel.stop();
        }
      }
    }
  }
}

// Pause rendering
async function pause() {
  if (renderModel !== undefined) {
    // Rendering can be paused only when the renderer is in the STATE_RUNNING state
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING) {
      console.info('Renderer is not running');
      return;
    }
    await renderModel.pause(); // Pause rendering
    if (renderModel.state.valueOf() === audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is paused.');
    } else {
      console.error('Pausing renderer failed.');
    }
  }
}

// Stop rendering
async function stop() {
  if (renderModel !== undefined) {
    // Rendering can be stopped only when the renderer is in the STATE_RUNNING or STATE_PAUSED state
    if (renderModel.state.valueOf() !== audio.AudioState.STATE_RUNNING && renderModel.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Renderer is not running or paused.');
      return;
    }
    await renderModel.stop(); // Stop rendering
    if (renderModel.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Renderer stopped.');
    } else {
      console.error('Stopping renderer failed.');
    }
  }
}

// Destroy the instance and release resources
async function release() {
  if (renderModel !== undefined) {
    // The renderer can be released only when it is not already in the STATE_RELEASED state
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer already released');
      return;
    }
    await renderModel.release(); // Release resources
    if (renderModel.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Renderer released');
    } else {
      console.error('Renderer release failed.');
    }
  }
}
```
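
A minimal usage sketch of the functions above (the call sequence is illustrative; note that createAudioRenderer completes asynchronously, so start() should only be invoked after the creation callback has run):

```ts
// Hypothetical call sequence for the functions defined above.
async function playPeerVoice() {
  await start();   // Start rendering; here this also reads and writes the sample file until it ends
  await stop();    // Stop rendering (returns early if start() already stopped at the end of the file)
  await release(); // Release resources once the renderer is no longer needed
}
```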

## Using AudioCapturer to Record the Local User's Voice

This process is similar to [developing audio recording with AudioCapturer](using-audiocapturer-for-recording.md). The key differences lie in the audioCapturerInfo parameter and the direction of the audio data flow. In audioCapturerInfo, set the source type to voice communication (SOURCE_TYPE_VOICE_COMMUNICATION).

```ts
import audio from '@ohos.multimedia.audio';
import fs from '@ohos.file.fs';
import { BusinessError } from '@ohos.base';

let context = getContext(this);
const TAG = 'VoiceCallDemoForAudioCapturer';
// Similar to developing audio recording with AudioCapturer; the key differences are the audioCapturerInfo parameter and the direction of the audio data flow.
let audioCapturer: audio.AudioCapturer | undefined = undefined;
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // Sampling rate
  channels: audio.AudioChannel.CHANNEL_1, // Channels
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // Sample format
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // Encoding type
}
let audioCapturerInfo: audio.AudioCapturerInfo = {
  // Use the parameters appropriate for the call scenario
  source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // Source type: voice communication
  capturerFlags: 0 // Capturer flags: 0 by default
}
let audioCapturerOptions: audio.AudioCapturerOptions = {
  streamInfo: audioStreamInfo,
  capturerInfo: audioCapturerInfo
}

// Initialization: create the instance and subscribe to events
async function init() {
  audio.createAudioCapturer(audioCapturerOptions, (err: BusinessError, capturer: audio.AudioCapturer) => { // Create an AudioCapturer instance
    if (err) {
      console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
      return;
    }
    console.info(`${TAG}: create AudioCapturer success`);
    audioCapturer = capturer;
    if (audioCapturer !== undefined) {
      audioCapturer.on('markReach', 1000, (position: number) => { // Subscribe to the markReach event; the callback is invoked when 1000 frames have been captured
        if (position === 1000) {
          console.info('ON Triggered successfully');
        }
      });
      audioCapturer.on('periodReach', 2000, (position: number) => { // Subscribe to the periodReach event; the callback is invoked each time 2000 frames have been captured
        if (position === 2000) {
          console.info('ON Triggered successfully');
        }
      });
    }
  });
}

// Start capturing
async function start() {
  if (audioCapturer !== undefined) {
    let stateGroup: number[] = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf(audioCapturer.state.valueOf()) === -1) { // Capturing can start only when the state is STATE_PREPARED, STATE_PAUSED, or STATE_STOPPED
      console.error(`${TAG}: start failed`);
      return;
    }
    await audioCapturer.start(); // Start capturing
    // Writing the audio data to a file is used here only as an example; in real audio call development, the locally captured audio data must be encoded, packetized, and sent to the peer over the network
    const path = context.filesDir + '/voice_call_data.wav'; // Path where the captured audio file is stored
    let file = fs.openSync(path, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE); // Create the file if it does not exist
    let fd = file.fd;
    let numBuffersToCapture = 150; // Write the captured data 150 times in a loop
    let count = 0;
    class Options {
      offset: number = 0
      length: number = 0
    }
    while (numBuffersToCapture) {
      let bufferSize: number = await audioCapturer.getBufferSize();
      let buffer: ArrayBuffer = await audioCapturer.read(bufferSize, true);
      let options: Options = {
        offset: count * bufferSize,
        length: bufferSize
      };
      if (buffer === undefined) {
        console.error(`${TAG}: read buffer failed`);
      } else {
        let writeLen = fs.writeSync(fd, buffer, options);
        console.info(`${TAG}: write data: ${writeLen}`);
      }
      numBuffersToCapture--;
      count++;
    }
  }
}

// Stop capturing
async function stop() {
  if (audioCapturer !== undefined) {
    // Capturing can be stopped only when the capturer is in the STATE_RUNNING or STATE_PAUSED state
    if (audioCapturer.state.valueOf() !== audio.AudioState.STATE_RUNNING && audioCapturer.state.valueOf() !== audio.AudioState.STATE_PAUSED) {
      console.info('Capturer is not running or paused');
      return;
    }
    await audioCapturer.stop(); // Stop capturing
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_STOPPED) {
      console.info('Capturer stopped');
    } else {
      console.error('Capturer stop failed');
    }
  }
}

// Destroy the instance and release resources
async function release() {
  if (audioCapturer !== undefined) {
    // The capturer can be released only when it is not in the STATE_RELEASED or STATE_NEW state
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED || audioCapturer.state.valueOf() === audio.AudioState.STATE_NEW) {
      console.info('Capturer already released');
      return;
    }
    await audioCapturer.release(); // Release resources
    if (audioCapturer.state.valueOf() === audio.AudioState.STATE_RELEASED) {
      console.info('Capturer released');
    } else {
      console.error('Capturer release failed');
    }
  }
}
```
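
A minimal usage sketch of the functions above (again illustrative; createAudioCapturer completes asynchronously inside init(), so in a real app start() should only be invoked after the creation callback has run):

```ts
// Hypothetical call sequence for the functions defined above.
async function recordLocalVoice() {
  await init();    // Create the capturer and subscribe to events
  await start();   // Start capturing; here this also writes 150 buffers to the sample file
  await stop();    // Stop capturing
  await release(); // Release resources once the capturer is no longer needed
}
```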

## Samples

The following sample is provided to help you better understand audio call development:

- [Audio Call Sample (ArkTS) (Full SDK) (API9)](https://gitee.com/openharmony/applications_app_samples/tree/OpenHarmony-4.0-Release/code/BasicFeature/Media/VoiceCallDemo)