/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/voice_engine/output_mixer.h"

#include "webrtc/modules/audio_processing/include/audio_processing.h"
#include "webrtc/modules/utility/interface/audio_frame_operations.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/trace.h"
#include "webrtc/voice_engine/include/voe_external_media.h"
#include "webrtc/voice_engine/statistics.h"
#include "webrtc/voice_engine/utility.h"

namespace webrtc {
namespace voe {

void
OutputMixer::NewMixedAudio(int32_t id,
                           const AudioFrame& generalAudioFrame,
                           const AudioFrame** uniqueAudioFrames,
                           uint32_t size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::NewMixedAudio(id=%d, size=%u)", id, size);

    _audioFrame.CopyFrom(generalAudioFrame);
    _audioFrame.id_ = id;
}

void OutputMixer::MixedParticipants(
    int32_t id,
    const ParticipantStatistics* participantStatistics,
    uint32_t size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::MixedParticipants(id=%d, size=%u)", id, size);
}

void OutputMixer::VADPositiveParticipants(int32_t id,
    const ParticipantStatistics* participantStatistics, uint32_t size)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::VADPositiveParticipants(id=%d, size=%u)",
                 id, size);
}

void OutputMixer::MixedAudioLevel(int32_t id, uint32_t level)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::MixedAudioLevel(id=%d, level=%u)", id, level);
}

void OutputMixer::PlayNotification(int32_t id, uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayNotification(id=%d, durationMs=%d)",
                 id, durationMs);
    // Not implemented yet.
}

void OutputMixer::RecordNotification(int32_t id,
                                     uint32_t durationMs)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordNotification(id=%d, durationMs=%d)",
                 id, durationMs);

    // Not implemented yet.
}

void OutputMixer::PlayFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::PlayFileEnded(id=%d)", id);

    // Not needed.
}

void OutputMixer::RecordFileEnded(int32_t id)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded(id=%d)", id);
    assert(id == _instanceId);

    CriticalSectionScoped cs(&_fileCritSect);
    _outputFileRecording = false;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RecordFileEnded() => "
                 "output file recorder module is shutdown");
}

int32_t
OutputMixer::Create(OutputMixer*& mixer, uint32_t instanceId)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                 "OutputMixer::Create(instanceId=%d)", instanceId);
    mixer = new OutputMixer(instanceId);
    if (mixer == NULL)
    {
        WEBRTC_TRACE(kTraceMemory, kTraceVoice, instanceId,
                     "OutputMixer::Create() unable to allocate memory for "
                     "mixer");
        return -1;
    }
    return 0;
}

OutputMixer::OutputMixer(uint32_t instanceId) :
    _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _mixerModule(*AudioConferenceMixer::Create(instanceId)),
    _audioLevel(),
    _dtmfGenerator(instanceId),
    _instanceId(instanceId),
    _externalMediaCallbackPtr(NULL),
    _externalMedia(false),
    _panLeft(1.0f),
    _panRight(1.0f),
    _mixingFrequencyHz(8000),
    _outputFileRecorderPtr(NULL),
    _outputFileRecording(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::OutputMixer() - ctor");

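    // Register this object with the conference mixer so that it receives the
    // mixed audio stream and periodic mixer status callbacks.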
    if ((_mixerModule.RegisterMixedStreamCallback(*this) == -1) ||
        (_mixerModule.RegisterMixerStatusCallback(*this, 100) == -1))
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::OutputMixer() failed to register mixer "
                     "callbacks");
    }

    _dtmfGenerator.Init();
}

void
OutputMixer::Destroy(OutputMixer*& mixer)
{
    if (mixer)
    {
        delete mixer;
        mixer = NULL;
    }
}

OutputMixer::~OutputMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::~OutputMixer() - dtor");
    if (_externalMedia)
    {
        DeRegisterExternalMediaProcessing();
    }
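    // Stop and release any playout file recorder that is still active.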
    {
        CriticalSectionScoped cs(&_fileCritSect);
        if (_outputFileRecorderPtr)
        {
            _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
            _outputFileRecorderPtr->StopRecording();
            FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
            _outputFileRecorderPtr = NULL;
        }
    }
    _mixerModule.UnRegisterMixerStatusCallback();
    _mixerModule.UnRegisterMixedStreamCallback();
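    // The mixer module and the critical sections are reference members that
    // were allocated on the heap in the constructor's initializer list, so
    // they are released here through their addresses.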
    delete &_mixerModule;
    delete &_callbackCritSect;
    delete &_fileCritSect;
}

int32_t
OutputMixer::SetEngineInformation(voe::Statistics& engineStatistics)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetEngineInformation()");
    _engineStatisticsPtr = &engineStatistics;
    return 0;
}

int32_t
OutputMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetAudioProcessingModule("
                 "audioProcessingModule=0x%x)", audioProcessingModule);
    _audioProcessingModulePtr = audioProcessingModule;
    return 0;
}

int OutputMixer::RegisterExternalMediaProcessing(
    VoEMediaProcess& process_object)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::RegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(&_callbackCritSect);
    _externalMediaCallbackPtr = &process_object;
    _externalMedia = true;

    return 0;
}

int OutputMixer::DeRegisterExternalMediaProcessing()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::DeRegisterExternalMediaProcessing()");

    CriticalSectionScoped cs(&_callbackCritSect);
    _externalMedia = false;
    _externalMediaCallbackPtr = NULL;

    return 0;
}

int OutputMixer::PlayDtmfTone(uint8_t eventCode, int lengthMs,
                              int attenuationDb)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "OutputMixer::PlayDtmfTone()");
    if (_dtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_STILL_PLAYING_PREV_DTMF,
                                           kTraceError,
                                           "OutputMixer::PlayDtmfTone()");
        return -1;
    }
    return 0;
}

int32_t
OutputMixer::SetMixabilityStatus(MixerParticipant& participant,
                                 bool mixable)
{
    return _mixerModule.SetMixabilityStatus(participant, mixable);
}

int32_t
OutputMixer::SetAnonymousMixabilityStatus(MixerParticipant& participant,
                                          bool mixable)
{
    return _mixerModule.SetAnonymousMixabilityStatus(participant, mixable);
}

int32_t
OutputMixer::MixActiveChannels()
{
    return _mixerModule.Process();
}

int
OutputMixer::GetSpeechOutputLevel(uint32_t& level)
{
    int8_t currentLevel = _audioLevel.Level();
    level = static_cast<uint32_t>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevel() => level=%u", level);
    return 0;
}

int
OutputMixer::GetSpeechOutputLevelFullRange(uint32_t& level)
{
    int16_t currentLevel = _audioLevel.LevelFullRange();
    level = static_cast<uint32_t>(currentLevel);
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetSpeechOutputLevelFullRange() => level=%u", level);
    return 0;
}

int
OutputMixer::SetOutputVolumePan(float left, float right)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::SetOutputVolumePan()");
    _panLeft = left;
    _panRight = right;
    return 0;
}

int
OutputMixer::GetOutputVolumePan(float& left, float& right)
{
    left = _panLeft;
    right = _panRight;
    WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "GetOutputVolumePan() => left=%2.1f, right=%2.1f",
                 left, right);
    return 0;
}

int OutputMixer::StartRecordingPlayout(const char* fileName,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout(fileName=%s)", fileName);

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0);
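    // Fallback codec (16 kHz linear PCM, mono) used when no codec is supplied.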
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

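    // Pick the file format: raw 16 kHz PCM when no codec is given, WAV for
    // linear PCM and G.711 payloads, otherwise a compressed file format.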
    if ((codecInst != NULL) &&
        ((codecInst->channels < 1) || (codecInst->channels > 2)))
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(
            fileName,
            (const CodecInst&)*codecInst,
            notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StartRecordingPlayout(OutStream* stream,
                                       const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StartRecordingPlayout()");

    if (_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                     "StartRecordingPlayout() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0);
    CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000};

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingPlayout() invalid compression");
        return -1;
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    }
    else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) ||
             (STR_CASE_CMP(codecInst->plname, "PCMA") == 0))
    {
        format = kFileFormatWavFile;
    }
    else
    {
        format = kFileFormatCompressedFile;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    // Destroy the old instance.
    if (_outputFileRecorderPtr)
    {
        _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
    }

    _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
        _instanceId,
        (const FileFormats)format);
    if (_outputFileRecorderPtr == NULL)
    {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingPlayout() fileRecorder format is not correct");
        return -1;
    }

    if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream,
                                                        *codecInst,
                                                        notificationTime) != 0)
    {
        _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        _outputFileRecorderPtr->StopRecording();
        FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
        _outputFileRecorderPtr = NULL;
        return -1;
    }

    _outputFileRecorderPtr->RegisterModuleFileCallback(this);
    _outputFileRecording = true;

    return 0;
}

int OutputMixer::StopRecordingPlayout()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
                 "OutputMixer::StopRecordingPlayout()");

    if (!_outputFileRecording)
    {
        WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
                     "StopRecordingPlayout() file is not recording");
        return -1;
    }

    CriticalSectionScoped cs(&_fileCritSect);

    if (_outputFileRecorderPtr->StopRecording() != 0)
    {
        _engineStatisticsPtr->SetLastError(
            VE_STOP_RECORDING_FAILED, kTraceError,
            "StopRecording(), could not stop recording");
        return -1;
    }
    _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
    FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
    _outputFileRecorderPtr = NULL;
    _outputFileRecording = false;

    return 0;
}

int OutputMixer::GetMixedAudio(int sample_rate_hz,
                               int num_channels,
                               AudioFrame* frame) {
  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
               "OutputMixer::GetMixedAudio(sample_rate_hz=%d, num_channels=%d)",
               sample_rate_hz, num_channels);

  // --- Record playout if enabled
  {
    CriticalSectionScoped cs(&_fileCritSect);
    if (_outputFileRecording && _outputFileRecorderPtr)
      _outputFileRecorderPtr->RecordAudioToFile(_audioFrame);
  }

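  // Deliver the mixed frame in the sample rate and channel layout requested
  // by the caller, remixing and resampling as needed.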
  frame->num_channels_ = num_channels;
  frame->sample_rate_hz_ = sample_rate_hz;
  // TODO(andrew): Ideally the downmixing would occur much earlier, in
  // AudioCodingModule.
  RemixAndResample(_audioFrame, &resampler_, frame);
  return 0;
}

int32_t
OutputMixer::DoOperationsOnCombinedSignal(bool feed_data_to_apm)
{
    if (_audioFrame.sample_rate_hz_ != _mixingFrequencyHz)
    {
        WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
                     "OutputMixer::DoOperationsOnCombinedSignal() => "
                     "mixing frequency = %d", _audioFrame.sample_rate_hz_);
        _mixingFrequencyHz = _audioFrame.sample_rate_hz_;
    }

    // --- Insert inband Dtmf tone
    if (_dtmfGenerator.IsAddingTone())
    {
        InsertInbandDtmfTone();
    }

    // Scale left and/or right channel(s) if balance is active
    if (_panLeft != 1.0 || _panRight != 1.0)
    {
        if (_audioFrame.num_channels_ == 1)
        {
            AudioFrameOperations::MonoToStereo(&_audioFrame);
        }
        else
        {
            // Pure stereo mode (we are receiving a stereo signal).
        }

        assert(_audioFrame.num_channels_ == 2);
        AudioFrameOperations::Scale(_panLeft, _panRight, _audioFrame);
    }

    // --- Far-end Voice Quality Enhancement (AudioProcessing Module)
    if (feed_data_to_apm)
        APMAnalyzeReverseStream();

    // --- External media processing
    {
        CriticalSectionScoped cs(&_callbackCritSect);
        if (_externalMedia)
        {
            const bool is_stereo = (_audioFrame.num_channels_ == 2);
            if (_externalMediaCallbackPtr)
            {
                _externalMediaCallbackPtr->Process(
                    -1,
                    kPlaybackAllChannelsMixed,
                    (int16_t*)_audioFrame.data_,
                    _audioFrame.samples_per_channel_,
                    _audioFrame.sample_rate_hz_,
                    is_stereo);
            }
        }
    }

    // --- Measure audio level (0-9) for the combined signal
    _audioLevel.ComputeLevel(_audioFrame);

    return 0;
}

// ----------------------------------------------------------------------------
// Private methods
// ----------------------------------------------------------------------------

void OutputMixer::APMAnalyzeReverseStream() {
  // Convert from mixing to AudioProcessing sample rate, determined by the send
  // side. Downmix to mono.
  AudioFrame frame;
  frame.num_channels_ = 1;
  frame.sample_rate_hz_ = _audioProcessingModulePtr->input_sample_rate_hz();
  RemixAndResample(_audioFrame, &audioproc_resampler_, &frame);

  if (_audioProcessingModulePtr->AnalyzeReverseStream(&frame) == -1) {
    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
                 "AudioProcessingModule::AnalyzeReverseStream() => error");
  }
}

int
OutputMixer::InsertInbandDtmfTone()
{
    uint16_t sampleRate(0);
    _dtmfGenerator.GetSampleRate(sampleRate);
    if (sampleRate != _audioFrame.sample_rate_hz_)
    {
        // Update sample rate of Dtmf tone since the mixing frequency changed.
        _dtmfGenerator.SetSampleRate(
            (uint16_t)(_audioFrame.sample_rate_hz_));
        // Reset the tone to be added taking the new sample rate into account.
        _dtmfGenerator.ResetTone();
    }

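    // Fetch the next 10 ms block of tone samples from the DTMF generator.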
    int16_t toneBuffer[320];
    uint16_t toneSamples(0);
    if (_dtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "OutputMixer::InsertInbandDtmfTone() inserting Dtmf "
                     "tone failed");
        return -1;
    }

    // Replace the mixed audio with the Dtmf tone.
    if (_audioFrame.num_channels_ == 1)
    {
        // mono
        memcpy(_audioFrame.data_, toneBuffer, sizeof(int16_t)
            * toneSamples);
    } else
    {
        // stereo: write the tone to the left channel and mute the right.
        for (int i = 0; i < _audioFrame.samples_per_channel_; i++)
        {
            _audioFrame.data_[2 * i] = toneBuffer[i];
            _audioFrame.data_[2 * i + 1] = 0;
        }
    }
    assert(_audioFrame.samples_per_channel_ == toneSamples);

    return 0;
}

}  // namespace voe
}  // namespace webrtc