1 /*
2  *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS.  All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10 
11 #include "webrtc/voice_engine/channel.h"
12 
13 #include <algorithm>
14 #include <utility>
15 
16 #include "webrtc/base/checks.h"
17 #include "webrtc/base/format_macros.h"
18 #include "webrtc/base/logging.h"
19 #include "webrtc/base/thread_checker.h"
20 #include "webrtc/base/timeutils.h"
21 #include "webrtc/common.h"
22 #include "webrtc/config.h"
23 #include "webrtc/modules/audio_device/include/audio_device.h"
24 #include "webrtc/modules/audio_processing/include/audio_processing.h"
25 #include "webrtc/modules/include/module_common_types.h"
26 #include "webrtc/modules/pacing/packet_router.h"
27 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
28 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
29 #include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
30 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
31 #include "webrtc/modules/utility/include/audio_frame_operations.h"
32 #include "webrtc/modules/utility/include/process_thread.h"
33 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
34 #include "webrtc/system_wrappers/include/trace.h"
35 #include "webrtc/voice_engine/include/voe_base.h"
36 #include "webrtc/voice_engine/include/voe_external_media.h"
37 #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
38 #include "webrtc/voice_engine/output_mixer.h"
39 #include "webrtc/voice_engine/statistics.h"
40 #include "webrtc/voice_engine/transmit_mixer.h"
41 #include "webrtc/voice_engine/utility.h"
42 
43 #if defined(_WIN32)
44 #include <Qos.h>
45 #endif
46 
47 namespace webrtc {
48 namespace voe {
49 
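// Forwards transport feedback callbacks to an observer that may be attached
// or detached at runtime. AddPacket() is called on the pacer thread and
// OnTransportFeedback() on the network thread, so access to the observer
// pointer is guarded by a lock.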
50 class TransportFeedbackProxy : public TransportFeedbackObserver {
51  public:
52   TransportFeedbackProxy() : feedback_observer_(nullptr) {
53     pacer_thread_.DetachFromThread();
54     network_thread_.DetachFromThread();
55   }
56 
57   void SetTransportFeedbackObserver(
58       TransportFeedbackObserver* feedback_observer) {
59     RTC_DCHECK(thread_checker_.CalledOnValidThread());
60     rtc::CritScope lock(&crit_);
61     feedback_observer_ = feedback_observer;
62   }
63 
64   // Implements TransportFeedbackObserver.
65   void AddPacket(uint16_t sequence_number,
66                  size_t length,
67                  bool was_paced) override {
68     RTC_DCHECK(pacer_thread_.CalledOnValidThread());
69     rtc::CritScope lock(&crit_);
70     if (feedback_observer_)
71       feedback_observer_->AddPacket(sequence_number, length, was_paced);
72   }
73   void OnTransportFeedback(const rtcp::TransportFeedback& feedback) override {
74     RTC_DCHECK(network_thread_.CalledOnValidThread());
75     rtc::CritScope lock(&crit_);
76     if (feedback_observer_)
77       feedback_observer_->OnTransportFeedback(feedback);
78   }
79 
80  private:
81   rtc::CriticalSection crit_;
82   rtc::ThreadChecker thread_checker_;
83   rtc::ThreadChecker pacer_thread_;
84   rtc::ThreadChecker network_thread_;
85   TransportFeedbackObserver* feedback_observer_ GUARDED_BY(&crit_);
86 };
87 
88 class TransportSequenceNumberProxy : public TransportSequenceNumberAllocator {
89  public:
90   TransportSequenceNumberProxy() : seq_num_allocator_(nullptr) {
91     pacer_thread_.DetachFromThread();
92   }
93 
94   void SetSequenceNumberAllocator(
95       TransportSequenceNumberAllocator* seq_num_allocator) {
96     RTC_DCHECK(thread_checker_.CalledOnValidThread());
97     rtc::CritScope lock(&crit_);
98     seq_num_allocator_ = seq_num_allocator;
99   }
100 
101   // Implements TransportSequenceNumberAllocator.
102   uint16_t AllocateSequenceNumber() override {
103     RTC_DCHECK(pacer_thread_.CalledOnValidThread());
104     rtc::CritScope lock(&crit_);
105     if (!seq_num_allocator_)
106       return 0;
107     return seq_num_allocator_->AllocateSequenceNumber();
108   }
109 
110  private:
111   rtc::CriticalSection crit_;
112   rtc::ThreadChecker thread_checker_;
113   rtc::ThreadChecker pacer_thread_;
114   TransportSequenceNumberAllocator* seq_num_allocator_ GUARDED_BY(&crit_);
115 };
116 
117 class RtpPacketSenderProxy : public RtpPacketSender {
118  public:
119   RtpPacketSenderProxy() : rtp_packet_sender_(nullptr) {
120   }
121 
122   void SetPacketSender(RtpPacketSender* rtp_packet_sender) {
123     RTC_DCHECK(thread_checker_.CalledOnValidThread());
124     rtc::CritScope lock(&crit_);
125     rtp_packet_sender_ = rtp_packet_sender;
126   }
127 
128   // Implements RtpPacketSender.
129   void InsertPacket(Priority priority,
130                     uint32_t ssrc,
131                     uint16_t sequence_number,
132                     int64_t capture_time_ms,
133                     size_t bytes,
134                     bool retransmission) override {
135     rtc::CritScope lock(&crit_);
136     if (rtp_packet_sender_) {
137       rtp_packet_sender_->InsertPacket(priority, ssrc, sequence_number,
138                                        capture_time_ms, bytes, retransmission);
139     }
140   }
141 
142  private:
143   rtc::ThreadChecker thread_checker_;
144   rtc::CriticalSection crit_;
145   RtpPacketSender* rtp_packet_sender_ GUARDED_BY(&crit_);
146 };
147 
148 // Extend the default RTCP statistics struct with max_jitter, defined as the
149 // maximum jitter value seen in an RTCP report block.
150 struct ChannelStatistics : public RtcpStatistics {
151   ChannelStatistics() : rtcp(), max_jitter(0) {}
152 
153   RtcpStatistics rtcp;
154   uint32_t max_jitter;
155 };
156 
157 // Statistics callback, called at each generation of a new RTCP report block.
158 class StatisticsProxy : public RtcpStatisticsCallback {
159  public:
160   StatisticsProxy(uint32_t ssrc)
161    : stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
162      ssrc_(ssrc) {}
163   virtual ~StatisticsProxy() {}
164 
165   void StatisticsUpdated(const RtcpStatistics& statistics,
166                          uint32_t ssrc) override {
167     if (ssrc != ssrc_)
168       return;
169 
170     CriticalSectionScoped cs(stats_lock_.get());
171     stats_.rtcp = statistics;
172     if (statistics.jitter > stats_.max_jitter) {
173       stats_.max_jitter = statistics.jitter;
174     }
175   }
176 
177   void CNameChanged(const char* cname, uint32_t ssrc) override {}
178 
179   ChannelStatistics GetStats() {
180     CriticalSectionScoped cs(stats_lock_.get());
181     return stats_;
182   }
183 
184  private:
185   // StatisticsUpdated calls are triggered from threads in the RTP module,
186   // while GetStats calls can be triggered from the public voice engine API,
187   // hence synchronization is needed.
188   rtc::scoped_ptr<CriticalSectionWrapper> stats_lock_;
189   const uint32_t ssrc_;
190   ChannelStatistics stats_;
191 };
192 
193 class VoERtcpObserver : public RtcpBandwidthObserver {
194  public:
195   explicit VoERtcpObserver(Channel* owner) : owner_(owner) {}
196   virtual ~VoERtcpObserver() {}
197 
198   void OnReceivedEstimatedBitrate(uint32_t bitrate) override {
199     // Not used for Voice Engine.
200   }
201 
202   void OnReceivedRtcpReceiverReport(const ReportBlockList& report_blocks,
203                                     int64_t rtt,
204                                     int64_t now_ms) override {
205     // TODO(mflodman): Do we need to aggregate reports here or can we just send
206     // what we get? I.e. do we ever get multiple reports bundled into one RTCP
207     // report for VoiceEngine?
208     if (report_blocks.empty())
209       return;
210 
211     int fraction_lost_aggregate = 0;
212     int total_number_of_packets = 0;
213 
214     // If receiving multiple report blocks, calculate the weighted average based
215     // on the number of packets a report refers to.
216     for (ReportBlockList::const_iterator block_it = report_blocks.begin();
217          block_it != report_blocks.end(); ++block_it) {
218       // Find the previous extended high sequence number for this remote SSRC,
219       // to calculate the number of RTP packets this report refers to. Ignore if
220       // we haven't seen this SSRC before.
221       std::map<uint32_t, uint32_t>::iterator seq_num_it =
222           extended_max_sequence_number_.find(block_it->sourceSSRC);
223       int number_of_packets = 0;
224       if (seq_num_it != extended_max_sequence_number_.end()) {
225         number_of_packets = block_it->extendedHighSeqNum - seq_num_it->second;
226       }
227       fraction_lost_aggregate += number_of_packets * block_it->fractionLost;
228       total_number_of_packets += number_of_packets;
229 
230       extended_max_sequence_number_[block_it->sourceSSRC] =
231           block_it->extendedHighSeqNum;
232     }
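    // Adding total_number_of_packets / 2 before dividing rounds the weighted
    // average to the nearest integer instead of truncating it.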
233     int weighted_fraction_lost = 0;
234     if (total_number_of_packets > 0) {
235       weighted_fraction_lost = (fraction_lost_aggregate +
236           total_number_of_packets / 2) / total_number_of_packets;
237     }
238     owner_->OnIncomingFractionLoss(weighted_fraction_lost);
239   }
240 
241  private:
242   Channel* owner_;
243   // Maps remote side ssrc to extended highest sequence number received.
244   std::map<uint32_t, uint32_t> extended_max_sequence_number_;
245 };
246 
247 int32_t
248 Channel::SendData(FrameType frameType,
249                   uint8_t   payloadType,
250                   uint32_t  timeStamp,
251                   const uint8_t*  payloadData,
252                   size_t    payloadSize,
253                   const RTPFragmentationHeader* fragmentation)
254 {
255     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
256                  "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
257                  " payloadSize=%" PRIuS ", fragmentation=0x%x)",
258                  frameType, payloadType, timeStamp,
259                  payloadSize, fragmentation);
260 
261     if (_includeAudioLevelIndication)
262     {
263         // Store current audio level in the RTP/RTCP module.
264         // The level will be used in combination with voice-activity state
265         // (frameType) to add an RTP header extension
266         _rtpRtcpModule->SetAudioLevel(rms_level_.RMS());
267     }
268 
269     // Push data from ACM to RTP/RTCP-module to deliver audio frame for
270     // packetization.
271     // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
272     if (_rtpRtcpModule->SendOutgoingData((FrameType&)frameType,
273                                         payloadType,
274                                         timeStamp,
275                                         // Leaving the time when this frame was
276                                         // received from the capture device as
277                                         // undefined for voice for now.
278                                         -1,
279                                         payloadData,
280                                         payloadSize,
281                                         fragmentation) == -1)
282     {
283         _engineStatisticsPtr->SetLastError(
284             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
285             "Channel::SendData() failed to send data to RTP/RTCP module");
286         return -1;
287     }
288 
289     _lastLocalTimeStamp = timeStamp;
290     _lastPayloadType = payloadType;
291 
292     return 0;
293 }
294 
295 int32_t
296 Channel::InFrameType(FrameType frame_type)
297 {
298     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
299                  "Channel::InFrameType(frame_type=%d)", frame_type);
300 
301     CriticalSectionScoped cs(&_callbackCritSect);
302     _sendFrameType = (frame_type == kAudioFrameSpeech);
303     return 0;
304 }
305 
306 int32_t
307 Channel::OnRxVadDetected(int vadDecision)
308 {
309     CriticalSectionScoped cs(&_callbackCritSect);
310     if (_rxVadObserverPtr)
311     {
312         _rxVadObserverPtr->OnRxVad(_channelId, vadDecision);
313     }
314 
315     return 0;
316 }
317 
318 bool Channel::SendRtp(const uint8_t* data,
319                       size_t len,
320                       const PacketOptions& options) {
321     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
322                  "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
323 
324     CriticalSectionScoped cs(&_callbackCritSect);
325 
326     if (_transportPtr == NULL)
327     {
328         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
329                      "Channel::SendPacket() failed to send RTP packet due to"
330                      " invalid transport object");
331         return false;
332     }
333 
334     uint8_t* bufferToSendPtr = (uint8_t*)data;
335     size_t bufferLength = len;
336 
337     if (!_transportPtr->SendRtp(bufferToSendPtr, bufferLength, options)) {
338       std::string transport_name =
339           _externalTransport ? "external transport" : "WebRtc sockets";
340       WEBRTC_TRACE(kTraceError, kTraceVoice,
341                    VoEId(_instanceId,_channelId),
342                    "Channel::SendPacket() RTP transmission using %s failed",
343                    transport_name.c_str());
344       return false;
345     }
346     return true;
347 }
348 
349 bool
350 Channel::SendRtcp(const uint8_t *data, size_t len)
351 {
352     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
353                  "Channel::SendRtcp(len=%" PRIuS ")", len);
354 
355     CriticalSectionScoped cs(&_callbackCritSect);
356     if (_transportPtr == NULL)
357     {
358         WEBRTC_TRACE(kTraceError, kTraceVoice,
359                      VoEId(_instanceId,_channelId),
360                      "Channel::SendRtcp() failed to send RTCP packet"
361                      " due to invalid transport object");
362         return false;
363     }
364 
365     uint8_t* bufferToSendPtr = (uint8_t*)data;
366     size_t bufferLength = len;
367 
368     int n = _transportPtr->SendRtcp(bufferToSendPtr, bufferLength);
369     if (n < 0) {
370       std::string transport_name =
371           _externalTransport ? "external transport" : "WebRtc sockets";
372       WEBRTC_TRACE(kTraceInfo, kTraceVoice,
373                    VoEId(_instanceId,_channelId),
374                    "Channel::SendRtcp() transmission using %s failed",
375                    transport_name.c_str());
376       return false;
377     }
378     return true;
379 }
380 
381 void Channel::OnPlayTelephoneEvent(uint8_t event,
382                                    uint16_t lengthMs,
383                                    uint8_t volume) {
384     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
385                  "Channel::OnPlayTelephoneEvent(event=%u, lengthMs=%u,"
386                  " volume=%u)", event, lengthMs, volume);
387 
388     if (!_playOutbandDtmfEvent || (event > 15))
389     {
390         // Ignore callback since feedback is disabled or event is not a
391         // Dtmf tone event.
392         return;
393     }
394 
395     assert(_outputMixerPtr != NULL);
396 
397     // Start playing out the Dtmf tone (if playout is enabled).
398     // Reduce the length of the tone by 80 ms to reduce the risk of echo.
399     _outputMixerPtr->PlayDtmfTone(event, lengthMs - 80, volume);
400 }
401 
402 void
403 Channel::OnIncomingSSRCChanged(uint32_t ssrc)
404 {
405     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
406                  "Channel::OnIncomingSSRCChanged(SSRC=%d)", ssrc);
407 
408     // Update ssrc so that NTP for AV sync can be updated.
409     _rtpRtcpModule->SetRemoteSSRC(ssrc);
410 }
411 
412 void Channel::OnIncomingCSRCChanged(uint32_t CSRC, bool added) {
413   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
414                "Channel::OnIncomingCSRCChanged(CSRC=%d, added=%d)", CSRC,
415                added);
416 }
417 
418 int32_t Channel::OnInitializeDecoder(
419     int8_t payloadType,
420     const char payloadName[RTP_PAYLOAD_NAME_SIZE],
421     int frequency,
422     size_t channels,
423     uint32_t rate) {
424     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
425                  "Channel::OnInitializeDecoder(payloadType=%d, "
426                  "payloadName=%s, frequency=%u, channels=%" PRIuS ", rate=%u)",
427                  payloadType, payloadName, frequency, channels, rate);
428 
429     CodecInst receiveCodec = {0};
430     CodecInst dummyCodec = {0};
431 
432     receiveCodec.pltype = payloadType;
433     receiveCodec.plfreq = frequency;
434     receiveCodec.channels = channels;
435     receiveCodec.rate = rate;
436     strncpy(receiveCodec.plname, payloadName, RTP_PAYLOAD_NAME_SIZE - 1);
437 
438     audio_coding_->Codec(payloadName, &dummyCodec, frequency, channels);
439     receiveCodec.pacsize = dummyCodec.pacsize;
440 
441     // Register the new codec to the ACM
442     if (audio_coding_->RegisterReceiveCodec(receiveCodec) == -1)
443     {
444         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
445                      VoEId(_instanceId, _channelId),
446                      "Channel::OnInitializeDecoder() invalid codec ("
447                      "pt=%d, name=%s) received - 1", payloadType, payloadName);
448         _engineStatisticsPtr->SetLastError(VE_AUDIO_CODING_MODULE_ERROR);
449         return -1;
450     }
451 
452     return 0;
453 }
454 
455 int32_t
456 Channel::OnReceivedPayloadData(const uint8_t* payloadData,
457                                size_t payloadSize,
458                                const WebRtcRTPHeader* rtpHeader)
459 {
460     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
461                  "Channel::OnReceivedPayloadData(payloadSize=%" PRIuS ","
462                  " payloadType=%u, audioChannel=%" PRIuS ")",
463                  payloadSize,
464                  rtpHeader->header.payloadType,
465                  rtpHeader->type.Audio.channel);
466 
467     if (!channel_state_.Get().playing)
468     {
469         // Avoid inserting into NetEQ when we are not playing. Count the
470         // packet as discarded.
471         WEBRTC_TRACE(kTraceStream, kTraceVoice,
472                      VoEId(_instanceId, _channelId),
473                      "received packet is discarded since playing is not"
474                      " activated");
475         _numberOfDiscardedPackets++;
476         return 0;
477     }
478 
479     // Push the incoming payload (parsed and ready for decoding) into the ACM
480     if (audio_coding_->IncomingPacket(payloadData,
481                                       payloadSize,
482                                       *rtpHeader) != 0)
483     {
484         _engineStatisticsPtr->SetLastError(
485             VE_AUDIO_CODING_MODULE_ERROR, kTraceWarning,
486             "Channel::OnReceivedPayloadData() unable to push data to the ACM");
487         return -1;
488     }
489 
490     // Update the packet delay.
491     UpdatePacketDelay(rtpHeader->header.timestamp,
492                       rtpHeader->header.sequenceNumber);
493 
494     int64_t round_trip_time = 0;
495     _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), &round_trip_time,
496                         NULL, NULL, NULL);
497 
498     std::vector<uint16_t> nack_list = audio_coding_->GetNackList(
499         round_trip_time);
500     if (!nack_list.empty()) {
501       // Can't use nack_list.data() since it's not supported by all
502       // compilers.
503       ResendPackets(&(nack_list[0]), static_cast<int>(nack_list.size()));
504     }
505     return 0;
506 }
507 
508 bool Channel::OnRecoveredPacket(const uint8_t* rtp_packet,
509                                 size_t rtp_packet_length) {
510   RTPHeader header;
511   if (!rtp_header_parser_->Parse(rtp_packet, rtp_packet_length, &header)) {
512     WEBRTC_TRACE(kTraceDebug, webrtc::kTraceVoice, _channelId,
513                  "IncomingPacket invalid RTP header");
514     return false;
515   }
516   header.payload_type_frequency =
517       rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
518   if (header.payload_type_frequency < 0)
519     return false;
520   return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
521 }
522 
523 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
524 {
525     if (event_log_) {
526       unsigned int ssrc;
527       RTC_CHECK_EQ(GetLocalSSRC(ssrc), 0);
528       event_log_->LogAudioPlayout(ssrc);
529     }
530     // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
531     if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_,
532                                        audioFrame) == -1)
533     {
534         WEBRTC_TRACE(kTraceError, kTraceVoice,
535                      VoEId(_instanceId,_channelId),
536                      "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
537         // In all likelihood, the audio in this frame is garbage. We return an
538         // error so that the audio mixer module doesn't add it to the mix. As
539         // a result, it won't be played out and the actions skipped here are
540         // irrelevant.
541         return -1;
542     }
543 
544     if (_RxVadDetection)
545     {
546         UpdateRxVadDetection(*audioFrame);
547     }
548 
549     // Convert module ID to internal VoE channel ID
550     audioFrame->id_ = VoEChannelId(audioFrame->id_);
551     // Store speech type for dead-or-alive detection
552     _outputSpeechType = audioFrame->speech_type_;
553 
554     ChannelState::State state = channel_state_.Get();
555 
556     if (state.rx_apm_is_enabled) {
557       int err = rx_audioproc_->ProcessStream(audioFrame);
558       if (err) {
559         LOG(LS_ERROR) << "ProcessStream() error: " << err;
560         assert(false);
561       }
562     }
563 
564     {
565       // Pass the audio buffers to an optional sink callback, before applying
566       // scaling/panning, as that applies to the mix operation.
567       // External recipients of the audio (e.g. via AudioTrack), will do their
568       // own mixing/dynamic processing.
569       CriticalSectionScoped cs(&_callbackCritSect);
570       if (audio_sink_) {
571         AudioSinkInterface::Data data(
572             &audioFrame->data_[0],
573             audioFrame->samples_per_channel_, audioFrame->sample_rate_hz_,
574             audioFrame->num_channels_, audioFrame->timestamp_);
575         audio_sink_->OnData(data);
576       }
577     }
578 
579     float output_gain = 1.0f;
580     float left_pan = 1.0f;
581     float right_pan = 1.0f;
582     {
583       CriticalSectionScoped cs(&volume_settings_critsect_);
584       output_gain = _outputGain;
585       left_pan = _panLeft;
586       right_pan = _panRight;
587     }
588 
589     // Output volume scaling
590     if (output_gain < 0.99f || output_gain > 1.01f)
591     {
592         AudioFrameOperations::ScaleWithSat(output_gain, *audioFrame);
593     }
594 
595     // Scale left and/or right channel(s) if stereo and master balance is
596     // active
597 
598     if (left_pan != 1.0f || right_pan != 1.0f)
599     {
600         if (audioFrame->num_channels_ == 1)
601         {
602             // Emulate stereo mode since panning is active.
603             // The mono signal is copied to both left and right channels here.
604             AudioFrameOperations::MonoToStereo(audioFrame);
605         }
606         // For true stereo mode (when we are receiving a stereo signal), no
607         // action is needed.
608 
609         // Do the panning operation (the audio frame contains stereo at this
610         // stage)
611         AudioFrameOperations::Scale(left_pan, right_pan, *audioFrame);
612     }
613 
614     // Mix decoded PCM output with file if file mixing is enabled
615     if (state.output_file_playing)
616     {
617         MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_);
618     }
619 
620     // External media
621     if (_outputExternalMedia)
622     {
623         CriticalSectionScoped cs(&_callbackCritSect);
624         const bool isStereo = (audioFrame->num_channels_ == 2);
625         if (_outputExternalMediaCallbackPtr)
626         {
627           _outputExternalMediaCallbackPtr->Process(
628               _channelId, kPlaybackPerChannel, (int16_t*)audioFrame->data_,
629               audioFrame->samples_per_channel_, audioFrame->sample_rate_hz_,
630               isStereo);
631         }
632     }
633 
634     // Record playout if enabled
635     {
636         CriticalSectionScoped cs(&_fileCritSect);
637 
638         if (_outputFileRecording && _outputFileRecorderPtr)
639         {
640             _outputFileRecorderPtr->RecordAudioToFile(*audioFrame);
641         }
642     }
643 
644     // Measure audio level (0-9)
645     _outputAudioLevel.ComputeLevel(*audioFrame);
646 
647     if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
648       // The first frame with a valid rtp timestamp.
649       capture_start_rtp_time_stamp_ = audioFrame->timestamp_;
650     }
651 
652     if (capture_start_rtp_time_stamp_ >= 0) {
653       // audioFrame.timestamp_ should be valid from now on.
654 
655       // Compute elapsed time.
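      // RTP timestamps advance at the playout sample rate, so dividing the
      // timestamp delta by (samples per millisecond) yields milliseconds.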
656       int64_t unwrap_timestamp =
657           rtp_ts_wraparound_handler_->Unwrap(audioFrame->timestamp_);
658       audioFrame->elapsed_time_ms_ =
659           (unwrap_timestamp - capture_start_rtp_time_stamp_) /
660           (GetPlayoutFrequency() / 1000);
661 
662       {
663         CriticalSectionScoped lock(ts_stats_lock_.get());
664         // Compute ntp time.
665         audioFrame->ntp_time_ms_ = ntp_estimator_.Estimate(
666             audioFrame->timestamp_);
667         // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
668         if (audioFrame->ntp_time_ms_ > 0) {
669           // Compute |capture_start_ntp_time_ms_| so that
670           // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
671           capture_start_ntp_time_ms_ =
672               audioFrame->ntp_time_ms_ - audioFrame->elapsed_time_ms_;
673         }
674       }
675     }
676 
677     return 0;
678 }
679 
680 int32_t
681 Channel::NeededFrequency(int32_t id) const
682 {
683     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
684                  "Channel::NeededFrequency(id=%d)", id);
685 
686     int highestNeeded = 0;
687 
688     // Determine highest needed receive frequency
689     int32_t receiveFrequency = audio_coding_->ReceiveFrequency();
690 
691     // Return the bigger of playout and receive frequency in the ACM.
692     if (audio_coding_->PlayoutFrequency() > receiveFrequency)
693     {
694         highestNeeded = audio_coding_->PlayoutFrequency();
695     }
696     else
697     {
698         highestNeeded = receiveFrequency;
699     }
700 
701     // Special case: if we're playing a file on the playout side
702     // we take that frequency into consideration as well.
703     // This is not needed on the sending side, since the codec will
704     // limit the spectrum anyway.
705     if (channel_state_.Get().output_file_playing)
706     {
707         CriticalSectionScoped cs(&_fileCritSect);
708         if (_outputFilePlayerPtr)
709         {
710             if (_outputFilePlayerPtr->Frequency() > highestNeeded)
711             {
712                 highestNeeded = _outputFilePlayerPtr->Frequency();
713             }
714         }
715     }
716 
717     return highestNeeded;
718 }
719 
720 int32_t Channel::CreateChannel(Channel*& channel,
721                                int32_t channelId,
722                                uint32_t instanceId,
723                                RtcEventLog* const event_log,
724                                const Config& config) {
725     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId,channelId),
726                  "Channel::CreateChannel(channelId=%d, instanceId=%d)",
727         channelId, instanceId);
728 
729     channel = new Channel(channelId, instanceId, event_log, config);
730     if (channel == NULL)
731     {
732         WEBRTC_TRACE(kTraceMemory, kTraceVoice,
733                      VoEId(instanceId,channelId),
734                      "Channel::CreateChannel() unable to allocate memory for"
735                      " channel");
736         return -1;
737     }
738     return 0;
739 }
740 
741 void
742 Channel::PlayNotification(int32_t id, uint32_t durationMs)
743 {
744     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
745                  "Channel::PlayNotification(id=%d, durationMs=%d)",
746                  id, durationMs);
747 
748     // Not implemented yet.
749 }
750 
751 void
752 Channel::RecordNotification(int32_t id, uint32_t durationMs)
753 {
754     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
755                  "Channel::RecordNotification(id=%d, durationMs=%d)",
756                  id, durationMs);
757 
758     // Not implemented yet.
759 }
760 
761 void
762 Channel::PlayFileEnded(int32_t id)
763 {
764     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
765                  "Channel::PlayFileEnded(id=%d)", id);
766 
767     if (id == _inputFilePlayerId)
768     {
769         channel_state_.SetInputFilePlaying(false);
770         WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
771                      VoEId(_instanceId,_channelId),
772                      "Channel::PlayFileEnded() => input file player module is"
773                      " shutdown");
774     }
775     else if (id == _outputFilePlayerId)
776     {
777         channel_state_.SetOutputFilePlaying(false);
778         WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
779                      VoEId(_instanceId,_channelId),
780                      "Channel::PlayFileEnded() => output file player module is"
781                      " shutdown");
782     }
783 }
784 
785 void
786 Channel::RecordFileEnded(int32_t id)
787 {
788     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
789                  "Channel::RecordFileEnded(id=%d)", id);
790 
791     assert(id == _outputFileRecorderId);
792 
793     CriticalSectionScoped cs(&_fileCritSect);
794 
795     _outputFileRecording = false;
796     WEBRTC_TRACE(kTraceStateInfo, kTraceVoice,
797                  VoEId(_instanceId,_channelId),
798                  "Channel::RecordFileEnded() => output file recorder module is"
799                  " shutdown");
800 }
801 
802 Channel::Channel(int32_t channelId,
803                  uint32_t instanceId,
804                  RtcEventLog* const event_log,
805                  const Config& config)
806     : _fileCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
807       _callbackCritSect(*CriticalSectionWrapper::CreateCriticalSection()),
808       volume_settings_critsect_(
809           *CriticalSectionWrapper::CreateCriticalSection()),
810       _instanceId(instanceId),
811       _channelId(channelId),
812       event_log_(event_log),
813       rtp_header_parser_(RtpHeaderParser::Create()),
814       rtp_payload_registry_(
815           new RTPPayloadRegistry(RTPPayloadStrategy::CreateStrategy(true))),
816       rtp_receive_statistics_(
817           ReceiveStatistics::Create(Clock::GetRealTimeClock())),
818       rtp_receiver_(
819           RtpReceiver::CreateAudioReceiver(Clock::GetRealTimeClock(),
820                                            this,
821                                            this,
822                                            this,
823                                            rtp_payload_registry_.get())),
824       telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()),
825       _outputAudioLevel(),
826       _externalTransport(false),
827       _inputFilePlayerPtr(NULL),
828       _outputFilePlayerPtr(NULL),
829       _outputFileRecorderPtr(NULL),
830       // Avoid conflicts with other channels by adding 1024 - 1026;
831       // we won't use as many as 1024 channels.
832       _inputFilePlayerId(VoEModuleId(instanceId, channelId) + 1024),
833       _outputFilePlayerId(VoEModuleId(instanceId, channelId) + 1025),
834       _outputFileRecorderId(VoEModuleId(instanceId, channelId) + 1026),
835       _outputFileRecording(false),
836       _inbandDtmfQueue(VoEModuleId(instanceId, channelId)),
837       _inbandDtmfGenerator(VoEModuleId(instanceId, channelId)),
838       _outputExternalMedia(false),
839       _inputExternalMediaCallbackPtr(NULL),
840       _outputExternalMediaCallbackPtr(NULL),
841       _timeStamp(0),  // This is just an offset; the RTP module will add
842                       // its own random offset.
843       _sendTelephoneEventPayloadType(106),
844       ntp_estimator_(Clock::GetRealTimeClock()),
845       jitter_buffer_playout_timestamp_(0),
846       playout_timestamp_rtp_(0),
847       playout_timestamp_rtcp_(0),
848       playout_delay_ms_(0),
849       _numberOfDiscardedPackets(0),
850       send_sequence_number_(0),
851       ts_stats_lock_(CriticalSectionWrapper::CreateCriticalSection()),
852       rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
853       capture_start_rtp_time_stamp_(-1),
854       capture_start_ntp_time_ms_(-1),
855       _engineStatisticsPtr(NULL),
856       _outputMixerPtr(NULL),
857       _transmitMixerPtr(NULL),
858       _moduleProcessThreadPtr(NULL),
859       _audioDeviceModulePtr(NULL),
860       _voiceEngineObserverPtr(NULL),
861       _callbackCritSectPtr(NULL),
862       _transportPtr(NULL),
863       _rxVadObserverPtr(NULL),
864       _oldVadDecision(-1),
865       _sendFrameType(0),
866       _externalMixing(false),
867       _mixFileWithMicrophone(false),
868       _mute(false),
869       _panLeft(1.0f),
870       _panRight(1.0f),
871       _outputGain(1.0f),
872       _playOutbandDtmfEvent(false),
873       _playInbandDtmfEvent(false),
874       _lastLocalTimeStamp(0),
875       _lastPayloadType(0),
876       _includeAudioLevelIndication(false),
877       _outputSpeechType(AudioFrame::kNormalSpeech),
878       video_sync_lock_(CriticalSectionWrapper::CreateCriticalSection()),
879       _average_jitter_buffer_delay_us(0),
880       _previousTimestamp(0),
881       _recPacketDelayMs(20),
882       _RxVadDetection(false),
883       _rxAgcIsEnabled(false),
884       _rxNsIsEnabled(false),
885       restored_packet_in_use_(false),
886       rtcp_observer_(new VoERtcpObserver(this)),
887       network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock())),
888       assoc_send_channel_lock_(CriticalSectionWrapper::CreateCriticalSection()),
889       associate_send_channel_(ChannelOwner(nullptr)),
890       pacing_enabled_(config.Get<VoicePacing>().enabled),
891       feedback_observer_proxy_(pacing_enabled_ ? new TransportFeedbackProxy()
892                                                : nullptr),
893       seq_num_allocator_proxy_(
894           pacing_enabled_ ? new TransportSequenceNumberProxy() : nullptr),
895       rtp_packet_sender_proxy_(pacing_enabled_ ? new RtpPacketSenderProxy()
896                                                : nullptr) {
897     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
898                  "Channel::Channel() - ctor");
899     AudioCodingModule::Config acm_config;
900     acm_config.id = VoEModuleId(instanceId, channelId);
901     if (config.Get<NetEqCapacityConfig>().enabled) {
902       // Clamp the buffer capacity to at least 20 packets. While going lower
903       // would probably work, it makes little sense.
904       acm_config.neteq_config.max_packets_in_buffer =
905           std::max(20, config.Get<NetEqCapacityConfig>().capacity);
906     }
907     acm_config.neteq_config.enable_fast_accelerate =
908         config.Get<NetEqFastAccelerate>().enabled;
909     audio_coding_.reset(AudioCodingModule::Create(acm_config));
910 
911     _inbandDtmfQueue.ResetDtmf();
912     _inbandDtmfGenerator.Init();
913     _outputAudioLevel.Clear();
914 
915     RtpRtcp::Configuration configuration;
916     configuration.audio = true;
917     configuration.outgoing_transport = this;
918     configuration.audio_messages = this;
919     configuration.receive_statistics = rtp_receive_statistics_.get();
920     configuration.bandwidth_callback = rtcp_observer_.get();
921     configuration.paced_sender = rtp_packet_sender_proxy_.get();
922     configuration.transport_sequence_number_allocator =
923         seq_num_allocator_proxy_.get();
924     configuration.transport_feedback_callback = feedback_observer_proxy_.get();
925 
926     _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
927 
928     statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC()));
929     rtp_receive_statistics_->RegisterRtcpStatisticsCallback(
930         statistics_proxy_.get());
931 
932     Config audioproc_config;
933     audioproc_config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
934     rx_audioproc_.reset(AudioProcessing::Create(audioproc_config));
935 }
936 
937 Channel::~Channel()
938 {
939     rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
940     WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId,_channelId),
941                  "Channel::~Channel() - dtor");
942 
943     if (_outputExternalMedia)
944     {
945         DeRegisterExternalMediaProcessing(kPlaybackPerChannel);
946     }
947     if (channel_state_.Get().input_external_media)
948     {
949         DeRegisterExternalMediaProcessing(kRecordingPerChannel);
950     }
951     StopSend();
952     StopPlayout();
953 
954     {
955         CriticalSectionScoped cs(&_fileCritSect);
956         if (_inputFilePlayerPtr)
957         {
958             _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
959             _inputFilePlayerPtr->StopPlayingFile();
960             FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
961             _inputFilePlayerPtr = NULL;
962         }
963         if (_outputFilePlayerPtr)
964         {
965             _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
966             _outputFilePlayerPtr->StopPlayingFile();
967             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
968             _outputFilePlayerPtr = NULL;
969         }
970         if (_outputFileRecorderPtr)
971         {
972             _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
973             _outputFileRecorderPtr->StopRecording();
974             FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
975             _outputFileRecorderPtr = NULL;
976         }
977     }
978 
979     // The order to safely shut down modules in a channel is:
980     // 1. De-register callbacks in modules
981     // 2. De-register modules in process thread
982     // 3. Destroy modules
983     if (audio_coding_->RegisterTransportCallback(NULL) == -1)
984     {
985         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
986                      VoEId(_instanceId,_channelId),
987                      "~Channel() failed to de-register transport callback"
988                      " (Audio coding module)");
989     }
990     if (audio_coding_->RegisterVADCallback(NULL) == -1)
991     {
992         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
993                      VoEId(_instanceId,_channelId),
994                      "~Channel() failed to de-register VAD callback"
995                      " (Audio coding module)");
996     }
997     // De-register modules in process thread
998     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
999 
1000     // End of modules shutdown
1001 
1002     // Delete other objects
1003     delete &_callbackCritSect;
1004     delete &_fileCritSect;
1005     delete &volume_settings_critsect_;
1006 }
1007 
1008 int32_t
1009 Channel::Init()
1010 {
1011     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1012                  "Channel::Init()");
1013 
1014     channel_state_.Reset();
1015 
1016     // --- Initial sanity
1017 
1018     if ((_engineStatisticsPtr == NULL) ||
1019         (_moduleProcessThreadPtr == NULL))
1020     {
1021         WEBRTC_TRACE(kTraceError, kTraceVoice,
1022                      VoEId(_instanceId,_channelId),
1023                      "Channel::Init() must call SetEngineInformation() first");
1024         return -1;
1025     }
1026 
1027     // --- Add modules to process thread (for periodic scheduling)
1028 
1029     _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get());
1030 
1031     // --- ACM initialization
1032 
1033     if (audio_coding_->InitializeReceiver() == -1) {
1034         _engineStatisticsPtr->SetLastError(
1035             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1036             "Channel::Init() unable to initialize the ACM - 1");
1037         return -1;
1038     }
1039 
1040     // --- RTP/RTCP module initialization
1041 
1042     // Ensure that RTCP is enabled by default for the created channel.
1043     // Note that the module will keep generating RTCP until it is explicitly
1044     // disabled by the user.
1045     // After StopListen (when no sockets exist), RTCP packets will no longer
1046     // be transmitted since the Transport object will then be invalid.
1047     telephone_event_handler_->SetTelephoneEventForwardToDecoder(true);
1048     // RTCP is enabled by default.
1049     _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
1050     // --- Register all permanent callbacks
1051     const bool fail =
1052         (audio_coding_->RegisterTransportCallback(this) == -1) ||
1053         (audio_coding_->RegisterVADCallback(this) == -1);
1054 
1055     if (fail)
1056     {
1057         _engineStatisticsPtr->SetLastError(
1058             VE_CANNOT_INIT_CHANNEL, kTraceError,
1059             "Channel::Init() callbacks not registered");
1060         return -1;
1061     }
1062 
1063     // --- Register all supported codecs to the receiving side of the
1064     // RTP/RTCP module
1065 
1066     CodecInst codec;
1067     const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
1068 
1069     for (int idx = 0; idx < nSupportedCodecs; idx++)
1070     {
1071         // Open up the RTP/RTCP receiver for all supported codecs
1072         if ((audio_coding_->Codec(idx, &codec) == -1) ||
1073             (rtp_receiver_->RegisterReceivePayload(
1074                 codec.plname,
1075                 codec.pltype,
1076                 codec.plfreq,
1077                 codec.channels,
1078                 (codec.rate < 0) ? 0 : codec.rate) == -1))
1079         {
1080             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1081                          VoEId(_instanceId,_channelId),
1082                          "Channel::Init() unable to register %s "
1083                          "(%d/%d/%" PRIuS "/%d) to RTP/RTCP receiver",
1084                          codec.plname, codec.pltype, codec.plfreq,
1085                          codec.channels, codec.rate);
1086         }
1087         else
1088         {
1089             WEBRTC_TRACE(kTraceInfo, kTraceVoice,
1090                          VoEId(_instanceId,_channelId),
1091                          "Channel::Init() %s (%d/%d/%" PRIuS "/%d) has been "
1092                          "added to the RTP/RTCP receiver",
1093                          codec.plname, codec.pltype, codec.plfreq,
1094                          codec.channels, codec.rate);
1095         }
1096 
1097         // Ensure that PCMU is used as default codec on the sending side
1098         if (!STR_CASE_CMP(codec.plname, "PCMU") && (codec.channels == 1))
1099         {
1100             SetSendCodec(codec);
1101         }
1102 
1103         // Register default PT for outband 'telephone-event'
1104         if (!STR_CASE_CMP(codec.plname, "telephone-event"))
1105         {
1106             if ((_rtpRtcpModule->RegisterSendPayload(codec) == -1) ||
1107                 (audio_coding_->RegisterReceiveCodec(codec) == -1))
1108             {
1109                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1110                              VoEId(_instanceId,_channelId),
1111                              "Channel::Init() failed to register outband "
1112                              "'telephone-event' (%d/%d) correctly",
1113                              codec.pltype, codec.plfreq);
1114             }
1115         }
1116 
1117         if (!STR_CASE_CMP(codec.plname, "CN"))
1118         {
1119             if ((audio_coding_->RegisterSendCodec(codec) == -1) ||
1120                 (audio_coding_->RegisterReceiveCodec(codec) == -1) ||
1121                 (_rtpRtcpModule->RegisterSendPayload(codec) == -1))
1122             {
1123                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1124                              VoEId(_instanceId,_channelId),
1125                              "Channel::Init() failed to register CN (%d/%d) "
1126                              "correctly - 1",
1127                              codec.pltype, codec.plfreq);
1128             }
1129         }
1130 #ifdef WEBRTC_CODEC_RED
1131         // Register RED to the receiving side of the ACM.
1132         // We will not receive an OnInitializeDecoder() callback for RED.
1133         if (!STR_CASE_CMP(codec.plname, "RED"))
1134         {
1135             if (audio_coding_->RegisterReceiveCodec(codec) == -1)
1136             {
1137                 WEBRTC_TRACE(kTraceWarning, kTraceVoice,
1138                              VoEId(_instanceId,_channelId),
1139                              "Channel::Init() failed to register RED (%d/%d) "
1140                              "correctly",
1141                              codec.pltype, codec.plfreq);
1142             }
1143         }
1144 #endif
1145     }
1146 
1147     if (rx_audioproc_->noise_suppression()->set_level(kDefaultNsMode) != 0) {
1148       LOG(LS_ERROR) << "noise_suppression()->set_level(kDefaultNsMode) failed.";
1149       return -1;
1150     }
1151     if (rx_audioproc_->gain_control()->set_mode(kDefaultRxAgcMode) != 0) {
1152       LOG(LS_ERROR) << "gain_control()->set_mode(kDefaultRxAgcMode) failed.";
1153       return -1;
1154     }
1155 
1156     return 0;
1157 }
1158 
1159 int32_t
1160 Channel::SetEngineInformation(Statistics& engineStatistics,
1161                               OutputMixer& outputMixer,
1162                               voe::TransmitMixer& transmitMixer,
1163                               ProcessThread& moduleProcessThread,
1164                               AudioDeviceModule& audioDeviceModule,
1165                               VoiceEngineObserver* voiceEngineObserver,
1166                               CriticalSectionWrapper* callbackCritSect)
1167 {
1168     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1169                  "Channel::SetEngineInformation()");
1170     _engineStatisticsPtr = &engineStatistics;
1171     _outputMixerPtr = &outputMixer;
1172     _transmitMixerPtr = &transmitMixer;
1173     _moduleProcessThreadPtr = &moduleProcessThread;
1174     _audioDeviceModulePtr = &audioDeviceModule;
1175     _voiceEngineObserverPtr = voiceEngineObserver;
1176     _callbackCritSectPtr = callbackCritSect;
1177     return 0;
1178 }
1179 
1180 int32_t
1181 Channel::UpdateLocalTimeStamp()
1182 {
1183 
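    // Audio RTP timestamps count samples, so advance the local timestamp by
    // the number of samples in the current frame.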
1184     _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1185     return 0;
1186 }
1187 
1188 void Channel::SetSink(rtc::scoped_ptr<AudioSinkInterface> sink) {
1189   CriticalSectionScoped cs(&_callbackCritSect);
1190   audio_sink_ = std::move(sink);
1191 }
1192 
1193 int32_t
1194 Channel::StartPlayout()
1195 {
1196     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1197                  "Channel::StartPlayout()");
1198     if (channel_state_.Get().playing)
1199     {
1200         return 0;
1201     }
1202 
1203     if (!_externalMixing) {
1204         // Add participant as a candidate for mixing.
1205         if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0)
1206         {
1207             _engineStatisticsPtr->SetLastError(
1208                 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
1209                 "StartPlayout() failed to add participant to mixer");
1210             return -1;
1211         }
1212     }
1213 
1214     channel_state_.SetPlaying(true);
1215     if (RegisterFilePlayingToMixer() != 0)
1216         return -1;
1217 
1218     return 0;
1219 }
1220 
1221 int32_t
1222 Channel::StopPlayout()
1223 {
1224     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1225                  "Channel::StopPlayout()");
1226     if (!channel_state_.Get().playing)
1227     {
1228         return 0;
1229     }
1230 
1231     if (!_externalMixing) {
1232         // Remove participant as a candidate for mixing.
1233         if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0)
1234         {
1235             _engineStatisticsPtr->SetLastError(
1236                 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
1237                 "StopPlayout() failed to remove participant from mixer");
1238             return -1;
1239         }
1240     }
1241 
1242     channel_state_.SetPlaying(false);
1243     _outputAudioLevel.Clear();
1244 
1245     return 0;
1246 }
1247 
1248 int32_t
1249 Channel::StartSend()
1250 {
1251     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1252                  "Channel::StartSend()");
1253     // Resume the previous sequence number which was reset by StopSend().
1254     // This needs to be done before |sending| is set to true.
1255     if (send_sequence_number_)
1256       SetInitSequenceNumber(send_sequence_number_);
1257 
1258     if (channel_state_.Get().sending)
1259     {
1260       return 0;
1261     }
1262     channel_state_.SetSending(true);
1263 
1264     if (_rtpRtcpModule->SetSendingStatus(true) != 0)
1265     {
1266         _engineStatisticsPtr->SetLastError(
1267             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1268             "StartSend() RTP/RTCP failed to start sending");
1269         CriticalSectionScoped cs(&_callbackCritSect);
1270         channel_state_.SetSending(false);
1271         return -1;
1272     }
1273 
1274     return 0;
1275 }
1276 
1277 int32_t
1278 Channel::StopSend()
1279 {
1280     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1281                  "Channel::StopSend()");
1282     if (!channel_state_.Get().sending)
1283     {
1284       return 0;
1285     }
1286     channel_state_.SetSending(false);
1287 
1288     // Store the sequence number to be able to pick up the same sequence for
1289     // the next StartSend(). This is needed when restarting the device; otherwise
1290     // it might cause libSRTP to complain about packets being replayed.
1291     // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
1292     // CL is landed. See issue
1293     // https://code.google.com/p/webrtc/issues/detail?id=2111 .
1294     send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
1295 
1296     // Reset the sending SSRC and sequence number and trigger direct
1297     // transmission of an RTCP BYE.
1298     if (_rtpRtcpModule->SetSendingStatus(false) == -1)
1299     {
1300         _engineStatisticsPtr->SetLastError(
1301             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1302             "StopSend() RTP/RTCP failed to stop sending");
1303     }
1304 
1305     return 0;
1306 }
1307 
1308 int32_t
1309 Channel::StartReceiving()
1310 {
1311     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1312                  "Channel::StartReceiving()");
1313     if (channel_state_.Get().receiving)
1314     {
1315         return 0;
1316     }
1317     channel_state_.SetReceiving(true);
1318     _numberOfDiscardedPackets = 0;
1319     return 0;
1320 }
1321 
1322 int32_t
1323 Channel::StopReceiving()
1324 {
1325     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1326                  "Channel::StopReceiving()");
1327     if (!channel_state_.Get().receiving)
1328     {
1329         return 0;
1330     }
1331 
1332     channel_state_.SetReceiving(false);
1333     return 0;
1334 }
1335 
1336 int32_t
1337 Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
1338 {
1339     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1340                  "Channel::RegisterVoiceEngineObserver()");
1341     CriticalSectionScoped cs(&_callbackCritSect);
1342 
1343     if (_voiceEngineObserverPtr)
1344     {
1345         _engineStatisticsPtr->SetLastError(
1346             VE_INVALID_OPERATION, kTraceError,
1347             "RegisterVoiceEngineObserver() observer already enabled");
1348         return -1;
1349     }
1350     _voiceEngineObserverPtr = &observer;
1351     return 0;
1352 }
1353 
1354 int32_t
1355 Channel::DeRegisterVoiceEngineObserver()
1356 {
1357     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1358                  "Channel::DeRegisterVoiceEngineObserver()");
1359     CriticalSectionScoped cs(&_callbackCritSect);
1360 
1361     if (!_voiceEngineObserverPtr)
1362     {
1363         _engineStatisticsPtr->SetLastError(
1364             VE_INVALID_OPERATION, kTraceWarning,
1365             "DeRegisterVoiceEngineObserver() observer already disabled");
1366         return 0;
1367     }
1368     _voiceEngineObserverPtr = NULL;
1369     return 0;
1370 }
1371 
1372 int32_t
1373 Channel::GetSendCodec(CodecInst& codec)
1374 {
1375   auto send_codec = audio_coding_->SendCodec();
1376   if (send_codec) {
1377     codec = *send_codec;
1378     return 0;
1379   }
1380   return -1;
1381 }
1382 
1383 int32_t
1384 Channel::GetRecCodec(CodecInst& codec)
1385 {
1386     return (audio_coding_->ReceiveCodec(&codec));
1387 }
1388 
1389 int32_t
1390 Channel::SetSendCodec(const CodecInst& codec)
1391 {
1392     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1393                  "Channel::SetSendCodec()");
1394 
1395     if (audio_coding_->RegisterSendCodec(codec) != 0)
1396     {
1397         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1398                      "SetSendCodec() failed to register codec to ACM");
1399         return -1;
1400     }
1401 
1402     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1403     {
1404         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
1405         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1406         {
1407             WEBRTC_TRACE(
1408                     kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1409                     "SetSendCodec() failed to register codec to"
1410                     " RTP/RTCP module");
1411             return -1;
1412         }
1413     }
1414 
1415     if (_rtpRtcpModule->SetAudioPacketSize(codec.pacsize) != 0)
1416     {
1417         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
1418                      "SetSendCodec() failed to set audio packet size");
1419         return -1;
1420     }
1421 
1422     return 0;
1423 }
1424 
1425 void Channel::SetBitRate(int bitrate_bps) {
1426   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1427                "Channel::SetBitRate(bitrate_bps=%d)", bitrate_bps);
1428   audio_coding_->SetBitRate(bitrate_bps);
1429 }
1430 
1431 void Channel::OnIncomingFractionLoss(int fraction_lost) {
1432   network_predictor_->UpdatePacketLossRate(fraction_lost);
1433   uint8_t average_fraction_loss = network_predictor_->GetLossRate();
1434 
1435   // Normalizes rate to 0 - 100.
1436   if (audio_coding_->SetPacketLossRate(
1437       100 * average_fraction_loss / 255) != 0) {
1438     assert(false);  // This should not happen.
1439   }
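  // Worked example of the normalization above (illustration only): an
  // average_fraction_loss of 128 out of 255 maps to 100 * 128 / 255 = 50,
  // i.e. roughly 50% packet loss reported to the ACM (integer division).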
1440 }
1441 
1442 int32_t
1443 Channel::SetVADStatus(bool enableVAD, ACMVADMode mode, bool disableDTX)
1444 {
1445     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1446                  "Channel::SetVADStatus(mode=%d)", mode);
1447     assert(!(disableDTX && enableVAD));  // disableDTX mode is deprecated.
1448     // When disabling VAD, DTX must be disabled as well.
1449     disableDTX = ((enableVAD == false) ? true : disableDTX);
1450     if (audio_coding_->SetVAD(!disableDTX, enableVAD, mode) != 0)
1451     {
1452         _engineStatisticsPtr->SetLastError(
1453             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1454             "SetVADStatus() failed to set VAD");
1455         return -1;
1456     }
1457     return 0;
1458 }
1459 
1460 int32_t
1461 Channel::GetVADStatus(bool& enabledVAD, ACMVADMode& mode, bool& disabledDTX)
1462 {
1463     if (audio_coding_->VAD(&disabledDTX, &enabledVAD, &mode) != 0)
1464     {
1465         _engineStatisticsPtr->SetLastError(
1466             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1467             "GetVADStatus() failed to get VAD status");
1468         return -1;
1469     }
1470     disabledDTX = !disabledDTX;
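    // The negation converts the ACM's "DTX enabled" output into this API's
    // "DTX disabled" flag; without it the reported value would be inverted.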
1471     return 0;
1472 }
1473 
1474 int32_t
1475 Channel::SetRecPayloadType(const CodecInst& codec)
1476 {
1477     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1478                  "Channel::SetRecPayloadType()");
1479 
1480     if (channel_state_.Get().playing)
1481     {
1482         _engineStatisticsPtr->SetLastError(
1483             VE_ALREADY_PLAYING, kTraceError,
1484             "SetRecPayloadType() unable to set PT while playing");
1485         return -1;
1486     }
1487     if (channel_state_.Get().receiving)
1488     {
1489         _engineStatisticsPtr->SetLastError(
1490             VE_ALREADY_LISTENING, kTraceError,
1491             "SetRecPayloadType() unable to set PT while listening");
1492         return -1;
1493     }
1494 
1495     if (codec.pltype == -1)
1496     {
1497         // De-register the selected codec (RTP/RTCP module and ACM)
1498 
1499         int8_t pltype(-1);
1500         CodecInst rxCodec = codec;
1501 
1502         // Get payload type for the given codec
1503         rtp_payload_registry_->ReceivePayloadType(
1504             rxCodec.plname,
1505             rxCodec.plfreq,
1506             rxCodec.channels,
1507             (rxCodec.rate < 0) ? 0 : rxCodec.rate,
1508             &pltype);
1509         rxCodec.pltype = pltype;
1510 
1511         if (rtp_receiver_->DeRegisterReceivePayload(pltype) != 0)
1512         {
1513             _engineStatisticsPtr->SetLastError(
1514                     VE_RTP_RTCP_MODULE_ERROR,
1515                     kTraceError,
1516                     "SetRecPayloadType() RTP/RTCP-module deregistration "
1517                     "failed");
1518             return -1;
1519         }
1520         if (audio_coding_->UnregisterReceiveCodec(rxCodec.pltype) != 0)
1521         {
1522             _engineStatisticsPtr->SetLastError(
1523                 VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1524                 "SetRecPayloadType() ACM deregistration failed - 1");
1525             return -1;
1526         }
1527         return 0;
1528     }
1529 
1530     if (rtp_receiver_->RegisterReceivePayload(
1531         codec.plname,
1532         codec.pltype,
1533         codec.plfreq,
1534         codec.channels,
1535         (codec.rate < 0) ? 0 : codec.rate) != 0)
1536     {
1537         // First attempt to register failed => de-register and try again
1538         rtp_receiver_->DeRegisterReceivePayload(codec.pltype);
1539         if (rtp_receiver_->RegisterReceivePayload(
1540             codec.plname,
1541             codec.pltype,
1542             codec.plfreq,
1543             codec.channels,
1544             (codec.rate < 0) ? 0 : codec.rate) != 0)
1545         {
1546             _engineStatisticsPtr->SetLastError(
1547                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1548                 "SetRecPayloadType() RTP/RTCP-module registration failed");
1549             return -1;
1550         }
1551     }
1552     if (audio_coding_->RegisterReceiveCodec(codec) != 0)
1553     {
1554         audio_coding_->UnregisterReceiveCodec(codec.pltype);
1555         if (audio_coding_->RegisterReceiveCodec(codec) != 0)
1556         {
1557             _engineStatisticsPtr->SetLastError(
1558                 VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1559                 "SetRecPayloadType() ACM registration failed - 1");
1560             return -1;
1561         }
1562     }
1563     return 0;
1564 }
1565 
1566 int32_t
1567 Channel::GetRecPayloadType(CodecInst& codec)
1568 {
1569     int8_t payloadType(-1);
1570     if (rtp_payload_registry_->ReceivePayloadType(
1571         codec.plname,
1572         codec.plfreq,
1573         codec.channels,
1574         (codec.rate < 0) ? 0 : codec.rate,
1575         &payloadType) != 0)
1576     {
1577         _engineStatisticsPtr->SetLastError(
1578             VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1579             "GetRecPayloadType() failed to retrieve RX payload type");
1580         return -1;
1581     }
1582     codec.pltype = payloadType;
1583     return 0;
1584 }
1585 
1586 int32_t
1587 Channel::SetSendCNPayloadType(int type, PayloadFrequencies frequency)
1588 {
1589     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1590                  "Channel::SetSendCNPayloadType()");
1591 
1592     CodecInst codec;
1593     int32_t samplingFreqHz(-1);
1594     const size_t kMono = 1;
1595     if (frequency == kFreq32000Hz)
1596         samplingFreqHz = 32000;
1597     else if (frequency == kFreq16000Hz)
1598         samplingFreqHz = 16000;
1599 
1600     if (audio_coding_->Codec("CN", &codec, samplingFreqHz, kMono) == -1)
1601     {
1602         _engineStatisticsPtr->SetLastError(
1603             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1604             "SetSendCNPayloadType() failed to retrieve default CN codec "
1605             "settings");
1606         return -1;
1607     }
1608 
1609     // Modify the payload type (must be set to dynamic range)
1610     codec.pltype = type;
1611 
1612     if (audio_coding_->RegisterSendCodec(codec) != 0)
1613     {
1614         _engineStatisticsPtr->SetLastError(
1615             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1616             "SetSendCNPayloadType() failed to register CN to ACM");
1617         return -1;
1618     }
1619 
1620     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1621     {
1622         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
1623         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
1624         {
1625             _engineStatisticsPtr->SetLastError(
1626                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
1627                 "SetSendCNPayloadType() failed to register CN to RTP/RTCP "
1628                 "module");
1629             return -1;
1630         }
1631     }
1632     return 0;
1633 }
1634 
1635 int Channel::SetOpusMaxPlaybackRate(int frequency_hz) {
1636   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1637                "Channel::SetOpusMaxPlaybackRate()");
1638 
1639   if (audio_coding_->SetOpusMaxPlaybackRate(frequency_hz) != 0) {
1640     _engineStatisticsPtr->SetLastError(
1641         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
1642         "SetOpusMaxPlaybackRate() failed to set maximum playback rate");
1643     return -1;
1644   }
1645   return 0;
1646 }
1647 
1648 int Channel::SetOpusDtx(bool enable_dtx) {
1649   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1650                "Channel::SetOpusDtx(%d)", enable_dtx);
1651   int ret = enable_dtx ? audio_coding_->EnableOpusDtx()
1652                        : audio_coding_->DisableOpusDtx();
1653   if (ret != 0) {
1654     _engineStatisticsPtr->SetLastError(
1655         VE_AUDIO_CODING_MODULE_ERROR, kTraceError, "SetOpusDtx() failed");
1656     return -1;
1657   }
1658   return 0;
1659 }
1660 
1661 int32_t Channel::RegisterExternalTransport(Transport& transport)
1662 {
1663     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1664                "Channel::RegisterExternalTransport()");
1665 
1666     CriticalSectionScoped cs(&_callbackCritSect);
1667 
1668     if (_externalTransport)
1669     {
1670         _engineStatisticsPtr->SetLastError(VE_INVALID_OPERATION,
1671                                            kTraceError,
1672               "RegisterExternalTransport() external transport already enabled");
1673        return -1;
1674     }
1675     _externalTransport = true;
1676     _transportPtr = &transport;
1677     return 0;
1678 }
1679 
1680 int32_t
1681 Channel::DeRegisterExternalTransport()
1682 {
1683     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1684                  "Channel::DeRegisterExternalTransport()");
1685 
1686     CriticalSectionScoped cs(&_callbackCritSect);
1687 
1688     if (!_transportPtr)
1689     {
1690         _engineStatisticsPtr->SetLastError(
1691             VE_INVALID_OPERATION, kTraceWarning,
1692             "DeRegisterExternalTransport() external transport already "
1693             "disabled");
1694         return 0;
1695     }
1696     _externalTransport = false;
1697     _transportPtr = NULL;
1698     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1699                  "DeRegisterExternalTransport() external transport is now disabled");
1700     return 0;
1701 }
1702 
1703 int32_t Channel::ReceivedRTPPacket(const int8_t* data, size_t length,
1704                                    const PacketTime& packet_time) {
1705   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
1706                "Channel::ReceivedRTPPacket()");
1707 
1708   // Store playout timestamp for the received RTP packet
1709   UpdatePlayoutTimestamp(false);
1710 
1711   const uint8_t* received_packet = reinterpret_cast<const uint8_t*>(data);
1712   RTPHeader header;
1713   if (!rtp_header_parser_->Parse(received_packet, length, &header)) {
1714     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1715                  "Incoming packet: invalid RTP header");
1716     return -1;
1717   }
1718   header.payload_type_frequency =
1719       rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
1720   if (header.payload_type_frequency < 0)
1721     return -1;
1722   bool in_order = IsPacketInOrder(header);
1723   rtp_receive_statistics_->IncomingPacket(header, length,
1724       IsPacketRetransmitted(header, in_order));
1725   rtp_payload_registry_->SetIncomingPayloadType(header);
1726 
1727   return ReceivePacket(received_packet, length, header, in_order) ? 0 : -1;
1728 }
1729 
1730 bool Channel::ReceivePacket(const uint8_t* packet,
1731                             size_t packet_length,
1732                             const RTPHeader& header,
1733                             bool in_order) {
1734   if (rtp_payload_registry_->IsRtx(header)) {
1735     return HandleRtxPacket(packet, packet_length, header);
1736   }
1737   const uint8_t* payload = packet + header.headerLength;
1738   assert(packet_length >= header.headerLength);
1739   size_t payload_length = packet_length - header.headerLength;
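  // Illustration (assumed packet layout, not taken from this code): with the
  // minimal 12-byte RTP header (no CSRCs, no extension), headerLength is 12,
  // so a 172-byte packet yields a 160-byte payload passed to the receiver.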
1740   PayloadUnion payload_specific;
1741   if (!rtp_payload_registry_->GetPayloadSpecifics(header.payloadType,
1742                                                   &payload_specific)) {
1743     return false;
1744   }
1745   return rtp_receiver_->IncomingRtpPacket(header, payload, payload_length,
1746                                           payload_specific, in_order);
1747 }
1748 
1749 bool Channel::HandleRtxPacket(const uint8_t* packet,
1750                               size_t packet_length,
1751                               const RTPHeader& header) {
1752   if (!rtp_payload_registry_->IsRtx(header))
1753     return false;
1754 
1755   // Remove the RTX header and parse the original RTP header.
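  // Background (assumed to follow RFC 4588 RTX framing): the retransmitted
  // packet carries the original sequence number in the first two payload
  // bytes; RestoreOriginalPacket() below strips that field and restores the
  // original header so the packet can be handled as if it had arrived on the
  // media stream directly.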
1756   if (packet_length < header.headerLength)
1757     return false;
1758   if (packet_length > kVoiceEngineMaxIpPacketSizeBytes)
1759     return false;
1760   if (restored_packet_in_use_) {
1761     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1762                  "Multiple RTX headers detected, dropping packet");
1763     return false;
1764   }
1765   if (!rtp_payload_registry_->RestoreOriginalPacket(
1766           restored_packet_, packet, &packet_length, rtp_receiver_->SSRC(),
1767           header)) {
1768     WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVoice, _channelId,
1769                  "Incoming RTX packet: invalid RTP header");
1770     return false;
1771   }
1772   restored_packet_in_use_ = true;
1773   bool ret = OnRecoveredPacket(restored_packet_, packet_length);
1774   restored_packet_in_use_ = false;
1775   return ret;
1776 }
1777 
1778 bool Channel::IsPacketInOrder(const RTPHeader& header) const {
1779   StreamStatistician* statistician =
1780       rtp_receive_statistics_->GetStatistician(header.ssrc);
1781   if (!statistician)
1782     return false;
1783   return statistician->IsPacketInOrder(header.sequenceNumber);
1784 }
1785 
1786 bool Channel::IsPacketRetransmitted(const RTPHeader& header,
1787                                     bool in_order) const {
1788   // Retransmissions are handled separately if RTX is enabled.
1789   if (rtp_payload_registry_->RtxEnabled())
1790     return false;
1791   StreamStatistician* statistician =
1792       rtp_receive_statistics_->GetStatistician(header.ssrc);
1793   if (!statistician)
1794     return false;
1795   // Check if this is a retransmission.
1796   int64_t min_rtt = 0;
1797   _rtpRtcpModule->RTT(rtp_receiver_->SSRC(), NULL, NULL, &min_rtt, NULL);
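  // Sketch of the intent (based on the call below): min_rtt, the minimum
  // observed round-trip time, is used as the age threshold; an out-of-order
  // packet older than that is treated as a retransmission rather than plain
  // network reordering.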
1798   return !in_order &&
1799       statistician->IsRetransmitOfOldPacket(header, min_rtt);
1800 }
1801 
1802 int32_t Channel::ReceivedRTCPPacket(const int8_t* data, size_t length) {
1803   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
1804                "Channel::ReceivedRTCPPacket()");
1805   // Store playout timestamp for the received RTCP packet
1806   UpdatePlayoutTimestamp(true);
1807 
1808   // Deliver RTCP packet to RTP/RTCP module for parsing
1809   if (_rtpRtcpModule->IncomingRtcpPacket((const uint8_t*)data, length) == -1) {
1810     _engineStatisticsPtr->SetLastError(
1811         VE_SOCKET_TRANSPORT_MODULE_ERROR, kTraceWarning,
1812         "Channel::ReceivedRTCPPacket() RTCP packet is invalid");
1813   }
1814 
1815   int64_t rtt = GetRTT(true);
1816   if (rtt == 0) {
1817     // Waiting for valid RTT.
1818     return 0;
1819   }
1820   uint32_t ntp_secs = 0;
1821   uint32_t ntp_frac = 0;
1822   uint32_t rtp_timestamp = 0;
1823   if (0 != _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL,
1824                                      &rtp_timestamp)) {
1825     // Waiting for RTCP.
1826     return 0;
1827   }
1828 
1829   {
1830     CriticalSectionScoped lock(ts_stats_lock_.get());
1831     ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
1832   }
1833   return 0;
1834 }
1835 
1836 int Channel::StartPlayingFileLocally(const char* fileName,
1837                                      bool loop,
1838                                      FileFormats format,
1839                                      int startPosition,
1840                                      float volumeScaling,
1841                                      int stopPosition,
1842                                      const CodecInst* codecInst)
1843 {
1844     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1845                  "Channel::StartPlayingFileLocally(fileNameUTF8[]=%s, loop=%d,"
1846                  " format=%d, volumeScaling=%5.3f, startPosition=%d, "
1847                  "stopPosition=%d)", fileName, loop, format, volumeScaling,
1848                  startPosition, stopPosition);
1849 
1850     if (channel_state_.Get().output_file_playing)
1851     {
1852         _engineStatisticsPtr->SetLastError(
1853             VE_ALREADY_PLAYING, kTraceError,
1854             "StartPlayingFileLocally() is already playing");
1855         return -1;
1856     }
1857 
1858     {
1859         CriticalSectionScoped cs(&_fileCritSect);
1860 
1861         if (_outputFilePlayerPtr)
1862         {
1863             _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
1864             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
1865             _outputFilePlayerPtr = NULL;
1866         }
1867 
1868         _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
1869             _outputFilePlayerId, (const FileFormats)format);
1870 
1871         if (_outputFilePlayerPtr == NULL)
1872         {
1873             _engineStatisticsPtr->SetLastError(
1874                 VE_INVALID_ARGUMENT, kTraceError,
1875                 "StartPlayingFileLocally() filePlayer format is not correct");
1876             return -1;
1877         }
1878 
1879         const uint32_t notificationTime(0);
1880 
1881         if (_outputFilePlayerPtr->StartPlayingFile(
1882                 fileName,
1883                 loop,
1884                 startPosition,
1885                 volumeScaling,
1886                 notificationTime,
1887                 stopPosition,
1888                 (const CodecInst*)codecInst) != 0)
1889         {
1890             _engineStatisticsPtr->SetLastError(
1891                 VE_BAD_FILE, kTraceError,
1892                 "StartPlayingFile() failed to start file playout");
1893             _outputFilePlayerPtr->StopPlayingFile();
1894             FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
1895             _outputFilePlayerPtr = NULL;
1896             return -1;
1897         }
1898         _outputFilePlayerPtr->RegisterModuleFileCallback(this);
1899         channel_state_.SetOutputFilePlaying(true);
1900     }
1901 
1902     if (RegisterFilePlayingToMixer() != 0)
1903         return -1;
1904 
1905     return 0;
1906 }
1907 
1908 int Channel::StartPlayingFileLocally(InStream* stream,
1909                                      FileFormats format,
1910                                      int startPosition,
1911                                      float volumeScaling,
1912                                      int stopPosition,
1913                                      const CodecInst* codecInst)
1914 {
1915     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1916                  "Channel::StartPlayingFileLocally(format=%d,"
1917                  " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
1918                  format, volumeScaling, startPosition, stopPosition);
1919 
1920     if(stream == NULL)
1921     {
1922         _engineStatisticsPtr->SetLastError(
1923             VE_BAD_FILE, kTraceError,
1924             "StartPlayingFileLocally() NULL as input stream");
1925         return -1;
1926     }
1927 
1928 
1929     if (channel_state_.Get().output_file_playing)
1930     {
1931         _engineStatisticsPtr->SetLastError(
1932             VE_ALREADY_PLAYING, kTraceError,
1933             "StartPlayingFileLocally() is already playing");
1934         return -1;
1935     }
1936 
1937     {
1938       CriticalSectionScoped cs(&_fileCritSect);
1939 
1940       // Destroy the old instance
1941       if (_outputFilePlayerPtr)
1942       {
1943           _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
1944           FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
1945           _outputFilePlayerPtr = NULL;
1946       }
1947 
1948       // Create the instance
1949       _outputFilePlayerPtr = FilePlayer::CreateFilePlayer(
1950           _outputFilePlayerId,
1951           (const FileFormats)format);
1952 
1953       if (_outputFilePlayerPtr == NULL)
1954       {
1955           _engineStatisticsPtr->SetLastError(
1956               VE_INVALID_ARGUMENT, kTraceError,
1957               "StartPlayingFileLocally() filePlayer format is not correct");
1958           return -1;
1959       }
1960 
1961       const uint32_t notificationTime(0);
1962 
1963       if (_outputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
1964                                                  volumeScaling,
1965                                                  notificationTime,
1966                                                  stopPosition, codecInst) != 0)
1967       {
1968           _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
1969                                              "StartPlayingFile() failed to "
1970                                              "start file playout");
1971           _outputFilePlayerPtr->StopPlayingFile();
1972           FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
1973           _outputFilePlayerPtr = NULL;
1974           return -1;
1975       }
1976       _outputFilePlayerPtr->RegisterModuleFileCallback(this);
1977       channel_state_.SetOutputFilePlaying(true);
1978     }
1979 
1980     if (RegisterFilePlayingToMixer() != 0)
1981         return -1;
1982 
1983     return 0;
1984 }
1985 
1986 int Channel::StopPlayingFileLocally()
1987 {
1988     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
1989                  "Channel::StopPlayingFileLocally()");
1990 
1991     if (!channel_state_.Get().output_file_playing)
1992     {
1993         return 0;
1994     }
1995 
1996     {
1997         CriticalSectionScoped cs(&_fileCritSect);
1998 
1999         if (_outputFilePlayerPtr->StopPlayingFile() != 0)
2000         {
2001             _engineStatisticsPtr->SetLastError(
2002                 VE_STOP_RECORDING_FAILED, kTraceError,
2003                 "StopPlayingFile() could not stop playing");
2004             return -1;
2005         }
2006         _outputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2007         FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2008         _outputFilePlayerPtr = NULL;
2009         channel_state_.SetOutputFilePlaying(false);
2010     }
2011     // _fileCritSect cannot be taken while calling
2012     // SetAnonymousMixabilityStatus(). Refer to comments in
2013     // StartPlayingFileLocally(const char* ...) for more details.
2014     if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, false) != 0)
2015     {
2016         _engineStatisticsPtr->SetLastError(
2017             VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
2018             "StopPlayingFile() failed to stop participant from playing as "
2019             "file in the mixer");
2020         return -1;
2021     }
2022 
2023     return 0;
2024 }
2025 
2026 int Channel::IsPlayingFileLocally() const
2027 {
2028     return channel_state_.Get().output_file_playing;
2029 }
2030 
2031 int Channel::RegisterFilePlayingToMixer()
2032 {
2033     // Return success without registering the file playing with the mixer if:
2034     // 1. playing file before playout is started on that channel.
2035     // 2. starting playout without file playing on that channel.
2036     if (!channel_state_.Get().playing ||
2037         !channel_state_.Get().output_file_playing)
2038     {
2039         return 0;
2040     }
2041 
2042     // |_fileCritSect| cannot be taken while calling
2043     // SetAnonymousMixabilityStatus() since as soon as the participant is added
2044     // frames can be pulled by the mixer. Since the frames are generated from
2045     // the file, _fileCritSect will be taken. This would result in a deadlock.
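    // Sketch of the interleaving this avoids (illustrative only):
    //   this thread: lock(_fileCritSect) -> SetAnonymousMixabilityStatus()
    //                -> blocks on the mixer's internal lock
    //   mixer:       holds its lock -> pulls a frame from the file player
    //                -> blocks on _fileCritSect  => deadlock.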
2046     if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0)
2047     {
2048         channel_state_.SetOutputFilePlaying(false);
2049         CriticalSectionScoped cs(&_fileCritSect);
2050         _engineStatisticsPtr->SetLastError(
2051             VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
2052             "StartPlayingFile() failed to add participant as file to mixer");
2053         _outputFilePlayerPtr->StopPlayingFile();
2054         FilePlayer::DestroyFilePlayer(_outputFilePlayerPtr);
2055         _outputFilePlayerPtr = NULL;
2056         return -1;
2057     }
2058 
2059     return 0;
2060 }
2061 
2062 int Channel::StartPlayingFileAsMicrophone(const char* fileName,
2063                                           bool loop,
2064                                           FileFormats format,
2065                                           int startPosition,
2066                                           float volumeScaling,
2067                                           int stopPosition,
2068                                           const CodecInst* codecInst)
2069 {
2070     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2071                  "Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
2072                  "loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
2073                  "stopPosition=%d)", fileName, loop, format, volumeScaling,
2074                  startPosition, stopPosition);
2075 
2076     CriticalSectionScoped cs(&_fileCritSect);
2077 
2078     if (channel_state_.Get().input_file_playing)
2079     {
2080         _engineStatisticsPtr->SetLastError(
2081             VE_ALREADY_PLAYING, kTraceWarning,
2082             "StartPlayingFileAsMicrophone() filePlayer is playing");
2083         return 0;
2084     }
2085 
2086     // Destroy the old instance
2087     if (_inputFilePlayerPtr)
2088     {
2089         _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2090         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2091         _inputFilePlayerPtr = NULL;
2092     }
2093 
2094     // Create the instance
2095     _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
2096         _inputFilePlayerId, (const FileFormats)format);
2097 
2098     if (_inputFilePlayerPtr == NULL)
2099     {
2100         _engineStatisticsPtr->SetLastError(
2101             VE_INVALID_ARGUMENT, kTraceError,
2102             "StartPlayingFileAsMicrophone() filePlayer format is not correct");
2103         return -1;
2104     }
2105 
2106     const uint32_t notificationTime(0);
2107 
2108     if (_inputFilePlayerPtr->StartPlayingFile(
2109         fileName,
2110         loop,
2111         startPosition,
2112         volumeScaling,
2113         notificationTime,
2114         stopPosition,
2115         (const CodecInst*)codecInst) != 0)
2116     {
2117         _engineStatisticsPtr->SetLastError(
2118             VE_BAD_FILE, kTraceError,
2119             "StartPlayingFile() failed to start file playout");
2120         _inputFilePlayerPtr->StopPlayingFile();
2121         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2122         _inputFilePlayerPtr = NULL;
2123         return -1;
2124     }
2125     _inputFilePlayerPtr->RegisterModuleFileCallback(this);
2126     channel_state_.SetInputFilePlaying(true);
2127 
2128     return 0;
2129 }
2130 
2131 int Channel::StartPlayingFileAsMicrophone(InStream* stream,
2132                                           FileFormats format,
2133                                           int startPosition,
2134                                           float volumeScaling,
2135                                           int stopPosition,
2136                                           const CodecInst* codecInst)
2137 {
2138     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2139                  "Channel::StartPlayingFileAsMicrophone(format=%d, "
2140                  "volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
2141                  format, volumeScaling, startPosition, stopPosition);
2142 
2143     if(stream == NULL)
2144     {
2145         _engineStatisticsPtr->SetLastError(
2146             VE_BAD_FILE, kTraceError,
2147             "StartPlayingFileAsMicrophone() NULL as input stream");
2148         return -1;
2149     }
2150 
2151     CriticalSectionScoped cs(&_fileCritSect);
2152 
2153     if (channel_state_.Get().input_file_playing)
2154     {
2155         _engineStatisticsPtr->SetLastError(
2156             VE_ALREADY_PLAYING, kTraceWarning,
2157             "StartPlayingFileAsMicrophone() is playing");
2158         return 0;
2159     }
2160 
2161     // Destroy the old instance
2162     if (_inputFilePlayerPtr)
2163     {
2164         _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2165         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2166         _inputFilePlayerPtr = NULL;
2167     }
2168 
2169     // Create the instance
2170     _inputFilePlayerPtr = FilePlayer::CreateFilePlayer(
2171         _inputFilePlayerId, (const FileFormats)format);
2172 
2173     if (_inputFilePlayerPtr == NULL)
2174     {
2175         _engineStatisticsPtr->SetLastError(
2176             VE_INVALID_ARGUMENT, kTraceError,
2177             "StartPlayingInputFile() filePlayer format is not correct");
2178         return -1;
2179     }
2180 
2181     const uint32_t notificationTime(0);
2182 
2183     if (_inputFilePlayerPtr->StartPlayingFile(*stream, startPosition,
2184                                               volumeScaling, notificationTime,
2185                                               stopPosition, codecInst) != 0)
2186     {
2187         _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
2188                                            "StartPlayingFile() failed to start "
2189                                            "file playout");
2190         _inputFilePlayerPtr->StopPlayingFile();
2191         FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2192         _inputFilePlayerPtr = NULL;
2193         return -1;
2194     }
2195 
2196     _inputFilePlayerPtr->RegisterModuleFileCallback(this);
2197     channel_state_.SetInputFilePlaying(true);
2198 
2199     return 0;
2200 }
2201 
2202 int Channel::StopPlayingFileAsMicrophone()
2203 {
2204     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2205                  "Channel::StopPlayingFileAsMicrophone()");
2206 
2207     CriticalSectionScoped cs(&_fileCritSect);
2208 
2209     if (!channel_state_.Get().input_file_playing)
2210     {
2211         return 0;
2212     }
2213 
2214     if (_inputFilePlayerPtr->StopPlayingFile() != 0)
2215     {
2216         _engineStatisticsPtr->SetLastError(
2217             VE_STOP_RECORDING_FAILED, kTraceError,
2218             "StopPlayingFile() could not stop playing");
2219         return -1;
2220     }
2221     _inputFilePlayerPtr->RegisterModuleFileCallback(NULL);
2222     FilePlayer::DestroyFilePlayer(_inputFilePlayerPtr);
2223     _inputFilePlayerPtr = NULL;
2224     channel_state_.SetInputFilePlaying(false);
2225 
2226     return 0;
2227 }
2228 
2229 int Channel::IsPlayingFileAsMicrophone() const
2230 {
2231     return channel_state_.Get().input_file_playing;
2232 }
2233 
2234 int Channel::StartRecordingPlayout(const char* fileName,
2235                                    const CodecInst* codecInst)
2236 {
2237     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2238                  "Channel::StartRecordingPlayout(fileName=%s)", fileName);
2239 
2240     if (_outputFileRecording)
2241     {
2242         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
2243                      "StartRecordingPlayout() is already recording");
2244         return 0;
2245     }
2246 
2247     FileFormats format;
2248     const uint32_t notificationTime(0); // Not supported in VoE
2249     CodecInst dummyCodec={100,"L16",16000,320,1,320000};
2250 
2251     if ((codecInst != NULL) &&
2252       ((codecInst->channels < 1) || (codecInst->channels > 2)))
2253     {
2254         _engineStatisticsPtr->SetLastError(
2255             VE_BAD_ARGUMENT, kTraceError,
2256             "StartRecordingPlayout() invalid compression");
2257         return(-1);
2258     }
2259     if(codecInst == NULL)
2260     {
2261         format = kFileFormatPcm16kHzFile;
2262         codecInst=&dummyCodec;
2263     }
2264     else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
2265         (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
2266         (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
2267     {
2268         format = kFileFormatWavFile;
2269     }
2270     else
2271     {
2272         format = kFileFormatCompressedFile;
2273     }
2274 
2275     CriticalSectionScoped cs(&_fileCritSect);
2276 
2277     // Destroy the old instance
2278     if (_outputFileRecorderPtr)
2279     {
2280         _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2281         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2282         _outputFileRecorderPtr = NULL;
2283     }
2284 
2285     _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
2286         _outputFileRecorderId, (const FileFormats)format);
2287     if (_outputFileRecorderPtr == NULL)
2288     {
2289         _engineStatisticsPtr->SetLastError(
2290             VE_INVALID_ARGUMENT, kTraceError,
2291             "StartRecordingPlayout() fileRecorder format is not correct");
2292         return -1;
2293     }
2294 
2295     if (_outputFileRecorderPtr->StartRecordingAudioFile(
2296         fileName, (const CodecInst&)*codecInst, notificationTime) != 0)
2297     {
2298         _engineStatisticsPtr->SetLastError(
2299             VE_BAD_FILE, kTraceError,
2300             "StartRecordingAudioFile() failed to start file recording");
2301         _outputFileRecorderPtr->StopRecording();
2302         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2303         _outputFileRecorderPtr = NULL;
2304         return -1;
2305     }
2306     _outputFileRecorderPtr->RegisterModuleFileCallback(this);
2307     _outputFileRecording = true;
2308 
2309     return 0;
2310 }
2311 
2312 int Channel::StartRecordingPlayout(OutStream* stream,
2313                                    const CodecInst* codecInst)
2314 {
2315     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2316                  "Channel::StartRecordingPlayout()");
2317 
2318     if (_outputFileRecording)
2319     {
2320         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,-1),
2321                      "StartRecordingPlayout() is already recording");
2322         return 0;
2323     }
2324 
2325     FileFormats format;
2326     const uint32_t notificationTime(0); // Not supported in VoE
2327     CodecInst dummyCodec={100,"L16",16000,320,1,320000};
2328 
2329     if (codecInst != NULL && codecInst->channels != 1)
2330     {
2331         _engineStatisticsPtr->SetLastError(
2332             VE_BAD_ARGUMENT, kTraceError,
2333             "StartRecordingPlayout() invalid compression");
2334         return(-1);
2335     }
2336     if(codecInst == NULL)
2337     {
2338         format = kFileFormatPcm16kHzFile;
2339         codecInst=&dummyCodec;
2340     }
2341     else if((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
2342         (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
2343         (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
2344     {
2345         format = kFileFormatWavFile;
2346     }
2347     else
2348     {
2349         format = kFileFormatCompressedFile;
2350     }
2351 
2352     CriticalSectionScoped cs(&_fileCritSect);
2353 
2354     // Destroy the old instance
2355     if (_outputFileRecorderPtr)
2356     {
2357         _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2358         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2359         _outputFileRecorderPtr = NULL;
2360     }
2361 
2362     _outputFileRecorderPtr = FileRecorder::CreateFileRecorder(
2363         _outputFileRecorderId, (const FileFormats)format);
2364     if (_outputFileRecorderPtr == NULL)
2365     {
2366         _engineStatisticsPtr->SetLastError(
2367             VE_INVALID_ARGUMENT, kTraceError,
2368             "StartRecordingPlayout() fileRecorder format is not correct");
2369         return -1;
2370     }
2371 
2372     if (_outputFileRecorderPtr->StartRecordingAudioFile(*stream, *codecInst,
2373                                                         notificationTime) != 0)
2374     {
2375         _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError,
2376                                            "StartRecordingPlayout() failed to "
2377                                            "start file recording");
2378         _outputFileRecorderPtr->StopRecording();
2379         FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2380         _outputFileRecorderPtr = NULL;
2381         return -1;
2382     }
2383 
2384     _outputFileRecorderPtr->RegisterModuleFileCallback(this);
2385     _outputFileRecording = true;
2386 
2387     return 0;
2388 }
2389 
2390 int Channel::StopRecordingPlayout()
2391 {
2392     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
2393                  "Channel::StopRecordingPlayout()");
2394 
2395     if (!_outputFileRecording)
2396     {
2397         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,-1),
2398                      "StopRecordingPlayout() is not recording");
2399         return -1;
2400     }
2401 
2402 
2403     CriticalSectionScoped cs(&_fileCritSect);
2404 
2405     if (_outputFileRecorderPtr->StopRecording() != 0)
2406     {
2407         _engineStatisticsPtr->SetLastError(
2408             VE_STOP_RECORDING_FAILED, kTraceError,
2409             "StopRecording() could not stop recording");
2410         return(-1);
2411     }
2412     _outputFileRecorderPtr->RegisterModuleFileCallback(NULL);
2413     FileRecorder::DestroyFileRecorder(_outputFileRecorderPtr);
2414     _outputFileRecorderPtr = NULL;
2415     _outputFileRecording = false;
2416 
2417     return 0;
2418 }
2419 
2420 void
2421 Channel::SetMixWithMicStatus(bool mix)
2422 {
2423     CriticalSectionScoped cs(&_fileCritSect);
2424     _mixFileWithMicrophone=mix;
2425 }
2426 
2427 int
2428 Channel::GetSpeechOutputLevel(uint32_t& level) const
2429 {
2430     int8_t currentLevel = _outputAudioLevel.Level();
2431     level = static_cast<int32_t> (currentLevel);
2432     return 0;
2433 }
2434 
2435 int
2436 Channel::GetSpeechOutputLevelFullRange(uint32_t& level) const
2437 {
2438     int16_t currentLevel = _outputAudioLevel.LevelFullRange();
2439     level = static_cast<int32_t> (currentLevel);
2440     return 0;
2441 }
2442 
2443 int
2444 Channel::SetMute(bool enable)
2445 {
2446     CriticalSectionScoped cs(&volume_settings_critsect_);
2447     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2448                "Channel::SetMute(enable=%d)", enable);
2449     _mute = enable;
2450     return 0;
2451 }
2452 
2453 bool
2454 Channel::Mute() const
2455 {
2456     CriticalSectionScoped cs(&volume_settings_critsect_);
2457     return _mute;
2458 }
2459 
2460 int
2461 Channel::SetOutputVolumePan(float left, float right)
2462 {
2463     CriticalSectionScoped cs(&volume_settings_critsect_);
2464     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2465                "Channel::SetOutputVolumePan()");
2466     _panLeft = left;
2467     _panRight = right;
2468     return 0;
2469 }
2470 
2471 int
2472 Channel::GetOutputVolumePan(float& left, float& right) const
2473 {
2474     CriticalSectionScoped cs(&volume_settings_critsect_);
2475     left = _panLeft;
2476     right = _panRight;
2477     return 0;
2478 }
2479 
2480 int
2481 Channel::SetChannelOutputVolumeScaling(float scaling)
2482 {
2483     CriticalSectionScoped cs(&volume_settings_critsect_);
2484     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2485                "Channel::SetChannelOutputVolumeScaling()");
2486     _outputGain = scaling;
2487     return 0;
2488 }
2489 
2490 int
2491 Channel::GetChannelOutputVolumeScaling(float& scaling) const
2492 {
2493     CriticalSectionScoped cs(&volume_settings_critsect_);
2494     scaling = _outputGain;
2495     return 0;
2496 }
2497 
2498 int Channel::SendTelephoneEventOutband(unsigned char eventCode,
2499                                        int lengthMs, int attenuationDb,
2500                                        bool playDtmfEvent)
2501 {
2502     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2503                "Channel::SendTelephoneEventOutband(..., playDtmfEvent=%d)",
2504                playDtmfEvent);
2505     if (!Sending()) {
2506       return -1;
2507     }
2508 
2509     _playOutbandDtmfEvent = playDtmfEvent;
2510 
2511     if (_rtpRtcpModule->SendTelephoneEventOutband(eventCode, lengthMs,
2512                                                  attenuationDb) != 0)
2513     {
2514         _engineStatisticsPtr->SetLastError(
2515             VE_SEND_DTMF_FAILED,
2516             kTraceWarning,
2517             "SendTelephoneEventOutband() failed to send event");
2518         return -1;
2519     }
2520     return 0;
2521 }
2522 
2523 int Channel::SendTelephoneEventInband(unsigned char eventCode,
2524                                          int lengthMs,
2525                                          int attenuationDb,
2526                                          bool playDtmfEvent)
2527 {
2528     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2529                "Channel::SendTelephoneEventInband(..., playDtmfEvent=%d)",
2530                playDtmfEvent);
2531 
2532     _playInbandDtmfEvent = playDtmfEvent;
2533     _inbandDtmfQueue.AddDtmf(eventCode, lengthMs, attenuationDb);
2534 
2535     return 0;
2536 }
2537 
2538 int
2539 Channel::SetSendTelephoneEventPayloadType(unsigned char type)
2540 {
2541     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2542                "Channel::SetSendTelephoneEventPayloadType()");
2543     if (type > 127)
2544     {
2545         _engineStatisticsPtr->SetLastError(
2546             VE_INVALID_ARGUMENT, kTraceError,
2547             "SetSendTelephoneEventPayloadType() invalid type");
2548         return -1;
2549     }
2550     CodecInst codec = {};
2551     codec.plfreq = 8000;
2552     codec.pltype = type;
2553     memcpy(codec.plname, "telephone-event", 16);
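    // "telephone-event" is 15 characters, so the 16 bytes copied above include
    // the terminating '\0' expected in codec.plname.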
2554     if (_rtpRtcpModule->RegisterSendPayload(codec) != 0)
2555     {
2556         _rtpRtcpModule->DeRegisterSendPayload(codec.pltype);
2557         if (_rtpRtcpModule->RegisterSendPayload(codec) != 0) {
2558             _engineStatisticsPtr->SetLastError(
2559                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
2560                 "SetSendTelephoneEventPayloadType() failed to register send "
2561                 "payload type");
2562             return -1;
2563         }
2564     }
2565     _sendTelephoneEventPayloadType = type;
2566     return 0;
2567 }
2568 
2569 int
2570 Channel::GetSendTelephoneEventPayloadType(unsigned char& type)
2571 {
2572     type = _sendTelephoneEventPayloadType;
2573     return 0;
2574 }
2575 
2576 int
2577 Channel::UpdateRxVadDetection(AudioFrame& audioFrame)
2578 {
2579     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
2580                  "Channel::UpdateRxVadDetection()");
2581 
2582     int vadDecision = 1;
2583 
2584     vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive) ? 1 : 0;
2585 
2586     if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr)
2587     {
2588         OnRxVadDetected(vadDecision);
2589         _oldVadDecision = vadDecision;
2590     }
2591 
2592     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
2593                  "Channel::UpdateRxVadDetection() => vadDecision=%d",
2594                  vadDecision);
2595     return 0;
2596 }
2597 
2598 int
2599 Channel::RegisterRxVadObserver(VoERxVadCallback &observer)
2600 {
2601     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2602                  "Channel::RegisterRxVadObserver()");
2603     CriticalSectionScoped cs(&_callbackCritSect);
2604 
2605     if (_rxVadObserverPtr)
2606     {
2607         _engineStatisticsPtr->SetLastError(
2608             VE_INVALID_OPERATION, kTraceError,
2609             "RegisterRxVadObserver() observer already enabled");
2610         return -1;
2611     }
2612     _rxVadObserverPtr = &observer;
2613     _RxVadDetection = true;
2614     return 0;
2615 }
2616 
2617 int
2618 Channel::DeRegisterRxVadObserver()
2619 {
2620     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2621                  "Channel::DeRegisterRxVadObserver()");
2622     CriticalSectionScoped cs(&_callbackCritSect);
2623 
2624     if (!_rxVadObserverPtr)
2625     {
2626         _engineStatisticsPtr->SetLastError(
2627             VE_INVALID_OPERATION, kTraceWarning,
2628             "DeRegisterRxVadObserver() observer already disabled");
2629         return 0;
2630     }
2631     _rxVadObserverPtr = NULL;
2632     _RxVadDetection = false;
2633     return 0;
2634 }
2635 
2636 int
2637 Channel::VoiceActivityIndicator(int &activity)
2638 {
2639     activity = _sendFrameType;
2640     return 0;
2641 }
2642 
2643 #ifdef WEBRTC_VOICE_ENGINE_AGC
2644 
2645 int
2646 Channel::SetRxAgcStatus(bool enable, AgcModes mode)
2647 {
2648     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2649                  "Channel::SetRxAgcStatus(enable=%d, mode=%d)",
2650                  (int)enable, (int)mode);
2651 
2652     GainControl::Mode agcMode = kDefaultRxAgcMode;
2653     switch (mode)
2654     {
2655         case kAgcDefault:
2656             break;
2657         case kAgcUnchanged:
2658             agcMode = rx_audioproc_->gain_control()->mode();
2659             break;
2660         case kAgcFixedDigital:
2661             agcMode = GainControl::kFixedDigital;
2662             break;
2663         case kAgcAdaptiveDigital:
2664             agcMode = GainControl::kAdaptiveDigital;
2665             break;
2666         default:
2667             _engineStatisticsPtr->SetLastError(
2668                 VE_INVALID_ARGUMENT, kTraceError,
2669                 "SetRxAgcStatus() invalid Agc mode");
2670             return -1;
2671     }
2672 
2673     if (rx_audioproc_->gain_control()->set_mode(agcMode) != 0)
2674     {
2675         _engineStatisticsPtr->SetLastError(
2676             VE_APM_ERROR, kTraceError,
2677             "SetRxAgcStatus() failed to set Agc mode");
2678         return -1;
2679     }
2680     if (rx_audioproc_->gain_control()->Enable(enable) != 0)
2681     {
2682         _engineStatisticsPtr->SetLastError(
2683             VE_APM_ERROR, kTraceError,
2684             "SetRxAgcStatus() failed to set Agc state");
2685         return -1;
2686     }
2687 
2688     _rxAgcIsEnabled = enable;
2689     channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled);
2690 
2691     return 0;
2692 }
2693 
2694 int
2695 Channel::GetRxAgcStatus(bool& enabled, AgcModes& mode)
2696 {
2697     bool enable = rx_audioproc_->gain_control()->is_enabled();
2698     GainControl::Mode agcMode =
2699         rx_audioproc_->gain_control()->mode();
2700 
2701     enabled = enable;
2702 
2703     switch (agcMode)
2704     {
2705         case GainControl::kFixedDigital:
2706             mode = kAgcFixedDigital;
2707             break;
2708         case GainControl::kAdaptiveDigital:
2709             mode = kAgcAdaptiveDigital;
2710             break;
2711         default:
2712             _engineStatisticsPtr->SetLastError(
2713                 VE_APM_ERROR, kTraceError,
2714                 "GetRxAgcStatus() invalid Agc mode");
2715             return -1;
2716     }
2717 
2718     return 0;
2719 }
2720 
2721 int
2722 Channel::SetRxAgcConfig(AgcConfig config)
2723 {
2724     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2725                  "Channel::SetRxAgcConfig()");
2726 
2727     if (rx_audioproc_->gain_control()->set_target_level_dbfs(
2728         config.targetLeveldBOv) != 0)
2729     {
2730         _engineStatisticsPtr->SetLastError(
2731             VE_APM_ERROR, kTraceError,
2732             "SetRxAgcConfig() failed to set target peak |level| "
2733             "(or envelope) of the Agc");
2734         return -1;
2735     }
2736     if (rx_audioproc_->gain_control()->set_compression_gain_db(
2737         config.digitalCompressionGaindB) != 0)
2738     {
2739         _engineStatisticsPtr->SetLastError(
2740             VE_APM_ERROR, kTraceError,
2741             "SetRxAgcConfig() failed to set the range in |gain| the"
2742             " digital compression stage may apply");
2743         return -1;
2744     }
2745     if (rx_audioproc_->gain_control()->enable_limiter(
2746         config.limiterEnable) != 0)
2747     {
2748         _engineStatisticsPtr->SetLastError(
2749             VE_APM_ERROR, kTraceError,
2750             "SetRxAgcConfig() failed to set hard limiter to the signal");
2751         return -1;
2752     }
2753 
2754     return 0;
2755 }
2756 
2757 int
2758 Channel::GetRxAgcConfig(AgcConfig& config)
2759 {
2760     config.targetLeveldBOv =
2761         rx_audioproc_->gain_control()->target_level_dbfs();
2762     config.digitalCompressionGaindB =
2763         rx_audioproc_->gain_control()->compression_gain_db();
2764     config.limiterEnable =
2765         rx_audioproc_->gain_control()->is_limiter_enabled();
2766 
2767     return 0;
2768 }
2769 
2770 #endif // #ifdef WEBRTC_VOICE_ENGINE_AGC
2771 
2772 #ifdef WEBRTC_VOICE_ENGINE_NR
2773 
2774 int
2775 Channel::SetRxNsStatus(bool enable, NsModes mode)
2776 {
2777     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
2778                  "Channel::SetRxNsStatus(enable=%d, mode=%d)",
2779                  (int)enable, (int)mode);
2780 
2781     NoiseSuppression::Level nsLevel = kDefaultNsMode;
2782     switch (mode)
2783     {
2784 
2785         case kNsDefault:
2786             break;
2787         case kNsUnchanged:
2788             nsLevel = rx_audioproc_->noise_suppression()->level();
2789             break;
2790         case kNsConference:
2791             nsLevel = NoiseSuppression::kHigh;
2792             break;
2793         case kNsLowSuppression:
2794             nsLevel = NoiseSuppression::kLow;
2795             break;
2796         case kNsModerateSuppression:
2797             nsLevel = NoiseSuppression::kModerate;
2798             break;
2799         case kNsHighSuppression:
2800             nsLevel = NoiseSuppression::kHigh;
2801             break;
2802         case kNsVeryHighSuppression:
2803             nsLevel = NoiseSuppression::kVeryHigh;
2804             break;
2805     }
2806 
2807     if (rx_audioproc_->noise_suppression()->set_level(nsLevel)
2808         != 0)
2809     {
2810         _engineStatisticsPtr->SetLastError(
2811             VE_APM_ERROR, kTraceError,
2812             "SetRxNsStatus() failed to set NS level");
2813         return -1;
2814     }
2815     if (rx_audioproc_->noise_suppression()->Enable(enable) != 0)
2816     {
2817         _engineStatisticsPtr->SetLastError(
2818             VE_APM_ERROR, kTraceError,
2819             "SetRxNsStatus() failed to set NS state");
2820         return -1;
2821     }
2822 
2823     _rxNsIsEnabled = enable;
2824     channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled);
2825 
2826     return 0;
2827 }
2828 
2829 int
2830 Channel::GetRxNsStatus(bool& enabled, NsModes& mode)
2831 {
2832     bool enable =
2833         rx_audioproc_->noise_suppression()->is_enabled();
2834     NoiseSuppression::Level ncLevel =
2835         rx_audioproc_->noise_suppression()->level();
2836 
2837     enabled = enable;
2838 
2839     switch (ncLevel)
2840     {
2841         case NoiseSuppression::kLow:
2842             mode = kNsLowSuppression;
2843             break;
2844         case NoiseSuppression::kModerate:
2845             mode = kNsModerateSuppression;
2846             break;
2847         case NoiseSuppression::kHigh:
2848             mode = kNsHighSuppression;
2849             break;
2850         case NoiseSuppression::kVeryHigh:
2851             mode = kNsVeryHighSuppression;
2852             break;
2853     }
2854 
2855     return 0;
2856 }
2857 
2858 #endif // #ifdef WEBRTC_VOICE_ENGINE_NR
2859 
2860 int
2861 Channel::SetLocalSSRC(unsigned int ssrc)
2862 {
2863     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2864                  "Channel::SetLocalSSRC()");
2865     if (channel_state_.Get().sending)
2866     {
2867         _engineStatisticsPtr->SetLastError(
2868             VE_ALREADY_SENDING, kTraceError,
2869             "SetLocalSSRC() already sending");
2870         return -1;
2871     }
2872     _rtpRtcpModule->SetSSRC(ssrc);
2873     return 0;
2874 }
2875 
2876 int
2877 Channel::GetLocalSSRC(unsigned int& ssrc)
2878 {
2879     ssrc = _rtpRtcpModule->SSRC();
2880     return 0;
2881 }
2882 
2883 int
2884 Channel::GetRemoteSSRC(unsigned int& ssrc)
2885 {
2886     ssrc = rtp_receiver_->SSRC();
2887     return 0;
2888 }
2889 
2890 int Channel::SetSendAudioLevelIndicationStatus(bool enable, unsigned char id) {
2891   _includeAudioLevelIndication = enable;
2892   return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
2893 }
2894 
2895 int Channel::SetReceiveAudioLevelIndicationStatus(bool enable,
2896                                                   unsigned char id) {
2897   rtp_header_parser_->DeregisterRtpHeaderExtension(
2898       kRtpExtensionAudioLevel);
2899   if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(
2900           kRtpExtensionAudioLevel, id)) {
2901     return -1;
2902   }
2903   return 0;
2904 }
2905 
2906 int Channel::SetSendAbsoluteSenderTimeStatus(bool enable, unsigned char id) {
2907   return SetSendRtpHeaderExtension(enable, kRtpExtensionAbsoluteSendTime, id);
2908 }
2909 
2910 int Channel::SetReceiveAbsoluteSenderTimeStatus(bool enable, unsigned char id) {
2911   rtp_header_parser_->DeregisterRtpHeaderExtension(
2912       kRtpExtensionAbsoluteSendTime);
2913   if (enable && !rtp_header_parser_->RegisterRtpHeaderExtension(
2914       kRtpExtensionAbsoluteSendTime, id)) {
2915     return -1;
2916   }
2917   return 0;
2918 }
2919 
2920 void Channel::EnableSendTransportSequenceNumber(int id) {
2921   int ret =
2922       SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
2923   RTC_DCHECK_EQ(0, ret);
2924 }
2925 
2926 void Channel::SetCongestionControlObjects(
2927     RtpPacketSender* rtp_packet_sender,
2928     TransportFeedbackObserver* transport_feedback_observer,
2929     PacketRouter* packet_router) {
2930   RTC_DCHECK(packet_router != nullptr || packet_router_ != nullptr);
2931   if (transport_feedback_observer) {
2932     RTC_DCHECK(feedback_observer_proxy_.get());
2933     feedback_observer_proxy_->SetTransportFeedbackObserver(
2934         transport_feedback_observer);
2935   }
2936   if (rtp_packet_sender) {
2937     RTC_DCHECK(rtp_packet_sender_proxy_.get());
2938     rtp_packet_sender_proxy_->SetPacketSender(rtp_packet_sender);
2939   }
2940   if (seq_num_allocator_proxy_.get()) {
2941     seq_num_allocator_proxy_->SetSequenceNumberAllocator(packet_router);
2942   }
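  // Keep a send-side packet history (600 packets here) only while a pacer is
  // attached, so that packets handed to the pacer remain available when it
  // schedules them for transmission or when a retransmission is requested.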
2943   _rtpRtcpModule->SetStorePacketsStatus(rtp_packet_sender != nullptr, 600);
2944   if (packet_router != nullptr) {
2945     packet_router->AddRtpModule(_rtpRtcpModule.get());
2946   } else {
2947     packet_router_->RemoveRtpModule(_rtpRtcpModule.get());
2948   }
2949   packet_router_ = packet_router;
2950 }
2951 
2952 void Channel::SetRTCPStatus(bool enable) {
2953   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2954                "Channel::SetRTCPStatus()");
2955   _rtpRtcpModule->SetRTCPStatus(enable ? RtcpMode::kCompound : RtcpMode::kOff);
2956 }
2957 
2958 int
2959 Channel::GetRTCPStatus(bool& enabled)
2960 {
2961   RtcpMode method = _rtpRtcpModule->RTCP();
2962   enabled = (method != RtcpMode::kOff);
2963     return 0;
2964 }
2965 
2966 int
2967 Channel::SetRTCP_CNAME(const char cName[256])
2968 {
2969     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
2970                  "Channel::SetRTCP_CNAME()");
2971     if (_rtpRtcpModule->SetCNAME(cName) != 0)
2972     {
2973         _engineStatisticsPtr->SetLastError(
2974             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
2975             "SetRTCP_CNAME() failed to set RTCP CNAME");
2976         return -1;
2977     }
2978     return 0;
2979 }
2980 
2981 int
2982 Channel::GetRemoteRTCP_CNAME(char cName[256])
2983 {
2984     if (cName == NULL)
2985     {
2986         _engineStatisticsPtr->SetLastError(
2987             VE_INVALID_ARGUMENT, kTraceError,
2988             "GetRemoteRTCP_CNAME() invalid CNAME input buffer");
2989         return -1;
2990     }
2991     char cname[RTCP_CNAME_SIZE];
2992     const uint32_t remoteSSRC = rtp_receiver_->SSRC();
2993     if (_rtpRtcpModule->RemoteCNAME(remoteSSRC, cname) != 0)
2994     {
2995         _engineStatisticsPtr->SetLastError(
2996             VE_CANNOT_RETRIEVE_CNAME, kTraceError,
2997             "GetRemoteRTCP_CNAME() failed to retrieve remote RTCP CNAME");
2998         return -1;
2999     }
3000     strcpy(cName, cname);
3001     return 0;
3002 }
3003 
3004 int
3005 Channel::GetRemoteRTCPData(
3006     unsigned int& NTPHigh,
3007     unsigned int& NTPLow,
3008     unsigned int& timestamp,
3009     unsigned int& playoutTimestamp,
3010     unsigned int* jitter,
3011     unsigned short* fractionLost)
3012 {
3013     // --- Information from sender info in received Sender Reports
3014 
3015     RTCPSenderInfo senderInfo;
3016     if (_rtpRtcpModule->RemoteRTCPStat(&senderInfo) != 0)
3017     {
3018         _engineStatisticsPtr->SetLastError(
3019             VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3020             "GetRemoteRTCPData() failed to retrieve sender info for remote "
3021             "side");
3022         return -1;
3023     }
3024 
3025     // We only utilize 12 out of 20 bytes in the sender info (ignores packet
3026     // and octet count)
3027     NTPHigh = senderInfo.NTPseconds;
3028     NTPLow = senderInfo.NTPfraction;
3029     timestamp = senderInfo.RTPtimeStamp;
3030 
3031     // --- Locally derived information
3032 
3033     // This value is updated on each incoming RTCP packet (0 when no packet
3034     // has been received)
3035     playoutTimestamp = playout_timestamp_rtcp_;
3036 
3037     if (NULL != jitter || NULL != fractionLost)
3038     {
3039         // Get all RTCP receiver report blocks that have been received on this
3040         // channel. If we receive RTP packets from a remote source we know the
3041         // remote SSRC and use the report block from that source.
3042         // Otherwise use the first report block.
3043         std::vector<RTCPReportBlock> remote_stats;
3044         if (_rtpRtcpModule->RemoteRTCPStat(&remote_stats) != 0 ||
3045             remote_stats.empty()) {
3046           WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3047                        VoEId(_instanceId, _channelId),
3048                        "GetRemoteRTCPData() failed to measure statistics due"
3049                        " to lack of received RTP and/or RTCP packets");
3050           return -1;
3051         }
3052 
3053         uint32_t remoteSSRC = rtp_receiver_->SSRC();
3054         std::vector<RTCPReportBlock>::const_iterator it = remote_stats.begin();
3055         for (; it != remote_stats.end(); ++it) {
3056           if (it->remoteSSRC == remoteSSRC)
3057             break;
3058         }
3059 
3060         if (it == remote_stats.end()) {
3061           // If we have not received any RTCP packets from this SSRC it probably
3062           // means that we have not received any RTP packets.
3063           // Use the first received report block instead.
3064           it = remote_stats.begin();
3065           remoteSSRC = it->remoteSSRC;
3066         }
3067 
3068         if (jitter) {
3069           *jitter = it->jitter;
3070         }
3071 
3072         if (fractionLost) {
3073           *fractionLost = it->fractionLost;
3074         }
3075     }
3076     return 0;
3077 }
3078 
3079 int
3080 Channel::SendApplicationDefinedRTCPPacket(unsigned char subType,
3081                                              unsigned int name,
3082                                              const char* data,
3083                                              unsigned short dataLengthInBytes)
3084 {
3085     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3086                  "Channel::SendApplicationDefinedRTCPPacket()");
3087     if (!channel_state_.Get().sending)
3088     {
3089         _engineStatisticsPtr->SetLastError(
3090             VE_NOT_SENDING, kTraceError,
3091             "SendApplicationDefinedRTCPPacket() not sending");
3092         return -1;
3093     }
3094     if (NULL == data)
3095     {
3096         _engineStatisticsPtr->SetLastError(
3097             VE_INVALID_ARGUMENT, kTraceError,
3098             "SendApplicationDefinedRTCPPacket() invalid data value");
3099         return -1;
3100     }
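    // RFC 3550 requires the application-dependent data of an RTCP APP packet
    // to be a multiple of 32 bits, so the length must be divisible by four.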
3101     if (dataLengthInBytes % 4 != 0)
3102     {
3103         _engineStatisticsPtr->SetLastError(
3104             VE_INVALID_ARGUMENT, kTraceError,
3105             "SendApplicationDefinedRTCPPacket() invalid length value");
3106         return -1;
3107     }
3108     RtcpMode status = _rtpRtcpModule->RTCP();
3109     if (status == RtcpMode::kOff) {
3110         _engineStatisticsPtr->SetLastError(
3111             VE_RTCP_ERROR, kTraceError,
3112             "SendApplicationDefinedRTCPPacket() RTCP is disabled");
3113         return -1;
3114     }
3115 
3116     // Create and schedule the RTCP APP packet for transmission
3117     if (_rtpRtcpModule->SetRTCPApplicationSpecificData(
3118         subType,
3119         name,
3120         (const unsigned char*) data,
3121         dataLengthInBytes) != 0)
3122     {
3123         _engineStatisticsPtr->SetLastError(
3124             VE_SEND_ERROR, kTraceError,
3125             "SendApplicationDefinedRTCPPacket() failed to send RTCP packet");
3126         return -1;
3127     }
3128     return 0;
3129 }
3130 
3131 int
3132 Channel::GetRTPStatistics(
3133         unsigned int& averageJitterMs,
3134         unsigned int& maxJitterMs,
3135         unsigned int& discardedPackets)
3136 {
3137     // The jitter statistics are updated for each received RTP packet and are
3138     // based on received packets.
3139     if (_rtpRtcpModule->RTCP() == RtcpMode::kOff) {
3140       // If RTCP is off, there is no timed thread in the RTCP module regularly
3141       // generating new stats, trigger the update manually here instead.
3142       StreamStatistician* statistician =
3143           rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
3144       if (statistician) {
3145         // Don't use returned statistics, use data from proxy instead so that
3146         // max jitter can be fetched atomically.
3147         RtcpStatistics s;
3148         statistician->GetStatistics(&s, true);
3149       }
3150     }
3151 
3152     ChannelStatistics stats = statistics_proxy_->GetStats();
3153     const int32_t playoutFrequency = audio_coding_->PlayoutFrequency();
3154     if (playoutFrequency > 0) {
3155       // Scale RTP statistics given the current playout frequency
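      // The jitter values are reported in RTP timestamp units; dividing by the
      // number of samples per millisecond (playoutFrequency / 1000) converts
      // them to milliseconds.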
3156       maxJitterMs = stats.max_jitter / (playoutFrequency / 1000);
3157       averageJitterMs = stats.rtcp.jitter / (playoutFrequency / 1000);
3158     }
3159 
3160     discardedPackets = _numberOfDiscardedPackets;
3161 
3162     return 0;
3163 }
3164 
3165 int Channel::GetRemoteRTCPReportBlocks(
3166     std::vector<ReportBlock>* report_blocks) {
3167   if (report_blocks == NULL) {
3168     _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
3169       "GetRemoteRTCPReportBlock()s invalid report_blocks.");
3170     return -1;
3171   }
3172 
3173   // Get the report blocks from the latest received RTCP Sender or Receiver
3174   // Report. Each element in the vector contains the sender's SSRC and a
3175   // report block according to RFC 3550.
3176   std::vector<RTCPReportBlock> rtcp_report_blocks;
3177   if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
3178     return -1;
3179   }
3180 
3181   if (rtcp_report_blocks.empty())
3182     return 0;
3183 
3184   std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
3185   for (; it != rtcp_report_blocks.end(); ++it) {
3186     ReportBlock report_block;
3187     report_block.sender_SSRC = it->remoteSSRC;
3188     report_block.source_SSRC = it->sourceSSRC;
3189     report_block.fraction_lost = it->fractionLost;
3190     report_block.cumulative_num_packets_lost = it->cumulativeLost;
3191     report_block.extended_highest_sequence_number = it->extendedHighSeqNum;
3192     report_block.interarrival_jitter = it->jitter;
3193     report_block.last_SR_timestamp = it->lastSR;
3194     report_block.delay_since_last_SR = it->delaySinceLastSR;
3195     report_blocks->push_back(report_block);
3196   }
3197   return 0;
3198 }
3199 
3200 int
3201 Channel::GetRTPStatistics(CallStatistics& stats)
3202 {
3203     // --- RtcpStatistics
3204 
3205     // The jitter statistics are updated for each received RTP packet and are
3206     // based on received packets.
3207     RtcpStatistics statistics;
3208     StreamStatistician* statistician =
3209         rtp_receive_statistics_->GetStatistician(rtp_receiver_->SSRC());
3210     if (!statistician ||
3211         !statistician->GetStatistics(
3212             &statistics, _rtpRtcpModule->RTCP() == RtcpMode::kOff)) {
3213       _engineStatisticsPtr->SetLastError(
3214           VE_CANNOT_RETRIEVE_RTP_STAT, kTraceWarning,
3215           "GetRTPStatistics() failed to read RTP statistics from the "
3216           "RTP/RTCP module");
3217     }
3218 
3219     stats.fractionLost = statistics.fraction_lost;
3220     stats.cumulativeLost = statistics.cumulative_lost;
3221     stats.extendedMax = statistics.extended_max_sequence_number;
3222     stats.jitterSamples = statistics.jitter;
3223 
3224     // --- RTT
3225     stats.rttMs = GetRTT(true);
3226 
3227     // --- Data counters
3228 
3229     size_t bytesSent(0);
3230     uint32_t packetsSent(0);
3231     size_t bytesReceived(0);
3232     uint32_t packetsReceived(0);
3233 
3234     if (statistician) {
3235       statistician->GetDataCounters(&bytesReceived, &packetsReceived);
3236     }
3237 
3238     if (_rtpRtcpModule->DataCountersRTP(&bytesSent,
3239                                         &packetsSent) != 0)
3240     {
3241         WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3242                      VoEId(_instanceId, _channelId),
3243                      "GetRTPStatistics() failed to retrieve RTP datacounters =>"
3244                      " output will not be complete");
3245     }
3246 
3247     stats.bytesSent = bytesSent;
3248     stats.packetsSent = packetsSent;
3249     stats.bytesReceived = bytesReceived;
3250     stats.packetsReceived = packetsReceived;
3251 
3252     // --- Timestamps
3253     {
3254       CriticalSectionScoped lock(ts_stats_lock_.get());
3255       stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
3256     }
3257     return 0;
3258 }
3259 
3260 int Channel::SetREDStatus(bool enable, int redPayloadtype) {
3261   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3262                "Channel::SetREDStatus()");
3263 
3264   if (enable) {
3265     if (redPayloadtype < 0 || redPayloadtype > 127) {
3266       _engineStatisticsPtr->SetLastError(
3267           VE_PLTYPE_ERROR, kTraceError,
3268           "SetREDStatus() invalid RED payload type");
3269       return -1;
3270     }
3271 
3272     if (SetRedPayloadType(redPayloadtype) < 0) {
3273       _engineStatisticsPtr->SetLastError(
3274           VE_CODEC_ERROR, kTraceError,
3275           "SetSecondarySendCodec() Failed to register RED ACM");
3276       return -1;
3277     }
3278   }
3279 
3280   if (audio_coding_->SetREDStatus(enable) != 0) {
3281     _engineStatisticsPtr->SetLastError(
3282         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
3283         "SetREDStatus() failed to set RED state in the ACM");
3284     return -1;
3285   }
3286   return 0;
3287 }
3288 
3289 int
3290 Channel::GetREDStatus(bool& enabled, int& redPayloadtype)
3291 {
3292     enabled = audio_coding_->REDStatus();
3293     if (enabled)
3294     {
3295       int8_t payloadType = 0;
3296       if (_rtpRtcpModule->SendREDPayloadType(&payloadType) != 0) {
3297             _engineStatisticsPtr->SetLastError(
3298                 VE_RTP_RTCP_MODULE_ERROR, kTraceError,
3299                 "GetREDStatus() failed to retrieve RED PT from RTP/RTCP "
3300                 "module");
3301             return -1;
3302         }
3303         redPayloadtype = payloadType;
3304         return 0;
3305     }
3306     return 0;
3307 }
3308 
3309 int Channel::SetCodecFECStatus(bool enable) {
3310   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3311                "Channel::SetCodecFECStatus()");
3312 
3313   if (audio_coding_->SetCodecFEC(enable) != 0) {
3314     _engineStatisticsPtr->SetLastError(
3315         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
3316         "SetCodecFECStatus() failed to set FEC state");
3317     return -1;
3318   }
3319   return 0;
3320 }
3321 
3322 bool Channel::GetCodecFECStatus() {
3323   bool enabled = audio_coding_->CodecFEC();
3324   return enabled;
3325 }
3326 
3327 void Channel::SetNACKStatus(bool enable, int maxNumberOfPackets) {
3328   // None of these functions can fail.
3329   // If pacing is enabled we always store packets.
3330   if (!pacing_enabled_)
3331     _rtpRtcpModule->SetStorePacketsStatus(enable, maxNumberOfPackets);
3332   rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
3333   rtp_receiver_->SetNACKStatus(enable ? kNackRtcp : kNackOff);
3334   if (enable)
3335     audio_coding_->EnableNack(maxNumberOfPackets);
3336   else
3337     audio_coding_->DisableNack();
3338 }
3339 
3340 // Called when we are missing one or more packets.
3341 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
3342   return _rtpRtcpModule->SendNACK(sequence_numbers, length);
3343 }
3344 
3345 uint32_t
3346 Channel::Demultiplex(const AudioFrame& audioFrame)
3347 {
3348     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3349                  "Channel::Demultiplex()");
3350     _audioFrame.CopyFrom(audioFrame);
3351     _audioFrame.id_ = _channelId;
3352     return 0;
3353 }
3354 
3355 void Channel::Demultiplex(const int16_t* audio_data,
3356                           int sample_rate,
3357                           size_t number_of_frames,
3358                           size_t number_of_channels) {
3359   CodecInst codec;
3360   GetSendCodec(codec);
3361 
3362   // Never upsample or upmix the capture signal here. This should be done at the
3363   // end of the send chain.
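  // Cap the frame's sample rate and channel count at what the send codec uses;
  // RemixAndResample() then downmixes/downsamples the captured audio into
  // |_audioFrame| as needed.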
3364   _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
3365   _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
3366   RemixAndResample(audio_data, number_of_frames, number_of_channels,
3367                    sample_rate, &input_resampler_, &_audioFrame);
3368 }
3369 
3370 uint32_t
3371 Channel::PrepareEncodeAndSend(int mixingFrequency)
3372 {
3373     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3374                  "Channel::PrepareEncodeAndSend()");
3375 
3376     if (_audioFrame.samples_per_channel_ == 0)
3377     {
3378         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3379                      "Channel::PrepareEncodeAndSend() invalid audio frame");
3380         return 0xFFFFFFFF;
3381     }
3382 
3383     if (channel_state_.Get().input_file_playing)
3384     {
3385         MixOrReplaceAudioWithFile(mixingFrequency);
3386     }
3387 
3388     bool is_muted = Mute();  // Cache locally as Mute() takes a lock.
3389     if (is_muted) {
3390       AudioFrameOperations::Mute(_audioFrame);
3391     }
3392 
3393     if (channel_state_.Get().input_external_media)
3394     {
3395         CriticalSectionScoped cs(&_callbackCritSect);
3396         const bool isStereo = (_audioFrame.num_channels_ == 2);
3397         if (_inputExternalMediaCallbackPtr)
3398         {
3399             _inputExternalMediaCallbackPtr->Process(
3400                 _channelId,
3401                 kRecordingPerChannel,
3402                (int16_t*)_audioFrame.data_,
3403                 _audioFrame.samples_per_channel_,
3404                 _audioFrame.sample_rate_hz_,
3405                 isStereo);
3406         }
3407     }
3408 
3409     InsertInbandDtmfTone();
3410 
3411     if (_includeAudioLevelIndication) {
3412       size_t length =
3413           _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
3414       if (is_muted) {
3415         rms_level_.ProcessMuted(length);
3416       } else {
3417         rms_level_.Process(_audioFrame.data_, length);
3418       }
3419     }
3420 
3421     return 0;
3422 }
3423 
3424 uint32_t
3425 Channel::EncodeAndSend()
3426 {
3427     WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3428                  "Channel::EncodeAndSend()");
3429 
3430     assert(_audioFrame.num_channels_ <= 2);
3431     if (_audioFrame.samples_per_channel_ == 0)
3432     {
3433         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3434                      "Channel::EncodeAndSend() invalid audio frame");
3435         return 0xFFFFFFFF;
3436     }
3437 
3438     _audioFrame.id_ = _channelId;
3439 
3440     // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
3441 
3442     // The ACM resamples internally.
3443     _audioFrame.timestamp_ = _timeStamp;
3444     // This call will trigger AudioPacketizationCallback::SendData if encoding
3445     // is done and payload is ready for packetization and transmission.
3446     // Otherwise, it will return without invoking the callback.
3447     if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0)
3448     {
3449         WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId,_channelId),
3450                      "Channel::EncodeAndSend() ACM encoding failed");
3451         return 0xFFFFFFFF;
3452     }
3453 
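    // Advance the RTP timestamp by the number of samples per channel contained
    // in this 10 ms frame.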
3454     _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
3455     return 0;
3456 }
3457 
3458 void Channel::DisassociateSendChannel(int channel_id) {
3459   CriticalSectionScoped lock(assoc_send_channel_lock_.get());
3460   Channel* channel = associate_send_channel_.channel();
3461   if (channel && channel->ChannelId() == channel_id) {
3462     // If this channel is associated with a send channel of the specified
3463     // Channel ID, disassociate with it.
3464     ChannelOwner ref(NULL);
3465     associate_send_channel_ = ref;
3466   }
3467 }
3468 
3469 int Channel::RegisterExternalMediaProcessing(
3470     ProcessingTypes type,
3471     VoEMediaProcess& processObject)
3472 {
3473     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3474                  "Channel::RegisterExternalMediaProcessing()");
3475 
3476     CriticalSectionScoped cs(&_callbackCritSect);
3477 
3478     if (kPlaybackPerChannel == type)
3479     {
3480         if (_outputExternalMediaCallbackPtr)
3481         {
3482             _engineStatisticsPtr->SetLastError(
3483                 VE_INVALID_OPERATION, kTraceError,
3484                 "Channel::RegisterExternalMediaProcessing() "
3485                 "output external media already enabled");
3486             return -1;
3487         }
3488         _outputExternalMediaCallbackPtr = &processObject;
3489         _outputExternalMedia = true;
3490     }
3491     else if (kRecordingPerChannel == type)
3492     {
3493         if (_inputExternalMediaCallbackPtr)
3494         {
3495             _engineStatisticsPtr->SetLastError(
3496                 VE_INVALID_OPERATION, kTraceError,
3497                 "Channel::RegisterExternalMediaProcessing() "
3498                 "output external media already enabled");
3499             return -1;
3500         }
3501         _inputExternalMediaCallbackPtr = &processObject;
3502         channel_state_.SetInputExternalMedia(true);
3503     }
3504     return 0;
3505 }
3506 
3507 int Channel::DeRegisterExternalMediaProcessing(ProcessingTypes type)
3508 {
3509     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3510                  "Channel::DeRegisterExternalMediaProcessing()");
3511 
3512     CriticalSectionScoped cs(&_callbackCritSect);
3513 
3514     if (kPlaybackPerChannel == type)
3515     {
3516         if (!_outputExternalMediaCallbackPtr)
3517         {
3518             _engineStatisticsPtr->SetLastError(
3519                 VE_INVALID_OPERATION, kTraceWarning,
3520                 "Channel::DeRegisterExternalMediaProcessing() "
3521                 "output external media already disabled");
3522             return 0;
3523         }
3524         _outputExternalMedia = false;
3525         _outputExternalMediaCallbackPtr = NULL;
3526     }
3527     else if (kRecordingPerChannel == type)
3528     {
3529         if (!_inputExternalMediaCallbackPtr)
3530         {
3531             _engineStatisticsPtr->SetLastError(
3532                 VE_INVALID_OPERATION, kTraceWarning,
3533                 "Channel::DeRegisterExternalMediaProcessing() "
3534                 "input external media already disabled");
3535             return 0;
3536         }
3537         channel_state_.SetInputExternalMedia(false);
3538         _inputExternalMediaCallbackPtr = NULL;
3539     }
3540 
3541     return 0;
3542 }
3543 
3544 int Channel::SetExternalMixing(bool enabled) {
3545     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3546                  "Channel::SetExternalMixing(enabled=%d)", enabled);
3547 
3548     if (channel_state_.Get().playing)
3549     {
3550         _engineStatisticsPtr->SetLastError(
3551             VE_INVALID_OPERATION, kTraceError,
3552             "Channel::SetExternalMixing() "
3553             "external mixing cannot be changed while playing.");
3554         return -1;
3555     }
3556 
3557     _externalMixing = enabled;
3558 
3559     return 0;
3560 }
3561 
3562 int
3563 Channel::GetNetworkStatistics(NetworkStatistics& stats)
3564 {
3565     return audio_coding_->GetNetworkStatistics(&stats);
3566 }
3567 
3568 void Channel::GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
3569   audio_coding_->GetDecodingCallStatistics(stats);
3570 }
3571 
3572 bool Channel::GetDelayEstimate(int* jitter_buffer_delay_ms,
3573                                int* playout_buffer_delay_ms) const {
3574   CriticalSectionScoped cs(video_sync_lock_.get());
3575   if (_average_jitter_buffer_delay_us == 0) {
3576     return false;
3577   }
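  // Convert the filtered jitter-buffer delay from microseconds to milliseconds
  // (rounding to nearest) and add the last measured inter-packet delay.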
3578   *jitter_buffer_delay_ms = (_average_jitter_buffer_delay_us + 500) / 1000 +
3579       _recPacketDelayMs;
3580   *playout_buffer_delay_ms = playout_delay_ms_;
3581   return true;
3582 }
3583 
3584 uint32_t Channel::GetDelayEstimate() const {
3585   int jitter_buffer_delay_ms = 0;
3586   int playout_buffer_delay_ms = 0;
3587   GetDelayEstimate(&jitter_buffer_delay_ms, &playout_buffer_delay_ms);
3588   return jitter_buffer_delay_ms + playout_buffer_delay_ms;
3589 }
3590 
3591 int Channel::LeastRequiredDelayMs() const {
3592   return audio_coding_->LeastRequiredDelayMs();
3593 }
3594 
3595 int
3596 Channel::SetMinimumPlayoutDelay(int delayMs)
3597 {
3598     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3599                  "Channel::SetMinimumPlayoutDelay()");
3600     if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
3601         (delayMs > kVoiceEngineMaxMinPlayoutDelayMs))
3602     {
3603         _engineStatisticsPtr->SetLastError(
3604             VE_INVALID_ARGUMENT, kTraceError,
3605             "SetMinimumPlayoutDelay() invalid min delay");
3606         return -1;
3607     }
3608     if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0)
3609     {
3610         _engineStatisticsPtr->SetLastError(
3611             VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
3612             "SetMinimumPlayoutDelay() failed to set min playout delay");
3613         return -1;
3614     }
3615     return 0;
3616 }
3617 
3618 int Channel::GetPlayoutTimestamp(unsigned int& timestamp) {
3619   uint32_t playout_timestamp_rtp = 0;
3620   {
3621     CriticalSectionScoped cs(video_sync_lock_.get());
3622     playout_timestamp_rtp = playout_timestamp_rtp_;
3623   }
3624   if (playout_timestamp_rtp == 0)  {
3625     _engineStatisticsPtr->SetLastError(
3626         VE_CANNOT_RETRIEVE_VALUE, kTraceError,
3627         "GetPlayoutTimestamp() failed to retrieve timestamp");
3628     return -1;
3629   }
3630   timestamp = playout_timestamp_rtp;
3631   return 0;
3632 }
3633 
3634 int Channel::SetInitTimestamp(unsigned int timestamp) {
3635   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3636                "Channel::SetInitTimestamp()");
3637   if (channel_state_.Get().sending) {
3638     _engineStatisticsPtr->SetLastError(VE_SENDING, kTraceError,
3639                                        "SetInitTimestamp() already sending");
3640     return -1;
3641   }
3642   _rtpRtcpModule->SetStartTimestamp(timestamp);
3643   return 0;
3644 }
3645 
3646 int Channel::SetInitSequenceNumber(short sequenceNumber) {
3647   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
3648                "Channel::SetInitSequenceNumber()");
3649   if (channel_state_.Get().sending) {
3650     _engineStatisticsPtr->SetLastError(
3651         VE_SENDING, kTraceError, "SetInitSequenceNumber() already sending");
3652     return -1;
3653   }
3654   _rtpRtcpModule->SetSequenceNumber(sequenceNumber);
3655   return 0;
3656 }
3657 
3658 int
3659 Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, RtpReceiver** rtp_receiver) const
3660 {
3661     *rtpRtcpModule = _rtpRtcpModule.get();
3662     *rtp_receiver = rtp_receiver_.get();
3663     return 0;
3664 }
3665 
3666 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
3667 // a shared helper.
3668 int32_t
3669 Channel::MixOrReplaceAudioWithFile(int mixingFrequency)
3670 {
3671   rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[640]);
3672     size_t fileSamples(0);
3673 
3674     {
3675         CriticalSectionScoped cs(&_fileCritSect);
3676 
3677         if (_inputFilePlayerPtr == NULL)
3678         {
3679             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3680                          VoEId(_instanceId, _channelId),
3681                          "Channel::MixOrReplaceAudioWithFile() fileplayer"
3682                              " doesnt exist");
3683             return -1;
3684         }
3685 
3686         if (_inputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
3687                                                       fileSamples,
3688                                                       mixingFrequency) == -1)
3689         {
3690             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3691                          VoEId(_instanceId, _channelId),
3692                          "Channel::MixOrReplaceAudioWithFile() file mixing "
3693                          "failed");
3694             return -1;
3695         }
3696         if (fileSamples == 0)
3697         {
3698             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3699                          VoEId(_instanceId, _channelId),
3700                          "Channel::MixOrReplaceAudioWithFile() file is ended");
3701             return 0;
3702         }
3703     }
3704 
3705     assert(_audioFrame.samples_per_channel_ == fileSamples);
3706 
3707     if (_mixFileWithMicrophone)
3708     {
3709         // Currently file stream is always mono.
3710         // TODO(xians): Change the code when FilePlayer supports real stereo.
3711         MixWithSat(_audioFrame.data_,
3712                    _audioFrame.num_channels_,
3713                    fileBuffer.get(),
3714                    1,
3715                    fileSamples);
3716     }
3717     else
3718     {
3719         // Replace ACM audio with file.
3720         // Currently file stream is always mono.
3721         // TODO(xians): Change the code when FilePlayer supports real stereo.
3722         _audioFrame.UpdateFrame(_channelId,
3723                                 0xFFFFFFFF,
3724                                 fileBuffer.get(),
3725                                 fileSamples,
3726                                 mixingFrequency,
3727                                 AudioFrame::kNormalSpeech,
3728                                 AudioFrame::kVadUnknown,
3729                                 1);
3730 
3731     }
3732     return 0;
3733 }
3734 
3735 int32_t
3736 Channel::MixAudioWithFile(AudioFrame& audioFrame,
3737                           int mixingFrequency)
3738 {
3739     assert(mixingFrequency <= 48000);
3740 
3741     rtc::scoped_ptr<int16_t[]> fileBuffer(new int16_t[960]);
3742     size_t fileSamples(0);
3743 
3744     {
3745         CriticalSectionScoped cs(&_fileCritSect);
3746 
3747         if (_outputFilePlayerPtr == NULL)
3748         {
3749             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3750                          VoEId(_instanceId, _channelId),
3751                          "Channel::MixAudioWithFile() file mixing failed");
3752             return -1;
3753         }
3754 
3755         // We should get the frequency we ask for.
3756         if (_outputFilePlayerPtr->Get10msAudioFromFile(fileBuffer.get(),
3757                                                        fileSamples,
3758                                                        mixingFrequency) == -1)
3759         {
3760             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3761                          VoEId(_instanceId, _channelId),
3762                          "Channel::MixAudioWithFile() file mixing failed");
3763             return -1;
3764         }
3765     }
3766 
3767     if (audioFrame.samples_per_channel_ == fileSamples)
3768     {
3769         // Currently file stream is always mono.
3770         // TODO(xians): Change the code when FilePlayer supports real stereo.
3771         MixWithSat(audioFrame.data_,
3772                    audioFrame.num_channels_,
3773                    fileBuffer.get(),
3774                    1,
3775                    fileSamples);
3776     }
3777     else
3778     {
3779         WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3780             "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS ") != "
3781             "fileSamples(%" PRIuS ")",
3782             audioFrame.samples_per_channel_, fileSamples);
3783         return -1;
3784     }
3785 
3786     return 0;
3787 }
3788 
3789 int
3790 Channel::InsertInbandDtmfTone()
3791 {
3792     // Check if we should start a new tone.
3793     if (_inbandDtmfQueue.PendingDtmf() &&
3794         !_inbandDtmfGenerator.IsAddingTone() &&
3795         _inbandDtmfGenerator.DelaySinceLastTone() >
3796         kMinTelephoneEventSeparationMs)
3797     {
3798         int8_t eventCode(0);
3799         uint16_t lengthMs(0);
3800         uint8_t attenuationDb(0);
3801 
3802         eventCode = _inbandDtmfQueue.NextDtmf(&lengthMs, &attenuationDb);
3803         _inbandDtmfGenerator.AddTone(eventCode, lengthMs, attenuationDb);
3804         if (_playInbandDtmfEvent)
3805         {
3806             // Add tone to output mixer using a reduced length to minimize
3807             // risk of echo.
3808             _outputMixerPtr->PlayDtmfTone(eventCode, lengthMs - 80,
3809                                           attenuationDb);
3810         }
3811     }
3812 
3813     if (_inbandDtmfGenerator.IsAddingTone())
3814     {
3815         uint16_t frequency(0);
3816         _inbandDtmfGenerator.GetSampleRate(frequency);
3817 
3818         if (frequency != _audioFrame.sample_rate_hz_)
3819         {
3820             // Update sample rate of Dtmf tone since the mixing frequency
3821             // has changed.
3822             _inbandDtmfGenerator.SetSampleRate(
3823                 (uint16_t) (_audioFrame.sample_rate_hz_));
3824             // Reset the tone to be added taking the new sample rate into
3825             // account.
3826             _inbandDtmfGenerator.ResetTone();
3827         }
3828 
3829         int16_t toneBuffer[320];
3830         uint16_t toneSamples(0);
3831         // Get 10ms tone segment and set time since last tone to zero
3832         if (_inbandDtmfGenerator.Get10msTone(toneBuffer, toneSamples) == -1)
3833         {
3834             WEBRTC_TRACE(kTraceWarning, kTraceVoice,
3835                        VoEId(_instanceId, _channelId),
3836                        "Channel::EncodeAndSend() inserting Dtmf failed");
3837             return -1;
3838         }
3839 
3840         // Replace mixed audio with DTMF tone.
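        // The generated tone is mono; write the same tone sample to every
        // channel of the (possibly stereo) frame.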
3841         for (size_t sample = 0;
3842             sample < _audioFrame.samples_per_channel_;
3843             sample++)
3844         {
3845             for (size_t channel = 0;
3846                 channel < _audioFrame.num_channels_;
3847                 channel++)
3848             {
3849                 const size_t index =
3850                     sample * _audioFrame.num_channels_ + channel;
3851                 _audioFrame.data_[index] = toneBuffer[sample];
3852             }
3853         }
3854 
3855         assert(_audioFrame.samples_per_channel_ == toneSamples);
3856     } else
3857     {
3858         // Add 10ms to "delay-since-last-tone" counter
3859         _inbandDtmfGenerator.UpdateDelaySinceLastTone();
3860     }
3861     return 0;
3862 }
3863 
3864 void Channel::UpdatePlayoutTimestamp(bool rtcp) {
3865   uint32_t playout_timestamp = 0;
3866 
3867   if (audio_coding_->PlayoutTimestamp(&playout_timestamp) == -1)  {
3868     // This can happen if this channel has not received any RTP packets. In
3869     // that case, NetEq cannot compute a playout timestamp.
3870     return;
3871   }
3872 
3873   uint16_t delay_ms = 0;
3874   if (_audioDeviceModulePtr->PlayoutDelay(&delay_ms) == -1) {
3875     WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId,_channelId),
3876                  "Channel::UpdatePlayoutTimestamp() failed to read playout"
3877                  " delay from the ADM");
3878     _engineStatisticsPtr->SetLastError(
3879         VE_CANNOT_RETRIEVE_VALUE, kTraceError,
3880         "UpdatePlayoutTimestamp() failed to retrieve playout delay");
3881     return;
3882   }
3883 
3884   jitter_buffer_playout_timestamp_ = playout_timestamp;
3885 
3886   // Remove the playout delay.
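  // delay_ms is converted from milliseconds to RTP timestamp ticks by
  // multiplying with the number of ticks per millisecond.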
3887   playout_timestamp -= (delay_ms * (GetPlayoutFrequency() / 1000));
3888 
3889   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3890                "Channel::UpdatePlayoutTimestamp() => playoutTimestamp = %lu",
3891                playout_timestamp);
3892 
3893   {
3894     CriticalSectionScoped cs(video_sync_lock_.get());
3895     if (rtcp) {
3896       playout_timestamp_rtcp_ = playout_timestamp;
3897     } else {
3898       playout_timestamp_rtp_ = playout_timestamp;
3899     }
3900     playout_delay_ms_ = delay_ms;
3901   }
3902 }
3903 
3904 // Called for incoming RTP packets after successful RTP header parsing.
3905 void Channel::UpdatePacketDelay(uint32_t rtp_timestamp,
3906                                 uint16_t sequence_number) {
3907   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
3908                "Channel::UpdatePacketDelay(timestamp=%lu, sequenceNumber=%u)",
3909                rtp_timestamp, sequence_number);
3910 
3911   // Get frequency of last received payload
3912   int rtp_receive_frequency = GetPlayoutFrequency();
3913 
3914   // |jitter_buffer_playout_timestamp_| updated in UpdatePlayoutTimestamp for
3915   // every incoming packet.
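  // Elapsed time, in milliseconds, between the incoming packet's RTP timestamp
  // and the timestamp currently being played out (timestamp ticks divided by
  // ticks per millisecond).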
3916   uint32_t timestamp_diff_ms = (rtp_timestamp -
3917       jitter_buffer_playout_timestamp_) / (rtp_receive_frequency / 1000);
3918   if (!IsNewerTimestamp(rtp_timestamp, jitter_buffer_playout_timestamp_) ||
3919       timestamp_diff_ms > (2 * kVoiceEngineMaxMinPlayoutDelayMs)) {
3920     // If |jitter_buffer_playout_timestamp_| is newer than the incoming RTP
3921     // timestamp, the resulting difference is negative, but is set to zero.
3922     // This can happen when a network glitch causes a packet to arrive late,
3923     // and during long comfort noise periods with clock drift.
3924     timestamp_diff_ms = 0;
3925   }
3926 
3927   uint16_t packet_delay_ms = (rtp_timestamp - _previousTimestamp) /
3928       (rtp_receive_frequency / 1000);
3929 
3930   _previousTimestamp = rtp_timestamp;
3931 
3932   if (timestamp_diff_ms == 0) return;
3933 
3934   {
3935     CriticalSectionScoped cs(video_sync_lock_.get());
3936 
3937     if (packet_delay_ms >= 10 && packet_delay_ms <= 60) {
3938       _recPacketDelayMs = packet_delay_ms;
3939     }
3940 
3941     if (_average_jitter_buffer_delay_us == 0) {
3942       _average_jitter_buffer_delay_us = timestamp_diff_ms * 1000;
3943       return;
3944     }
3945 
3946     // Filter average delay value using exponential filter (alpha is
3947     // 7/8). We derive 1000 *_average_jitter_buffer_delay_us here (reduces
3948     // risk of rounding error) and compensate for it in GetDelayEstimate()
3949     // later.
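    // I.e. avg_us <- (7 * avg_us + 1000 * diff_ms) / 8, with the +500 rounding
    // the division to the nearest microsecond.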
3950     _average_jitter_buffer_delay_us = (_average_jitter_buffer_delay_us * 7 +
3951         1000 * timestamp_diff_ms + 500) / 8;
3952   }
3953 }
3954 
3955 void
3956 Channel::RegisterReceiveCodecsToRTPModule()
3957 {
3958     WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,_channelId),
3959                  "Channel::RegisterReceiveCodecsToRTPModule()");
3960 
3961     CodecInst codec;
3962     const uint8_t nSupportedCodecs = AudioCodingModule::NumberOfCodecs();
3963 
3964     for (int idx = 0; idx < nSupportedCodecs; idx++)
3965     {
3966         // Open up the RTP/RTCP receiver for all supported codecs
3967         if ((audio_coding_->Codec(idx, &codec) == -1) ||
3968             (rtp_receiver_->RegisterReceivePayload(
3969                 codec.plname,
3970                 codec.pltype,
3971                 codec.plfreq,
3972                 codec.channels,
3973                 (codec.rate < 0) ? 0 : codec.rate) == -1))
3974         {
3975             WEBRTC_TRACE(kTraceWarning,
3976                          kTraceVoice,
3977                          VoEId(_instanceId, _channelId),
3978                          "Channel::RegisterReceiveCodecsToRTPModule() unable"
3979                          " to register %s (%d/%d/%" PRIuS "/%d) to RTP/RTCP "
3980                          "receiver",
3981                          codec.plname, codec.pltype, codec.plfreq,
3982                          codec.channels, codec.rate);
3983         }
3984         else
3985         {
3986             WEBRTC_TRACE(kTraceInfo,
3987                          kTraceVoice,
3988                          VoEId(_instanceId, _channelId),
3989                          "Channel::RegisterReceiveCodecsToRTPModule() %s "
3990                          "(%d/%d/%" PRIuS "/%d) has been added to the RTP/RTCP "
3991                          "receiver",
3992                          codec.plname, codec.pltype, codec.plfreq,
3993                          codec.channels, codec.rate);
3994         }
3995     }
3996 }
3997 
3998 // Assuming this method is called with valid payload type.
3999 int Channel::SetRedPayloadType(int red_payload_type) {
4000   CodecInst codec;
4001   bool found_red = false;
4002 
4003   // Get default RED settings from the ACM database
4004   const int num_codecs = AudioCodingModule::NumberOfCodecs();
4005   for (int idx = 0; idx < num_codecs; idx++) {
4006     audio_coding_->Codec(idx, &codec);
4007     if (!STR_CASE_CMP(codec.plname, "RED")) {
4008       found_red = true;
4009       break;
4010     }
4011   }
4012 
4013   if (!found_red) {
4014     _engineStatisticsPtr->SetLastError(
4015         VE_CODEC_ERROR, kTraceError,
4016         "SetRedPayloadType() RED is not supported");
4017     return -1;
4018   }
4019 
4020   codec.pltype = red_payload_type;
4021   if (audio_coding_->RegisterSendCodec(codec) < 0) {
4022     _engineStatisticsPtr->SetLastError(
4023         VE_AUDIO_CODING_MODULE_ERROR, kTraceError,
4024         "SetRedPayloadType() RED registration in ACM module failed");
4025     return -1;
4026   }
4027 
4028   if (_rtpRtcpModule->SetSendREDPayloadType(red_payload_type) != 0) {
4029     _engineStatisticsPtr->SetLastError(
4030         VE_RTP_RTCP_MODULE_ERROR, kTraceError,
4031         "SetRedPayloadType() RED registration in RTP/RTCP module failed");
4032     return -1;
4033   }
4034   return 0;
4035 }
4036 
4037 int Channel::SetSendRtpHeaderExtension(bool enable, RTPExtensionType type,
4038                                        unsigned char id) {
4039   int error = 0;
4040   _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
4041   if (enable) {
4042     error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
4043   }
4044   return error;
4045 }
4046 
4047 int32_t Channel::GetPlayoutFrequency() {
4048   int32_t playout_frequency = audio_coding_->PlayoutFrequency();
4049   CodecInst current_receive_codec;
4050   if (audio_coding_->ReceiveCodec(&current_receive_codec) == 0) {
4051     if (STR_CASE_CMP("G722", current_receive_codec.plname) == 0) {
4052       // Even though the actual sampling rate for G.722 audio is
4053       // 16,000 Hz, the RTP clock rate for the G722 payload format is
4054       // 8,000 Hz because that value was erroneously assigned in
4055       // RFC 1890 and must remain unchanged for backward compatibility.
4056       playout_frequency = 8000;
4057     } else if (STR_CASE_CMP("opus", current_receive_codec.plname) == 0) {
4058       // We are resampling Opus internally to 32,000 Hz until all our
4059       // DSP routines can operate at 48,000 Hz, but the RTP clock
4060       // rate for the Opus payload format is standardized to 48,000 Hz,
4061       // because that is the maximum supported decoding sampling rate.
4062       playout_frequency = 48000;
4063     }
4064   }
4065   return playout_frequency;
4066 }
4067 
4068 int64_t Channel::GetRTT(bool allow_associate_channel) const {
4069   RtcpMode method = _rtpRtcpModule->RTCP();
4070   if (method == RtcpMode::kOff) {
4071     return 0;
4072   }
4073   std::vector<RTCPReportBlock> report_blocks;
4074   _rtpRtcpModule->RemoteRTCPStat(&report_blocks);
4075 
4076   int64_t rtt = 0;
4077   if (report_blocks.empty()) {
4078     if (allow_associate_channel) {
4079       CriticalSectionScoped lock(assoc_send_channel_lock_.get());
4080       Channel* channel = associate_send_channel_.channel();
4081       // Tries to get RTT from an associated channel. This is important for
4082       // receive-only channels.
4083       if (channel) {
4084         // To prevent infinite recursion and deadlock, calling GetRTT of
4085         // associate channel should always use "false" for argument:
4086         // |allow_associate_channel|.
4087         rtt = channel->GetRTT(false);
4088       }
4089     }
4090     return rtt;
4091   }
4092 
4093   uint32_t remoteSSRC = rtp_receiver_->SSRC();
4094   std::vector<RTCPReportBlock>::const_iterator it = report_blocks.begin();
4095   for (; it != report_blocks.end(); ++it) {
4096     if (it->remoteSSRC == remoteSSRC)
4097       break;
4098   }
4099   if (it == report_blocks.end()) {
4100     // We have not received packets with SSRC matching the report blocks.
4101     // To calculate RTT we try with the SSRC of the first report block.
4102     // This is very important for send-only channels where we don't know
4103     // the SSRC of the other end.
4104     remoteSSRC = report_blocks[0].remoteSSRC;
4105   }
4106 
4107   int64_t avg_rtt = 0;
4108   int64_t max_rtt = 0;
4109   int64_t min_rtt = 0;
4110   if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt)
4111       != 0) {
4112     return 0;
4113   }
4114   return rtt;
4115 }
4116 
4117 }  // namespace voe
4118 }  // namespace webrtc
4119