// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc_audio_renderer.h"

#include <algorithm>

#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "content/renderer/media/audio_device_factory.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_logging.h"
#include "media/audio/audio_output_device.h"
#include "media/audio/audio_parameters.h"
#include "media/audio/sample_rates.h"
#include "third_party/libjingle/source/talk/app/webrtc/mediastreaminterface.h"
#include "third_party/libjingle/source/talk/media/base/audiorenderer.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "media/audio/win/core_audio_util_win.h"
#endif

namespace content {

namespace {
// Supported hardware sample rates for the output side.
#if defined(OS_WIN) || defined(OS_MACOSX)
// AudioHardwareConfig::GetOutputSampleRate() asks the audio layer for its
// current sample rate (set by the user) on Windows and Mac OS X.  The rates
// listed below add restrictions and Initialize() will fail if the user
// selects any rate outside these ranges.
const int kValidOutputRates[] = {96000, 48000, 44100, 32000, 16000};
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
const int kValidOutputRates[] = {48000, 44100};
#elif defined(OS_ANDROID)
// TODO(leozwang): We want to use the native sampling rate on Android to
// achieve low latency; currently 16000 is used to work around audio problems
// on some Android devices.
const int kValidOutputRates[] = {48000, 44100, 16000};
#else
const int kValidOutputRates[] = {44100};
#endif
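
// For example, a Windows device configured for 22050 Hz output does not
// appear in kValidOutputRates above, so Initialize() will reject it.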

// TODO(xians): Merge the following code to WebRtcAudioCapturer, or remove.
enum AudioFramesPerBuffer {
  k160,
  k320,
  k440,
  k480,
  k640,
  k880,
  k960,
  k1440,
  k1920,
  kUnexpectedAudioBufferSize  // Must always be last!
};

// Helper method to convert integral values to their respective enum values
// above, or kUnexpectedAudioBufferSize if no match exists.
// We map 441 to k440 to avoid changes in the XML part for histograms.
// It is still possible to map the histogram result to the actual buffer size.
// See http://crbug.com/243450 for details.
AudioFramesPerBuffer AsAudioFramesPerBuffer(int frames_per_buffer) {
  switch (frames_per_buffer) {
    case 160: return k160;
    case 320: return k320;
    case 441: return k440;
    case 480: return k480;
    case 640: return k640;
    case 880: return k880;
    case 960: return k960;
    case 1440: return k1440;
    case 1920: return k1920;
  }
  return kUnexpectedAudioBufferSize;
}
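
// Records |param| in the WebRTC.AudioOutputFramesPerBuffer enumeration
// histogram when it matches one of the enum values above (e.g. 480 maps to
// k480); a non-standard size such as 512 is instead reported as a raw count
// in WebRTC.AudioOutputFramesPerBufferUnexpected.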
void AddHistogramFramesPerBuffer(int param) {
  AudioFramesPerBuffer afpb = AsAudioFramesPerBuffer(param);
  if (afpb != kUnexpectedAudioBufferSize) {
    UMA_HISTOGRAM_ENUMERATION("WebRTC.AudioOutputFramesPerBuffer",
                              afpb, kUnexpectedAudioBufferSize);
  } else {
    // Report unexpected buffer sizes using a unique histogram name.
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputFramesPerBufferUnexpected", param);
  }
}

// This is a simple wrapper class that's handed out to users of a shared
// WebRtcAudioRenderer instance.  This class maintains the per-user 'playing'
// and 'started' states to avoid problems related to incorrect usage which
// might violate the implementation assumptions inside WebRtcAudioRenderer
// (see the play reference count).
class SharedAudioRenderer : public MediaStreamAudioRenderer {
 public:
  // Callback type for notifications issued when Play(), Pause() or
  // SetVolume() is called (i.e., whenever the internal |playing_state_|
  // changes).
  typedef base::Callback<
      void(const scoped_refptr<webrtc::MediaStreamInterface>&,
           WebRtcAudioRenderer::PlayingState*)> OnPlayStateChanged;

  SharedAudioRenderer(
      const scoped_refptr<MediaStreamAudioRenderer>& delegate,
      const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
      const OnPlayStateChanged& on_play_state_changed)
      : delegate_(delegate), media_stream_(media_stream), started_(false),
        on_play_state_changed_(on_play_state_changed) {
    DCHECK(!on_play_state_changed_.is_null());
    DCHECK(media_stream_.get());
  }

 protected:
  virtual ~SharedAudioRenderer() {
    DCHECK(thread_checker_.CalledOnValidThread());
    DVLOG(1) << __FUNCTION__;
    Stop();
  }

  virtual void Start() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (started_)
      return;
    started_ = true;
    delegate_->Start();
  }

  virtual void Play() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK(started_);
    if (playing_state_.playing())
      return;
    playing_state_.set_playing(true);
    on_play_state_changed_.Run(media_stream_, &playing_state_);
  }

  virtual void Pause() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK(started_);
    if (!playing_state_.playing())
      return;
    playing_state_.set_playing(false);
    on_play_state_changed_.Run(media_stream_, &playing_state_);
  }

  virtual void Stop() OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    if (!started_)
      return;
    Pause();
    started_ = false;
    delegate_->Stop();
  }

  virtual void SetVolume(float volume) OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    DCHECK(volume >= 0.0f && volume <= 1.0f);
    playing_state_.set_volume(volume);
    on_play_state_changed_.Run(media_stream_, &playing_state_);
  }

  virtual base::TimeDelta GetCurrentRenderTime() const OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    return delegate_->GetCurrentRenderTime();
  }

  virtual bool IsLocalRenderer() const OVERRIDE {
    DCHECK(thread_checker_.CalledOnValidThread());
    return delegate_->IsLocalRenderer();
  }

 private:
  base::ThreadChecker thread_checker_;
  const scoped_refptr<MediaStreamAudioRenderer> delegate_;
  const scoped_refptr<webrtc::MediaStreamInterface> media_stream_;
  bool started_;
  WebRtcAudioRenderer::PlayingState playing_state_;
  OnPlayStateChanged on_play_state_changed_;
};

}  // namespace

WebRtcAudioRenderer::WebRtcAudioRenderer(
    const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
    int source_render_view_id,
    int source_render_frame_id,
    int session_id,
    int sample_rate,
    int frames_per_buffer)
    : state_(UNINITIALIZED),
      source_render_view_id_(source_render_view_id),
      source_render_frame_id_(source_render_frame_id),
      session_id_(session_id),
      media_stream_(media_stream),
      source_(NULL),
      play_ref_count_(0),
      start_ref_count_(0),
      audio_delay_milliseconds_(0),
      fifo_delay_milliseconds_(0),
      sink_params_(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                   media::CHANNEL_LAYOUT_STEREO, 0, sample_rate, 16,
                   frames_per_buffer, media::AudioParameters::DUCKING) {
  WebRtcLogMessage(base::StringPrintf(
      "WAR::WAR. source_render_view_id=%d"
      ", session_id=%d, sample_rate=%d, frames_per_buffer=%d",
      source_render_view_id,
      session_id,
      sample_rate,
      frames_per_buffer));
}

WebRtcAudioRenderer::~WebRtcAudioRenderer() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, UNINITIALIZED);
}

bool WebRtcAudioRenderer::Initialize(WebRtcAudioRendererSource* source) {
  DVLOG(1) << "WebRtcAudioRenderer::Initialize()";
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  DCHECK_EQ(state_, UNINITIALIZED);
  DCHECK(source);
  DCHECK(!sink_.get());
  DCHECK(!source_);

  // WebRTC does not yet support rates higher than 96000 on the client side,
  // and 48000 is the preferred sample rate. Therefore, if 192000 is detected,
  // we change the rate to 48000 instead. The consequence is that the native
  // layer will be opened up at 192kHz but WebRTC will provide data at 48kHz,
  // which is then resampled by the audio converter on the browser side to
  // match the native audio layer.
  int sample_rate = sink_params_.sample_rate();
  DVLOG(1) << "Audio output hardware sample rate: " << sample_rate;
  if (sample_rate == 192000) {
    DVLOG(1) << "Resampling from 48000 to 192000 is required";
    sample_rate = 48000;
  }
  media::AudioSampleRate asr;
  if (media::ToAudioSampleRate(sample_rate, &asr)) {
    UMA_HISTOGRAM_ENUMERATION(
        "WebRTC.AudioOutputSampleRate", asr, media::kAudioSampleRateMax + 1);
  } else {
    UMA_HISTOGRAM_COUNTS("WebRTC.AudioOutputSampleRateUnexpected",
                         sample_rate);
  }

  // Verify that the reported output hardware sample rate is supported
  // on the current platform.
  if (std::find(kValidOutputRates,
                kValidOutputRates + arraysize(kValidOutputRates),
                sample_rate) ==
          kValidOutputRates + arraysize(kValidOutputRates)) {
    DLOG(ERROR) << sample_rate << " is not a supported output rate.";
    return false;
  }

  // Set up audio parameters for the source, i.e., the WebRTC client.

  // The WebRTC client only supports multiples of 10ms as buffer size, where
  // 10ms is preferred for the lowest possible delay.
  media::AudioParameters source_params;
  const int frames_per_10ms = (sample_rate / 100);
  DVLOG(1) << "Using WebRTC output buffer size: " << frames_per_10ms;

  source_params.Reset(media::AudioParameters::AUDIO_PCM_LOW_LATENCY,
                      sink_params_.channel_layout(), sink_params_.channels(), 0,
                      sample_rate, 16, frames_per_10ms);

  // Update audio parameters for the sink, i.e., the native audio output
  // stream. We strive to open up using native parameters to achieve best
  // possible performance and to ensure that no FIFO is needed on the browser
  // side to match the client request. Any mismatch between the source and the
  // sink is taken care of in this class instead, using a pull FIFO.

  // Use native output size as default.
  int frames_per_buffer = sink_params_.frames_per_buffer();
#if defined(OS_ANDROID)
  // TODO(henrika): Keep tuning this scheme, especially for low-latency cases.
  // It might not be possible to come up with the perfect solution using the
  // render side only.
  if (frames_per_buffer < 2 * frames_per_10ms) {
    // Examples of low-latency frame sizes and the resulting |buffer_size|:
    //  Nexus 7     : 240 audio frames => 2*480 = 960
    //  Nexus 10    : 256              => 2*441 = 882
    //  Galaxy Nexus: 144              => 2*441 = 882
    frames_per_buffer = 2 * frames_per_10ms;
    DVLOG(1) << "Low-latency output detected on Android";
  }
#endif
  DVLOG(1) << "Using sink output buffer size: " << frames_per_buffer;

  sink_params_.Reset(sink_params_.format(), sink_params_.channel_layout(),
                     sink_params_.channels(), 0, sample_rate, 16,
                     frames_per_buffer);

  // Create a FIFO if re-buffering is required to match the source input with
  // the sink request. The source acts as provider here and the sink as
  // consumer.
  fifo_delay_milliseconds_ = 0;
  if (source_params.frames_per_buffer() != sink_params_.frames_per_buffer()) {
    DVLOG(1) << "Rebuffering from " << source_params.frames_per_buffer()
             << " to " << sink_params_.frames_per_buffer();
    audio_fifo_.reset(new media::AudioPullFifo(
        source_params.channels(),
        source_params.frames_per_buffer(),
        base::Bind(
            &WebRtcAudioRenderer::SourceCallback,
            base::Unretained(this))));

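    // The FIFO adds latency equal to the number of surplus frames the sink
    // requests beyond what the source delivers per callback. For example, a
    // 960-frame sink buffer fed by 480-frame source buffers at 48000 Hz adds
    // 480 * 1000 / 48000 = 10 ms of delay.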
    if (sink_params_.frames_per_buffer() > source_params.frames_per_buffer()) {
      // Compute the delay in milliseconds directly; converting the duration
      // of a single frame to an integral number of milliseconds first would
      // truncate it to zero for all realistic sample rates.
      fifo_delay_milliseconds_ =
          (sink_params_.frames_per_buffer() -
           source_params.frames_per_buffer()) *
          base::Time::kMillisecondsPerSecond /
          source_params.sample_rate();
    }
  }

  source_ = source;

  // Configure the audio rendering client and start rendering.
  sink_ = AudioDeviceFactory::NewOutputDevice(
      source_render_view_id_, source_render_frame_id_);

  DCHECK_GE(session_id_, 0);
  sink_->InitializeWithSessionId(sink_params_, this, session_id_);

  sink_->Start();

  // User must call Play() before any audio can be heard.
  state_ = PAUSED;

  AddHistogramFramesPerBuffer(source_params.frames_per_buffer());

  return true;
}
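
// Typical lifetime of a renderer (illustrative sketch only; the real call
// sequence is driven by the owning WebRTC and media player layers):
//   scoped_refptr<WebRtcAudioRenderer> renderer = new WebRtcAudioRenderer(
//       stream, view_id, frame_id, session_id, sample_rate, buffer_size);
//   renderer->Initialize(source);  // Opens the sink; state becomes PAUSED.
//   renderer->Start();             // Increments the start reference count.
//   renderer->Play();              // Audio becomes audible.
//   ...
//   renderer->Pause();
//   renderer->Stop();              // Detaches from |source|, stops the sink.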

scoped_refptr<MediaStreamAudioRenderer>
WebRtcAudioRenderer::CreateSharedAudioRendererProxy(
    const scoped_refptr<webrtc::MediaStreamInterface>& media_stream) {
  content::SharedAudioRenderer::OnPlayStateChanged on_play_state_changed =
      base::Bind(&WebRtcAudioRenderer::OnPlayStateChanged, this);
  return new SharedAudioRenderer(this, media_stream, on_play_state_changed);
}

bool WebRtcAudioRenderer::IsStarted() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  return start_ref_count_ != 0;
}

void WebRtcAudioRenderer::Start() {
  DVLOG(1) << "WebRtcAudioRenderer::Start()";
  DCHECK(thread_checker_.CalledOnValidThread());
  ++start_ref_count_;
}

void WebRtcAudioRenderer::Play() {
  DVLOG(1) << "WebRtcAudioRenderer::Play()";
  DCHECK(thread_checker_.CalledOnValidThread());

  if (playing_state_.playing())
    return;

  playing_state_.set_playing(true);

  OnPlayStateChanged(media_stream_, &playing_state_);
}

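// Increments the play reference count shared by all SharedAudioRenderer
// proxies: with two proxies playing, |play_ref_count_| is 2, and pausing
// just one of them leaves the renderer in the PLAYING state.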
void WebRtcAudioRenderer::EnterPlayState() {
  DVLOG(1) << "WebRtcAudioRenderer::EnterPlayState()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_GT(start_ref_count_, 0) << "Did you forget to call Start()?";
  base::AutoLock auto_lock(lock_);
  if (state_ == UNINITIALIZED)
    return;

  DCHECK(play_ref_count_ == 0 || state_ == PLAYING);
  ++play_ref_count_;

  if (state_ != PLAYING) {
    state_ = PLAYING;

    if (audio_fifo_) {
      audio_delay_milliseconds_ = 0;
      audio_fifo_->Clear();
    }
  }
}

void WebRtcAudioRenderer::Pause() {
  DVLOG(1) << "WebRtcAudioRenderer::Pause()";
  DCHECK(thread_checker_.CalledOnValidThread());
  if (!playing_state_.playing())
    return;

  playing_state_.set_playing(false);

  OnPlayStateChanged(media_stream_, &playing_state_);
}

void WebRtcAudioRenderer::EnterPauseState() {
  DVLOG(1) << "WebRtcAudioRenderer::EnterPauseState()";
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK_GT(start_ref_count_, 0) << "Did you forget to call Start()?";
  base::AutoLock auto_lock(lock_);
  if (state_ == UNINITIALIZED)
    return;

  DCHECK_EQ(state_, PLAYING);
  DCHECK_GT(play_ref_count_, 0);
  if (!--play_ref_count_)
    state_ = PAUSED;
}

void WebRtcAudioRenderer::Stop() {
  DVLOG(1) << "WebRtcAudioRenderer::Stop()";
  DCHECK(thread_checker_.CalledOnValidThread());
  {
    base::AutoLock auto_lock(lock_);
    if (state_ == UNINITIALIZED)
      return;

    if (--start_ref_count_)
      return;

    DVLOG(1) << "Calling RemoveAudioRenderer and Stop().";

    source_->RemoveAudioRenderer(this);
    source_ = NULL;
    state_ = UNINITIALIZED;
  }

  // Make sure to stop the sink while _not_ holding the lock since the Render()
  // callback may currently be executing and try to grab the lock while we're
  // stopping the thread on which it runs.
  sink_->Stop();
}

void WebRtcAudioRenderer::SetVolume(float volume) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(volume >= 0.0f && volume <= 1.0f);

  playing_state_.set_volume(volume);
  OnPlayStateChanged(media_stream_, &playing_state_);
}

base::TimeDelta WebRtcAudioRenderer::GetCurrentRenderTime() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  return current_time_;
}

bool WebRtcAudioRenderer::IsLocalRenderer() const {
  return false;
}

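// Called on the AudioOutputDevice render thread, not the main render thread,
// which is why shared state is guarded by |lock_| here instead of
// |thread_checker_|.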
int WebRtcAudioRenderer::Render(media::AudioBus* audio_bus,
                                int audio_delay_milliseconds) {
  base::AutoLock auto_lock(lock_);
  if (!source_)
    return 0;

  DVLOG(2) << "WebRtcAudioRenderer::Render()";
  DVLOG(2) << "audio_delay_milliseconds: " << audio_delay_milliseconds;

  audio_delay_milliseconds_ = audio_delay_milliseconds;

  if (audio_fifo_)
    audio_fifo_->Consume(audio_bus, audio_bus->frames());
  else
    SourceCallback(0, audio_bus);

  return (state_ == PLAYING) ? audio_bus->frames() : 0;
}

void WebRtcAudioRenderer::OnRenderError() {
  NOTIMPLEMENTED();
  LOG(ERROR) << "OnRenderError()";
}

// Called by AudioPullFifo when more data is necessary.
void WebRtcAudioRenderer::SourceCallback(
    int fifo_frame_delay, media::AudioBus* audio_bus) {
  DVLOG(2) << "WebRtcAudioRenderer::SourceCallback("
           << fifo_frame_delay << ", "
           << audio_bus->frames() << ")";

  int output_delay_milliseconds = audio_delay_milliseconds_;
  output_delay_milliseconds += fifo_delay_milliseconds_;
  DVLOG(2) << "output_delay_milliseconds: " << output_delay_milliseconds;

  // We need to keep render data for the |source_| regardless of |state_|,
  // otherwise the data will be buffered up inside |source_|.
  source_->RenderData(audio_bus, sink_params_.sample_rate(),
                      output_delay_milliseconds,
                      &current_time_);

  // Avoid filling up the audio bus if we are not playing; instead
  // return here and ensure that the returned value in Render() is 0.
  if (state_ != PLAYING)
    audio_bus->Zero();
}

void WebRtcAudioRenderer::UpdateSourceVolume(
    webrtc::AudioSourceInterface* source) {
  DCHECK(thread_checker_.CalledOnValidThread());

  // Note: If there are no playing audio renderers, then the volume will be
  // set to 0.0.
  float volume = 0.0f;

  SourcePlayingStates::iterator entry = source_playing_states_.find(source);
  if (entry != source_playing_states_.end()) {
    PlayingStates& states = entry->second;
    for (PlayingStates::const_iterator it = states.begin();
         it != states.end(); ++it) {
      if ((*it)->playing())
        volume += (*it)->volume();
    }
  }

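  // The volumes of all playing states are summed: e.g. two playing renderers
  // at 0.5 and 0.7 request a combined source volume of 1.2, clamped below.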
  // The valid range for volume scaling of a remote webrtc source is
  // 0.0-10.0 where 1.0 is no attenuation/boost.
  DCHECK(volume >= 0.0f);
  if (volume > 10.0f)
    volume = 10.0f;

  DVLOG(1) << "Setting remote source volume: " << volume;
  source->SetVolume(volume);
}

bool WebRtcAudioRenderer::AddPlayingState(
    webrtc::AudioSourceInterface* source,
    PlayingState* state) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(state->playing());
  // Look up or add the |source| to the map.
  PlayingStates& array = source_playing_states_[source];
  if (std::find(array.begin(), array.end(), state) != array.end())
    return false;

  array.push_back(state);

  return true;
}

bool WebRtcAudioRenderer::RemovePlayingState(
    webrtc::AudioSourceInterface* source,
    PlayingState* state) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!state->playing());
  SourcePlayingStates::iterator found = source_playing_states_.find(source);
  if (found == source_playing_states_.end())
    return false;

  PlayingStates& array = found->second;
  PlayingStates::iterator state_it =
      std::find(array.begin(), array.end(), state);
  if (state_it == array.end())
    return false;

  array.erase(state_it);

  if (array.empty())
    source_playing_states_.erase(found);

  return true;
}

void WebRtcAudioRenderer::OnPlayStateChanged(
    const scoped_refptr<webrtc::MediaStreamInterface>& media_stream,
    PlayingState* state) {
  webrtc::AudioTrackVector tracks(media_stream->GetAudioTracks());
  for (webrtc::AudioTrackVector::iterator it = tracks.begin();
       it != tracks.end(); ++it) {
    webrtc::AudioSourceInterface* source = (*it)->GetSource();
    DCHECK(source);
    if (!state->playing()) {
      if (RemovePlayingState(source, state))
        EnterPauseState();
    } else if (AddPlayingState(source, state)) {
      EnterPlayState();
    }
    UpdateSourceVolume(source);
  }
}

}  // namespace content