1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25 #include "config.h"
26
27 #if ENABLE(WEB_AUDIO)
28
29 #include "modules/webaudio/AudioContext.h"
30
31 #include "bindings/v8/ExceptionMessages.h"
32 #include "bindings/v8/ExceptionState.h"
33 #include "core/dom/Document.h"
34 #include "core/dom/ExceptionCode.h"
35 #include "core/html/HTMLMediaElement.h"
36 #include "core/inspector/ScriptCallStack.h"
37 #include "platform/audio/FFTFrame.h"
38 #include "platform/audio/HRTFPanner.h"
39 #include "modules/mediastream/MediaStream.h"
40 #include "modules/webaudio/AnalyserNode.h"
41 #include "modules/webaudio/AudioBuffer.h"
42 #include "modules/webaudio/AudioBufferCallback.h"
43 #include "modules/webaudio/AudioBufferSourceNode.h"
44 #include "modules/webaudio/AudioListener.h"
45 #include "modules/webaudio/AudioNodeInput.h"
46 #include "modules/webaudio/AudioNodeOutput.h"
47 #include "modules/webaudio/BiquadFilterNode.h"
48 #include "modules/webaudio/ChannelMergerNode.h"
49 #include "modules/webaudio/ChannelSplitterNode.h"
50 #include "modules/webaudio/ConvolverNode.h"
51 #include "modules/webaudio/DefaultAudioDestinationNode.h"
52 #include "modules/webaudio/DelayNode.h"
53 #include "modules/webaudio/DynamicsCompressorNode.h"
54 #include "modules/webaudio/GainNode.h"
55 #include "modules/webaudio/MediaElementAudioSourceNode.h"
56 #include "modules/webaudio/MediaStreamAudioDestinationNode.h"
57 #include "modules/webaudio/MediaStreamAudioSourceNode.h"
58 #include "modules/webaudio/OfflineAudioCompletionEvent.h"
59 #include "modules/webaudio/OfflineAudioContext.h"
60 #include "modules/webaudio/OfflineAudioDestinationNode.h"
61 #include "modules/webaudio/OscillatorNode.h"
62 #include "modules/webaudio/PannerNode.h"
63 #include "modules/webaudio/PeriodicWave.h"
64 #include "modules/webaudio/ScriptProcessorNode.h"
65 #include "modules/webaudio/WaveShaperNode.h"
66
67 #if DEBUG_AUDIONODE_REFERENCES
68 #include <stdio.h>
69 #endif
70
71 #include "wtf/ArrayBuffer.h"
72 #include "wtf/Atomics.h"
73 #include "wtf/PassOwnPtr.h"
74 #include "wtf/text/WTFString.h"
75
// FIXME: check the proper way to reference an undefined thread ID
// Sentinel value stored in m_graphOwnerThread when no thread holds the graph lock.
const int UndefinedThreadIdentifier = 0xffffffff;
78
79 namespace WebCore {
80
isSampleRateRangeGood(float sampleRate)81 bool AudioContext::isSampleRateRangeGood(float sampleRate)
82 {
83 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
84 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
85 return sampleRate >= 44100 && sampleRate <= 96000;
86 }
87
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 6;
// Count of live hardware (realtime) contexts; updated in create()/initialize()/
// uninitialize(), which all run on the main thread.
unsigned AudioContext::s_hardwareContextCount = 0;
91
// Creates a realtime AudioContext for |document|, enforcing the global cap on
// simultaneous hardware contexts. Throws SyntaxError and returns null when
// the cap is reached.
PassRefPtrWillBeRawPtr<AudioContext> AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            SyntaxError,
            "number of hardware contexts reached maximum (" + String::number(MaxHardwareContexts) + ").");
        return nullptr;
    }

    RefPtrWillBeRawPtr<AudioContext> audioContext(adoptRefWillBeThreadSafeRefCountedGarbageCollected(new AudioContext(&document)));
    // ActiveDOMObject bookkeeping: suspend immediately if the document is suspended.
    audioContext->suspendIfNeeded();
    return audioContext.release();
}
106
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_destinationNode(nullptr)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
{
    ScriptWrappable::init(this);

    // Realtime contexts render through the default (hardware) destination node.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    initialize();
}
127
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isCleared(false)
    , m_isInitialized(false)
    , m_destinationNode(nullptr)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(true)
{
    ScriptWrappable::init(this);

    // Create a new destination for offline rendering.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    // AudioBuffer::create() may fail; initialize() tolerates a null
    // destination node in that case and leaves the context uninitialized.
    if (m_renderTarget.get())
        m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());

    initialize();
}
151
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // If an update was still pending, bring the rendering copy up to date so
    // the following assert checks the current size.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
167
// Lazily initializes the context: FFT tables, the listener, and the
// destination node. For realtime contexts this also starts the audio thread.
void AudioContext::initialize()
{
    if (isInitialized())
        return;

    FFTFrame::initialize();
    m_listener = AudioListener::create();

    // The offline constructor may have failed to create a destination node;
    // the context then stays uninitialized.
    if (m_destinationNode.get()) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            m_destinationNode->startRendering();
            ++s_hardwareContextCount;
        }

        m_isInitialized = true;
    }
}
191
// Final teardown after the audio thread is stopped: drops the destination
// node and synchronously deletes every node still queued for deletion.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode.clear();

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    do {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
        deleteMarkedNodes();
    } while (m_nodesToDelete.size());

    m_isCleared = true;
}
207
// Stops all audio rendering and releases references held for still-playing
// source nodes. Main thread only; a no-op when already uninitialized.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!isInitialized())
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Realtime contexts count against the global hardware-context limit.
    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
228
stopDispatch(void * userData)229 void AudioContext::stopDispatch(void* userData)
230 {
231 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
232 ASSERT(context);
233 if (!context)
234 return;
235
236 context->uninitialize();
237 context->clear();
238 }
239
// ActiveDOMObject override invoked when the execution context is being torn
// down; defers the actual shutdown to the main-thread stopDispatch() task.
void AudioContext::stop()
{
    // Usually ExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    // Don't call uninitialize() immediately here because the ExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(stopDispatch, this);
}
253
// ActiveDOMObject override: keeps the context (and its JS wrapper) alive
// until clear() has run.
bool AudioContext::hasPendingActivity() const
{
    // According to spec AudioContext must die only after page navigates.
    return !m_isCleared;
}
259
createBuffer(unsigned numberOfChannels,size_t numberOfFrames,float sampleRate,ExceptionState & exceptionState)260 PassRefPtrWillBeRawPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionState& exceptionState)
261 {
262 RefPtrWillBeRawPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate, exceptionState);
263
264 return audioBuffer;
265 }
266
decodeAudioData(ArrayBuffer * audioData,PassOwnPtr<AudioBufferCallback> successCallback,PassOwnPtr<AudioBufferCallback> errorCallback,ExceptionState & exceptionState)267 void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassOwnPtr<AudioBufferCallback> successCallback, PassOwnPtr<AudioBufferCallback> errorCallback, ExceptionState& exceptionState)
268 {
269 if (!audioData) {
270 exceptionState.throwDOMException(
271 SyntaxError,
272 "invalid ArrayBuffer for audioData.");
273 return;
274 }
275 m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
276 }
277
createBufferSource()278 PassRefPtrWillBeRawPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
279 {
280 ASSERT(isMainThread());
281 RefPtrWillBeRawPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
282
283 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
284 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
285 refNode(node.get());
286
287 return node;
288 }
289
createMediaElementSource(HTMLMediaElement * mediaElement,ExceptionState & exceptionState)290 PassRefPtrWillBeRawPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionState& exceptionState)
291 {
292 ASSERT(isMainThread());
293 if (!mediaElement) {
294 exceptionState.throwDOMException(
295 InvalidStateError,
296 "invalid HTMLMedialElement.");
297 return nullptr;
298 }
299
300 // First check if this media element already has a source node.
301 if (mediaElement->audioSourceNode()) {
302 exceptionState.throwDOMException(
303 InvalidStateError,
304 "invalid HTMLMediaElement.");
305 return nullptr;
306 }
307
308 RefPtrWillBeRawPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);
309
310 mediaElement->setAudioSourceNode(node.get());
311
312 refNode(node.get()); // context keeps reference until node is disconnected
313 return node;
314 }
315
createMediaStreamSource(MediaStream * mediaStream,ExceptionState & exceptionState)316 PassRefPtrWillBeRawPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
317 {
318 ASSERT(isMainThread());
319 if (!mediaStream) {
320 exceptionState.throwDOMException(
321 InvalidStateError,
322 "invalid MediaStream source");
323 return nullptr;
324 }
325
326 MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
327 if (audioTracks.isEmpty()) {
328 exceptionState.throwDOMException(
329 InvalidStateError,
330 "MediaStream has no audio track");
331 return nullptr;
332 }
333
334 // Use the first audio track in the media stream.
335 RefPtrWillBeRawPtr<MediaStreamTrack> audioTrack = audioTracks[0];
336 OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
337 RefPtrWillBeRawPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack.get(), provider.release());
338
339 // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
340 node->setFormat(2, sampleRate());
341
342 refNode(node.get()); // context keeps reference until node is disconnected
343 return node;
344 }
345
createMediaStreamDestination()346 PassRefPtrWillBeRawPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
347 {
348 // Set number of output channels to stereo by default.
349 return MediaStreamAudioDestinationNode::create(this, 2);
350 }
351
createScriptProcessor(ExceptionState & exceptionState)352 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(ExceptionState& exceptionState)
353 {
354 // Set number of input/output channels to stereo by default.
355 return createScriptProcessor(0, 2, 2, exceptionState);
356 }
357
createScriptProcessor(size_t bufferSize,ExceptionState & exceptionState)358 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionState& exceptionState)
359 {
360 // Set number of input/output channels to stereo by default.
361 return createScriptProcessor(bufferSize, 2, 2, exceptionState);
362 }
363
createScriptProcessor(size_t bufferSize,size_t numberOfInputChannels,ExceptionState & exceptionState)364 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionState& exceptionState)
365 {
366 // Set number of output channels to stereo by default.
367 return createScriptProcessor(bufferSize, numberOfInputChannels, 2, exceptionState);
368 }
369
createScriptProcessor(size_t bufferSize,size_t numberOfInputChannels,size_t numberOfOutputChannels,ExceptionState & exceptionState)370 PassRefPtrWillBeRawPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionState& exceptionState)
371 {
372 ASSERT(isMainThread());
373 RefPtrWillBeRawPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
374
375 if (!node.get()) {
376 if (!numberOfInputChannels && !numberOfOutputChannels) {
377 exceptionState.throwDOMException(
378 IndexSizeError,
379 "number of input channels and output channels cannot both be zero.");
380 } else if (numberOfInputChannels > AudioContext::maxNumberOfChannels()) {
381 exceptionState.throwDOMException(
382 IndexSizeError,
383 "number of input channels (" + String::number(numberOfInputChannels)
384 + ") exceeds maximum ("
385 + String::number(AudioContext::maxNumberOfChannels()) + ").");
386 } else if (numberOfOutputChannels > AudioContext::maxNumberOfChannels()) {
387 exceptionState.throwDOMException(
388 IndexSizeError,
389 "number of output channels (" + String::number(numberOfInputChannels)
390 + ") exceeds maximum ("
391 + String::number(AudioContext::maxNumberOfChannels()) + ").");
392 } else {
393 exceptionState.throwDOMException(
394 IndexSizeError,
395 "buffer size (" + String::number(bufferSize)
396 + ") must be a power of two between 256 and 16384.");
397 }
398 return nullptr;
399 }
400
401 refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
402 return node;
403 }
404
createBiquadFilter()405 PassRefPtrWillBeRawPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
406 {
407 ASSERT(isMainThread());
408 return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
409 }
410
// Creates a WaveShaperNode (nonlinear distortion) on this context.
PassRefPtrWillBeRawPtr<WaveShaperNode> AudioContext::createWaveShaper()
{
    ASSERT(isMainThread());
    return WaveShaperNode::create(this);
}
416
createPanner()417 PassRefPtrWillBeRawPtr<PannerNode> AudioContext::createPanner()
418 {
419 ASSERT(isMainThread());
420 return PannerNode::create(this, m_destinationNode->sampleRate());
421 }
422
createConvolver()423 PassRefPtrWillBeRawPtr<ConvolverNode> AudioContext::createConvolver()
424 {
425 ASSERT(isMainThread());
426 return ConvolverNode::create(this, m_destinationNode->sampleRate());
427 }
428
createDynamicsCompressor()429 PassRefPtrWillBeRawPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
430 {
431 ASSERT(isMainThread());
432 return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
433 }
434
createAnalyser()435 PassRefPtrWillBeRawPtr<AnalyserNode> AudioContext::createAnalyser()
436 {
437 ASSERT(isMainThread());
438 return AnalyserNode::create(this, m_destinationNode->sampleRate());
439 }
440
createGain()441 PassRefPtrWillBeRawPtr<GainNode> AudioContext::createGain()
442 {
443 ASSERT(isMainThread());
444 return GainNode::create(this, m_destinationNode->sampleRate());
445 }
446
createDelay(ExceptionState & exceptionState)447 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(ExceptionState& exceptionState)
448 {
449 const double defaultMaxDelayTime = 1;
450 return createDelay(defaultMaxDelayTime, exceptionState);
451 }
452
createDelay(double maxDelayTime,ExceptionState & exceptionState)453 PassRefPtrWillBeRawPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionState& exceptionState)
454 {
455 ASSERT(isMainThread());
456 RefPtrWillBeRawPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, exceptionState);
457 if (exceptionState.hadException())
458 return nullptr;
459 return node;
460 }
461
createChannelSplitter(ExceptionState & exceptionState)462 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionState& exceptionState)
463 {
464 const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
465 return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, exceptionState);
466 }
467
createChannelSplitter(size_t numberOfOutputs,ExceptionState & exceptionState)468 PassRefPtrWillBeRawPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionState& exceptionState)
469 {
470 ASSERT(isMainThread());
471
472 RefPtrWillBeRawPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
473
474 if (!node.get()) {
475 exceptionState.throwDOMException(
476 IndexSizeError,
477 "number of outputs (" + String::number(numberOfOutputs)
478 + ") must be between 1 and "
479 + String::number(AudioContext::maxNumberOfChannels()) + ".");
480 return nullptr;
481 }
482
483 return node;
484 }
485
createChannelMerger(ExceptionState & exceptionState)486 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionState& exceptionState)
487 {
488 const unsigned ChannelMergerDefaultNumberOfInputs = 6;
489 return createChannelMerger(ChannelMergerDefaultNumberOfInputs, exceptionState);
490 }
491
createChannelMerger(size_t numberOfInputs,ExceptionState & exceptionState)492 PassRefPtrWillBeRawPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionState& exceptionState)
493 {
494 ASSERT(isMainThread());
495
496 RefPtrWillBeRawPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
497
498 if (!node.get()) {
499 exceptionState.throwDOMException(
500 IndexSizeError,
501 "number of inputs (" + String::number(numberOfInputs)
502 + ") must be between 1 and "
503 + String::number(AudioContext::maxNumberOfChannels()) + ".");
504 return nullptr;
505 }
506
507 return node;
508 }
509
createOscillator()510 PassRefPtrWillBeRawPtr<OscillatorNode> AudioContext::createOscillator()
511 {
512 ASSERT(isMainThread());
513
514 RefPtrWillBeRawPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
515
516 // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
517 // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
518 refNode(node.get());
519
520 return node;
521 }
522
createPeriodicWave(Float32Array * real,Float32Array * imag,ExceptionState & exceptionState)523 PassRefPtrWillBeRawPtr<PeriodicWave> AudioContext::createPeriodicWave(Float32Array* real, Float32Array* imag, ExceptionState& exceptionState)
524 {
525 ASSERT(isMainThread());
526
527 if (!real) {
528 exceptionState.throwDOMException(
529 SyntaxError,
530 "invalid real array");
531 return nullptr;
532 }
533
534 if (!imag) {
535 exceptionState.throwDOMException(
536 SyntaxError,
537 "invalid imaginary array");
538 return nullptr;
539 }
540
541 if (real->length() != imag->length()) {
542 exceptionState.throwDOMException(
543 IndexSizeError,
544 "length of real array (" + String::number(real->length())
545 + ") and length of imaginary array (" + String::number(imag->length())
546 + ") must match.");
547 return nullptr;
548 }
549
550 if (real->length() > 4096) {
551 exceptionState.throwDOMException(
552 IndexSizeError,
553 "length of real array (" + String::number(real->length())
554 + ") exceeds allowed maximum of 4096");
555 return nullptr;
556 }
557
558 if (imag->length() > 4096) {
559 exceptionState.throwDOMException(
560 IndexSizeError,
561 "length of imaginary array (" + String::number(imag->length())
562 + ") exceeds allowed maximum of 4096");
563 return nullptr;
564 }
565
566 return PeriodicWave::create(sampleRate(), real, imag);
567 }
568
// Audio-thread notification that a scheduled source node finished playing;
// the node is queued so derefFinishedSourceNodes() drops its connection
// reference during handlePostRenderTasks().
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
574
// Drops the context's connection reference for every node queued by
// notifyNodeFinishedProcessing(). Audio thread, graph lock held.
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread());
    for (unsigned i = 0; i < m_finishedNodes.size(); i++)
        derefNode(m_finishedNodes[i]);

    m_finishedNodes.clear();
}
584
// Adds a connection reference to |node| and records it so the reference can
// be released later via derefNode(). Takes the graph lock.
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
593
// Releases the connection reference added by refNode() and removes the node
// from the referenced list. Caller must hold the graph lock.
void AudioContext::derefNode(AudioNode* node)
{
    ASSERT(isGraphOwner());

    node->deref(AudioNode::RefTypeConnection);

    // Remove the first (and presumably only) matching entry.
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
        if (node == m_referencedNodes[i]) {
            m_referencedNodes.remove(i);
            break;
        }
    }
}
607
// Releases the connection references for every still-referenced node during
// uninitialize(); these sources may not have finished playing.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread());
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
616
// Acquires the context graph mutex (main thread only). On return
// |mustReleaseLock| tells the caller whether it owes an unlock() — false when
// this thread already owned the lock (re-entrant use).
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
634
// Non-blocking lock attempt for the audio thread. Returns true when the graph
// lock is held on return (newly acquired or already owned); |mustReleaseLock|
// tells the caller whether it owes an unlock().
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread);

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
667
// Releases the graph lock; only legal on the thread recorded as owner.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    // Clear the recorded owner before releasing the mutex.
    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
675
// True when called from the dedicated audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
680
// True when the calling thread currently holds the context graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
685
// Queues a finish-deref that could not be completed immediately; processed by
// handleDeferredFinishDerefs() on the next render quantum.
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
691
// Audio-thread housekeeping run before each render quantum.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
710
// Audio-thread housekeeping run after each render quantum: deferred derefs,
// finished-source cleanup, deletion scheduling, and dirty-state fixups.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
740
// Completes finish-derefs that were queued by addDeferredFinishDeref() when
// the graph lock was unavailable. Audio thread, graph lock held.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
        AudioNode* node = m_deferredFinishDerefList[i];
        node->finishDeref(AudioNode::RefTypeConnection);
    }

    m_deferredFinishDerefList.clear();
}
751
// Queues |node| for deletion on the main thread. Caller holds the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());

    // Once uninitialized there is no audio thread to run scheduleNodeDeletion(),
    // so queue straight onto the list deleteMarkedNodes() consumes.
    if (!isInitialized())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
767
// Audio-thread side of node deletion: moves marked nodes to the delete list
// and schedules deleteMarkedNodes() on the main thread (at most one pending
// request at a time).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = isInitialized() && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
788
deleteMarkedNodesDispatch(void * userData)789 void AudioContext::deleteMarkedNodesDispatch(void* userData)
790 {
791 AudioContext* context = reinterpret_cast<AudioContext*>(userData);
792 ASSERT(context);
793 if (!context)
794 return;
795
796 context->deleteMarkedNodes();
797 context->deref();
798 }
799
// Main-thread deletion of all nodes queued in m_nodesToDelete; also scrubs
// their inputs/outputs from the dirty sets so we never touch freed memory.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtrWillBeRawPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);

        // Pop from the back to avoid shifting the remaining elements.
        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));
#if ENABLE(OILPAN)
            // Finally, clear the keep alive handle that keeps this
            // object from being collected.
            node->clearKeepAlive();
#else
            // Finally, delete it.
            delete node;
#endif
        }
        m_isDeletionScheduled = false;
    }
}
834
// Marks a summing junction whose rendering state must be refreshed by
// handleDirtyAudioSummingJunctions(). Caller holds the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
840
// Removes a junction from the dirty set (e.g. when it is going away) so it is
// never updated after destruction. Main thread; takes the graph lock.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
847
// Marks an output whose rendering state must be refreshed by
// handleDirtyAudioNodeOutputs(). Caller holds the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
853
// Refreshes the rendering state of every dirty summing junction, then clears
// the set. Caller holds the graph lock.
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());

    for (HashSet<AudioSummingJunction* >::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
        (*i)->updateRenderingState();

    m_dirtySummingJunctions.clear();
}
863
// Refreshes the rendering state of every dirty node output, then clears the
// set. Caller holds the graph lock.
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());

    for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
        (*i)->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}
873
addAutomaticPullNode(AudioNode * node)874 void AudioContext::addAutomaticPullNode(AudioNode* node)
875 {
876 ASSERT(isGraphOwner());
877
878 if (!m_automaticPullNodes.contains(node)) {
879 m_automaticPullNodes.add(node);
880 m_automaticPullNodesNeedUpdating = true;
881 }
882 }
883
removeAutomaticPullNode(AudioNode * node)884 void AudioContext::removeAutomaticPullNode(AudioNode* node)
885 {
886 ASSERT(isGraphOwner());
887
888 if (m_automaticPullNodes.contains(node)) {
889 m_automaticPullNodes.remove(node);
890 m_automaticPullNodesNeedUpdating = true;
891 }
892 }
893
updateAutomaticPullNodes()894 void AudioContext::updateAutomaticPullNodes()
895 {
896 ASSERT(isGraphOwner());
897
898 if (m_automaticPullNodesNeedUpdating) {
899 // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
900 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
901
902 unsigned j = 0;
903 for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
904 AudioNode* output = *i;
905 m_renderingAutomaticPullNodes[j] = output;
906 }
907
908 m_automaticPullNodesNeedUpdating = false;
909 }
910 }
911
// Pulls each registered automatic-pull node for |framesToProcess| frames.
// Runs on the audio thread against the snapshot built by
// updateAutomaticPullNodes().
void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
{
    ASSERT(isAudioThread());

    for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
        m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
}
919
// EventTarget override: identifies this interface for event dispatch.
const AtomicString& AudioContext::interfaceName() const
{
    return EventTargetNames::AudioContext;
}
924
executionContext() const925 ExecutionContext* AudioContext::executionContext() const
926 {
927 return m_isStopScheduled ? 0 : ActiveDOMObject::executionContext();
928 }
929
// Starts rendering on the destination node.
void AudioContext::startRendering()
{
    destination()->startRendering();
}
934
// Offline rendering finished: dispatches an OfflineAudioCompletionEvent
// carrying the rendered buffer, unless the document has already gone away.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    if (!isMainThread())
        return;

    AudioBuffer* renderedBuffer = m_renderTarget.get();

    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (executionContext()) {
        // Call the offline rendering completion event listener.
        dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
953
// Oilpan: traces the garbage-collected members of this context.
void AudioContext::trace(Visitor* visitor)
{
    visitor->trace(m_renderTarget);
    visitor->trace(m_destinationNode);
    visitor->trace(m_listener);
    visitor->trace(m_dirtySummingJunctions);
    EventTargetWithInlineData::trace(visitor);
}
962
963 } // namespace WebCore
964
965 #endif // ENABLE(WEB_AUDIO)
966