1 /*
2 * Copyright (C) 2010, Google Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 */
24
25 #include "config.h"
26
27 #if ENABLE(WEB_AUDIO)
28
29 #include "AudioContext.h"
30
31 #include "ArrayBuffer.h"
32 #include "AudioBuffer.h"
33 #include "AudioBufferSourceNode.h"
34 #include "AudioChannelMerger.h"
35 #include "AudioChannelSplitter.h"
36 #include "AudioGainNode.h"
37 #include "AudioListener.h"
38 #include "AudioNodeInput.h"
39 #include "AudioNodeOutput.h"
40 #include "AudioPannerNode.h"
41 #include "ConvolverNode.h"
42 #include "DefaultAudioDestinationNode.h"
43 #include "DelayNode.h"
44 #include "Document.h"
45 #include "FFTFrame.h"
46 #include "HRTFDatabaseLoader.h"
47 #include "HRTFPanner.h"
48 #include "HighPass2FilterNode.h"
49 #include "JavaScriptAudioNode.h"
50 #include "LowPass2FilterNode.h"
51 #include "OfflineAudioCompletionEvent.h"
52 #include "OfflineAudioDestinationNode.h"
53 #include "PlatformString.h"
54 #include "RealtimeAnalyserNode.h"
55
56 #if DEBUG_AUDIONODE_REFERENCES
57 #include <stdio.h>
58 #endif
59
60 #include <wtf/OwnPtr.h>
61 #include <wtf/PassOwnPtr.h>
62 #include <wtf/RefCounted.h>
63
// Sentinel stored in m_graphOwnerThread when no thread currently owns the graph lock.
// FIXME: check the proper way to reference an undefined thread ID
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on node deletions per render quantum so that cleanup work stays
// small enough for the realtime audio thread (see deleteMarkedNodes()).
const unsigned MaxNodesToDeletePerQuantum = 10;
68
69 namespace WebCore {
70
create(Document * document)71 PassRefPtr<AudioContext> AudioContext::create(Document* document)
72 {
73 return adoptRef(new AudioContext(document));
74 }
75
createOfflineContext(Document * document,unsigned numberOfChannels,size_t numberOfFrames,double sampleRate)76 PassRefPtr<AudioContext> AudioContext::createOfflineContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
77 {
78 return adoptRef(new AudioContext(document, numberOfChannels, numberOfFrames, sampleRate));
79 }
80
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document, this)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_document(document)
    , m_destinationNode(0)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
{
    // Shared setup (listener, temporary buses) must run before any node is created.
    constructCommon();

    // The destination node drives rendering to the audio hardware.
    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // This sets in motion an asynchronous loading mechanism on another thread.
    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
    // when this has finished (see AudioDestinationNode).
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());

    // FIXME: for now default AudioContext does not need an explicit startRendering() call.
    // We may want to consider requiring it for symmetry with OfflineAudioContext
    m_destinationNode->startRendering();
}
107
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
    : ActiveDOMObject(document, this)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_document(document)
    , m_destinationNode(0)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(true)
{
    // Shared setup (listener, temporary buses) must run before any node is created.
    constructCommon();

    // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);

    // Create a new destination for offline rendering.
    // The render target buffer receives the finished audio and is later handed
    // to script via fireCompletionEvent().
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
}
129
// Setup shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // Note: because adoptRef() won't be called until we leave this constructor, but code in this constructor needs to reference this context,
    // relax the check.
    relaxAdoptionRequirement();

    // One-time global FFT initialization (idempotent across contexts).
    FFTFrame::initialize();

    m_listener = AudioListener::create();
    // Scratch buses (1- and 2-channel) used for intermediate mixing; sized to
    // one render quantum.
    m_temporaryMonoBus = adoptPtr(new AudioBus(1, AudioNode::ProcessingSizeInFrames));
    m_temporaryStereoBus = adoptPtr(new AudioBus(2, AudioNode::ProcessingSizeInFrames));
}
142
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    printf("%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
}
153
lazyInitialize()154 void AudioContext::lazyInitialize()
155 {
156 if (!m_isInitialized) {
157 // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
158 ASSERT(!m_isAudioThreadFinished);
159 if (!m_isAudioThreadFinished) {
160 if (m_destinationNode.get()) {
161 // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
162 // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
163 m_destinationNode->initialize();
164 }
165 m_isInitialized = true;
166 }
167 }
168 }
169
uninitialize()170 void AudioContext::uninitialize()
171 {
172 if (m_isInitialized) {
173 // This stops the audio thread and all audio rendering.
174 m_destinationNode->uninitialize();
175
176 // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
177 m_isAudioThreadFinished = true;
178
179 // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
180 m_destinationNode.clear();
181
182 // Get rid of the sources which may still be playing.
183 derefUnfinishedSourceNodes();
184
185 // Because the AudioBuffers are garbage collected, we can't delete them here.
186 // Instead, at least release the potentially large amount of allocated memory for the audio data.
187 // Note that we do this *after* the context is uninitialized and stops processing audio.
188 for (unsigned i = 0; i < m_allocatedBuffers.size(); ++i)
189 m_allocatedBuffers[i]->releaseMemory();
190 m_allocatedBuffers.clear();
191
192 m_isInitialized = false;
193 }
194 }
195
// Returns true once lazyInitialize() has started the rendering pipeline and
// uninitialize() has not yet been called.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
200
isRunnable() const201 bool AudioContext::isRunnable() const
202 {
203 if (!isInitialized())
204 return false;
205
206 // Check with the HRTF spatialization system to see if it's finished loading.
207 return m_hrtfDatabaseLoader->isLoaded();
208 }
209
// ActiveDOMObject override: the document is being destroyed, so drop our
// document pointer and shut down all rendering.
void AudioContext::stop()
{
    m_document = 0; // document is going away
    uninitialize();
}
215
// Returns the owning document; must not be called after stop() has cleared it
// (asserted below) — use hasDocument() to check first.
Document* AudioContext::document() const
{
    ASSERT(m_document);
    return m_document;
}
221
// True until stop() clears the document pointer during teardown.
bool AudioContext::hasDocument()
{
    return m_document;
}
226
// Keeps |buffer| alive (and tracked) so its memory can be released in bulk
// when the context is uninitialized.
void AudioContext::refBuffer(PassRefPtr<AudioBuffer> buffer)
{
    m_allocatedBuffers.append(buffer);
}
231
// Creates an empty AudioBuffer with the given channel count, length in
// frames, and sample rate.
PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, double sampleRate)
{
    return AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
}
236
createBuffer(ArrayBuffer * arrayBuffer,bool mixToMono)237 PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono)
238 {
239 ASSERT(arrayBuffer);
240 if (!arrayBuffer)
241 return 0;
242
243 return AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
244 }
245
// Creates a buffer-playback source node. Unlike the other factories, the
// context takes an extra reference so the node survives even if script drops
// it while it is still playing (released via derefFinishedSourceNodes()).
PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());

    refNode(node.get()); // context keeps reference until source has finished playing
    return node;
}
255
// Creates a script-processing node with the given buffer size. The context
// keeps its own reference so the node stays alive while callbacks are active.
PassRefPtr<JavaScriptAudioNode> AudioContext::createJavaScriptNode(size_t bufferSize)
{
    ASSERT(isMainThread());
    lazyInitialize();
    RefPtr<JavaScriptAudioNode> node = JavaScriptAudioNode::create(this, m_destinationNode->sampleRate(), bufferSize);

    refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
    return node;
}
265
createLowPass2Filter()266 PassRefPtr<LowPass2FilterNode> AudioContext::createLowPass2Filter()
267 {
268 ASSERT(isMainThread());
269 lazyInitialize();
270 return LowPass2FilterNode::create(this, m_destinationNode->sampleRate());
271 }
272
createHighPass2Filter()273 PassRefPtr<HighPass2FilterNode> AudioContext::createHighPass2Filter()
274 {
275 ASSERT(isMainThread());
276 lazyInitialize();
277 return HighPass2FilterNode::create(this, m_destinationNode->sampleRate());
278 }
279
createPanner()280 PassRefPtr<AudioPannerNode> AudioContext::createPanner()
281 {
282 ASSERT(isMainThread());
283 lazyInitialize();
284 return AudioPannerNode::create(this, m_destinationNode->sampleRate());
285 }
286
createConvolver()287 PassRefPtr<ConvolverNode> AudioContext::createConvolver()
288 {
289 ASSERT(isMainThread());
290 lazyInitialize();
291 return ConvolverNode::create(this, m_destinationNode->sampleRate());
292 }
293
createAnalyser()294 PassRefPtr<RealtimeAnalyserNode> AudioContext::createAnalyser()
295 {
296 ASSERT(isMainThread());
297 lazyInitialize();
298 return RealtimeAnalyserNode::create(this, m_destinationNode->sampleRate());
299 }
300
createGainNode()301 PassRefPtr<AudioGainNode> AudioContext::createGainNode()
302 {
303 ASSERT(isMainThread());
304 lazyInitialize();
305 return AudioGainNode::create(this, m_destinationNode->sampleRate());
306 }
307
createDelayNode()308 PassRefPtr<DelayNode> AudioContext::createDelayNode()
309 {
310 ASSERT(isMainThread());
311 lazyInitialize();
312 return DelayNode::create(this, m_destinationNode->sampleRate());
313 }
314
createChannelSplitter()315 PassRefPtr<AudioChannelSplitter> AudioContext::createChannelSplitter()
316 {
317 ASSERT(isMainThread());
318 lazyInitialize();
319 return AudioChannelSplitter::create(this, m_destinationNode->sampleRate());
320 }
321
createChannelMerger()322 PassRefPtr<AudioChannelMerger> AudioContext::createChannelMerger()
323 {
324 ASSERT(isMainThread());
325 lazyInitialize();
326 return AudioChannelMerger::create(this, m_destinationNode->sampleRate());
327 }
328
// Called on the audio thread when a source node finishes playing; the node is
// queued so its context reference can be dropped later (derefFinishedSourceNodes()).
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
334
derefFinishedSourceNodes()335 void AudioContext::derefFinishedSourceNodes()
336 {
337 ASSERT(isGraphOwner());
338 ASSERT(isAudioThread() || isAudioThreadFinished());
339 for (unsigned i = 0; i < m_finishedNodes.size(); i++)
340 derefNode(m_finishedNodes[i]);
341
342 m_finishedNodes.clear();
343 }
344
// Takes a connection reference on |node| and records it so the context can
// release it later. Main thread only; takes the graph lock for the update.
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
353
derefNode(AudioNode * node)354 void AudioContext::derefNode(AudioNode* node)
355 {
356 ASSERT(isGraphOwner());
357
358 node->deref(AudioNode::RefTypeConnection);
359
360 for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
361 if (node == m_referencedNodes[i]) {
362 m_referencedNodes.remove(i);
363 break;
364 }
365 }
366 }
367
derefUnfinishedSourceNodes()368 void AudioContext::derefUnfinishedSourceNodes()
369 {
370 ASSERT(isMainThread() && isAudioThreadFinished());
371 for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
372 m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);
373
374 m_referencedNodes.clear();
375 }
376
lock(bool & mustReleaseLock)377 void AudioContext::lock(bool& mustReleaseLock)
378 {
379 // Don't allow regular lock in real-time audio thread.
380 ASSERT(isMainThread());
381
382 ThreadIdentifier thisThread = currentThread();
383
384 if (thisThread == m_graphOwnerThread) {
385 // We already have the lock.
386 mustReleaseLock = false;
387 } else {
388 // Acquire the lock.
389 m_contextGraphMutex.lock();
390 m_graphOwnerThread = thisThread;
391 mustReleaseLock = true;
392 }
393 }
394
// Non-blocking acquisition of the graph lock, intended for the realtime audio
// thread. Returns true if the lock is held on return; |mustReleaseLock| tells
// the caller whether a matching unlock() is required.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
427
// Releases the graph lock. The owner is reset to the sentinel *before*
// unlocking so no window exists where another thread holds the mutex while
// m_graphOwnerThread still names this thread.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
435
// True when called from the realtime audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
440
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
445
// Queues a finishDeref() that couldn't complete because the graph lock wasn't
// available; processed later by handleDeferredFinishDerefs(). Audio thread only.
void AudioContext::addDeferredFinishDeref(AudioNode* node, AudioNode::RefType refType)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(AudioContext::RefInfo(node, refType));
}
451
// Called by the audio thread at the start of each render quantum to pick up
// graph changes made on the main thread.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioNodeInputs and AudioNodeOutputs.
        handleDirtyAudioNodeInputs();
        handleDirtyAudioNodeOutputs();

        if (mustReleaseLock)
            unlock();
    }
}
468
// Called by the audio thread at the end of each render quantum to perform
// bounded cleanup: deferred derefs, releasing finished sources, deleting
// marked nodes, and refreshing dirty inputs/outputs — in that order, since
// derefs can mark nodes for deletion and deletion prunes the dirty sets.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Finally actually delete.
        deleteMarkedNodes();

        // Fixup the state of any dirty AudioNodeInputs and AudioNodeOutputs.
        handleDirtyAudioNodeInputs();
        handleDirtyAudioNodeOutputs();

        if (mustReleaseLock)
            unlock();
    }
}
495
handleDeferredFinishDerefs()496 void AudioContext::handleDeferredFinishDerefs()
497 {
498 ASSERT(isAudioThread() && isGraphOwner());
499 for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
500 AudioNode* node = m_deferredFinishDerefList[i].m_node;
501 AudioNode::RefType refType = m_deferredFinishDerefList[i].m_refType;
502 node->finishDeref(refType);
503 }
504
505 m_deferredFinishDerefList.clear();
506 }
507
// Queues |node| for deletion in deleteMarkedNodes(); requires the graph lock.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());
    m_nodesToDelete.append(node);
}
513
deleteMarkedNodes()514 void AudioContext::deleteMarkedNodes()
515 {
516 ASSERT(isGraphOwner() || isAudioThreadFinished());
517
518 // Note: deleting an AudioNode can cause m_nodesToDelete to grow.
519 size_t nodesDeleted = 0;
520 while (size_t n = m_nodesToDelete.size()) {
521 AudioNode* node = m_nodesToDelete[n - 1];
522 m_nodesToDelete.removeLast();
523
524 // Before deleting the node, clear out any AudioNodeInputs from m_dirtyAudioNodeInputs.
525 unsigned numberOfInputs = node->numberOfInputs();
526 for (unsigned i = 0; i < numberOfInputs; ++i)
527 m_dirtyAudioNodeInputs.remove(node->input(i));
528
529 // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
530 unsigned numberOfOutputs = node->numberOfOutputs();
531 for (unsigned i = 0; i < numberOfOutputs; ++i)
532 m_dirtyAudioNodeOutputs.remove(node->output(i));
533
534 // Finally, delete it.
535 delete node;
536
537 // Don't delete too many nodes per render quantum since we don't want to do too much work in the realtime audio thread.
538 if (++nodesDeleted > MaxNodesToDeletePerQuantum)
539 break;
540 }
541 }
542
// Records that |input|'s connections changed; its rendering state will be
// refreshed in handleDirtyAudioNodeInputs(). Requires the graph lock.
void AudioContext::markAudioNodeInputDirty(AudioNodeInput* input)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeInputs.add(input);
}
548
// Records that |output|'s connections changed; its rendering state will be
// refreshed in handleDirtyAudioNodeOutputs(). Requires the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
554
handleDirtyAudioNodeInputs()555 void AudioContext::handleDirtyAudioNodeInputs()
556 {
557 ASSERT(isGraphOwner());
558
559 for (HashSet<AudioNodeInput*>::iterator i = m_dirtyAudioNodeInputs.begin(); i != m_dirtyAudioNodeInputs.end(); ++i)
560 (*i)->updateRenderingState();
561
562 m_dirtyAudioNodeInputs.clear();
563 }
564
handleDirtyAudioNodeOutputs()565 void AudioContext::handleDirtyAudioNodeOutputs()
566 {
567 ASSERT(isGraphOwner());
568
569 for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
570 (*i)->updateRenderingState();
571
572 m_dirtyAudioNodeOutputs.clear();
573 }
574
// EventTarget override: events fired by this context execute in the owning
// document's script context.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return document();
}
579
// EventTarget downcast hook (pre-RTTI WebKit idiom): identifies this target
// as an AudioContext.
AudioContext* AudioContext::toAudioContext()
{
    return this;
}
584
// Kicks off rendering on the destination node; used by OfflineAudioContext
// (realtime contexts start automatically in the constructor).
void AudioContext::startRendering()
{
    destination()->startRendering();
}
589
fireCompletionEvent()590 void AudioContext::fireCompletionEvent()
591 {
592 ASSERT(isMainThread());
593 if (!isMainThread())
594 return;
595
596 AudioBuffer* renderedBuffer = m_renderTarget.get();
597
598 ASSERT(renderedBuffer);
599 if (!renderedBuffer)
600 return;
601
602 // Avoid firing the event if the document has already gone away.
603 if (hasDocument()) {
604 // Call the offline rendering completion event listener.
605 dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
606 }
607 }
608
609 } // namespace WebCore
610
611 #endif // ENABLE(WEB_AUDIO)
612