/*
 * Copyright (C) 2010, Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AudioNode_h
#define AudioNode_h

#include "modules/EventTargetModules.h"
#include "platform/audio/AudioBus.h"
#include "wtf/Forward.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/RefPtr.h"
#include "wtf/Vector.h"

#define DEBUG_AUDIONODE_REFERENCES 0

namespace blink {

class AudioContext;
class AudioNodeInput;
class AudioNodeOutput;
class AudioParam;
class ExceptionState;

// An AudioNode is the basic building block for handling audio within an AudioContext.
// It may be an audio source, an intermediate processing module, or an audio destination.
// Each AudioNode can have inputs and/or outputs. An AudioSourceNode has no inputs and a single output.
// An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware.
// Most processing nodes such as filters will have one input and one output, although multiple inputs and outputs are possible.

class AudioNode : public RefCountedGarbageCollectedWillBeGarbageCollectedFinalized<AudioNode>, public EventTargetWithInlineData {
    DEFINE_EVENT_TARGET_REFCOUNTING_WILL_BE_REMOVED(RefCountedGarbageCollected<AudioNode>);
    DEFINE_WRAPPERTYPEINFO();
    WILL_BE_USING_GARBAGE_COLLECTED_MIXIN(AudioNode);
public:
    enum { ProcessingSizeInFrames = 128 };

    AudioNode(AudioContext*, float sampleRate);
    virtual ~AudioNode();
    // dispose() is called just before the destructor. This must be called in
    // the main thread, and while the graph lock is held.
    virtual void dispose();

    static unsigned instanceCount() { return s_instanceCount; }

    AudioContext* context() { return m_context.get(); }
    const AudioContext* context() const { return m_context.get(); }

    enum NodeType {
        NodeTypeUnknown,
        NodeTypeDestination,
        NodeTypeOscillator,
        NodeTypeAudioBufferSource,
        NodeTypeMediaElementAudioSource,
        NodeTypeMediaStreamAudioDestination,
        NodeTypeMediaStreamAudioSource,
        NodeTypeJavaScript,
        NodeTypeBiquadFilter,
        NodeTypePanner,
        NodeTypeConvolver,
        NodeTypeDelay,
        NodeTypeGain,
        NodeTypeChannelSplitter,
        NodeTypeChannelMerger,
        NodeTypeAnalyser,
        NodeTypeDynamicsCompressor,
        NodeTypeWaveShaper,
        NodeTypeEnd
    };

    enum ChannelCountMode {
        Max,
        ClampedMax,
        Explicit
    };

    NodeType nodeType() const { return m_nodeType; }
    String nodeTypeName() const;
    void setNodeType(NodeType);

    // This object has been connected to another object. It might already have
    // existing connections from others.
    // This function must be called after acquiring a connection reference.
    void makeConnection();
    // This object will be disconnected from another object. It might still have
    // remaining connections from others.
    // This function must be called before releasing a connection reference.
    void breakConnection();

    // Can be called from the main thread or the context's audio thread. It must be called while the context's graph lock is held.
    void breakConnectionWithLock();

    // The AudioNodeInput(s) (if any) will already have their input data available when process() is called.
    // Subclasses will take this input data and put the results in the AudioBus(es) of their AudioNodeOutput(s) (if any).
    // Called from context's audio thread.
    virtual void process(size_t framesToProcess) = 0;

    // No significant resources should be allocated until initialize() is called.
    // Processing may not occur until a node is initialized.
    virtual void initialize();
    virtual void uninitialize();

    bool isInitialized() const { return m_isInitialized; }

    unsigned numberOfInputs() const { return m_inputs.size(); }
    unsigned numberOfOutputs() const { return m_outputs.size(); }

    AudioNodeInput* input(unsigned);
    AudioNodeOutput* output(unsigned);

    // Called from main thread by corresponding JavaScript methods.
    virtual void connect(AudioNode*, unsigned outputIndex, unsigned inputIndex, ExceptionState&);
    void connect(AudioParam*, unsigned outputIndex, ExceptionState&);
    virtual void disconnect(unsigned outputIndex, ExceptionState&);

    virtual float sampleRate() const { return m_sampleRate; }

    // processIfNecessary() is called by our output(s) when the rendering graph needs this AudioNode to process.
    // This method ensures that the AudioNode will only process once per rendering time quantum even if it's called repeatedly.
    // This handles the case of "fanout" where an output is connected to multiple AudioNode inputs.
    // Called from context's audio thread.
    void processIfNecessary(size_t framesToProcess);
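
    // Illustrative usage sketch (assumes |source|, |filterA|, |filterB| and
    // |exceptionState| already exist; any ExceptionState sink would do):
    //
    //   source->connect(filterA, 0 /* outputIndex */, 0 /* inputIndex */, exceptionState);
    //   source->connect(filterB, 0, 0, exceptionState); // fanout: one output feeding two inputs
    //
    // During rendering, processIfNecessary() ensures |source| still processes only
    // once per ProcessingSizeInFrames (128-frame) quantum, even though both
    // |filterA| and |filterB| pull from the same output.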

    // Called when a new connection has been made to one of our inputs or the connection's number of channels has changed.
    // This potentially gives us enough information to perform a lazy initialization or, if necessary, a re-initialization.
    // Called from main thread.
    virtual void checkNumberOfChannelsForInput(AudioNodeInput*);

#if DEBUG_AUDIONODE_REFERENCES
    static void printNodeCounts();
#endif

    // tailTime() is the length of time (not counting latency time) where non-zero output may occur after continuous silent input.
    virtual double tailTime() const = 0;
    // latencyTime() is the length of time it takes for non-zero output to appear after non-zero input is provided. This only applies to
    // processing delay which is an artifact of the processing algorithm chosen and is *not* part of the intrinsic desired effect. For
    // example, a "delay" effect is expected to delay the signal, and thus would not be considered latency.
    virtual double latencyTime() const = 0;

    // propagatesSilence() should return true if the node will generate silent output when given silent input. By default, AudioNode
    // will take tailTime() and latencyTime() into account when determining whether the node will propagate silence.
    virtual bool propagatesSilence() const;
    bool inputsAreSilent();
    void silenceOutputs();
    void unsilenceOutputs();

    void enableOutputsIfNecessary();
    void disableOutputsIfNecessary();

    unsigned long channelCount();
    virtual void setChannelCount(unsigned long, ExceptionState&);

    String channelCountMode();
    void setChannelCountMode(const String&, ExceptionState&);

    String channelInterpretation();
    void setChannelInterpretation(const String&, ExceptionState&);

    ChannelCountMode internalChannelCountMode() const { return m_channelCountMode; }
    AudioBus::ChannelInterpretation internalChannelInterpretation() const { return m_channelInterpretation; }

    // EventTarget
    virtual const AtomicString& interfaceName() const OVERRIDE FINAL;
    virtual ExecutionContext* executionContext() const OVERRIDE FINAL;

    void updateChannelCountMode();

    virtual void trace(Visitor*) OVERRIDE;

protected:
    // Inputs and outputs must be created before the AudioNode is initialized.
    void addInput();
    void addOutput(AudioNodeOutput*);

    // Called by processIfNecessary() to cause all parts of the rendering graph connected to us to process.
    // Each rendering quantum, the audio data for each of the AudioNode's inputs will be available after this method is called.
    // Called from context's audio thread.
    virtual void pullInputs(size_t framesToProcess);

    // Force all inputs to take any channel interpretation changes into account.
    void updateChannelsForInputs();

private:
    volatile bool m_isInitialized;
    NodeType m_nodeType;
    Member<AudioContext> m_context;
    float m_sampleRate;
    HeapVector<Member<AudioNodeInput> > m_inputs;
    HeapVector<Member<AudioNodeOutput> > m_outputs;

    double m_lastProcessingTime;
    double m_lastNonSilentTime;

    volatile int m_connectionRefCount;

    bool m_isDisabled;

#if DEBUG_AUDIONODE_REFERENCES
    static bool s_isNodeCountInitialized;
    static int s_nodeCount[NodeTypeEnd];
#endif
    static unsigned s_instanceCount;
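
    // Illustrative note on the ChannelCountMode values declared above, assuming
    // the usual Web Audio up/down-mixing rules: with channelCount == 2 and a
    // 6-channel connection arriving at an input,
    //   Max        -> mix to 6 channels (channelCount is ignored),
    //   ClampedMax -> mix to min(6, channelCount) == 2 channels,
    //   Explicit   -> mix to exactly channelCount == 2 channels.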

    // The new channel count mode that will be used to set the actual mode in the pre or post
    // rendering phase.
    ChannelCountMode m_newChannelCountMode;

protected:
    unsigned m_channelCount;
    ChannelCountMode m_channelCountMode;
    AudioBus::ChannelInterpretation m_channelInterpretation;
};

} // namespace blink

#endif // AudioNode_h
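
// Illustrative sketch of a minimal AudioNode subclass, showing which virtuals a
// simple one-input, one-output processing node is expected to implement. The
// class name "IdentityNode" is hypothetical; AudioNodeOutput::create(), the
// bus() accessors on AudioNodeInput/AudioNodeOutput, and AudioBus::copyFrom()
// are assumed to match contemporaneous Blink code.
//
//   class IdentityNode FINAL : public AudioNode {
//   public:
//       IdentityNode(AudioContext* context, float sampleRate)
//           : AudioNode(context, sampleRate)
//       {
//           addInput();
//           addOutput(AudioNodeOutput::create(this, 1)); // assumed factory helper
//           setNodeType(NodeTypeUnknown);
//           initialize();
//       }
//
//       // Copy the already-pulled input data straight to the output bus.
//       virtual void process(size_t framesToProcess) OVERRIDE
//       {
//           output(0)->bus()->copyFrom(*input(0)->bus());
//       }
//
//       // No ringing after silent input and no algorithmic processing delay.
//       virtual double tailTime() const OVERRIDE { return 0; }
//       virtual double latencyTime() const OVERRIDE { return 0; }
//   };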