/*
 * Copyright (C) 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "modules/speech/SpeechSynthesis.h"

#include "bindings/v8/ExceptionState.h"
#include "core/dom/ExecutionContext.h"
#include "modules/speech/SpeechSynthesisEvent.h"
#include "platform/speech/PlatformSpeechSynthesisVoice.h"
#include "wtf/CurrentTime.h"

namespace WebCore {

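// Factory used by the bindings layer. As the name of the
// adoptRefCountedGarbageCollectedWillBeNoop() wrapper suggests, the object is
// ref-counted during the Oilpan transition and the adopting ref is expected to
// become a no-op once Oilpan garbage collection is fully enabled.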
SpeechSynthesis* SpeechSynthesis::create(ExecutionContext* context)
{
    return adoptRefCountedGarbageCollectedWillBeNoop(new SpeechSynthesis(context));
}

SpeechSynthesis::SpeechSynthesis(ExecutionContext* context)
    : ContextLifecycleObserver(context)
    , m_platformSpeechSynthesizer(PlatformSpeechSynthesizer::create(this))
    , m_isPaused(false)
{
    ScriptWrappable::init(this);
}

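// Replaces the platform synthesizer created in the constructor, e.g. so tests
// can inject a mock implementation in place of the real one.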
void SpeechSynthesis::setPlatformSynthesizer(PlatformSpeechSynthesizer* synthesizer)
{
    m_platformSpeechSynthesizer = synthesizer;
}

ExecutionContext* SpeechSynthesis::executionContext() const
{
    return ContextLifecycleObserver::executionContext();
}

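// Called by the platform layer when the set of available voices changes.
// Drop the cached list so getVoices() rebuilds it, and notify the page via a
// 'voiceschanged' event.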
void SpeechSynthesis::voicesDidChange()
{
    m_voiceList.clear();
    if (executionContext() && !executionContext()->activeDOMObjectsAreStopped())
        dispatchEvent(Event::create(EventTypeNames::voiceschanged));
}

const HeapVector<Member<SpeechSynthesisVoice> >& SpeechSynthesis::getVoices()
{
    if (m_voiceList.size())
        return m_voiceList;

    // If the voiceList is empty, that's the cue to get the voices from the platform again.
    const HeapVector<Member<PlatformSpeechSynthesisVoice> >& platformVoices = m_platformSpeechSynthesizer->voiceList();
    size_t voiceCount = platformVoices.size();
    for (size_t k = 0; k < voiceCount; k++)
        m_voiceList.append(SpeechSynthesisVoice::create(platformVoices[k].get()));

    return m_voiceList;
}

bool SpeechSynthesis::speaking() const
{
    // If we have a current speech utterance, we're considered to be in a
    // speaking state. This state is independent of whether the utterance
    // happens to be paused.
    return currentSpeechUtterance();
}

bool SpeechSynthesis::pending() const
{
    // This is true if there are any utterances that have not yet started
    // speaking. Since the head of the queue is the utterance currently being
    // spoken, that means the queue holds more than one entry.
    return m_utteranceQueue.size() > 1;
}

bool SpeechSynthesis::paused() const
{
    return m_isPaused;
}

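// Starts speaking the utterance at the head of the queue; by invariant, the
// head of m_utteranceQueue is the utterance currently being spoken.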
void SpeechSynthesis::startSpeakingImmediately()
{
    SpeechSynthesisUtterance* utterance = currentSpeechUtterance();
    ASSERT(utterance);

    utterance->setStartTime(monotonicallyIncreasingTime());
    m_isPaused = false;
    m_platformSpeechSynthesizer->speak(utterance->platformUtterance());
}

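// Web-exposed entry point; a page reaches this with script along the lines of:
//   speechSynthesis.speak(new SpeechSynthesisUtterance("hello"));
// Utterances are queued and spoken in order.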
void SpeechSynthesis::speak(SpeechSynthesisUtterance* utterance, ExceptionState& exceptionState)
{
    if (!utterance) {
        exceptionState.throwTypeError("Invalid utterance argument");
        return;
    }

    m_utteranceQueue.append(utterance);

    // If the queue was empty, speak this immediately.
    if (m_utteranceQueue.size() == 1)
        startSpeakingImmediately();
}

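// Web-exposed; corresponds to speechSynthesis.cancel() in script.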
void SpeechSynthesis::cancel()
{
    // Remove all the items from the utterance queue. The platform
    // may still have references to some of these utterances and may
    // fire events on them asynchronously.
    m_utteranceQueue.clear();
    m_platformSpeechSynthesizer->cancel();
}

void SpeechSynthesis::pause()
{
    if (!m_isPaused)
        m_platformSpeechSynthesizer->pause();
}

void SpeechSynthesis::resume()
{
    if (!currentSpeechUtterance())
        return;
    m_platformSpeechSynthesizer->resume();
}

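// Dispatches a SpeechSynthesisEvent on the utterance. elapsedTime is measured
// against the start time recorded in startSpeakingImmediately(); both sides
// use the monotonic clock so the difference is meaningful.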
void SpeechSynthesis::fireEvent(const AtomicString& type, SpeechSynthesisUtterance* utterance, unsigned long charIndex, const String& name)
{
    if (executionContext() && !executionContext()->activeDOMObjectsAreStopped())
        utterance->dispatchEvent(SpeechSynthesisEvent::create(type, charIndex, (monotonicallyIncreasingTime() - utterance->startTime()), name));
}

void SpeechSynthesis::handleSpeakingCompleted(SpeechSynthesisUtterance* utterance, bool errorOccurred)
{
    ASSERT(utterance);

    bool didJustFinishCurrentUtterance = false;
    // If the utterance that completed was the one we're currently speaking,
    // remove it from the queue and start speaking the next one.
    if (utterance == currentSpeechUtterance()) {
        m_utteranceQueue.removeFirst();
        didJustFinishCurrentUtterance = true;
    }

    // Always fire the event, because the platform may have asynchronously
    // sent an event on an utterance before it got the message that we
    // canceled it, and we should always report to the user what actually
    // happened.
    fireEvent(errorOccurred ? EventTypeNames::error : EventTypeNames::end, utterance, 0, String());

    // Start the next utterance if we just finished one and one was pending.
    if (didJustFinishCurrentUtterance && !m_utteranceQueue.isEmpty())
        startSpeakingImmediately();
}

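// Platform callback fired as synthesis crosses word and sentence boundaries;
// forwarded to the page as 'boundary' events carrying the character index.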
void SpeechSynthesis::boundaryEventOccurred(PlatformSpeechSynthesisUtterance* utterance, SpeechBoundary boundary, unsigned charIndex)
{
    DEFINE_STATIC_LOCAL(const String, wordBoundaryString, ("word"));
    DEFINE_STATIC_LOCAL(const String, sentenceBoundaryString, ("sentence"));

    switch (boundary) {
    case SpeechWordBoundary:
        fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, wordBoundaryString);
        break;
    case SpeechSentenceBoundary:
        fireEvent(EventTypeNames::boundary, static_cast<SpeechSynthesisUtterance*>(utterance->client()), charIndex, sentenceBoundaryString);
        break;
    default:
        ASSERT_NOT_REACHED();
    }
}

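// The did*Speaking callbacks below implement the PlatformSpeechSynthesizerClient
// interface: the platform reports lifecycle transitions and we forward them to
// the page as the corresponding DOM events on the utterance.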
void SpeechSynthesis::didStartSpeaking(PlatformSpeechSynthesisUtterance* utterance)
{
    if (utterance->client())
        fireEvent(EventTypeNames::start, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didPauseSpeaking(PlatformSpeechSynthesisUtterance* utterance)
{
    m_isPaused = true;
    if (utterance->client())
        fireEvent(EventTypeNames::pause, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didResumeSpeaking(PlatformSpeechSynthesisUtterance* utterance)
{
    m_isPaused = false;
    if (utterance->client())
        fireEvent(EventTypeNames::resume, static_cast<SpeechSynthesisUtterance*>(utterance->client()), 0, String());
}

void SpeechSynthesis::didFinishSpeaking(PlatformSpeechSynthesisUtterance* utterance)
{
    if (utterance->client())
        handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), false);
}

void SpeechSynthesis::speakingErrorOccurred(PlatformSpeechSynthesisUtterance* utterance)
{
    if (utterance->client())
        handleSpeakingCompleted(static_cast<SpeechSynthesisUtterance*>(utterance->client()), true);
}

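// By invariant, the utterance at the head of the queue is the one currently
// being spoken (or about to be).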
SpeechSynthesisUtterance* SpeechSynthesis::currentSpeechUtterance() const
{
    if (!m_utteranceQueue.isEmpty())
        return m_utteranceQueue.first().get();
    return 0;
}

const AtomicString& SpeechSynthesis::interfaceName() const
{
    return EventTargetNames::SpeechSynthesis;
}

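// Oilpan tracing: keep the synthesizer, cached voices, and queued utterances
// alive across garbage collection.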
void SpeechSynthesis::trace(Visitor* visitor)
{
    visitor->trace(m_platformSpeechSynthesizer);
    visitor->trace(m_voiceList);
    visitor->trace(m_utteranceQueue);
    PlatformSpeechSynthesizerClient::trace(visitor);
    EventTargetWithInlineData::trace(visitor);
}

} // namespace WebCore