/external/webrtc/common_audio/vad/ |
D | vad_core_unittest.cc |
      59  int16_t speech[kMaxFrameLength]; in TEST_F() local
      66  memset(speech, 0, sizeof(speech)); in TEST_F()
      70  EXPECT_EQ(0, WebRtcVad_CalcVad8khz(self, speech, kFrameLengths[j])); in TEST_F()
      73  EXPECT_EQ(0, WebRtcVad_CalcVad16khz(self, speech, kFrameLengths[j])); in TEST_F()
      76  EXPECT_EQ(0, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j])); in TEST_F()
      79  EXPECT_EQ(0, WebRtcVad_CalcVad48khz(self, speech, kFrameLengths[j])); in TEST_F()
      86  speech[i] = static_cast<int16_t>(i * i); in TEST_F()
      90  EXPECT_EQ(1, WebRtcVad_CalcVad8khz(self, speech, kFrameLengths[j])); in TEST_F()
      93  EXPECT_EQ(1, WebRtcVad_CalcVad16khz(self, speech, kFrameLengths[j])); in TEST_F()
      96  EXPECT_EQ(1, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j])); in TEST_F()
      [all …]
|
D | vad_filterbank_unittest.cc |
      38  int16_t speech[kMaxFrameLength]; in TEST_F() local
      40  speech[i] = static_cast<int16_t>(i * i); in TEST_F()
      48  WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j], in TEST_F()
      60  memset(speech, 0, sizeof(speech)); in TEST_F()
      64  EXPECT_EQ(0, WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j], in TEST_F()
      75  speech[i] = 1; in TEST_F()
      80  EXPECT_EQ(0, WebRtcVad_CalculateFeatures(self, speech, kFrameLengths[j], in TEST_F()
|
D | vad_unittest.cc |
      66  int16_t speech[kMaxFrameLength]; in TEST_F() local
      68  speech[i] = static_cast<int16_t>(i * i); in TEST_F()
      75  WebRtcVad_Process(nullptr, kRates[0], speech, kFrameLengths[0])); in TEST_F()
      81  EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[0], speech, kFrameLengths[0])); in TEST_F()
      99  EXPECT_EQ(-1, WebRtcVad_Process(handle, 9999, speech, kFrameLengths[0])); in TEST_F()
     109  EXPECT_EQ(1, WebRtcVad_Process(handle, kRates[i], speech, in TEST_F()
     112  EXPECT_EQ(-1, WebRtcVad_Process(handle, kRates[i], speech, in TEST_F()
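
As a quick map of the public API these tests exercise, here is a minimal C sketch of one VAD decision. It assumes the webrtc_vad.h entry points where WebRtcVad_Create() returns the instance and WebRtcVad_Process() returns 1 (speech), 0 (non-speech), or -1 (error), consistent with the expectations above; treat it as an illustrative sketch, not canonical usage.

    /* Minimal sketch of a single VAD decision (assumes webrtc_vad.h). */
    #include <stdint.h>
    #include <stdio.h>
    #include "common_audio/vad/include/webrtc_vad.h"

    int main(void) {
      VadInst* handle = WebRtcVad_Create();
      if (handle == NULL || WebRtcVad_Init(handle) != 0)
        return 1;
      int16_t speech[240] = {0};      /* 30 ms frame at 8 kHz */
      int vad = WebRtcVad_Process(handle, 8000, speech, 240);
      printf("decision: %d\n", vad);  /* 1 speech, 0 non-speech, -1 error */
      WebRtcVad_Free(handle);
      return 0;
    }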
|
/external/webrtc/modules/audio_coding/codecs/pcm16b/ |
D | pcm16b.c |
      13  size_t WebRtcPcm16b_Encode(const int16_t* speech, in WebRtcPcm16b_Encode() argument
      18  uint16_t s = speech[i]; in WebRtcPcm16b_Encode()
      27  int16_t* speech) { in WebRtcPcm16b_Decode() argument
      30  speech[i] = encoded[2 * i] << 8 | encoded[2 * i + 1]; in WebRtcPcm16b_Decode()
|
D | pcm16b.h |
      40  size_t WebRtcPcm16b_Encode(const int16_t* speech, size_t len, uint8_t* encoded);
      57  size_t WebRtcPcm16b_Decode(const uint8_t* encoded, size_t len, int16_t* speech);
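
The matched lines expose the whole codec: "PCM16B" is plain linear 16-bit PCM serialized big-endian, high byte first. Below is a self-contained re-implementation of that packing for illustration only (the real functions live in pcm16b.c; the return values, bytes written on encode and samples decoded on decode, are inferred from the size_t signatures above):

    /* Illustration of the big-endian byte packing visible in pcm16b.c. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    static size_t encode_pcm16b(const int16_t* speech, size_t len, uint8_t* encoded) {
      for (size_t i = 0; i < len; ++i) {
        uint16_t s = (uint16_t)speech[i];
        encoded[2 * i] = (uint8_t)(s >> 8);        /* high byte first */
        encoded[2 * i + 1] = (uint8_t)(s & 0xFF);  /* then low byte */
      }
      return 2 * len;  /* bytes written */
    }

    static size_t decode_pcm16b(const uint8_t* encoded, size_t len, int16_t* speech) {
      size_t samples = len / 2;
      for (size_t i = 0; i < samples; ++i)
        speech[i] = (int16_t)((uint16_t)(encoded[2 * i] << 8) | encoded[2 * i + 1]);
      return samples;  /* samples decoded */
    }

    int main(void) {
      int16_t in[3] = {-1, 0, 12345};
      uint8_t bytes[6];
      int16_t out[3];
      encode_pcm16b(in, 3, bytes);
      decode_pcm16b(bytes, 6, out);
      for (int i = 0; i < 3; ++i)
        assert(in[i] == out[i]);  /* lossless round trip */
      return 0;
    }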
|
/external/robolectric/shadows/framework/src/main/java/org/robolectric/shadows/ |
D | ShadowSpeechRecognizer.java |
      13  import android.speech.IRecognitionService;
      14  import android.speech.RecognitionListener;
      15  import android.speech.SpeechRecognizer;
     166  supportListener instanceof android.speech.RecognitionSupportCallback); in checkRecognitionSupport()
     182  Preconditions.checkArgument(recognitionSupport instanceof android.speech.RecognitionSupport); in triggerSupportResult()
     185  ((android.speech.RecognitionSupportCallback) recognitionSupportCallback) in triggerSupportResult()
     186  .onSupportResult((android.speech.RecognitionSupport) recognitionSupport)); in triggerSupportResult()
     193  ((android.speech.RecognitionSupportCallback) recognitionSupportCallback) in triggerSupportError()
|
D | ShadowTextToSpeech.java |
      12  import android.speech.tts.TextToSpeech;
      13  import android.speech.tts.TextToSpeech.Engine;
      14  import android.speech.tts.UtteranceProgressListener;
      15  import android.speech.tts.Voice;
|
/external/sl4a/Common/src/com/googlecode/android_scripting/facade/ |
D | SpeechRecognitionFacade.java |
      20  import android.speech.RecognizerIntent;
      52  new Intent(android.speech.RecognizerIntent.ACTION_RECOGNIZE_SPEECH); in recognizeSpeech()
      68  if (data.hasExtra(android.speech.RecognizerIntent.EXTRA_RESULTS)) { in recognizeSpeech()
      72  data.getStringArrayListExtra(android.speech.RecognizerIntent.EXTRA_RESULTS); in recognizeSpeech()
|
D | TextToSpeechFacade.java |
      20  import android.speech.tts.TextToSpeech;
      21  import android.speech.tts.TextToSpeech.OnInitListener;
|
/external/autotest/server/site_tests/audio_AudioNoiseCancellation/ |
D | control.quiet_env |
      21  This test checks that the speech quality is not degraded by NC when the input is clean. During
      22  the test, the speech file will be played by Chameleon, while DUT records via the internal mic
      24  speech file as reference should not be less than the threshold specified in test_data.
      31  # The speech file is 15-second, 1-channel, 16k-rate.
|
D | control.noisy_env |
      21  This test checks that the speech quality is improved by NC when the input is mixed with noise.
      22  During the test, the speech and noise files will be mixed and played by Chameleon, while DUT
      24  calculated by ViSQOL with the speech file as reference should not be less than the threshold
|
/external/autotest/client/site_tests/desktopui_SpeechSynthesisSemiAuto/ |
D | desktopui_SpeechSynthesisSemiAuto.py |
      20  speech = dbus.Interface(proxy, "org.chromium.SpeechSynthesizerInterface")
      21  res = speech.Speak("Welcome to Chromium O S")
|
/external/robolectric-shadows/shadows/framework/src/main/java/org/robolectric/shadows/ |
D | ShadowTextToSpeech.java |
      10  import android.speech.tts.TextToSpeech;
      11  import android.speech.tts.TextToSpeech.Engine;
      12  import android.speech.tts.UtteranceProgressListener;
|
/external/tensorflow/tensorflow/lite/g3doc/examples/modify/model_maker/ |
D | speech_recognition.ipynb |
      40  "# Retrain a speech recognition model with TensorFlow Lite Model Maker\n"
      73  …r](https://www.tensorflow.org/lite/models/modify/model_maker) to train a speech recognition model …
      75  …ensorflow/tfjs-models/tree/master/speech-commands#speech-command-recognizer)) using a subset of wo…
      79  "**Note:** The model we'll be training is optimized for speech recognition with one-second samples…
      81  …"If you want to run the notebook with the default speech dataset, you can run the whole thing now …
     158  "To train with the default speech dataset, just run all the code below as-is.\n",
     160  "But if you want to train with your own speech dataset, follow these steps:\n",
     165  …arate subfolders for each classification. For example, each sample for a speech command \"yes\" sh…
     200  …ing the default speech dataset or a custom dataset, you should have a good set of background noise…
     216  " cache_subdir='dataset-speech',\n",
      [all …]
|
/external/sonic/doc/ |
D | index.md |
      21  Sonic is free software for speeding up or slowing down speech. While similar to
      34  to improve their productivity with free software speech engines, like espeak.
      48  In short, Sonic is better for speech, while WSOLA is better for music.
      52  for speech (contrary to the inventor's estimate of WSOLA). Listen to [this
      55  introduces unacceptable levels of distortion, making speech impossible to
      58  However, there are decent free software algorithms for speeding up speech. They
      59  are all in the TD-PSOLA family. For speech rates below 2X, sonic uses PICOLA,
     131  double speed of speech. A pitch of 0.95 means to lower the pitch by about 5%,
     134  speech is played. A 2.0 value will make you sound like a chipmunk talking very
     153  You read the sped up speech samples from sonic like this:
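
The excerpt at line 153 of index.md introduces sonic's stream API. As a hedged sketch of how the speed and pitch parameters quoted above are typically driven, assuming libsonic's sonic.h entry points (sonicCreateStream, sonicSetSpeed, sonicSetPitch, sonicWriteShortToStream, sonicFlushStream, sonicReadShortFromStream, sonicDestroyStream); verify against the library header before relying on it:

    /* Hedged sketch: play 16 kHz mono speech at double speed with libsonic. */
    #include "sonic.h"

    void speed_up(short* samples, int numSamples) {
      sonicStream stream = sonicCreateStream(16000, 1);  /* sample rate, channels */
      sonicSetSpeed(stream, 2.0f);  /* 2.0 = double speed of speech */
      sonicSetPitch(stream, 1.0f);  /* unchanged; 0.95 would lower pitch ~5% */
      sonicWriteShortToStream(stream, samples, numSamples);
      sonicFlushStream(stream);     /* drain the internal buffer */
      short out[2048];
      int n;
      while ((n = sonicReadShortFromStream(stream, out, 2048)) > 0) {
        /* consume n sped-up samples here */
      }
      sonicDestroyStream(stream);
    }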
|
/external/robolectric/robolectric/src/test/java/org/robolectric/shadows/ |
D | ShadowSpeechRecognizerTest.java |
      13  import android.speech.RecognitionListener;
      14  import android.speech.RecognitionSupport;
      15  import android.speech.RecognitionSupportCallback;
      16  import android.speech.RecognizerIntent;
      17  import android.speech.SpeechRecognizer;
|
/external/sonic/debian/ |
D | control |
      14  Description: Simple utility to speed up or slow down speech
      24  Description: Simple library to speed up or slow down speech
      27  down speech. It has only basic dependencies, and is meant to
|
/external/sonic/ |
D | README |
       1  Sonic is a simple algorithm for speeding up or slowing down speech. However,
       3  speech rate. The Sonic library is a very simple ANSI C library that is designed
       7  to improve their productivity with open source speech engines, like espeak.
|
/external/webrtc/resources/audio_coding/ |
D | READ.ME |
       3  testfile32kHz.pcm - mono speech file sampled at 32 kHz
       4  teststereo32kHz.pcm - stereo speech file sampled at 32 kHz
|
/external/robolectric-shadows/robolectric/src/test/java/org/robolectric/shadows/ |
D | ShadowTextToSpeechTest.java |
      11  import android.speech.tts.TextToSpeech;
      12  import android.speech.tts.TextToSpeech.Engine;
      13  import android.speech.tts.UtteranceProgressListener;
|
/external/robolectric/integration_tests/compat-target28/src/test/java/org/robolectric/integration/compat/target28/ |
D | NormalCompatibilityTest.kt |
       7  import android.speech.SpeechRecognizer
      49  fun `Create speech recognizer succeed`() { in Create speech recognizer succeed()
|
/external/armnn/python/pyarmnn/examples/speech_recognition/ |
D | README.md |
       3  This sample application guides the user to perform automatic speech recognition (ASR) with PyArmNN …
      42  …itable wav files containing human speech can be found [here](https://github.com/Azure-Samples/cogn…
      78  3. [Automatic speech recognition pipeline](#automatic-speech-recognition-pipeline)
     125  ### Automatic speech recognition pipeline
     167  The output from the inference must be decoded to obtain the recognised characters from the speech. …
     171  Having now gained a solid understanding of performing automatic speech recognition with PyArmNN, yo…
|
/external/webrtc/modules/audio_processing/test/conversational_speech/ |
D | README.md |
       3  Tool to generate multiple-end audio tracks to simulate conversational speech
       7  a text file indicating how to time the sequence of speech turns (see the Example
      66  100 ms, "." is silence and "*" is speech).
|
/external/armnn/samples/SpeechRecognition/ |
D | Readme.md |
       4  This is a sample code showing automatic speech recognition using Arm NN public C++ API. The compile…
      42  * BUILD_UNIT_TESTS - set to `1` to build tests. Additionally to the main application, `speech-reco…
      68  * `speech-recognition-example` - application executable
      78  * --audio-file-path: Path to the audio file to run speech recognition on **[REQUIRED]**
      87  To run speech recognition on a supplied audio file and output the result to console:
      89  ./speech-recognition-example --audio-file-path /path/to/audio/file --model-file-path /path/to/model…
     195  The speech recognition pipeline has 3 steps to perform, data pre-processing, run inference and deco…
     238  The output from the inference must be decoded to obtain the recognised characters from the speech.
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_Mfcc.pbtxt |
      42  summary: "Transforms a spectrogram into a form that\'s useful for speech recognition."
      48  history in the speech recognition world, and https://en.wikipedia.org/wiki/Mel-frequency_cepstrum
|