/external/sonic/samples/ |
D | README |
    1   These wav files show how Sonic performs at increasing speech rates. All sound
    4   sonic.wav
    8   soundtouch.wav
    9   This is the same recording as sonic.wav, but sped up using soundtouch, which
    13  talking.wav
    16  talking_2x.wav
    19  espeak_s450.wav
    21  espeak_s450.wav was generated using 'espeak -s450 -f test1.txt -w
    22  espeak_s450.wav'. This is the highest speed currently supported by espeak,
    25  espeak_sonic.wav
    [all …]
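The README quotes the exact espeak invocation used for the fastest sample; as a hedged illustration only, the same command could be driven from Python (espeak must be installed, and test1.txt is the input file named in the README):

```python
# Illustrative sketch: re-run the espeak command quoted in the README above.
import subprocess

subprocess.run(
    ["espeak", "-s450", "-f", "test1.txt", "-w", "espeak_s450.wav"],
    check=True,
)
```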
|
/external/tensorflow/tensorflow/core/kernels/ |
D | summary_audio_op_test.cc |
    101  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2  in TEST_F()
    104  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2  in TEST_F()
    107  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2  in TEST_F()
    135  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
    138  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
    141  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
    167  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
    170  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
    173  audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1  in TEST_F()
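The hits above are the expected Summary protos from TensorFlow's audio summary op. A minimal sketch of producing such a summary from Python, assuming the TF2 `tf.summary.audio` API and a hypothetical log directory:

```python
# Sketch only: write a 44.1 kHz stereo clip as an "audio/wav" summary,
# mirroring the sample_rate/num_channels values checked in the test.
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/audio_summaries")  # hypothetical path
clips = tf.random.uniform([3, 4410, 2], minval=-1.0, maxval=1.0)  # [clips, samples, channels]
with writer.as_default(step=0):
    tf.summary.audio("my_audio", clips, sample_rate=44100, max_outputs=3)
```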
|
D | spectrogram_convert_test_data.cc |
    24  namespace wav {  namespace
    50  tensorflow::Status status = tensorflow::wav::ConvertCsvToRaw(filename);  in main()
|
/external/tensorflow/tensorflow/examples/speech_commands/ |
D | label_wav.py |
    73  def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):  argument
    75  if not wav or not tf.io.gfile.exists(wav):
    76  raise ValueError('Audio file does not exist at {0}'.format(wav))
    88  with open(wav, 'rb') as wav_file:
    96  label_wav(FLAGS.wav, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
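A minimal sketch of the loading step these hits come from, with a hypothetical input path (the real script takes the path, graph, and labels as flags):

```python
# Sketch of label_wav.py's input handling: verify the file exists, then read
# the raw wav bytes that are fed to the graph's decoder input.
import tensorflow as tf

wav = "/tmp/my_command.wav"  # hypothetical; normally FLAGS.wav
if not wav or not tf.io.gfile.exists(wav):
    raise ValueError('Audio file does not exist at {0}'.format(wav))
with open(wav, 'rb') as wav_file:
    wav_data = wav_file.read()
```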
|
D | test_streaming_accuracy.cc |
    134  string wav = "";  in main() local
    149  Flag("wav", &wav, "audio file to be identified"),  in main()
    212  tensorflow::Env::Default(), wav, &wav_string);  in main()
    221  Status decode_wav_status = tensorflow::wav::DecodeLin16WaveAsFloatVector(  in main()
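The C++ hit at line 221 decodes 16-bit PCM into float samples; a rough Python counterpart, assuming the public `tf.audio.decode_wav` op and a hypothetical path:

```python
# Sketch only: decode LIN16 wav data to float32 samples in [-1, 1].
import tensorflow as tf

wav_bytes = tf.io.read_file("/tmp/streaming_test.wav")  # hypothetical input
audio, sample_rate = tf.audio.decode_wav(wav_bytes)  # audio shape: [samples, channels]
```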
|
D | label_wav.cc |
    102  string wav = "";  in main() local
    109  Flag("wav", &wav, "audio file to be identified"),  in main()
    147  tensorflow::Env::Default(), wav, &wav_string);  in main()
|
/external/libwebsockets/minimal-examples/secure-streams/minimal-secure-streams-avs/ |
D | avs.c |
    28   static uint8_t *wav;  variable
    182  memcpy(buf, wav + m->pos, *len);  in ss_avs_metadata_tx()
    353  if (wav) {  in ss_avs_event_state()
    354  free(wav);  in ss_avs_event_state()
    355  wav = NULL;  in ss_avs_event_state()
    388  wav = malloc(wav_len);  in avs_example_start()
    389  if (!wav) {  in avs_example_start()
    394  if (read(fd, wav,  in avs_example_start()
    421  free(wav);  in avs_example_start()
    422  wav = NULL;  in avs_example_start()
|
/external/oboe/samples/parselib/src/main/cpp/ |
D | CMakeLists.txt |
    48  # wav
    49  ${CMAKE_CURRENT_LIST_DIR}/wav/AudioEncoding.cpp
    50  ${CMAKE_CURRENT_LIST_DIR}/wav/WavChunkHeader.cpp
    51  ${CMAKE_CURRENT_LIST_DIR}/wav/WavFmtChunkHeader.cpp
    52  ${CMAKE_CURRENT_LIST_DIR}/wav/WavRIFFChunkHeader.cpp
    53  ${CMAKE_CURRENT_LIST_DIR}/wav/WavStreamReader.cpp)
|
/external/flac/test/ |
D | test_flac.sh |
    71   if [ ! -f wacky1.wav ] ; then
    80   cp wacky1.wav exist.wav
    82   if run_flac -0 exist.wav ; then
    89   if run_flac -0 --force exist.wav ; then
    109  rm -f exist.wav exist.flac
    178  run_flac --force --decode --channel-map=none -o rt.wav $extra rt.flac || die "ERROR"
    180  cmp $f rt.wav || die "ERROR: file mismatch"
    182  rm -f rt.flac rt.wav
    194  cmp $f rt.wav || die "ERROR: file mismatch"
    196  rm -f rt.flac rt.wav
    [all …]
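The round-trip check at lines 178–180 (encode, decode, byte-compare) can be sketched in Python; the file names here are placeholders and `flac` must be on PATH:

```python
# Hedged sketch of the encode/decode/compare round trip exercised by test_flac.sh.
import filecmp
import subprocess

subprocess.run(["flac", "--silent", "--force", "-o", "rt.flac", "original.wav"], check=True)
subprocess.run(["flac", "--silent", "--force", "--decode", "-o", "rt.wav", "rt.flac"], check=True)
assert filecmp.cmp("original.wav", "rt.wav", shallow=False), "ERROR: file mismatch"
```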
|
D | test_compression.sh |
    30  last_size=$(wc -c < noisy-sine.wav)
    35  flac${EXE} -${k} --silent noisy-sine.wav -o ${fname}
|
/external/libgsm/tst/ |
D | cod2lin.c |
    20  int wav = 0;  variable
    46  gsm_option(r, GSM_OPT_WAV49, &wav);
    86  case 'w': wav++; break;
|
D | lin2cod.c |
    19  int wav = 0;  variable
    42  gsm_option(r, GSM_OPT_WAV49, &wav);
    87  case 'w': wav++; break;
|
/external/webrtc/modules/audio_coding/neteq/tools/ |
D | neteq_rtpplay_test.sh |
    92   CASE1_WAV=$TMP_DIR/case1.wav
    98   CASE1_TEXTLOG=$TMP_DIR/case1.wav.text_log.txt
    128  CASE4_WAV=$TMP_DIR/case4.wav
    129  $BIN $TEST_RTC_EVENT_LOG $TMP_DIR/case4.wav \
|
/external/autotest/server/site_tests/audio_MediaBasicVerification/ |
D | control.test_wav |
    8   NAME = "audio_MediaBasicVerification.wav"
    9   PURPOSE = "Verify wav audio formats playback quality"
    26  'public/audio_test/chameleon/Headphone/test_256_16.wav')
|
/external/skia/third_party/oboe/ |
D | BUILD.gn |
    27  "../externals/oboe/samples/parselib/src/main/cpp/wav/AudioEncoding.cpp",
    28  "../externals/oboe/samples/parselib/src/main/cpp/wav/WavChunkHeader.cpp",
    29  "../externals/oboe/samples/parselib/src/main/cpp/wav/WavFmtChunkHeader.cpp",
    30  "../externals/oboe/samples/parselib/src/main/cpp/wav/WavRIFFChunkHeader.cpp",
    31  "../externals/oboe/samples/parselib/src/main/cpp/wav/WavStreamReader.cpp",
|
/external/flac/test/foreign-metadata-test-files/ |
D | Makefile.am |
    20  24bit-WaveFmtPCM.wav \
    24  BWF-WaveFmtEx.wav
|
/external/flac/src/test_streams/ |
D | main.c |
    955   FLAC__byte wav[] = {  in generate_wackywavs() local
    972   if(fwrite(wav, 1, 84, f) < 84)  in generate_wackywavs()
    976   wav[4] += 12;  in generate_wackywavs()
    979   if(fwrite(wav, 1, 96, f) < 96)  in generate_wackywavs()
    1079  FLAC__byte wav[] = {  in generate_wackywav64s() local
    1107  if(fwrite(wav, 1, wav[16], f) < wav[16])  in generate_wackywav64s()
    1111  wav[16] += 32;  in generate_wackywav64s()
    1114  if(fwrite(wav, 1, wav[16], f) < wav[16])  in generate_wackywav64s()
    1127  FLAC__byte wav[] = {  in generate_wackyrf64s() local
    1149  if(fwrite(wav, 1, 120, f) < 120)  in generate_wackyrf64s()
    [all …]
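For context on what generate_wackywavs() is perturbing (for example the RIFF chunk-size byte patched at offset 4 on line 976), here is a hedged sketch of a canonical PCM RIFF/WAVE header; it is not the generator's actual byte table:

```python
# Sketch of a canonical 16-bit PCM WAVE header. Byte offset 4 holds the
# little-endian RIFF chunk size, which is what wav[4] += 12 nudges above.
import struct

def pcm_wav_header(num_samples, channels=1, sample_rate=44100, bits=16):
    data_bytes = num_samples * channels * (bits // 8)
    byte_rate = sample_rate * channels * (bits // 8)
    block_align = channels * (bits // 8)
    return (b"RIFF" + struct.pack("<I", 36 + data_bytes) + b"WAVE"
            + b"fmt " + struct.pack("<IHHIIHH", 16, 1, channels,
                                    sample_rate, byte_rate, block_align, bits)
            + b"data" + struct.pack("<I", data_bytes))
```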
|
/external/tensorflow/tensorflow/examples/wav_to_spectrogram/ |
D | README.md |
    3   This example shows how you can load audio from a .wav file, convert it to a
    27  To load your own audio, you need to supply a .wav file in LIN16 format, and use
    45  --input_wav=/tmp/my_audio.wav \
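A rough Python sketch of the pipeline this README describes (decode a LIN16 .wav, then take a magnitude spectrogram); the window and stride values are illustrative, not the example's defaults:

```python
# Sketch only: wav -> magnitude spectrogram with TensorFlow ops.
import tensorflow as tf

wav_bytes = tf.io.read_file("/tmp/my_audio.wav")  # path from the README's example flag
audio, sample_rate = tf.audio.decode_wav(wav_bytes, desired_channels=1)
frames = tf.signal.stft(tf.squeeze(audio, axis=-1), frame_length=256, frame_step=128)
spectrogram = tf.abs(frames)  # shape: [num_frames, num_bins]
```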
|
/external/webrtc/resources/audio_processing/test/py_quality_assessment/ |
D | BUILD.gn |
    14  sources = [ "noise_tracks/city.wav" ]
    22  sources = [ "probing_signals/tone-880.wav" ]
|
/external/armnn/samples/SpeechRecognition/cmake/ |
D | unit_tests.cmake |
    18  …amples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav
    21  …BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/myVoiceIsMyPassportVerifyMe04.wav ${CMAKE_CU…
|
/external/flac/man/ |
D | flac.md |
    9    **flac** \[ *OPTIONS* \] \[ *infile.wav* \| *infile.rf64* \|
    30   flac assumes that files ending in ".wav" or that have the RIFF WAVE
    68   decoding, the extension will be ".wav" for WAVE output and ".raw" for raw
    92   flac -V -- -01-filename.wav
    121  `flac abc.wav`
    122  : Encode abc.wav to abc.flac using the default compression setting. abc.wav is not deleted.
    124  `flac --delete-input-file abc.wav`
    125  : Like above, except abc.wav is deleted if there were no errors.
    127  `flac --delete-input-file -w abc.wav`
    128  : Like above, except abc.wav is deleted if there were no errors or warnings.
    [all …]
|
/external/webrtc/audio/ |
D | BUILD.gn |
    263  "../resources/voice_engine/audio_tiny16.wav",
    264  "../resources/voice_engine/audio_tiny48.wav",
    279  "../resources/voice_engine/audio_tiny16.wav",
    280  "../resources/voice_engine/audio_tiny48.wav",
    338  data = [ "//resources/voice_engine/audio_dtx16.wav" ]
|
/external/autotest/server/site_tests/audio_AudioQualityAfterSuspend/ |
D | control.headphone_wav |
    10  NAME = "audio_AudioQualityAfterSuspend.headphone.wav"
    30  'public/audio_test/chameleon/Headphone/test_256_16.wav')
|
D | control.internal_speaker_wav |
    10  NAME = "audio_AudioQualityAfterSuspend.internal_speaker.wav"
    31  'public/audio_test/chameleon/Speaker/test_512_16.wav')
|
/external/autotest/server/site_tests/audio_AudioNoiseCancellation/ |
D | control.noisy_env |
    33  test_data = dict(speech_file='speech_ref.wav',
    34  noise_file='office_noise.wav',
|