Searched refs:wav (Results 1 – 25 of 161) sorted by relevance

/external/sonic/samples/
README
1 These wav files show how Sonic performs at increasing speech rates. All sound
4 sonic.wav
8 soundtouch.wav
9 This is the same recording as sonic.wav, but sped up using soundtouch, which
13 talking.wav
16 talking_2x.wav
19 espeak_s450.wav
21 espeak_s450.wav was generated using 'espeak -s450 -f test1.txt -w
22 espeak_s450.wav'. This is the highest speed currently supported by espeak,
25 espeak_sonic.wav
[all …]
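
A quick way to sanity-check the talking.wav / talking_2x.wav pair listed above is to compare durations; a minimal Python sketch using only the standard-library wave module (file names taken from the listing, files assumed to be plain PCM WAV):

    import wave

    def duration_seconds(path):
        # Duration of a PCM WAV file = frame count / sample rate.
        w = wave.open(path, 'rb')
        try:
            return w.getnframes() / float(w.getframerate())
        finally:
            w.close()

    normal = duration_seconds('talking.wav')
    double = duration_seconds('talking_2x.wav')
    print('speed-up factor: %.2fx' % (normal / double))
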
/external/tensorflow/tensorflow/core/kernels/
summary_audio_op_test.cc
101 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 in TEST_F()
104 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 in TEST_F()
107 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 2 in TEST_F()
135 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
138 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
141 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
167 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
170 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
173 audio { content_type: "audio/wav" sample_rate: 44100 num_channels: 1 in TEST_F()
spectrogram_convert_test_data.cc
24 namespace wav { namespace
50 tensorflow::Status status = tensorflow::wav::ConvertCsvToRaw(filename); in main()
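
summary_audio_op_test.cc above expects audio summaries whose proto carries content_type "audio/wav" with the stated sample rate and channel count. For orientation only, a hedged sketch of emitting a comparable audio summary from Python with the TF 2.x API (tensor shape and log directory are illustrative, not taken from the test):

    import tensorflow as tf

    # One 0.5 s stereo clip at 44.1 kHz: shape [num_clips, num_frames,
    # num_channels], float values in [-1.0, 1.0].
    clip = tf.random.uniform([1, 22050, 2], minval=-1.0, maxval=1.0)

    writer = tf.summary.create_file_writer('/tmp/audio_summaries')
    with writer.as_default():
        tf.summary.audio('test_audio', clip, sample_rate=44100, step=0)
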
/external/tensorflow/tensorflow/examples/speech_commands/
label_wav.py
73 def label_wav(wav, labels, graph, input_name, output_name, how_many_labels): argument
75 if not wav or not tf.io.gfile.exists(wav):
76 raise ValueError('Audio file does not exist at {0}'.format(wav))
88 with open(wav, 'rb') as wav_file:
96 label_wav(FLAGS.wav, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
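
The label_wav.py matches show the shape of the script: validate the --wav path, read its raw bytes, and hand them to a frozen graph selected by --graph, --labels, --input_name and --output_name. A condensed sketch of that flow, reconstructed from the matched lines (the graph-running part is summarized in a comment rather than reproduced):

    import tensorflow as tf

    def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):
        if not wav or not tf.io.gfile.exists(wav):
            raise ValueError('Audio file does not exist at {0}'.format(wav))
        with open(wav, 'rb') as wav_file:
            wav_data = wav_file.read()
        # The real script then loads `graph` as a frozen GraphDef, feeds
        # wav_data to the node named by input_name, runs the node named by
        # output_name, and prints the top how_many_labels entries from labels.
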
test_streaming_accuracy.cc
134 string wav = ""; in main() local
149 Flag("wav", &wav, "audio file to be identified"), in main()
212 tensorflow::Env::Default(), wav, &wav_string); in main()
221 Status decode_wav_status = tensorflow::wav::DecodeLin16WaveAsFloatVector( in main()
label_wav.cc
102 string wav = ""; in main() local
109 Flag("wav", &wav, "audio file to be identified"), in main()
147 tensorflow::Env::Default(), wav, &wav_string); in main()
/external/libwebsockets/minimal-examples/secure-streams/minimal-secure-streams-avs/
avs.c
28 static uint8_t *wav; variable
182 memcpy(buf, wav + m->pos, *len); in ss_avs_metadata_tx()
353 if (wav) { in ss_avs_event_state()
354 free(wav); in ss_avs_event_state()
355 wav = NULL; in ss_avs_event_state()
388 wav = malloc(wav_len); in avs_example_start()
389 if (!wav) { in avs_example_start()
394 if (read(fd, wav, in avs_example_start()
421 free(wav); in avs_example_start()
422 wav = NULL; in avs_example_start()
/external/oboe/samples/parselib/src/main/cpp/
CMakeLists.txt
48 # wav
49 ${CMAKE_CURRENT_LIST_DIR}/wav/AudioEncoding.cpp
50 ${CMAKE_CURRENT_LIST_DIR}/wav/WavChunkHeader.cpp
51 ${CMAKE_CURRENT_LIST_DIR}/wav/WavFmtChunkHeader.cpp
52 ${CMAKE_CURRENT_LIST_DIR}/wav/WavRIFFChunkHeader.cpp
53 ${CMAKE_CURRENT_LIST_DIR}/wav/WavStreamReader.cpp)
/external/flac/test/
test_flac.sh
71 if [ ! -f wacky1.wav ] ; then
80 cp wacky1.wav exist.wav
82 if run_flac -0 exist.wav ; then
89 if run_flac -0 --force exist.wav ; then
109 rm -f exist.wav exist.flac
178 run_flac --force --decode --channel-map=none -o rt.wav $extra rt.flac || die "ERROR"
180 cmp $f rt.wav || die "ERROR: file mismatch"
182 rm -f rt.flac rt.wav
194 cmp $f rt.wav || die "ERROR: file mismatch"
196 rm -f rt.flac rt.wav
[all …]
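
test_flac.sh covers overwrite protection (--force) and a decode round trip: each encoded file is decoded back to rt.wav and byte-compared against the original with cmp. Roughly the same round trip, scripted from Python with the flac flags visible in the matched lines (the input name is a placeholder; the real script drives flac through a run_flac wrapper):

    import filecmp
    import subprocess

    src = 'input.wav'   # placeholder for the file under test
    subprocess.run(['flac', '-0', '--force', '-o', 'rt.flac', src], check=True)
    subprocess.run(['flac', '--force', '--decode', '--channel-map=none',
                    '-o', 'rt.wav', 'rt.flac'], check=True)
    # cmp-style byte comparison of the original and the round-tripped file.
    assert filecmp.cmp(src, 'rt.wav', shallow=False), 'file mismatch'
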
test_compression.sh
30 last_size=$(wc -c < noisy-sine.wav)
35 flac${EXE} -${k} --silent noisy-sine.wav -o ${fname}
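
test_compression.sh encodes the same noisy-sine.wav at successive -<k> levels and measures each output with wc -c; the rough expectation (hedged, the script's exact assertion may differ) is that a higher level never yields a larger file. The same check sketched in Python:

    import os
    import subprocess

    last_size = float('inf')
    for k in range(0, 9):                    # flac compression levels -0 .. -8
        fname = 'noisy-sine-%d.flac' % k
        subprocess.run(['flac', '-%d' % k, '--silent',
                        'noisy-sine.wav', '-o', fname], check=True)
        size = os.path.getsize(fname)
        assert size <= last_size, 'level %d grew the file' % k
        last_size = size
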
/external/libgsm/tst/
cod2lin.c
20 int wav = 0; variable
46 gsm_option(r, GSM_OPT_WAV49, &wav);
86 case 'w': wav++; break;
lin2cod.c
19 int wav = 0; variable
42 gsm_option(r, GSM_OPT_WAV49, &wav);
87 case 'w': wav++; break;
/external/webrtc/modules/audio_coding/neteq/tools/
neteq_rtpplay_test.sh
92 CASE1_WAV=$TMP_DIR/case1.wav
98 CASE1_TEXTLOG=$TMP_DIR/case1.wav.text_log.txt
128 CASE4_WAV=$TMP_DIR/case4.wav
129 $BIN $TEST_RTC_EVENT_LOG $TMP_DIR/case4.wav \
/external/autotest/server/site_tests/audio_MediaBasicVerification/
control.test_wav
8 NAME = "audio_MediaBasicVerification.wav"
9 PURPOSE = "Verify wav audio formats playback quality"
26 'public/audio_test/chameleon/Headphone/test_256_16.wav')
/external/skia/third_party/oboe/
DBUILD.gn27 "../externals/oboe/samples/parselib/src/main/cpp/wav/AudioEncoding.cpp",
28 "../externals/oboe/samples/parselib/src/main/cpp/wav/WavChunkHeader.cpp",
29 "../externals/oboe/samples/parselib/src/main/cpp/wav/WavFmtChunkHeader.cpp",
30 "../externals/oboe/samples/parselib/src/main/cpp/wav/WavRIFFChunkHeader.cpp",
31 "../externals/oboe/samples/parselib/src/main/cpp/wav/WavStreamReader.cpp",
/external/flac/test/foreign-metadata-test-files/
Makefile.am
20 24bit-WaveFmtPCM.wav \
24 BWF-WaveFmtEx.wav
/external/flac/src/test_streams/
main.c
955 FLAC__byte wav[] = { in generate_wackywavs() local
972 if(fwrite(wav, 1, 84, f) < 84) in generate_wackywavs()
976 wav[4] += 12; in generate_wackywavs()
979 if(fwrite(wav, 1, 96, f) < 96) in generate_wackywavs()
1079 FLAC__byte wav[] = { in generate_wackywav64s() local
1107 if(fwrite(wav, 1, wav[16], f) < wav[16]) in generate_wackywav64s()
1111 wav[16] += 32; in generate_wackywav64s()
1114 if(fwrite(wav, 1, wav[16], f) < wav[16]) in generate_wackywav64s()
1127 FLAC__byte wav[] = { in generate_wackyrf64s() local
1149 if(fwrite(wav, 1, 120, f) < 120) in generate_wackyrf64s()
[all …]
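
In generate_wackywavs() above, wav[] is a hand-built RIFF/WAVE byte array, and the wav[4] += 12 line patches an embedded size field before the buffer is rewritten with an extra 12-byte chunk appended (the WAVE64/RF64 variants appear to do the same at other offsets). In a plain RIFF header, bytes 4..7 hold the little-endian count of all bytes after the size field, which is why appending 12 bytes means adding 12 at offset 4. A small illustration of that layout (values are illustrative, not the test's data):

    import struct

    def riff_prefix(bytes_after_size_field):
        # 'RIFF' tag, little-endian uint32 size, then the 'WAVE' form type.
        return b'RIFF' + struct.pack('<I', bytes_after_size_field) + b'WAVE'

    hdr = riff_prefix(76)   # an 84-byte file: 8 header bytes + 76 that follow
    print(hdr[4])           # 76 -- the byte the generator bumps by 12
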
/external/tensorflow/tensorflow/examples/wav_to_spectrogram/
README.md
3 This example shows how you can load audio from a .wav file, convert it to a
27 To load your own audio, you need to supply a .wav file in LIN16 format, and use
45 --input_wav=/tmp/my_audio.wav \
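
The wav_to_spectrogram README describes a C++ example that loads a LIN16 (16-bit linear PCM) .wav file and turns it into a spectrogram image, driven by flags such as --input_wav. A rough Python analogue of the load-and-transform step using standard TensorFlow ops (window and stride values are made up; the real example exposes its own flags for them):

    import tensorflow as tf

    wav_bytes = tf.io.read_file('/tmp/my_audio.wav')    # path as in the README
    audio, sample_rate = tf.audio.decode_wav(wav_bytes, desired_channels=1)
    # Magnitude spectrogram over 256-sample windows with 50% overlap.
    stft = tf.signal.stft(tf.squeeze(audio, -1), frame_length=256, frame_step=128)
    spectrogram = tf.abs(stft)
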
/external/webrtc/resources/audio_processing/test/py_quality_assessment/
BUILD.gn
14 sources = [ "noise_tracks/city.wav" ]
22 sources = [ "probing_signals/tone-880.wav" ]
/external/armnn/samples/SpeechRecognition/cmake/
unit_tests.cmake
18 …amples/cognitive-services-speech-sdk/master/sampledata/audiofiles/myVoiceIsMyPassportVerifyMe04.wav
21 …BUILD_COMMAND ${CMAKE_COMMAND} -E copy <DOWNLOAD_DIR>/myVoiceIsMyPassportVerifyMe04.wav ${CMAKE_CU…
/external/flac/man/
flac.md
9 **flac** \[ *OPTIONS* \] \[ *infile.wav* \| *infile.rf64* \|
30 flac assumes that files ending in ".wav" or that have the RIFF WAVE
68 decoding, the extension will be ".wav" for WAVE output and ".raw" for raw
92 flac -V -- -01-filename.wav
121 `flac abc.wav`
122 : Encode abc.wav to abc.flac using the default compression setting. abc.wav is not deleted.
124 `flac --delete-input-file abc.wav`
125 : Like above, except abc.wav is deleted if there were no errors.
127 `flac --delete-input-file -w abc.wav`
128 : Like above, except abc.wav is deleted if there were no errors or warnings.
[all …]
/external/webrtc/audio/
DBUILD.gn263 "../resources/voice_engine/audio_tiny16.wav",
264 "../resources/voice_engine/audio_tiny48.wav",
279 "../resources/voice_engine/audio_tiny16.wav",
280 "../resources/voice_engine/audio_tiny48.wav",
338 data = [ "//resources/voice_engine/audio_dtx16.wav" ]
/external/autotest/server/site_tests/audio_AudioQualityAfterSuspend/
control.headphone_wav
10 NAME = "audio_AudioQualityAfterSuspend.headphone.wav"
30 'public/audio_test/chameleon/Headphone/test_256_16.wav')
control.internal_speaker_wav
10 NAME = "audio_AudioQualityAfterSuspend.internal_speaker.wav"
31 'public/audio_test/chameleon/Speaker/test_512_16.wav')
/external/autotest/server/site_tests/audio_AudioNoiseCancellation/
control.noisy_env
33 test_data = dict(speech_file='speech_ref.wav',
34 noise_file='office_noise.wav',
