/external/libxaac/decoder/ixheaacd_sbrdecoder.c
   252  FLAG stereo = 0;    in ixheaacd_applysbr() (local)
   364  stereo = 0;    in ixheaacd_applysbr()
   367  stereo = 1;    in ixheaacd_applysbr()
   413  ptr_header_data[k], it_bit_buff, stereo, ptr_sbr_dflt_header);    in ixheaacd_applysbr()
   453  ptr_header_data[k], it_bit_buff, stereo, ptr_sbr_dflt_header);    in ixheaacd_applysbr()
   506  if (stereo) {    in ixheaacd_applysbr()
   546  stereo = (num_channels == 2) ? 1 : 0;    in ixheaacd_applysbr()
   548  ptr_header_data[0]->channel_mode = stereo ? SBR_STEREO : SBR_MONO;    in ixheaacd_applysbr()
   552  if (!(stereo || dual_mono)) {    in ixheaacd_applysbr()
   573  (stereo || dual_mono) ? ptr_frame_data[1] : NULL,    in ixheaacd_applysbr()
   [all …]
|
/external/adhd/cras/src/tests/audio_area_unittest.cc
    13  static const int8_t stereo[CRAS_CH_MAX] = {    (variable)
    37  fmt.channel_layout[i] = stereo[i];    in TEST()
    68  fmt.channel_layout[i] = stereo[i];    in TEST()
   104  fmt.channel_layout[i] = stereo[i];    in TEST()
   137  fmt.channel_layout[i] = stereo[i];    in TEST()
   175  dst_fmt.channel_layout[i] = stereo[i];    in TEST()
   218  fmt.channel_layout[i] = stereo[i];    in TEST()
   250  fmt.channel_layout[i] = stereo[i];    in TEST()
|
/external/webrtc/talk/media/base/audioframe.h
    41  AudioFrame(int16_t* audio, size_t audio_length, int sample_freq, bool stereo)    in AudioFrame() (argument)
    45  stereo_(stereo) {}    in AudioFrame()
|
/external/aac/libSBRdec/src/sbrdecoder.cpp
  1147  int stereo;    in sbrDecoder_Parse() (local)
  1228  stereo = (hSbrElement->elementID == ID_CPE) ? 1 : 0;    in sbrDecoder_Parse()
  1233  if (stereo) {    in sbrDecoder_Parse()
  1241  if (stereo) {    in sbrDecoder_Parse()
  1345  if (stereo && newSbrInfo.pvc_mode > 0) {    in sbrDecoder_Parse()
  1429  if (!stereo && (self->hParametricStereoDec != NULL)) {    in sbrDecoder_Parse()
  1436  hSbrHeader, hFrameDataLeft, (stereo) ? hFrameDataRight : NULL,    in sbrDecoder_Parse()
  1439  (stereo) ? NULL : self->hParametricStereoDec, self->flags,    in sbrDecoder_Parse()
  1516  if (stereo) {    in sbrDecoder_Parse()
  1525  if (!stereo) {    in sbrDecoder_Parse()
  [all …]
|
/external/webrtc/webrtc/voice_engine/test/auto_test/fakes/fake_media_process.h
    23  bool stereo) {    in Process() (argument)
    25  if (!stereo) {    in Process()
|
/external/deqp/framework/platform/win32/tcuWGL.hpp
   103  bool stereo;    (member in tcu::wgl::PixelFormatInfo)
   143  , stereo (false)    in PixelFormatInfo()
|
/external/webrtc/webrtc/modules/audio_device/audio_device_impl.cc
  1031  bool stereo(false);    in StereoRecording() (local)
  1033  if (_ptrAudioDevice->StereoRecording(stereo) == -1)    in StereoRecording()
  1038  *enabled = stereo;    in StereoRecording()
  1061  bool stereo(false);    in SetRecordingChannel() (local)
  1063  if (_ptrAudioDevice->StereoRecording(stereo) == -1)    in SetRecordingChannel()
  1161  bool stereo(false);    in StereoPlayout() (local)
  1163  if (_ptrAudioDevice->StereoPlayout(stereo) == -1)    in StereoPlayout()
  1168  *enabled = stereo;    in StereoPlayout()
|
/external/libopus/celt/bands.c
   647  static int compute_qn(int N, int b, int offset, int pulse_cap, int stereo)    in compute_qn() (argument)
   653  if (stereo && N==2)    in compute_qn()
   703  int stereo, int *fill)    in compute_theta() (argument)
   730  offset = (pulse_cap>>1) - (stereo&&N==2 ? QTHETA_OFFSET_TWOPHASE : QTHETA_OFFSET);    in compute_theta()
   731  qn = compute_qn(N, *b, offset, pulse_cap, stereo);    in compute_theta()
   732  if (stereo && i>=intensity)    in compute_theta()
   740  itheta = stereo_itheta(X, Y, stereo, N, ctx->arch);    in compute_theta()
   747  if (!stereo || ctx->theta_round == 0)    in compute_theta()
   750  if (!stereo && ctx->avoid_split_noise && itheta > 0 && itheta < qn)    in compute_theta()
   777  if (stereo && N>2)    in compute_theta()
   [all …]
|
/external/libopus/celt/rate.c
   257  int stereo;    in interp_bits2pulses() (local)
   266  stereo = C>1;    in interp_bits2pulses()
   478  ebits[j] = bits[j] >> stereo >> BITRES;    in interp_bits2pulses()
   505  extra_fine = IMIN(excess>>(stereo+BITRES),MAX_FINE_BITS-ebits[j]);    in interp_bits2pulses()
   523  ebits[j] = bits[j] >> stereo >> BITRES;    in interp_bits2pulses()
|
/external/libopus/celt/vq.h
    81  int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch);
|
/external/libopus/celt/vq.c
   406  int stereo_itheta(const celt_norm *X, const celt_norm *Y, int stereo, int N, int arch)    in stereo_itheta() (argument)
   414  if (stereo)    in stereo_itheta()
|
/external/webrtc/data/voice_engine/stereo_rtp_files/README.txt
     3  This sends the stereo rtp file to port 1236.
|
/external/python/cpython2/Doc/library/audioop.rst
   213  Convert a stereo fragment to a mono fragment. The left channel is multiplied by
   220  Generate a stereo fragment from a mono fragment. Each pair of samples in the
   221  stereo fragment are computed from the mono sample, whereby left channel samples
   232  between mono and stereo fragments, i.e. all samples are treated equal. If this
   233  is a problem the stereo fragment should be split into two mono fragments first
|
/external/python/cpython2/Doc/library/aifc.rst
    30  sampled. The number of channels indicate if the audio is mono, stereo, or
    37  channels (stereo) and has a frame rate of 44,100 frames/second. This gives a
    61  Return the number of audio channels (1 for mono, 2 for stereo).
|
/external/python/cpython2/Doc/library/wave.rst
    14  It does not support compression/decompression, but it does support mono/stereo.
    70  Returns number of audio channels (``1`` for mono, ``2`` for stereo).
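Aside on the wave.rst hits above: getnchannels() is the channel-count accessor those lines document. A minimal Python 3 sketch of the usage (the file name is hypothetical):

    import wave

    # Hypothetical input; any uncompressed PCM WAV file works.
    with wave.open("example.wav", "rb") as wf:
        nchannels = wf.getnchannels()  # 1 for mono, 2 for stereo
        print("stereo" if nchannels == 2 else "mono")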
|
/external/python/cpython3/Doc/library/audioop.rst
   221  Convert a stereo fragment to a mono fragment. The left channel is multiplied by
   228  Generate a stereo fragment from a mono fragment. Each pair of samples in the
   229  stereo fragment are computed from the mono sample, whereby left channel samples
   240  between mono and stereo fragments, i.e. all samples are treated equal. If this
   241  is a problem the stereo fragment should be split into two mono fragments first
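The audioop.rst hits (both the cpython2 and cpython3 copies above) document tomono() and tostereo(). A minimal sketch of the round trip with made-up sample data; note the module assumes native byte order and was removed in Python 3.13:

    import audioop

    # Four 16-bit mono samples (width=2), native byte order assumed.
    mono = b"\x00\x01\x00\x02\x00\x03\x00\x04"

    # Duplicate mono into both channels (left/right factors of 1.0) ...
    stereo = audioop.tostereo(mono, 2, 1.0, 1.0)

    # ... then mix back down, weighting each channel by 0.5.
    mixed = audioop.tomono(stereo, 2, 0.5, 0.5)
    assert mixed == mono  # exact when both channels are identical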
|
/external/python/cpython3/Doc/library/aifc.rst
    23  sampled. The number of channels indicate if the audio is mono, stereo, or
    30  channels (stereo) and has a frame rate of 44,100 frames/second. This gives a
    59  Return the number of audio channels (1 for mono, 2 for stereo).
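The aifc.rst excerpt is cut off mid-sentence ("This gives a..."); the arithmetic it leads into is channels x bytes-per-sample x frames-per-second. A sketch under that reading (the file name is hypothetical):

    import aifc

    with aifc.open("example.aiff", "rb") as af:
        nchannels = af.getnchannels()  # 1 for mono, 2 for stereo
        sampwidth = af.getsampwidth()  # bytes per sample
        framerate = af.getframerate()  # frames per second
        # e.g. 2 channels * 2 bytes * 44100 frames/s = 176400 bytes/s
        print(nchannels * sampwidth * framerate, "bytes per second")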
|
/external/autotest/client/site_tests/audio_Microphone/control
    10  Mono and stereo capturing should work fine @ 44.1KHz and 48KHz.
|
/external/webrtc/resources/audio_coding/READ.ME
     4  teststereo32kHz.pcm - stereo speech file samples at 32 kHz
|
/external/libopus/tests/run_vectors.sh
   104  echo Testing stereo
|
/external/sonic/debian/changelog.vinux
     3  * Converted to mostly integer arithmetic, and added stereo support.
|
/external/tensorflow/tensorflow/core/api_def/base_api/api_def_DecodeWav.pbtxt
    47  number of samples. For example, a ten-sample-long stereo WAV file should give an
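The api_def describes a sample-major [samples, channels] output. A sketch through the op's Python binding, assuming TensorFlow 2.x's tf.audio.decode_wav (the file name is hypothetical):

    import tensorflow as tf

    contents = tf.io.read_file("ten_samples_stereo.wav")  # hypothetical file
    audio, sample_rate = tf.audio.decode_wav(contents)

    # First axis is samples, second is channels: a ten-sample stereo
    # file decodes to shape [10, 2], matching the api_def text above.
    print(audio.shape, sample_rate.numpy())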
|
/external/perfetto/src/traced/probes/ftrace/test/data/android_flounder_lte_LRX16F_3.10.40/events/display/display_mode/format
    24  …h: H=%d V=%d back_porch: H=%d V=%d active: H=%d V=%d front_porch: H=%d V=%d pclk=%ld stereo mode=%d
|
/external/webrtc/webrtc/modules/audio_processing/test/audio_processing_unittest.cc
   105  void MixStereoToMono(const float* stereo, float* mono,    in MixStereoToMono() (argument)
   108  mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) / 2;    in MixStereoToMono()
   111  void MixStereoToMono(const int16_t* stereo, int16_t* mono,    in MixStereoToMono() (argument)
   114  mono[i] = (stereo[i * 2] + stereo[i * 2 + 1]) >> 1;    in MixStereoToMono()
   117  void CopyLeftToRightChannel(int16_t* stereo, size_t samples_per_channel) {    in CopyLeftToRightChannel() (argument)
   119  stereo[i * 2 + 1] = stereo[i * 2];    in CopyLeftToRightChannel()
   123  void VerifyChannelsAreEqual(int16_t* stereo, size_t samples_per_channel) {    in VerifyChannelsAreEqual() (argument)
   125  EXPECT_EQ(stereo[i * 2 + 1], stereo[i * 2]);    in VerifyChannelsAreEqual()
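The test helpers above downmix by averaging interleaved [L, R] pairs. The same pattern as a short, self-contained Python sketch (an illustration only, not WebRTC API):

    def mix_stereo_to_mono(stereo, samples_per_channel):
        # Average interleaved [L, R, L, R, ...] pairs, mirroring the
        # (stereo[2*i] + stereo[2*i + 1]) / 2 helpers quoted above.
        return [(stereo[2 * i] + stereo[2 * i + 1]) // 2
                for i in range(samples_per_channel)]

    assert mix_stereo_to_mono([10, 20, 30, 50], 2) == [15, 40]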
|
/external/libopus/README
     8  quality stereo music.
   109  -forcemono : force mono encoding, even for stereo input
|