
Searched refs:sample_size (Results 1 – 25 of 59) sorted by relevance

/external/libvpx/libvpx/third_party/libyuv/source/
mjpeg_validate.cc
21 static LIBYUV_BOOL ScanEOI(const uint8_t* sample, size_t sample_size) { in ScanEOI() argument
22 if (sample_size >= 2) { in ScanEOI()
23 const uint8_t* end = sample + sample_size - 1; in ScanEOI()
42 LIBYUV_BOOL ValidateJpeg(const uint8_t* sample, size_t sample_size) { in ValidateJpeg() argument
46 if (sample_size < 64 || sample_size > kMaxJpegSize || !sample) { in ValidateJpeg()
56 if (sample_size > kBackSearchSize) { in ValidateJpeg()
57 if (ScanEOI(sample + sample_size - kBackSearchSize, kBackSearchSize)) { in ValidateJpeg()
61 sample_size = sample_size - kBackSearchSize + 1; in ValidateJpeg()
64 return ScanEOI(sample + 2, sample_size - 2); in ValidateJpeg()
convert_jpeg.cc
93 size_t sample_size, in MJPGSize() argument
97 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGSize()
111 size_t sample_size, in MJPGToI420() argument
122 if (sample_size == kUnknownDataSize) { in MJPGToI420()
129 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToI420()
249 size_t sample_size, in MJPGToARGB() argument
256 if (sample_size == kUnknownDataSize) { in MJPGToARGB()
263 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToARGB()
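
The three libyuv entry points hit above (ValidateJpeg, MJPGSize, MJPGToI420) all take the same sample/sample_size pair: a pointer to an MJPEG-compressed frame and its byte length. Below is a minimal usage sketch, assuming the newer uint8_t spelling and the header locations used in this tree; the helper name and buffer layout are illustrative, not code from the indexed files.

  #include <cstddef>
  #include <cstdint>
  #include <vector>
  #include "libyuv/convert.h"        // MJPGSize, MJPGToI420
  #include "libyuv/mjpeg_decoder.h"  // ValidateJpeg

  // Decode one MJPEG frame into a caller-owned I420 buffer (assumes even dimensions).
  bool DecodeMjpegFrame(const uint8_t* sample, size_t sample_size,
                        std::vector<uint8_t>* i420, int* width, int* height) {
    // ValidateJpeg rejects buffers shorter than 64 bytes and scans for the EOI marker.
    if (!libyuv::ValidateJpeg(sample, sample_size)) {
      return false;
    }
    if (libyuv::MJPGSize(sample, sample_size, width, height) != 0) {
      return false;
    }
    const int w = *width;
    const int h = *height;
    i420->resize(static_cast<size_t>(w) * h * 3 / 2);
    uint8_t* y = i420->data();
    uint8_t* u = y + w * h;
    uint8_t* v = u + (w / 2) * (h / 2);
    return libyuv::MJPGToI420(sample, sample_size, y, w, u, w / 2, v, w / 2,
                              w, h, w, h) == 0;
  }
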
/external/libyuv/files/source/
mjpeg_validate.cc
21 static LIBYUV_BOOL ScanEOI(const uint8* sample, size_t sample_size) { in ScanEOI() argument
22 if (sample_size >= 2) { in ScanEOI()
23 const uint8* end = sample + sample_size - 1; in ScanEOI()
42 LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size) { in ValidateJpeg() argument
46 if (sample_size < 64 || sample_size > kMaxJpegSize || !sample) { in ValidateJpeg()
56 if (sample_size > kBackSearchSize) { in ValidateJpeg()
57 if (ScanEOI(sample + sample_size - kBackSearchSize, kBackSearchSize)) { in ValidateJpeg()
61 sample_size = sample_size - kBackSearchSize + 1; in ValidateJpeg()
64 return ScanEOI(sample + 2, sample_size - 2); in ValidateJpeg()
convert_jpeg.cc
92 int MJPGSize(const uint8* sample, size_t sample_size, int* width, int* height) { in MJPGSize() argument
94 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGSize()
107 size_t sample_size, in MJPGToI420() argument
118 if (sample_size == kUnknownDataSize) { in MJPGToI420()
125 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToI420()
239 size_t sample_size, in MJPGToARGB() argument
246 if (sample_size == kUnknownDataSize) { in MJPGToARGB()
253 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToARGB()
/external/libaom/libaom/third_party/libyuv/source/
mjpeg_validate.cc
49 static LIBYUV_BOOL ScanEOI(const uint8* sample, size_t sample_size) { in ScanEOI() argument
50 const uint8* end = sample + sample_size - 1; in ScanEOI()
71 LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size) { in ValidateJpeg() argument
73 if (sample_size < 64) { in ValidateJpeg()
83 sample_size -= 2; in ValidateJpeg()
86 if (sample_size > kBackSearchSize) { in ValidateJpeg()
87 if (ScanEOI(sample + sample_size - kBackSearchSize, kBackSearchSize)) { in ValidateJpeg()
91 sample_size = sample_size - kBackSearchSize + 1; in ValidateJpeg()
93 return ScanEOI(sample, sample_size); in ValidateJpeg()
convert_jpeg.cc
124 int MJPGSize(const uint8* sample, size_t sample_size, in MJPGSize() argument
127 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGSize()
140 size_t sample_size, in MJPGToI420() argument
146 if (sample_size == kUnknownDataSize) { in MJPGToI420()
153 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToI420()
304 size_t sample_size, in MJPGToARGB() argument
308 if (sample_size == kUnknownDataSize) { in MJPGToARGB()
315 LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size); in MJPGToARGB()
/external/tensorflow/tensorflow/core/kernels/
fused_batch_norm_op.cu.cc
48 int sample_size, T* variance) { in InvVarianceToVarianceKernel() argument
53 var *= T(sample_size) / T((sample_size > 1) ? sample_size - 1 : 1); in InvVarianceToVarianceKernel()
60 double epsilon, int sample_size, in operator ()() argument
65 epsilon, sample_size, variance); in operator ()()
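
The fused_batch_norm_op hits above convert the inverse variance kept on the GPU back into a variance, and the multiply on line 53 appears to be Bessel's correction: scaling by N / (N - 1) turns a population variance into the unbiased sample estimate, with a guard for N <= 1. A standalone sketch of just that rescaling (the function name is illustrative, not the kernel itself):

  #include <algorithm>

  // Rescale a biased (population) variance to the unbiased sample estimate.
  float BiasedToUnbiasedVariance(float biased_var, int sample_size) {
    const int denom = std::max(sample_size - 1, 1);  // guard against sample_size <= 1
    return biased_var * static_cast<float>(sample_size) / static_cast<float>(denom);
  }
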
clustering_ops_test.cc
55 Tensor sample_size(DT_INT64, TensorShape({})); in SetUpKmeansPlusPlusInitialization() local
59 sample_size.flat<int64>().setConstant(num_to_sample); in SetUpKmeansPlusPlusInitialization()
66 .Input(test::graph::Constant(g, sample_size)) in SetUpKmeansPlusPlusInitialization()
/external/webrtc/talk/media/base/
videoframe.cc
247 size_t sample_size) { in Validate() argument
317 << " " << sample_size; in Validate()
322 for (size_t i = 0; i < arraysize(four_samples) && i < sample_size; ++i) { in Validate()
325 if (sample_size < expected_size) { in Validate()
330 << " " << sample_size in Validate()
338 if (sample_size > kMaxSampleSize) { in Validate()
343 << " " << sample_size in Validate()
356 (sample_size > large_expected_size || sample_size > kMaxSampleSize) && in Validate()
363 << " bytes: " << sample_size in Validate()
380 << " bytes: " << sample_size in Validate()
videoframe.h
58 size_t sample_size,
203 size_t sample_size);
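
The webrtc Validate() hits above compare sample_size against a frame size expected from the FOURCC and dimensions, rejecting samples that are too small or larger than kMaxSampleSize. A hedged sketch of the I420 case only, assuming the usual plane layout (the helper is illustrative, not the webrtc function):

  #include <cstddef>

  // Expected byte count of an I420 frame: full-size Y plane plus two half-by-half chroma planes.
  bool LooksLikeValidI420(int width, int height, size_t sample_size) {
    const size_t chroma = static_cast<size_t>((width + 1) / 2) * ((height + 1) / 2);
    const size_t expected = static_cast<size_t>(width) * height + 2 * chroma;
    return sample_size >= expected;
  }
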
/external/tensorflow/tensorflow/contrib/eager/python/examples/l2hmc/
l2hmc_test.py
149 for sample_size in [10, 25, 50, 100, 200]:
150 hparams.n_samples = sample_size
186 ("gpu" if tf.test.is_gpu_available() else "cpu", sample_size),
201 for sample_size in [10, 25, 50, 100, 200]:
202 hparams.n_samples = sample_size
235 "_defun" if defun else "", sample_size),
/external/webrtc/talk/media/webrtc/
webrtcvideoframe.cc
67 size_t sample_size, in Init() argument
72 return Reset(format, w, h, dw, dh, sample, sample_size, pixel_width, in Init()
198 size_t sample_size, in Reset() argument
204 if (!Validate(format, w, h, sample, sample_size)) { in Reset()
231 sample, sample_size, in Reset()
webrtcvideoframe.h
61 size_t sample_size,
82 size_t sample_size,
/external/tensorflow/tensorflow/lite/experimental/micro/examples/micro_speech/osx/
audio_provider.cc
46 const int sample_size = buffer->mAudioDataByteSize / sizeof(float); in OnAudioBufferFilledCallback() local
49 (sample_offset + sample_size) / (kAudioSampleFrequency / 1000); in OnAudioBufferFilledCallback()
51 for (int i = 0; i < sample_size; ++i) { in OnAudioBufferFilledCallback()
/external/tensorflow/tensorflow/lite/experimental/micro/examples/micro_speech/disco_f746ng/
audio_provider.cc
112 const int sample_size = AUDIO_BLOCK_SIZE / (sizeof(int16_t) * 2); in CaptureSamples() local
114 g_latest_audio_timestamp + (sample_size / (kAudioSampleFrequency / 1000)); in CaptureSamples()
118 for (int i = 0; i < sample_size; ++i) { in CaptureSamples()
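
In both micro_speech audio providers above, sample_size is the number of samples in the freshly filled buffer (byte size divided by the per-sample width, with the STM32 variant keeping one of two interleaved channels), and the running timestamp advances by sample_size divided by samples-per-millisecond. A sketch of that arithmetic for a mono int16 buffer at the 16 kHz rate used by micro_speech (names are illustrative):

  #include <cstddef>
  #include <cstdint>

  constexpr int kAudioSampleFrequency = 16000;  // 16 kHz capture rate

  // Advance a millisecond timestamp by the duration of a newly captured buffer.
  int32_t AdvanceTimestampMs(int32_t latest_timestamp_ms, size_t buffer_bytes) {
    const int sample_size = static_cast<int>(buffer_bytes / sizeof(int16_t));
    return latest_timestamp_ms + sample_size / (kAudioSampleFrequency / 1000);
  }
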
/external/python/cpython2/Lib/
sndhdr.py
92 sample_size = 1 # default
99 sample_size = 2
102 frame_size = sample_size * nchannels
/external/python/cpython3/Lib/
sndhdr.py
109 sample_size = 1 # default
116 sample_size = 2
119 frame_size = sample_size * nchannels
/external/perfetto/tools/
profiling_sample_distribution.cc
133 size_t sample_size = in ProfilingSampleDistributionMain() local
138 totals[pair.first] += sample_size; in ProfilingSampleDistributionMain()
/external/adhd/cras/src/server/
cras_audio_area.c
99 const int sample_size = snd_pcm_format_physical_width(fmt->format) / 8; in cras_audio_area_config_buf_pointers() local
104 area->channels[i].buf = base_buffer + i * sample_size; in cras_audio_area_config_buf_pointers()
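
The cras_audio_area hit above derives sample_size as the physical sample width in bytes and then points each channel at its offset within an interleaved frame. A minimal sketch of that pointer setup (types and the helper name are illustrative):

  #include <cstdint>

  // For interleaved audio, channel i's first sample sits i * sample_size bytes into the frame.
  void SetChannelPointers(uint8_t* base_buffer, int sample_size, int num_channels,
                          uint8_t** channel_bufs) {
    for (int i = 0; i < num_channels; ++i) {
      channel_bufs[i] = base_buffer + i * sample_size;
    }
  }
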
/external/libvpx/libvpx/third_party/libyuv/include/libyuv/
convert.h
341 size_t sample_size,
356 size_t sample_size,
385 size_t sample_size,
/external/scapy/scapy/modules/
voip.py
31 def _merge_sound_bytes(x,y,sample_size=2): argument
36 ss=sample_size
/external/tensorflow/tensorflow/contrib/distributions/python/kernel_tests/
mixture_test.py
824 sample_size): argument
835 sample_op = mixture.sample(sample_size).op
843 sample_size)))
845 use_gpu, num_components, batch_size, num_features, sample_size,
874 for sample_size in 1, 32, 128:
882 sample_size=sample_size)
916 for sample_size in 1, 32, 128:
924 sample_size=sample_size)
/external/libyuv/files/include/libyuv/
convert.h
304 size_t sample_size,
318 int MJPGSize(const uint8* sample, size_t sample_size, int* width, int* height);
/external/libaom/libaom/third_party/libyuv/include/libyuv/
convert.h
195 int MJPGToI420(const uint8* sample, size_t sample_size,
204 int MJPGSize(const uint8* sample, size_t sample_size,
/external/autotest/client/cros/audio/
audio_helper.py
245 sample_size=16): argument
254 cmdargs = [SOX_PATH, '-b', str(sample_size), '-n', '-t', 'alsa',
266 sample_size=16): argument
275 cmdargs = get_play_sine_args(channel, odev, freq, duration, sample_size)
