/external/v4l2_codec2/vda/ |
D | bit_reader_core.cc |
     83  const int window_size =   in SkipBits()  local
     85  DCHECK_GE(window_size, 0);   in SkipBits()
     86  DCHECK_LE(window_size, nbytes);   in SkipBits()
     87  if (window_size < nbytes) {   in SkipBits()
     89  bits_read_ += 8 * window_size;   in SkipBits()
    152  int window_size =   in Refill()  local
    154  DCHECK_GE(window_size, 0);   in Refill()
    155  DCHECK_LE(window_size, max_nbytes);   in Refill()
    156  if (window_size == 0)   in Refill()
    160  memcpy(&reg_next_, byte_stream_window, window_size);   in Refill()
    [all …]
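The matches above come from a register-based bit reader that refills a 64-bit register from a sliding byte-stream window and counts consumed bits. Below is a minimal, self-contained sketch of that refill/skip pattern; SimpleBitReader and its members are illustrative names, not Chromium's actual BitReaderCore API.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Minimal sketch of a register-based bit reader: bytes are copied into a
// 64-bit register and bits are consumed from it, refilling as needed.
class SimpleBitReader {
 public:
  SimpleBitReader(const uint8_t* data, int size)
      : data_(data), size_(size), pos_(0), reg_(0), bits_in_reg_(0) {}

  // Refill the register with up to 8 bytes from the stream.
  void Refill() {
    const int window_size = std::min<int>(size_ - pos_, 8);
    assert(window_size >= 0);
    if (window_size == 0) return;
    uint64_t next = 0;
    std::memcpy(&next, data_ + pos_, window_size);  // copy the byte window
    pos_ += window_size;
    reg_ = next;
    bits_in_reg_ = 8 * window_size;
  }

  // Discard |nbits| bits, refilling the register as it runs dry.
  void SkipBits(int nbits) {
    while (nbits > 0) {
      if (bits_in_reg_ == 0) Refill();
      if (bits_in_reg_ == 0) return;  // end of stream
      const int consumed = std::min(nbits, bits_in_reg_);
      reg_ >>= consumed;
      bits_in_reg_ -= consumed;
      nbits -= consumed;
    }
  }

  int bits_available() const { return bits_in_reg_ + 8 * (size_ - pos_); }

 private:
  const uint8_t* data_;
  int size_;
  int pos_;
  uint64_t reg_;
  int bits_in_reg_;
};

int main() {
  const uint8_t stream[] = {0xAB, 0xCD, 0xEF, 0x01};
  SimpleBitReader reader(stream, sizeof(stream));
  reader.SkipBits(12);
  std::printf("bits left: %d\n", reader.bits_available());  // prints 20
  return 0;
}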
|
/external/squashfs-tools/squashfs-tools/ |
D | gzip_wrapper.c |
     48  static int window_size = GZIP_DEFAULT_WINDOW_SIZE;   variable
     92  window_size = atoi(argv[1]);   in gzip_options()
     93  if(window_size < 8 || window_size > 15) {   in gzip_options()
    186  window_size == GZIP_DEFAULT_WINDOW_SIZE &&   in gzip_dump_options()
    194  comp_opts.window_size = window_size;   in gzip_dump_options()
    232  window_size = GZIP_DEFAULT_WINDOW_SIZE;   in gzip_extract_options()
    252  if(comp_opts->window_size < 8 ||   in gzip_extract_options()
    253  comp_opts->window_size > 15) {   in gzip_extract_options()
    258  window_size = comp_opts->window_size;   in gzip_extract_options()
    299  if(comp_opts->window_size < 8 ||   in gzip_display_options()
    [all …]
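Here window_size is the deflate window exponent, validated to the range 8..15 before being stored in the compressor options. As a hedged illustration of what that value controls, the sketch below feeds it straight to zlib's deflateInit2() as windowBits; CompressWithWindow is an invented helper, not squashfs-tools code.

#include <cstdio>
#include <cstring>
#include <vector>
#include <zlib.h>

// Compress |src| with an explicit deflate window exponent (valid range 8..15,
// matching the check in gzip_wrapper.c). windowBits == window_size selects a
// 2^window_size byte history window.
static int CompressWithWindow(const unsigned char* src, size_t src_len,
                              int window_size,
                              std::vector<unsigned char>* out) {
  if (window_size < 8 || window_size > 15) return -1;

  z_stream strm;
  std::memset(&strm, 0, sizeof(strm));
  if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, window_size,
                   8 /* memLevel */, Z_DEFAULT_STRATEGY) != Z_OK) {
    return -1;
  }

  out->resize(deflateBound(&strm, src_len));
  strm.next_in = const_cast<unsigned char*>(src);
  strm.avail_in = static_cast<uInt>(src_len);
  strm.next_out = out->data();
  strm.avail_out = static_cast<uInt>(out->size());

  const int ret = deflate(&strm, Z_FINISH);
  deflateEnd(&strm);
  if (ret != Z_STREAM_END) return -1;
  out->resize(strm.total_out);
  return 0;
}

int main() {
  const unsigned char text[] = "squashfs squashfs squashfs squashfs";
  std::vector<unsigned char> compressed;
  if (CompressWithWindow(text, sizeof(text), 15, &compressed) == 0)
    std::printf("compressed %zu -> %zu bytes\n", sizeof(text), compressed.size());
  return 0;
}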
|
D | gzip_wrapper.h |
     41  (s)->window_size = inswap_le16((s)->window_size); \
     54  short window_size;   member
|
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/ |
D | input_pipeline_test.py |
     84  self, time_series_reader, window_size, batch_size, num_features,   argument
     88  window_size=window_size, batch_size=batch_size)
     98  self.assertAllEqual([batch_size, window_size],
    100  for window_position in range(window_size - 1):
    108  self.assertAllEqual([batch_size, window_size, num_features],
    120  num_features=1, window_size=2, batch_size=5,
    159  num_features=1, window_size=3, batch_size=5,
    169  num_features=1, window_size=3, batch_size=5,
    189  num_features=1, window_size=3, batch_size=5,
    198  num_features=1, window_size=3, batch_size=5,
    [all …]
|
D | ar_model_test.py |
    103  window_size = input_window_size + output_window_size
    124  window_size=window_size,
    130  window_size=window_size)
    268  window_size=3)()
|
D | ar_model.py |
     90  self.window_size = self.input_window_size + self.output_window_size
    181  times.get_shape().assert_is_compatible_with([None, self.window_size])
    189  activation_size = self.window_size * self._buckets * len(self._periods)
    378  expected_times_shape = [None, self.window_size]
    391  window_size=self.window_size,
    471  and static_window_size < self.window_size):
    501  feature_value[:, base_offset:base_offset + self.window_size]
|
/external/zopfli/src/zopfli/ |
D | hash.c |
     29  void ZopfliInitHash(size_t window_size, ZopfliHash* h) {   in ZopfliInitHash()  argument
     34  h->prev = (unsigned short*)malloc(sizeof(*h->prev) * window_size);   in ZopfliInitHash()
     35  h->hashval = (int*)malloc(sizeof(*h->hashval) * window_size);   in ZopfliInitHash()
     39  for (i = 0; i < window_size; i++) {   in ZopfliInitHash()
     45  h->same = (unsigned short*)malloc(sizeof(*h->same) * window_size);   in ZopfliInitHash()
     46  for (i = 0; i < window_size; i++) {   in ZopfliInitHash()
     54  h->prev2 = (unsigned short*)malloc(sizeof(*h->prev2) * window_size);   in ZopfliInitHash()
     55  h->hashval2 = (int*)malloc(sizeof(*h->hashval2) * window_size);   in ZopfliInitHash()
     59  for (i = 0; i < window_size; i++) {   in ZopfliInitHash()
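ZopfliInitHash sizes its prev/hashval chains to the LZ77 match window. The sketch below shows the general hash-chain idea with illustrative names (HashChains, Insert); it is not Zopfli's actual structure, which additionally tracks same-length runs and a second hash.

#include <cstdio>
#include <vector>

// Sketch of an LZ77 hash-chain table sized to the match window: prev[] links
// window slots that held the same hash, hashval[] caches the hash per slot,
// and head[] remembers the most recent slot for each hash bucket.
struct HashChains {
  std::vector<unsigned short> prev;
  std::vector<int> hashval;
  std::vector<int> head;

  explicit HashChains(size_t window_size, size_t num_buckets = 1 << 15)
      : prev(window_size), hashval(window_size, -1), head(num_buckets, -1) {
    // Like ZopfliInitHash, start every position chained to itself.
    for (size_t i = 0; i < window_size; ++i)
      prev[i] = static_cast<unsigned short>(i);
  }

  // Record that window slot |pos| currently holds hash |h|.
  void Insert(size_t pos, unsigned h) {
    const size_t slot = pos % prev.size();
    hashval[slot] = static_cast<int>(h);
    const int bucket_head = head[h % head.size()];
    prev[slot] = bucket_head >= 0 ? static_cast<unsigned short>(bucket_head)
                                  : static_cast<unsigned short>(slot);
    head[h % head.size()] = static_cast<int>(slot);
  }
};

int main() {
  HashChains chains(32768);  // 32768 bytes == the deflate history window
  chains.Insert(0, 1234);
  chains.Insert(100, 1234);
  // Slot 100 chains back to the earlier slot with the same hash.
  std::printf("prev of slot 100: %d\n", static_cast<int>(chains.prev[100]));
  return 0;
}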
|
/external/webrtc/webrtc/modules/audio_processing/agc/ |
D | histogram.cc |
     76  Histogram::Histogram(int window_size)   in Histogram()  argument
     80  activity_probability_(new int[window_size]),   in Histogram()
     81  hist_bin_index_(new int[window_size]),   in Histogram()
     84  len_circular_buffer_(window_size),   in Histogram()
    174  Histogram* Histogram::Create(int window_size) {   in Create()  argument
    175  if (window_size < 0)   in Create()
    177  return new Histogram(window_size);   in Create()
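The AGC histogram allocates window_size-length circular buffers (activity_probability_, hist_bin_index_). A generic sketch of such a fixed-window circular buffer, with invented names, is below; it is not WebRTC's Histogram class.

#include <cstdio>
#include <vector>

// Fixed-size circular buffer: once full, each new sample overwrites the
// oldest one, so statistics always cover the last window_size samples.
class CircularWindow {
 public:
  explicit CircularWindow(int window_size)
      : buffer_(window_size, 0), next_(0), filled_(0) {}

  void Push(int value) {
    buffer_[next_] = value;
    next_ = (next_ + 1) % buffer_.size();
    if (filled_ < static_cast<int>(buffer_.size())) ++filled_;
  }

  double Average() const {
    if (filled_ == 0) return 0.0;
    long long sum = 0;
    for (int i = 0; i < filled_; ++i) sum += buffer_[i];
    return static_cast<double>(sum) / filled_;
  }

 private:
  std::vector<int> buffer_;
  size_t next_;
  int filled_;
};

int main() {
  CircularWindow window(3);
  for (int v : {10, 20, 30, 40}) window.Push(v);  // 10 falls out of the window
  std::printf("windowed average: %.1f\n", window.Average());  // prints 30.0
  return 0;
}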
|
/external/sfntly/cpp/src/test/ |
D | font_data_test.cc |
    165  void ReadFontDataWithSlidingWindow(ReadableFontData* rfd, int32_t window_size,   in ReadFontDataWithSlidingWindow()  argument
    171  std::min<int32_t>(window_size, b->size() - index);   in ReadFontDataWithSlidingWindow()
    200  int32_t window_size) {   in WriteFontDataWithSlidingWindow()  argument
    204  int32_t sliding_size = std::min<int32_t>(window_size, b.size() - index);   in WriteFontDataWithSlidingWindow()
    240  for (int32_t window_size = 1; window_size <= length;   in ReadComparison()  local
    241  window_size += increments) {   in ReadComparison()
    246  ReadFontDataWithSlidingWindow(rfd1, window_size, &b1);   in ReadComparison()
    247  ReadFontDataWithSlidingWindow(rfd2, window_size, &b2);   in ReadComparison()
    293  for (int window_size = 1; window_size < length; window_size += increments) {   in SlicingWriteTest()  local
    296  WriteFontDataWithSlidingWindow(r_slice, w_slice, window_size);   in SlicingWriteTest()
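Both sfntly helpers copy data in window_size-byte chunks, clamping the final chunk with std::min. A standalone sketch of that sliding-window copy over a plain byte vector follows; CopyWithSlidingWindow is an illustrative stand-in, not the sfntly API.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

// Copy |source| into |destination| in window_size-byte chunks, clamping the
// last chunk to whatever remains, as the sliding-window test helpers do.
void CopyWithSlidingWindow(const std::vector<uint8_t>& source,
                           int32_t window_size,
                           std::vector<uint8_t>* destination) {
  destination->resize(source.size());
  int32_t index = 0;
  const int32_t total = static_cast<int32_t>(source.size());
  while (index < total) {
    const int32_t actual_window_size =
        std::min<int32_t>(window_size, total - index);
    std::copy(source.begin() + index,
              source.begin() + index + actual_window_size,
              destination->begin() + index);
    index += actual_window_size;
  }
}

int main() {
  std::vector<uint8_t> src(1000);
  for (size_t i = 0; i < src.size(); ++i) src[i] = static_cast<uint8_t>(i);
  std::vector<uint8_t> dst;
  // Exercise several window sizes, as the ReadComparison loops do.
  for (int32_t window_size = 1; window_size <= 7; ++window_size) {
    CopyWithSlidingWindow(src, window_size, &dst);
    std::printf("window_size=%d equal=%d\n", window_size, src == dst);
  }
  return 0;
}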
|
D | byte_array_test.cc |
     46  void ReadByteArrayWithSlidingWindow(ByteArray* ba, int window_size,   in ReadByteArrayWithSlidingWindow()  argument
     50  int32_t actual_window_size = window_size;   in ReadByteArrayWithSlidingWindow()
     81  for (int window_size = 1; window_size < ba1->Length();   in ReadComparison()  local
     82  window_size += increments) {   in ReadComparison()
     83  ReadByteArrayWithSlidingWindow(ba1, window_size, &b1);   in ReadComparison()
     84  ReadByteArrayWithSlidingWindow(ba2, window_size, &b2);   in ReadComparison()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | attention_ops.cc |
     54  const Tensor& window_size = context->input(1);   in Compute()  local
     56  (window_size.shape().dims() == 1) &&   in Compute()
     57  window_size.shape().dim_size(0) == 2,   in Compute()
     60  window_size.shape().DebugString()));   in Compute()
     62  const int64 output_height = window_size.tensor<int, 1>()(0);   in Compute()
     63  const int64 output_width = window_size.tensor<int, 1>()(1);   in Compute()
|
/external/tensorflow/tensorflow/contrib/data/python/ops/ |
D | grouping.py |
     31  window_size=None,   argument
     67  if (window_size is not None and window_size_func or
     68  not (window_size is not None or window_size_func)):
     71  if window_size is not None:
     74  return ops.convert_to_tensor(window_size, dtype=dtypes.int64)
    134  window_size = ops.convert_to_tensor(
    136  if window_size.dtype != dtypes.int64:
    139  return window_size
|
/external/libvpx/libvpx/examples/ |
D | vpx_temporal_svc_encoder.c |
     78  int window_size;   member
    114  rc->window_size = 15;   in set_rate_control_metrics()
    159  printf("Short-time stats, for window of %d frames: \n", rc->window_size);   in printout_rate_control_summary()
    870  if (frame_cnt > rc.window_size) {   in main()
    872  if (frame_cnt % rc.window_size == 0) {   in main()
    874  rc.avg_st_encoding_bitrate += sum_bitrate / rc.window_size;   in main()
    876  (sum_bitrate / rc.window_size) *   in main()
    877  (sum_bitrate / rc.window_size);   in main()
    882  if (frame_cnt > rc.window_size + rc.window_size / 2) {   in main()
    884  if (frame_cnt > 2 * rc.window_size &&   in main()
    [all …]
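Both SVC example encoders accumulate sum_bitrate over windows of window_size frames and track the windowed average and its square so a short-time mean and variance can be reported at the end. A simplified sketch of that bookkeeping is below; the per-frame kbit values and the ShortTimeRateStats struct are assumptions for illustration, not the encoders' real packet accounting.

#include <cmath>
#include <cstdio>

// Short-time rate statistics over a fixed frame window, mirroring the
// sum_bitrate / window_size accumulation in the libvpx SVC examples.
struct ShortTimeRateStats {
  int window_size = 15;                       // frames per window, as in the examples
  double sum_bitrate = 0.0;                   // kbits accumulated in the current window
  double avg_st_encoding_bitrate = 0.0;       // running sum of windowed averages
  double variance_st_encoding_bitrate = 0.0;  // running sum of squared window averages
  int window_count = 0;

  void AddFrame(int frame_cnt, double frame_kbits) {
    sum_bitrate += frame_kbits;
    if (frame_cnt > 0 && frame_cnt % window_size == 0) {
      const double window_avg = sum_bitrate / window_size;
      avg_st_encoding_bitrate += window_avg;
      variance_st_encoding_bitrate += window_avg * window_avg;
      ++window_count;
      sum_bitrate = 0.0;
    }
  }

  void PrintSummary() const {
    if (window_count == 0) return;
    const double mean = avg_st_encoding_bitrate / window_count;
    double variance = variance_st_encoding_bitrate / window_count - mean * mean;
    if (variance < 0.0) variance = 0.0;  // guard against rounding
    std::printf("Short-time stats, for window of %d frames: mean %.1f, std %.1f\n",
                window_size, mean, std::sqrt(variance));
  }
};

int main() {
  ShortTimeRateStats rc;
  for (int frame_cnt = 1; frame_cnt <= 150; ++frame_cnt)
    rc.AddFrame(frame_cnt, /*frame_kbits=*/25.0 + (frame_cnt % 7));
  rc.PrintSummary();
  return 0;
}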
|
D | vp9_spatial_svc_encoder.c |
    409  int window_size;   member
    448  rc->window_size = 15;   in set_rate_control_stats()
    501  printf("Short-time stats, for window of %d frames: \n", rc->window_size);   in printout_rate_control_summary()
    827  if (frame_cnt > (unsigned int)rc.window_size) {   in main()
    832  if (frame_cnt % rc.window_size == 0) {   in main()
    834  rc.avg_st_encoding_bitrate += sum_bitrate / rc.window_size;   in main()
    836  (sum_bitrate / rc.window_size) *   in main()
    837  (sum_bitrate / rc.window_size);   in main()
    844  (unsigned int)(rc.window_size + rc.window_size / 2)) {   in main()
    850  if (frame_cnt > (unsigned int)(2 * rc.window_size) &&   in main()
    [all …]
|
/external/adhd/cras/src/server/ |
D | rate_estimator.c |
     42  const struct timespec *window_size,   in rate_estimator_create()  argument
     51  re->window_size = *window_size;   in rate_estimator_create()
    101  if (timespec_after(&td, &re->window_size) &&   in rate_estimator_check()
|
D | rate_estimator.h |
     40  struct timespec window_size;   member
     55  const struct timespec *window_size,
|
/external/tensorflow/tensorflow/examples/wav_to_spectrogram/ |
D | main.cc |
     29  tensorflow::int32 window_size = 256;   in main()  local
     35  tensorflow::Flag("window_size", &window_size,   in main()
     59  input_wav, window_size, stride, brightness, output_image);   in main()
|
/external/perfetto/src/ftrace_reader/test/data/android_seed_N2F62_3.10.49/events/sched/sched_reset_all_window_stats/ |
D | format |
     10  field:u64 window_size; offset:16; size:8; signed:0;
     16  …n %llu window_start %llu window_size %llu reason %s old_val %u new_val %u", REC->time_taken, REC->…
|
/external/tensorflow/tensorflow/core/ops/ |
D | audio_ops.cc |
     73  int32 window_size;   in SpectrogramShapeFn()  local
     74  TF_RETURN_IF_ERROR(c->GetAttr("window_size", &window_size));   in SpectrogramShapeFn()
     86  const int64 length_minus_window = (input_length_value - window_size);   in SpectrogramShapeFn()
     97  c->MakeDim(1 + NextPowerOfTwo(window_size) / 2);   in SpectrogramShapeFn()
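SpectrogramShapeFn derives the output geometry from window_size and stride: roughly (input_length - window_size) / stride + 1 output frames, and 1 + NextPowerOfTwo(window_size) / 2 frequency channels. A standalone sketch of that arithmetic is below; the NextPowerOfTwo helper here is an assumed stand-in for TensorFlow's, and the exact framing formula is inferred from the snippet.

#include <cstdint>
#include <cstdio>

// Round up to the next power of two (assumed equivalent of TF's helper).
int64_t NextPowerOfTwo(int64_t value) {
  int64_t result = 1;
  while (result < value) result <<= 1;
  return result;
}

// Spectrogram output geometry: frames from a strided window over the input,
// channels from an FFT whose length is the window rounded up to a power of two.
void SpectrogramShape(int64_t input_length, int32_t window_size, int32_t stride,
                      int64_t* out_frames, int64_t* out_channels) {
  const int64_t length_minus_window = input_length - window_size;
  *out_frames = length_minus_window < 0 ? 0 : length_minus_window / stride + 1;
  *out_channels = 1 + NextPowerOfTwo(window_size) / 2;
}

int main() {
  int64_t frames = 0, channels = 0;
  SpectrogramShape(/*input_length=*/16000, /*window_size=*/256, /*stride=*/128,
                   &frames, &channels);
  // With a 256-sample window: 124 frames and 1 + 256/2 = 129 channels.
  std::printf("frames=%lld channels=%lld\n",
              static_cast<long long>(frames), static_cast<long long>(channels));
  return 0;
}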
|
/external/tensorflow/tensorflow/examples/android/jni/object_tracking/ |
D | image_utils.h |
    160  const int window_size = 2 * window_radius + 1;   in CalculateG()  local
    161  for (int y = 0; y < window_size; ++y) {   in CalculateG()
    164  for (int x = 0; x < window_size; ++x) {   in CalculateG()
    173  CalculateGInt16(vals_x, vals_y, window_size * window_size, g_temp);   in CalculateG()
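CalculateG sums per-pixel image gradients over a (2 * window_radius + 1)^2 patch to build the 2x2 matrix used by Lucas-Kanade style tracking. A hedged float sketch of that accumulation is below; the real code works on int16 gradients via CalculateGInt16, and the gradient arrays here are illustrative inputs.

#include <cstdio>

// Accumulate the 2x2 gradient matrix G = [Sxx Sxy; Sxy Syy] over a square
// window of side 2 * window_radius + 1 from per-pixel x/y gradients.
void CalculateG(const float* vals_x, const float* vals_y, int window_radius,
                float g[4]) {
  const int window_size = 2 * window_radius + 1;
  float sxx = 0.0f, sxy = 0.0f, syy = 0.0f;
  for (int i = 0; i < window_size * window_size; ++i) {
    sxx += vals_x[i] * vals_x[i];
    sxy += vals_x[i] * vals_y[i];
    syy += vals_y[i] * vals_y[i];
  }
  g[0] = sxx;  g[1] = sxy;
  g[2] = sxy;  g[3] = syy;
}

int main() {
  const int radius = 1;  // 3x3 window -> 9 gradient samples
  const float gx[9] = {1, 0, -1, 1, 0, -1, 1, 0, -1};
  const float gy[9] = {1, 1, 1, 0, 0, 0, -1, -1, -1};
  float g[4];
  CalculateG(gx, gy, radius, g);
  std::printf("G = [%.0f %.0f; %.0f %.0f]\n", g[0], g[1], g[2], g[3]);  // [6 0; 0 6]
  return 0;
}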
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | window_util.cc |
    210  int64 StridedBound(int64 bound, int64 window_size, int64 stride) {   in StridedBound()  argument
    211  CHECK_GE(window_size, 0);   in StridedBound()
    215  if (window_size > bound) {   in StridedBound()
    224  return (bound - window_size) / stride + 1;   in StridedBound()
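StridedBound is the placement-count formula visible in the matches: zero when the window does not fit, otherwise (bound - window_size) / stride + 1. A small standalone version of the same computation:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Number of valid placements of a window of size |window_size| over a
// dimension of size |bound| when stepping by |stride|.
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride) {
  assert(window_size >= 0);
  assert(bound >= 0);
  assert(stride > 0);
  if (bound == 0 || window_size > bound) return 0;  // window does not fit
  return (bound - window_size) / stride + 1;
}

int main() {
  std::printf("%lld\n", static_cast<long long>(StridedBound(10, 3, 1)));  // 8
  std::printf("%lld\n", static_cast<long long>(StridedBound(10, 3, 2)));  // 4
  std::printf("%lld\n", static_cast<long long>(StridedBound(2, 5, 1)));   // 0
  return 0;
}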
|
/external/autotest/client/cros/audio/ |
D | audio_analysis_unittest.py |
     17  def dummy_peak_detection(self, array, window_size):   argument
     33  half_window_size = window_size / 2
     76  window_size = 100
     78  dummy_answer = self.dummy_peak_detection(array, window_size)
     80  improved_answer = audio_analysis.peak_detection(array, window_size)
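The test's dummy_peak_detection marks index i as a peak when array[i] is the maximum within half a window on each side. A brute-force sketch of that reference check follows; DetectPeaks is an illustrative name, not the autotest API.

#include <algorithm>
#include <cstdio>
#include <vector>

// Brute-force peak detection: index i is a peak if array[i] equals the
// maximum of the clamped window [i - w/2, i + w/2].
std::vector<size_t> DetectPeaks(const std::vector<double>& array,
                                size_t window_size) {
  const size_t half_window_size = window_size / 2;
  std::vector<size_t> peaks;
  for (size_t i = 0; i < array.size(); ++i) {
    const size_t start = i >= half_window_size ? i - half_window_size : 0;
    const size_t end = std::min(array.size(), i + half_window_size + 1);
    const double window_max =
        *std::max_element(array.begin() + start, array.begin() + end);
    if (array[i] == window_max) peaks.push_back(i);
  }
  return peaks;
}

int main() {
  const std::vector<double> samples = {0, 1, 3, 1, 0, 2, 5, 2, 0};
  for (size_t peak : DetectPeaks(samples, /*window_size=*/4))
    std::printf("peak at index %zu (value %.0f)\n", peak, samples[peak]);
  return 0;
}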
|
/external/tensorflow/tensorflow/python/keras/_impl/keras/preprocessing/ |
D | sequence.py |
    146  window_size=4,   argument
    196  window_start = max(0, i - window_size)
    197  window_end = min(len(sequence), i + window_size + 1)
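The Keras skipgrams helper pairs each token with the neighbors inside [i - window_size, i + window_size], clamped to the sequence bounds. A sketch of that pairing loop, positive pairs only (the real helper also handles sampling tables and negative samples), with Skipgrams as an illustrative name:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Generate (target, context) skip-gram pairs with a symmetric window,
// mirroring the window_start / window_end clamping in keras' skipgrams().
std::vector<std::pair<int, int>> Skipgrams(const std::vector<int>& sequence,
                                           int window_size) {
  std::vector<std::pair<int, int>> couples;
  const int length = static_cast<int>(sequence.size());
  for (int i = 0; i < length; ++i) {
    const int window_start = std::max(0, i - window_size);
    const int window_end = std::min(length, i + window_size + 1);
    for (int j = window_start; j < window_end; ++j) {
      if (j == i) continue;  // a token is not its own context
      couples.emplace_back(sequence[i], sequence[j]);
    }
  }
  return couples;
}

int main() {
  const std::vector<int> sequence = {1, 2, 3, 4, 5};
  for (const auto& pair : Skipgrams(sequence, /*window_size=*/2))
    std::printf("(%d, %d)\n", pair.first, pair.second);
  return 0;
}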
|
/external/autotest/client/site_tests/power_BacklightControl/ |
D | power_BacklightControl.py |
    200  window_size=10,   argument
    231  if len(samples) >= window_size:
    236  if len(samples) >= window_size and \
|
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/state_space_models/ |
D | state_space_model_test.py |
    312  input_pipeline.NumpyReader(data), window_size=chunk_size)
    349  self, predicted_mean, predicted_covariance, window_size):   argument
    352  window_size,
    357  window_size,
    359  for position in range(window_size - 2):
    382  window_size=4)
    412  window_size=3)
    566  input_pipeline.NumpyReader(data), window_size=chunk_size)
    667  input_pipeline.NumpyReader(dataset), batch_size=16, window_size=2)
    732  input_pipeline.NumpyReader(data), batch_size=16, window_size=16)
|