/external/libaom/libaom/test/ |
D | comp_avg_pred_test.h |
      108  const int in_w = block_size_wide[block_idx];  in RunCheckOutput() local
      120  const int offset_c = 3 + rnd_.PseudoUniform(w - in_w - 7);  in RunCheckOutput()
      122  in_w, in_h, ref8 + offset_r * w + offset_c,  in RunCheckOutput()
      123  in_w, &dist_wtd_comp_params);  in RunCheckOutput()
      124  test_impl(output2, pred8 + offset_r * w + offset_c, in_w, in_h,  in RunCheckOutput()
      125  ref8 + offset_r * w + offset_c, in_w, &dist_wtd_comp_params);  in RunCheckOutput()
      128  for (int j = 0; j < in_w; ++j) {  in RunCheckOutput()
      129  int idx = i * in_w + j;  in RunCheckOutput()
      132  << in_w << "x" << in_h << " Pixel mismatch at index " << idx  in RunCheckOutput()
      153  const int in_w = block_size_wide[block_idx];  in RunSpeedTest() local
      [all …]
|
/external/curl/lib/ |
D | idn_win32.c |
      75   wchar_t *in_w = Curl_convert_UTF8_to_wchar(in);  in curl_win32_idn_to_ascii() local
      76   if(in_w) {  in curl_win32_idn_to_ascii()
      78   int chars = IdnToAscii(0, in_w, -1, punycode, IDN_MAX_LENGTH);  in curl_win32_idn_to_ascii()
      79   free(in_w);  in curl_win32_idn_to_ascii()
      94   wchar_t *in_w = Curl_convert_UTF8_to_wchar(in);  in curl_win32_ascii_to_idn() local
      95   if(in_w) {  in curl_win32_ascii_to_idn()
      96   size_t in_len = wcslen(in_w) + 1;  in curl_win32_ascii_to_idn()
      98   int chars = IdnToUnicode(0, in_w, curlx_uztosi(in_len),  in curl_win32_ascii_to_idn()
      100  free(in_w);  in curl_win32_ascii_to_idn()
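
Both curl hits follow the usual Win32 IDN pattern: convert the UTF-8 host name to a wide string, hand it to IdnToAscii()/IdnToUnicode() from winnls.h, then free the temporary wide buffer. A minimal standalone sketch of the to-ASCII direction, using the stock MultiByteToWideChar call in place of curl's Curl_convert_UTF8_to_wchar() helper and an illustrative buffer length rather than curl's IDN_MAX_LENGTH:

    /* Illustrative sketch only (not curl source): UTF-8 host -> punycode via
     * the stock Win32 APIs.  Link with Normaliz.lib for IdnToAscii. */
    #include <windows.h>
    #include <stdio.h>
    #include <stdlib.h>
    #pragma comment(lib, "normaliz.lib")

    #define PUNY_BUF_LEN 256   /* illustrative; curl defines its own IDN_MAX_LENGTH */

    static int utf8_host_to_punycode(const char *in, wchar_t *punycode, int punylen)
    {
      int chars = 0;
      int wlen = MultiByteToWideChar(CP_UTF8, 0, in, -1, NULL, 0);
      if(wlen <= 0)
        return 0;
      wchar_t *in_w = (wchar_t *)malloc(wlen * sizeof(wchar_t));
      if(in_w) {
        MultiByteToWideChar(CP_UTF8, 0, in, -1, in_w, wlen);
        /* -1: the input is NUL-terminated, so IdnToAscii measures it itself;
           returns 0 on failure, else wide chars written including the NUL. */
        chars = IdnToAscii(0, in_w, -1, punycode, punylen);
        free(in_w);
      }
      return chars;
    }

    int main(void)
    {
      wchar_t puny[PUNY_BUF_LEN];
      if(utf8_host_to_punycode("b\xc3\xbc" "cher.example", puny, PUNY_BUF_LEN))
        wprintf(L"%ls\n", puny);   /* prints xn--bcher-kva.example */
      return 0;
    }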
|
/external/tensorflow/tensorflow/core/lib/jpeg/ |
D | jpeg_mem_unittest.cc |
      289  const int in_w = 256;  in TEST() local
      291  const int stride1 = 3 * in_w;  in TEST()
      294  for (int j = 0; j < in_w; j++) {  in TEST()
      306  memcpy(&refdata2[i * stride2], &refdata1[i * stride1], 3 * in_w);  in TEST()
      319  cpdata1 = Compress(refdata1.get(), in_w, in_h, flags);  in TEST()
      321  cpdata2 = Compress(refdata2.get(), in_w, in_h, flags);  in TEST()
      331  Compress(refdata1.get(), in_w, in_h, flags, &cptest);  in TEST()
      334  Compress(refdata2.get(), in_w, in_h, flags, &cptest);  in TEST()
      349  CHECK_EQ(w, in_w);  in TEST()
      356  imgdata1.get(), refdata1.get(), in_w, in_h, stride1, stride1);  in TEST()
      [all …]
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/integer_ops/ |
D | mean.h |
      57   for (int in_w = 0; in_w < input_width; ++in_w) {  in Mean() local
      58   acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] -  in Mean()
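
The quantized Mean kernel accumulates over the full H x W window before averaging; the term being subtracted on line 58 is truncated above (presumably the input zero point). A stripped-down sketch of that reduction for an int8 NHWC tensor, with plain rounded integer division standing in for TFLite's fixed-point requantization (names and zero-point handling are illustrative):

    // Illustrative sketch only (not the TFLite kernel): mean over the spatial
    // H x W axes of an int8 NHWC tensor, one output per (batch, channel).
    #include <cstdint>

    // Flat NHWC offset, standing in for the library's Offset() helper.
    static int OffsetNHWC(int b, int h, int w, int d,
                          int height, int width, int depth) {
      return ((b * height + h) * width + w) * depth + d;
    }

    void MeanHW(const int8_t* input_data, int batches, int height, int width,
                int depth, int32_t input_zero_point, int32_t output_zero_point,
                int8_t* output_data) {
      const int num_elems = height * width;
      for (int out_b = 0; out_b < batches; ++out_b) {
        for (int out_d = 0; out_d < depth; ++out_d) {
          int32_t acc = 0;
          for (int in_h = 0; in_h < height; ++in_h) {
            for (int in_w = 0; in_w < width; ++in_w) {
              acc += input_data[OffsetNHWC(out_b, in_h, in_w, out_d,
                                           height, width, depth)] -
                     input_zero_point;
            }
          }
          // Round-to-nearest average, shifted to the output zero point and
          // clamped to the int8 range.
          int32_t mean =
              (acc + (acc >= 0 ? num_elems / 2 : -(num_elems / 2))) / num_elems;
          int32_t out = mean + output_zero_point;
          if (out < -128) out = -128;
          if (out > 127) out = 127;
          output_data[out_b * depth + out_d] = static_cast<int8_t>(out);
        }
      }
    }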
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | resolve_constant_slice.cc |
      75   for (int in_w = begin[2]; in_w <= end[2]; ++in_w) {  in Slice() local
      78   input_data[Offset(padded_shape, {in_b, in_h, in_w, in_d})];  in Slice()
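
The toco constant-folding pass walks an inclusive [begin, end] box over the 4-D input, as the <= bound on line 75 shows, copying elements in row-major order. A minimal sketch of that loop nest over a flat (B, H, W, D) buffer, with invented helper names:

    // Illustrative sketch only: constant-folded Slice as an inclusive
    // [begin, end] box copy, matching the loop nest and index order above.
    #include <vector>

    static int Offset4D(const int shape[4], int b, int h, int w, int d) {
      return ((b * shape[1] + h) * shape[2] + w) * shape[3] + d;
    }

    std::vector<float> SliceConst(const std::vector<float>& input,
                                  const int shape[4], const int begin[4],
                                  const int end[4]) {
      std::vector<float> output;
      for (int in_b = begin[0]; in_b <= end[0]; ++in_b)
        for (int in_h = begin[1]; in_h <= end[1]; ++in_h)
          for (int in_w = begin[2]; in_w <= end[2]; ++in_w)
            for (int in_d = begin[3]; in_d <= end[3]; ++in_d)
              output.push_back(input[Offset4D(shape, in_b, in_h, in_w, in_d)]);
      return output;
    }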
|
/external/deqp/external/openglcts/modules/glesext/tessellation_shader/ |
D | esextcTessellationShaderUtils.hpp |
      52   _ivec4(int in_x, int in_y, int in_z, int in_w)  in _ivec4()
      57   w = in_w;  in _ivec4()
      134  _vec4(float in_x, float in_y, float in_z, float in_w)  in _vec4()
      139  w = in_w;  in _vec4()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | resize_area_op_test.cc |
      162  for (int in_w : {2, 4, 7, 20, 165}) {  in RunManyRandomTests()
      166  RunRandomTest(in_h, in_w, target_height, target_width, channels);  in RunManyRandomTests()
|
D | depthtospace_op.cc |
      163  const int in_w = w / block_size;  in operator ()() local
      169  output(b, h, w, d) = input(b, in_h, in_w, in_d);  in operator ()()
|
D | depthtospace_op_gpu.cu.cc |
      52   const int in_w = w / block_size;  in D2S_NHWC() local
      57   in_d + input_depth * (in_w + input_width * (in_h + input_height * b));  in D2S_NHWC()
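
Both the CPU functor and the CUDA kernel above recover the input coordinate the same way: the output row and column divide by block_size to give in_h/in_w, and the remainders select which group of the (deeper) input channel axis supplies the pixel; line 57 is the resulting flat NHWC index. A scalar sketch of that mapping on plain buffers, for illustration only:

    // Illustrative scalar sketch (no Eigen/CUDA plumbing) of NHWC
    // depth-to-space: output (b, h, w, d) reads input
    // (b, h / bs, w / bs, d + out_depth * ((h % bs) * bs + w % bs)).
    // Assumes in_depth is divisible by block_size * block_size.
    #include <vector>

    void DepthToSpaceNHWC(const std::vector<float>& input, int batch,
                          int in_height, int in_width, int in_depth,
                          int block_size, std::vector<float>* output) {
      const int out_height = in_height * block_size;
      const int out_width = in_width * block_size;
      const int out_depth = in_depth / (block_size * block_size);
      output->resize(batch * out_height * out_width * out_depth);
      for (int b = 0; b < batch; ++b) {
        for (int h = 0; h < out_height; ++h) {
          const int in_h = h / block_size;
          for (int w = 0; w < out_width; ++w) {
            const int in_w = w / block_size;
            for (int d = 0; d < out_depth; ++d) {
              // The block-local remainders pick which channel group of the
              // deeper input feeds this output pixel.
              const int in_d = d + out_depth * ((h % block_size) * block_size +
                                                w % block_size);
              const int in_idx =   // same flat NHWC index shape as line 57
                  in_d + in_depth * (in_w + in_width * (in_h + in_height * b));
              const int out_idx =
                  d + out_depth * (w + out_width * (h + out_height * b));
              (*output)[out_idx] = input[in_idx];
            }
          }
        }
      }
    }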
|
D | resize_bicubic_op_test.cc |
      188  for (int in_w : {2, 4, 7, 20, 165}) {  in RunManyRandomTests()
      192  RunRandomTest(batch_size, in_h, in_w, target_height, target_width,  in RunManyRandomTests()
|
D | resize_bilinear_op_test.cc |
      140  for (int in_w : {2, 4, 7, 20, 165}) {  in RunManyRandomTests()
      144  TestResize(batch_size, in_w, in_h, channels, target_width,  in RunManyRandomTests()
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | reference_ops.h |
      327   const int in_w = out_w / block_size;  in DepthToSpace() local
      331   const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);  in DepthToSpace()
      374   for (int in_w = 0; in_w < input_width; ++in_w) {  in SpaceToDepth() local
      377   in_d + ((in_h % block_size) * block_size + in_w % block_size) *  in SpaceToDepth()
      379   const int out_w = in_w / block_size;  in SpaceToDepth()
      383   const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d);  in SpaceToDepth()
      3170  for (int in_w = 0; in_w < input_width; ++in_w) {  in BatchToSpaceND() local
      3171  const int out_w = in_w * block_shape_width +  in BatchToSpaceND()
      3179  input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);  in BatchToSpaceND()
      3367  for (int in_w = start_w;  in StridedSlice() local
      [all …]
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
      1808  for (int in_w = 0; in_w < input_width; ++in_w) {  in MeanImpl() local
      1810  input_data + Offset(input_shape, out_b, in_h, in_w, out_d);  in MeanImpl()
      1858  for (int in_w = 0; in_w < input_width; ++in_w) {  in MeanImpl() local
      1860  input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];  in MeanImpl()
      2499  for (int in_w = 0; in_w < input_width; ++in_w) {  in DepthToSpace() local
      5831  for (int in_w = in_w_start; in_w < in_w_end; ++in_w) {  in BatchToSpaceND() local
      5832  const int out_w = in_w * block_shape_width +  in BatchToSpaceND()
      5838  input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);  in BatchToSpaceND()
      6195  for (int in_w = start_w; in_w < stop_w; ++in_w) {  in Slice() local
      6198  input_data + Offset(ext_shape, in_b, in_h, in_w, start_d),  in Slice()
|
/external/tensorflow/tensorflow/contrib/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
      2667  for (int in_w = 0; in_w < input_width; ++in_w) {  in DepthToSpace() local
      6159  for (int in_w = in_w_start; in_w < in_w_end; ++in_w) {  in BatchToSpaceND() local
      6160  const int out_w = in_w * block_shape_width +  in BatchToSpaceND()
      6166  input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);  in BatchToSpaceND()
      6364  for (int in_w = start_w; in_w < stop_w; ++in_w) {  in Slice() local
      6367  input_data + Offset(ext_shape, in_b, in_h, in_w, start_d),  in Slice()
|