
Searched refs: x_stride (Results 1 – 25 of 36) sorted by relevance


/external/XNNPACK/test/
pack-microkernel-tester.h
55 inline PackMicrokernelTester& x_stride(size_t x_stride) { in x_stride() argument
56 assert(x_stride != 0); in x_stride()
57 this->x_stride_ = x_stride; in x_stride()
61 inline size_t x_stride() const { in x_stride() function
85 std::vector<uint32_t> x(k() + (m() - 1) * x_stride() + XNN_EXTRA_BYTES / sizeof(uint32_t)); in Test()
96 y_ref[j * mr() + i] = x[std::min(i, m() - 1) * x_stride() + j]; in Test()
103 x.data(), x_stride() * sizeof(uint32_t), in Test()
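
In this tester x_stride is an element stride between consecutive input rows, which is why the buffer is sized k() + (m() - 1) * x_stride() and the reference result clamps row indices to m() - 1. A minimal C sketch of that reference packing (pack_reference and its parameters are hypothetical, not part of the tester):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: row i starts at x[i * x_stride]; the last row still needs k
       readable elements, and rows requested beyond m are clamped to the
       last real row, mirroring std::min(i, m() - 1) in the tester. */
    static void pack_reference(const uint32_t *x, size_t m, size_t k,
                               size_t mr, size_t x_stride, uint32_t *y_ref) {
      for (size_t i = 0; i < mr; i++) {
        const size_t row = (i < m) ? i : m - 1;
        for (size_t j = 0; j < k; j++) {
          y_ref[j * mr + i] = x[row * x_stride + j];
        }
      }
    }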
prelu-operator-tester.h
51 inline PReLUOperatorTester& x_stride(size_t x_stride) { in x_stride() argument
52 assert(x_stride != 0); in x_stride()
53 this->x_stride_ = x_stride; in x_stride()
57 inline size_t x_stride() const { in x_stride() function
123 …std::vector<uint16_t> x((batch_size() - 1) * x_stride() + channels() + XNN_EXTRA_BYTES / sizeof(ui… in TestF16()
137 const float x_value = fp16_ieee_to_fp32_value(x[i * x_stride() + c]); in TestF16()
167 channels(), x_stride(), y_stride(), in TestF16()
197 channels(), x_stride(), y_stride(), in TestF16()
243 …std::vector<float> x((batch_size() - 1) * x_stride() + channels() + XNN_EXTRA_BYTES / sizeof(float… in TestF32()
255 …ef[i * channels() + c] = std::signbit(x[i * x_stride() + c]) ? x[i * x_stride() + c] * w[c] : x[i … in TestF32()
[all …]
prelu-nc.cc
41 .x_stride(337) in TEST()
65 .x_stride(337) in TEST()
89 .x_stride(337) in TEST()
113 .x_stride(337) in TEST()
126 .x_stride(337) in TEST()
152 .x_stride(345) in TEST()
189 .x_stride(337) in TEST()
213 .x_stride(337) in TEST()
237 .x_stride(337) in TEST()
261 .x_stride(337) in TEST()
x32-packx.cc
120 .x_stride(23) in TEST()
227 .x_stride(23) in TEST()
325 .x_stride(23) in TEST()
378 .x_stride(7) in TEST()
429 .x_stride(7) in TEST()
480 .x_stride(7) in TEST()
/external/webp/src/dsp/
rescaler.c
31 const int x_stride = wrk->num_channels; in WebPRescalerImportRowExpand_C() local
36 for (channel = 0; channel < x_stride; ++channel) { in WebPRescalerImportRowExpand_C()
43 (wrk->src_width > 1) ? (rescaler_t)src[x_in + x_stride] : left; in WebPRescalerImportRowExpand_C()
44 x_in += x_stride; in WebPRescalerImportRowExpand_C()
47 x_out += x_stride; in WebPRescalerImportRowExpand_C()
52 x_in += x_stride; in WebPRescalerImportRowExpand_C()
53 assert(x_in < wrk->src_width * x_stride); in WebPRescalerImportRowExpand_C()
64 const int x_stride = wrk->num_channels; in WebPRescalerImportRowShrink_C() local
69 for (channel = 0; channel < x_stride; ++channel) { in WebPRescalerImportRowShrink_C()
79 assert(x_in < wrk->src_width * x_stride); in WebPRescalerImportRowShrink_C()
[all …]
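
In the WebP rescaler, x_stride is simply wrk->num_channels: the import loops walk one channel of an interleaved row by stepping x_in in increments of x_stride. A hedged sketch of that traversal (copy_channel is an invented name, not WebP's API):

    #include <stdint.h>

    /* Sketch: with x_stride == num_channels, consecutive samples of one
       channel are x_stride elements apart in the interleaved source row. */
    static void copy_channel(const uint8_t *src, int src_width,
                             int num_channels, int channel, uint8_t *dst) {
      const int x_stride = num_channels;
      int x_in = channel;                 /* first sample of this channel */
      for (int x_out = 0; x_out < src_width; ++x_out) {
        dst[x_out] = src[x_in];
        x_in += x_stride;                 /* step to the next pixel's sample */
      }
    }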
rescaler_mips32.c
26 const int x_stride = wrk->num_channels; in ImportRowShrink_MIPS32() local
31 const int x_stride1 = x_stride << 2; in ImportRowShrink_MIPS32()
36 for (channel = 0; channel < x_stride; ++channel) { in ImportRowShrink_MIPS32()
75 : [x_stride]"r"(x_stride), [fx_scale]"r"(fx_scale), in ImportRowShrink_MIPS32()
86 const int x_stride = wrk->num_channels; in ImportRowExpand_MIPS32() local
91 const int x_stride1 = x_stride << 2; in ImportRowExpand_MIPS32()
96 for (channel = 0; channel < x_stride; ++channel) { in ImportRowExpand_MIPS32()
137 : [x_stride]"r"(x_stride), [x_add]"r"(x_add), [x_sub]"r"(x_sub), in ImportRowExpand_MIPS32()
/external/XNNPACK/src/x32-packx/
x4-scalar.c
15 size_t x_stride, in xnn_x32_packx_ukernel_4x__scalar() argument
22 const float* x1 = (const float*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_4x__scalar()
26 const float* x2 = (const float*) ((uintptr_t) x1 + x_stride); in xnn_x32_packx_ukernel_4x__scalar()
30 const float* x3 = (const float*) ((uintptr_t) x2 + x_stride); in xnn_x32_packx_ukernel_4x__scalar()
x4-neon-st4.c
17 size_t x_stride, in xnn_x32_packx_ukernel_4x__neon_st4() argument
24 const uint32_t* x1 = (const uint32_t*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_4x__neon_st4()
28 const uint32_t* x2 = (const uint32_t*) ((uintptr_t) x1 + x_stride); in xnn_x32_packx_ukernel_4x__neon_st4()
32 const uint32_t* x3 = (const uint32_t*) ((uintptr_t) x2 + x_stride); in xnn_x32_packx_ukernel_4x__neon_st4()
x4-wasmsimd.c
17 size_t x_stride, in xnn_x32_packx_ukernel_4x__wasmsimd() argument
24 const float* x1 = (const float*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_4x__wasmsimd()
28 const float* x2 = (const float*) ((uintptr_t) x1 + x_stride); in xnn_x32_packx_ukernel_4x__wasmsimd()
32 const float* x3 = (const float*) ((uintptr_t) x2 + x_stride); in xnn_x32_packx_ukernel_4x__wasmsimd()
x3-scalar.c
15 size_t x_stride, in xnn_x32_packx_ukernel_3x__scalar() argument
22 const float* x1 = (const float*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_3x__scalar()
26 const float* x2 = (const float*) ((uintptr_t) x1 + x_stride); in xnn_x32_packx_ukernel_3x__scalar()
x4-sse.c
17 size_t x_stride, in xnn_x32_packx_ukernel_4x__sse() argument
24 const float* x1 = (const float*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_4x__sse()
28 const float* x2 = (const float*) ((uintptr_t) x1 + x_stride); in xnn_x32_packx_ukernel_4x__sse()
32 const float* x3 = (const float*) ((uintptr_t) x2 + x_stride); in xnn_x32_packx_ukernel_4x__sse()
x2-scalar.c
15 size_t x_stride, in xnn_x32_packx_ukernel_2x__scalar() argument
22 const float* x1 = (const float*) ((uintptr_t) x0 + x_stride); in xnn_x32_packx_ukernel_2x__scalar()
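
All of these packx micro-kernels take x_stride as a byte stride and derive each row pointer from the previous one with uintptr_t arithmetic. A minimal sketch of that setup for a 4-row kernel, including the clamping of rows past m that such kernels typically perform (setup_row_pointers is a hypothetical helper, not the XNNPACK code):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: derive row pointers from a byte stride; clamp rows past m
       back to the previous row so out-of-range rows never read new memory. */
    static void setup_row_pointers(const float *x0, size_t m,
                                   size_t x_stride /* bytes */,
                                   const float *rows[4]) {
      const float *x1 = (const float *) ((uintptr_t) x0 + x_stride);
      if (m < 2) { x1 = x0; }
      const float *x2 = (const float *) ((uintptr_t) x1 + x_stride);
      if (m <= 2) { x2 = x1; }
      const float *x3 = (const float *) ((uintptr_t) x2 + x_stride);
      if (m != 4) { x3 = x2; }
      rows[0] = x0; rows[1] = x1; rows[2] = x2; rows[3] = x3;
    }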
/external/webp/extras/
get_disto.c
60 int x_stride, int y_stride, int max) { in RescalePlane() argument
65 for (x = 0; x < width * x_stride; x += x_stride) { in RescalePlane()
75 int x_stride, int w, int h, int do_scaling) { in DiffScaleChannel() argument
81 for (x = 0; x < w * x_stride; x += x_stride) { in DiffScaleChannel()
88 if (do_scaling) RescalePlane(src1, w, h, x_stride, stride1, max); in DiffScaleChannel()
165 int x_stride, int w, int h, int do_scaling) { in SSIMScaleChannel() argument
175 plane1[x + y * w] = src1[x * x_stride + y * stride1]; in SSIMScaleChannel()
176 plane2[x + y * w] = src2[x * x_stride + y * stride2]; in SSIMScaleChannel()
188 src1[x * x_stride + y * stride1] = (diff > 255) ? 255u : (uint8_t)diff; in SSIMScaleChannel()
193 if (do_scaling) RescalePlane(src1, w, h, x_stride, stride1, max); in SSIMScaleChannel()
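
In get_disto.c, x_stride again counts interleaved channels while stride1/stride2 are per-row strides, so the loops step x by x_stride up to w * x_stride to visit one channel of every pixel. A sketch of such a traversal (max_channel_diff is illustrative, not the tool's DiffScaleChannel):

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch: absolute per-pixel difference on one channel of interleaved rows. */
    static int max_channel_diff(const uint8_t *src1, const uint8_t *src2,
                                int w, int h, int x_stride,
                                int stride1, int stride2) {
      int max = 0;
      for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w * x_stride; x += x_stride) {
          const int diff = abs(src1[x + y * stride1] - src2[x + y * stride2]);
          if (diff > max) max = diff;
        }
      }
      return max;
    }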
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
runtime_single_threaded_conv3d.cc
27 int64_t output_x, int64_t output_y, int64_t output_z, int64_t x_stride, in __xla_cpu_runtime_EigenSingleThreadedConv3DF32() argument
37 kernel_filters, output_x, output_y, output_z, x_stride, y_stride, in __xla_cpu_runtime_EigenSingleThreadedConv3DF32()
50 int64_t output_x, int64_t output_y, int64_t output_z, int64_t x_stride, in __xla_cpu_runtime_EigenSingleThreadedConv3DF16() argument
60 kernel_filters, output_x, output_y, output_z, x_stride, y_stride, in __xla_cpu_runtime_EigenSingleThreadedConv3DF16()
runtime_conv3d.cc
30 int64_t output_x, int64_t output_y, int64_t output_z, int64_t x_stride, in __xla_cpu_runtime_EigenConv3DF32() argument
43 kernel_channels, kernel_filters, output_x, output_y, output_z, x_stride, in __xla_cpu_runtime_EigenConv3DF32()
55 int64_t output_x, int64_t output_y, int64_t output_z, int64_t x_stride, in __xla_cpu_runtime_EigenConv3DF16() argument
68 kernel_channels, kernel_filters, output_x, output_y, output_z, x_stride, in __xla_cpu_runtime_EigenConv3DF16()
runtime_conv3d.h
31 int64_t output_y, int64_t output_z, int64_t x_stride, int64_t y_stride,
44 int64_t output_z, int64_t x_stride, int64_t y_stride, int64_t z_stride,
runtime_single_threaded_conv3d.h
31 int64_t output_y, int64_t output_z, int64_t x_stride, int64_t y_stride,
44 int64_t output_z, int64_t x_stride, int64_t y_stride, int64_t z_stride,
runtime_conv_impl.h
36 Eigen::Index x_stride, Eigen::Index y_stride, Eigen::Index padding_x_before, in EigenConv2DImpl() argument
96 kernel_y, kernel_x, y_stride, x_stride, rhs_y_dilation, in EigenConv2DImpl()
114 Eigen::Index x_stride, Eigen::Index y_stride, Eigen::Index z_stride, in EigenConv3DImpl() argument
182 x_stride, rhs_z_dilation, rhs_y_dilation, rhs_x_dilation, in EigenConv3DImpl()
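
In the XLA CPU runtime, x_stride, y_stride, and z_stride are the per-axis convolution strides forwarded to the Eigen implementation. As a reminder of what a spatial stride does to the output shape, here is the standard extent formula (a generic sketch, not XLA code):

    #include <stdint.h>

    /* Sketch: usual output-extent arithmetic for a strided, dilated convolution;
       x_stride/y_stride/z_stride play the role of `stride` for their axes. */
    static int64_t conv_out_extent(int64_t input, int64_t kernel, int64_t stride,
                                   int64_t pad_before, int64_t pad_after,
                                   int64_t dilation) {
      const int64_t dilated_kernel = (kernel - 1) * dilation + 1;
      return (input + pad_before + pad_after - dilated_kernel) / stride + 1;
    }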
/external/XNNPACK/src/xnnpack/
vmulcaddc.h
24 size_t x_stride, \
64 size_t x_stride, \
compute.h
861 size_t x_stride; member
885 size_t x_stride; member
900 size_t x_stride; member
917 size_t x_stride; member
1010 size_t x_stride; member
1027 size_t x_stride; member
1068 size_t x_stride; member
1087 size_t x_stride; member
packx.h
23 size_t x_stride, \
/external/XNNPACK/src/
operator-run.c
940 const size_t x_stride = context->x_stride; in xnn_compute_prelu() local
942 const void* x = (const void*) ((uintptr_t) context->x + x_stride * batch_start); in xnn_compute_prelu()
945 context->ukernel(batch_range, context->n, x, x_stride, context->w, y, y_stride); in xnn_compute_prelu()
1045 const void* x = (const void*) ((uintptr_t) context->x + index * context->x_stride); in xnn_compute_channel_shuffle_fixed()
1055 const void* x = (const void*) ((uintptr_t) context->x + index * context->x_stride); in xnn_compute_channel_shuffle_variable()
1065 const void* x = (const void*) ((uintptr_t) context->x + context->x_stride * batch_index); in xnn_compute_lut_strided()
1087 const size_t x_stride = context->x_stride; in xnn_compute_univector_strided() local
1090 const void* x = (const void*) ((uintptr_t) context->x + x_stride * batch_index); in xnn_compute_univector_strided()
1094 x = (const void*) ((uintptr_t) x + x_stride); in xnn_compute_univector_strided()
1115 const uint8_t* x = (const uint8_t*) ((uintptr_t) context->x + context->x_stride * batch_index); in xnn_compute_u8_softmax()
[all …]
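
Throughout operator-run.c the pattern is the same: x_stride is a byte stride between consecutive batch rows, and the input pointer for a batch element is the base pointer plus x_stride times the batch index. A minimal sketch (strided_context and row_ptr are invented names, not XNNPACK's context structs):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: strided batch dispatch -- each input row starts x_stride
       bytes after the previous one. */
    struct strided_context {
      const void *x;
      size_t x_stride;   /* bytes between consecutive batch rows */
    };

    static const void *row_ptr(const struct strided_context *ctx,
                               size_t batch_index) {
      return (const void *) ((uintptr_t) ctx->x + ctx->x_stride * batch_index);
    }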
/external/tensorflow/tensorflow/cc/framework/
gradient_checker.cc
143 const int x_stride = JacobianStride<X_T>::value; in ComputeTheoreticalJacobianTranspose() local
172 SetJacobian<X_T, JAC_T>(&jacobian, r * x_stride, in ComputeTheoreticalJacobianTranspose()
224 const int x_stride = JacobianStride<X_T>::value; in ComputeNumericJacobianTranspose() local
260 SetJacobian<Y_T, JAC_T>(&jacobian, r * x_stride + unit_dimension, in ComputeNumericJacobianTranspose()
/external/mesa3d/src/gallium/auxiliary/gallivm/
lp_bld_sample_aos.c
455 LLVMValueRef x_stride; in lp_build_sample_image_nearest() local
503 x_stride = lp_build_const_vec(bld->gallivm, in lp_build_sample_image_nearest()
511 width_vec, x_stride, offsets[0], in lp_build_sample_image_nearest()
767 LLVMValueRef x_stride, y_stride, z_stride; in lp_build_sample_image_linear() local
859 x_stride = lp_build_const_vec(bld->gallivm, bld->int_coord_bld.type, in lp_build_sample_image_linear()
868 width_vec, x_stride, offsets[0], in lp_build_sample_image_linear()
/external/XNNPACK/src/operators/
softmax-nc.c
173 .x_stride = softmax_op->input_pixel_stride * sizeof(uint8_t), in xnn_setup_softmax_nc_qu8()
338 .x_stride = softmax_op->input_pixel_stride << log2_element_size, in setup_softmax_nc_floating_point()
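
Both assignments compute the same thing, the byte distance between consecutive input pixels: the qu8 path multiplies by sizeof(uint8_t) and the floating-point path shifts by log2_element_size. A small sketch of the shift form (input_byte_stride is a hypothetical helper):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: shifting by log2_element_size is just a multiply by the element
       size, e.g. log2_element_size == 2 for f32 gives pixel_stride * 4 bytes. */
    static size_t input_byte_stride(size_t input_pixel_stride,
                                    uint32_t log2_element_size) {
      return input_pixel_stride << log2_element_size;
    }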
