
Searched refs:input_stride (Results 1 – 25 of 274) sorted by relevance


/external/libaom/libaom/av1/common/x86/
cfl_simd.h
18 void cfl_subsample_lbd_420_4x4_ssse3(const uint8_t *cfl_type, int input_stride,
20 void cfl_subsample_lbd_420_4x8_ssse3(const uint8_t *cfl_type, int input_stride,
22 void cfl_subsample_lbd_420_4x16_ssse3(const uint8_t *cfl_type, int input_stride,
26 void cfl_subsample_lbd_420_8x4_ssse3(const uint8_t *cfl_type, int input_stride,
28 void cfl_subsample_lbd_420_8x8_ssse3(const uint8_t *cfl_type, int input_stride,
30 void cfl_subsample_lbd_420_8x16_ssse3(const uint8_t *cfl_type, int input_stride,
32 void cfl_subsample_lbd_420_8x32_ssse3(const uint8_t *cfl_type, int input_stride,
36 void cfl_subsample_lbd_420_16x4_ssse3(const uint8_t *cfl_type, int input_stride,
38 void cfl_subsample_lbd_420_16x8_ssse3(const uint8_t *cfl_type, int input_stride,
41 int input_stride, uint16_t *output_q3);
[all …]
cfl_ssse3.c
41 int input_stride, in cfl_luma_subsampling_420_lbd_ssse3() argument
47 const int luma_stride = input_stride << 1; in cfl_luma_subsampling_420_lbd_ssse3()
52 __m128i bot = _mm_loadh_epi32((__m128i *)(input + input_stride)); in cfl_luma_subsampling_420_lbd_ssse3()
59 __m128i bot = _mm_loadl_epi64((__m128i *)(input + input_stride)); in cfl_luma_subsampling_420_lbd_ssse3()
66 __m128i bot = _mm_loadu_si128((__m128i *)(input + input_stride)); in cfl_luma_subsampling_420_lbd_ssse3()
73 _mm_loadu_si128(((__m128i *)(input + input_stride)) + 1); in cfl_luma_subsampling_420_lbd_ssse3()
96 int input_stride, in cfl_luma_subsampling_422_lbd_ssse3() argument
121 input += input_stride; in cfl_luma_subsampling_422_lbd_ssse3()
136 int input_stride, in cfl_luma_subsampling_444_lbd_ssse3() argument
140 const int luma_stride = input_stride; in cfl_luma_subsampling_444_lbd_ssse3()
[all …]
/external/XNNPACK/test/
softmax-operator-tester.h
37 inline SoftMaxOperatorTester& input_stride(size_t input_stride) { in input_stride() function
38 assert(input_stride != 0); in input_stride()
39 this->input_stride_ = input_stride; in input_stride()
43 inline size_t input_stride() const { in input_stride() function
119 std::vector<uint8_t> input((batch_size() - 1) * input_stride() + channels()); in TestQ8()
129 input.data() + i * input_stride(), in TestQ8()
130 input.data() + i * input_stride() + channels()); in TestQ8()
134 std::exp((int32_t(input[i * input_stride() + c]) - max_input) * in TestQ8()
139 std::exp((int32_t(input[i * input_stride() + c]) - max_input) * in TestQ8()
152 channels(), input_stride(), output_stride(), in TestQ8()
[all …]
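In these XNNPACK operator testers, input_stride is the element distance between consecutive batch rows of a dense NC (batch x channels) tensor, which is why the buffer is sized as (batch_size - 1) * input_stride + channels and element c of row i is read at i * input_stride + c. A minimal standalone sketch of that layout follows; the helper names alloc_nc and get_nc are illustrative, not XNNPACK API.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: dense NC layout whose row stride may exceed the
 * channel count, leaving padding between rows. */
static uint8_t *alloc_nc(size_t batch_size, size_t channels, size_t input_stride) {
  /* The last row only needs `channels` elements, hence the sizing used by
   * the testers above. */
  return (uint8_t *) malloc((batch_size - 1) * input_stride + channels);
}

static uint8_t get_nc(const uint8_t *input, size_t input_stride, size_t i, size_t c) {
  return input[i * input_stride + c]; /* row i, channel c */
}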
channel-shuffle-nc.cc
112 .input_stride(511) in TEST()
124 .input_stride(511) in TEST()
136 .input_stride(511) in TEST()
149 .input_stride(1007) in TEST()
212 .input_stride(511) in TEST()
225 .input_stride(511) in TEST()
238 .input_stride(511) in TEST()
252 .input_stride(1007) in TEST()
358 .input_stride(511) in TEST()
370 .input_stride(511) in TEST()
[all …]
channel-shuffle-operator-tester.h
50 inline ChannelShuffleOperatorTester& input_stride(size_t input_stride) { in input_stride() argument
51 assert(input_stride != 0); in input_stride()
52 this->input_stride_ = input_stride; in input_stride()
56 inline size_t input_stride() const { in input_stride() function
104 …nt8_t> input(XNN_EXTRA_BYTES / sizeof(uint8_t) + (batch_size() - 1) * input_stride() + channels()); in TestX8()
117 input_stride(), output_stride(), in TestX8()
138 ASSERT_EQ(uint32_t(input[i * input_stride() + g * group_channels() + c]), in TestX8()
152 …std::vector<float> input(XNN_EXTRA_BYTES / sizeof(float) + (batch_size() - 1) * input_stride() + c… in TestX32()
165 input_stride(), output_stride(), in TestX32()
186 ASSERT_EQ(input[i * input_stride() + g * group_channels() + c], in TestX32()
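The channel-shuffle tester indexes input element (row i, group g, channel c) at i * input_stride + g * group_channels + c. A sketch of the reference mapping it appears to check is below; the output index c * groups + g is the standard channel-shuffle transpose and is an assumption here, since the output side of the ASSERT_EQ is truncated in the snippet.

#include <stddef.h>
#include <stdint.h>

/* Reference channel shuffle over strided NC rows: within each batch row,
 * input channel (g, c) moves to output channel (c, g). Sketch only, not
 * the XNNPACK kernel. */
static void channel_shuffle_ref(const uint8_t *input, size_t input_stride,
                                uint8_t *output, size_t output_stride,
                                size_t batch_size, size_t groups,
                                size_t group_channels) {
  for (size_t i = 0; i < batch_size; i++) {
    for (size_t g = 0; g < groups; g++) {
      for (size_t c = 0; c < group_channels; c++) {
        output[i * output_stride + c * groups + g] =
            input[i * input_stride + g * group_channels + c];
      }
    }
  }
}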
hardswish-operator-tester.h
33 inline HardSwishOperatorTester& input_stride(size_t input_stride) { in input_stride() argument
34 assert(input_stride != 0); in input_stride()
35 this->input_stride_ = input_stride; in input_stride()
39 inline size_t input_stride() const { in input_stride() function
88 (batch_size() - 1) * input_stride() + channels()); in TestF32()
98 const float x = input[i * input_stride() + c]; in TestF32()
110 channels(), input_stride(), output_stride(), in TestF32()
channel-pad-operator-tester.h
65 inline ChannelPadOperatorTester& input_stride(size_t input_stride) { in input_stride() argument
66 assert(input_stride != 0); in input_stride()
67 this->input_stride_ = input_stride; in input_stride()
71 inline size_t input_stride() const { in input_stride() function
110 …std::vector<uint32_t> input(input_channels() + (batch_size() - 1) * input_stride() + XNN_EXTRA_BYT… in TestX32()
123 input_stride(), output_stride(), in TestX32()
149 ASSERT_EQ(input[i * input_stride() + k], in TestX32()
sigmoid-operator-tester.h
37 inline SigmoidOperatorTester& input_stride(size_t input_stride) { in input_stride() argument
38 assert(input_stride != 0); in input_stride()
39 this->input_stride_ = input_stride; in input_stride()
43 inline size_t input_stride() const { in input_stride() function
137 …std::vector<uint8_t> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / si… in TestQ8()
148 (int32_t(input[i * input_stride() + c]) - int32_t(input_zero_point())); in TestQ8()
164 channels(), input_stride(), output_stride(), in TestQ8()
198 …std::vector<float> input((batch_size() - 1) * input_stride() + channels() + XNN_EXTRA_BYTES / size… in TestF32()
208 const double x = input[i * input_stride() + c]; in TestF32()
220 channels(), input_stride(), output_stride(), in TestF32()
clamp-operator-tester.h
36 inline ClampOperatorTester& input_stride(size_t input_stride) { in input_stride() function
37 assert(input_stride != 0); in input_stride()
38 this->input_stride_ = input_stride; in input_stride()
42 inline size_t input_stride() const { in input_stride() function
109 (batch_size() - 1) * input_stride() + channels()); in TestU8()
119 const uint8_t x = input[i * input_stride() + c]; in TestU8()
131 channels(), input_stride(), output_stride(), in TestU8()
170 (batch_size() - 1) * input_stride() + channels()); in TestF32()
180 const float x = input[i * input_stride() + c]; in TestF32()
192 channels(), input_stride(), output_stride(), in TestF32()
sigmoid-nc.cc
87 .input_stride(129) in TEST()
157 .input_stride(129) in TEST()
169 .input_stride(129) in TEST()
182 .input_stride(129) in TEST()
196 .input_stride(129) in TEST()
211 .input_stride(129) in TEST()
245 .input_stride(129) in TEST()
267 .input_stride(129) in TEST()
/external/XNNPACK/src/
clamp-nc.c
24 size_t input_stride, in xnn_create_clamp_nc_u8() argument
47 if (input_stride < channels) { in xnn_create_clamp_nc_u8()
51 input_stride, channels); in xnn_create_clamp_nc_u8()
79 clamp_op->input_pixel_stride = input_stride; in xnn_create_clamp_nc_u8()
98 size_t input_stride, in xnn_create_clamp_nc_f32() argument
121 if (input_stride < channels) { in xnn_create_clamp_nc_f32()
125 input_stride, channels); in xnn_create_clamp_nc_f32()
165 clamp_op->input_pixel_stride = input_stride; in xnn_create_clamp_nc_f32()
206 const size_t input_stride = clamp_op->input_pixel_stride; in xnn_setup_clamp_nc_u8() local
208 if ((((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1) { in xnn_setup_clamp_nc_u8()
[all …]
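Two patterns recur in these create/setup functions: creation fails if input_stride < channels (a row cannot be narrower than its payload), and setup uses ((input_stride ^ channels) | (output_stride ^ channels)) == 0 to detect that both strides equal channels, in which case the whole batch is contiguous and can be processed as one run. A small sketch of that check; the helper name is illustrative.

#include <stdbool.h>
#include <stddef.h>

/* x ^ y is zero only when x == y, so the OR of the two XORs is zero exactly
 * when input_stride == channels and output_stride == channels. A batch of
 * one row is trivially contiguous as well. */
static bool is_contiguous(size_t input_stride, size_t output_stride,
                          size_t channels, size_t batch_size) {
  return (((input_stride ^ channels) | (output_stride ^ channels)) == 0) ||
         batch_size == 1;
}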
sigmoid-nc.c
23 size_t input_stride, in xnn_create_sigmoid_nc_q8() argument
50 if (input_stride < channels) { in xnn_create_sigmoid_nc_q8()
54 input_stride, channels); in xnn_create_sigmoid_nc_q8()
134 sigmoid_op->input_pixel_stride = input_stride; in xnn_create_sigmoid_nc_q8()
152 size_t input_stride, in xnn_create_sigmoid_nc_f32() argument
173 if (input_stride < channels) { in xnn_create_sigmoid_nc_f32()
177 input_stride, channels); in xnn_create_sigmoid_nc_f32()
207 sigmoid_op->input_pixel_stride = input_stride; in xnn_create_sigmoid_nc_f32()
251 const size_t input_stride = sigmoid_op->input_pixel_stride; in xnn_setup_sigmoid_nc_q8() local
253 if ((((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1) { in xnn_setup_sigmoid_nc_q8()
[all …]
hardswish-nc.c
21 size_t input_stride, in xnn_create_hardswish_nc_f32() argument
42 if (input_stride < channels) { in xnn_create_hardswish_nc_f32()
46 input_stride, channels); in xnn_create_hardswish_nc_f32()
67 hardswish_op->input_pixel_stride = input_stride; in xnn_create_hardswish_nc_f32()
108 const size_t input_stride = hardswish_op->input_pixel_stride; in xnn_setup_hardswish_nc_f32() local
110 if ((((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1) { in xnn_setup_hardswish_nc_f32()
114 .x_stride = input_stride * sizeof(float), in xnn_setup_hardswish_nc_f32()
128 .x_stride = input_stride * sizeof(float), in xnn_setup_hardswish_nc_f32()
channel-shuffle-nc.c
25 size_t input_stride, in create_channel_shuffle_nc() argument
55 if (input_stride < channels) { in create_channel_shuffle_nc()
59 input_stride, groups, group_channels); in create_channel_shuffle_nc()
81 channel_shuffle_op->input_pixel_stride = input_stride; in create_channel_shuffle_nc()
101 size_t input_stride, in xnn_create_channel_shuffle_nc_x8() argument
109 input_stride, in xnn_create_channel_shuffle_nc_x8()
119 size_t input_stride, in xnn_create_channel_shuffle_nc_x32() argument
127 input_stride, in xnn_create_channel_shuffle_nc_x32()
leaky-relu-nc.c
23 size_t input_stride, in xnn_create_leaky_relu_nc_q8() argument
51 if (input_stride < channels) { in xnn_create_leaky_relu_nc_q8()
55 input_stride, channels); in xnn_create_leaky_relu_nc_q8()
143 leaky_relu_op->input_pixel_stride = input_stride; in xnn_create_leaky_relu_nc_q8()
183 const size_t input_stride = leaky_relu_op->input_pixel_stride; in xnn_setup_leaky_relu_nc_q8() local
185 if ((((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1) { in xnn_setup_leaky_relu_nc_q8()
189 .x_stride = input_stride * sizeof(uint8_t), in xnn_setup_leaky_relu_nc_q8()
203 .x_stride = input_stride * sizeof(uint8_t), in xnn_setup_leaky_relu_nc_q8()
/external/XNNPACK/src/f32-gavgpool/
up7-scalar.c
16 size_t input_stride, in xnn_f32_gavgpool_ukernel_up7__scalar() argument
26 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
30 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
34 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
38 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
42 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
46 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_up7__scalar()
up7-wasm.c
16 size_t input_stride, in xnn_f32_gavgpool_ukernel_up7__wasm() argument
26 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
30 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
34 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
38 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
42 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
46 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_up7__wasm()
mp7p7q-scalar.c
16 size_t input_stride, in xnn_f32_gavgpool_ukernel_mp7p7q__scalar() argument
26 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
27 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
28 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
29 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
30 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
31 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
32 const size_t input_increment = 7 * input_stride - n * sizeof(float); in xnn_f32_gavgpool_ukernel_mp7p7q__scalar()
mp7p7q-wasm.c
16 size_t input_stride, in xnn_f32_gavgpool_ukernel_mp7p7q__wasm() argument
26 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
27 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
28 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
29 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
30 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
31 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
32 const size_t input_increment = 7 * input_stride - n * sizeof(float); in xnn_f32_gavgpool_ukernel_mp7p7q__wasm()
up7-psimd.c
17 size_t input_stride, in xnn_f32_gavgpool_ukernel_up7__psimd() argument
27 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
31 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
35 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
39 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
43 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
47 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_up7__psimd()
up7-sse.c
17 size_t input_stride, in xnn_f32_gavgpool_ukernel_up7__sse() argument
27 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
31 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
35 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
39 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
43 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
47 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_up7__sse()
up7-neon.c
17 size_t input_stride, in xnn_f32_gavgpool_ukernel_up7__neon() argument
27 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
31 const float* i2 = (const float*) ((uintptr_t) i1 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
35 const float* i3 = (const float*) ((uintptr_t) i2 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
39 const float* i4 = (const float*) ((uintptr_t) i3 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
43 const float* i5 = (const float*) ((uintptr_t) i4 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
47 const float* i6 = (const float*) ((uintptr_t) i5 + input_stride); in xnn_f32_gavgpool_ukernel_up7__neon()
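These global-average-pooling micro-kernels treat input_stride as a byte stride between rows: the seven row pointers i0..i6 are derived with uintptr_t arithmetic, and the mp7p7q variants advance to the next block of seven rows with input_increment = 7 * input_stride - n * sizeof(float) once n floats have been read from each row. A simplified sketch follows; it ignores the fewer-than-seven-rows handling the up7 kernels include.

#include <stddef.h>
#include <stdint.h>

/* Derive seven row pointers spaced input_stride bytes apart and the byte
 * increment that moves them to the next block of seven rows after n floats
 * per row have been consumed. Illustrative only. */
static void setup_gavgpool_rows(const float *i0, size_t input_stride, size_t n,
                                const float *rows[7], size_t *input_increment) {
  rows[0] = i0;
  for (int k = 1; k < 7; k++) {
    rows[k] = (const float *) ((uintptr_t) rows[k - 1] + input_stride);
  }
  *input_increment = 7 * input_stride - n * sizeof(float);
}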
/external/XNNPACK/src/q8-gavgpool/
up7-scalar.c
16 size_t input_stride, in xnn_q8_gavgpool_ukernel_up7__scalar() argument
26 const uint8_t* i1 = (const uint8_t*) ((uintptr_t) i0 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
30 const uint8_t* i2 = (const uint8_t*) ((uintptr_t) i1 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
34 const uint8_t* i3 = (const uint8_t*) ((uintptr_t) i2 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
38 const uint8_t* i4 = (const uint8_t*) ((uintptr_t) i3 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
42 const uint8_t* i5 = (const uint8_t*) ((uintptr_t) i4 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
46 const uint8_t* i6 = (const uint8_t*) ((uintptr_t) i5 + input_stride); in xnn_q8_gavgpool_ukernel_up7__scalar()
/external/libaom/libaom/av1/common/
cfl.c
213 int input_stride, in cfl_luma_subsampling_420_lbd_c() argument
218 const int bot = i + input_stride; in cfl_luma_subsampling_420_lbd_c()
222 input += input_stride << 1; in cfl_luma_subsampling_420_lbd_c()
228 int input_stride, in cfl_luma_subsampling_422_lbd_c() argument
236 input += input_stride; in cfl_luma_subsampling_422_lbd_c()
242 int input_stride, in cfl_luma_subsampling_444_lbd_c() argument
250 input += input_stride; in cfl_luma_subsampling_444_lbd_c()
257 int input_stride, in cfl_luma_subsampling_420_hbd_c() argument
262 const int bot = i + input_stride; in cfl_luma_subsampling_420_hbd_c()
266 input += input_stride << 1; in cfl_luma_subsampling_420_hbd_c()
[all …]
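In the CfL code, input_stride is the luma row stride in pixels: for 4:2:0 subsampling each output sample averages a 2x2 luma block, so the bottom row sits input_stride pixels below the top one and the input pointer advances two rows (input_stride << 1) per output row. A minimal sketch of that pattern, with output_stride standing in for libaom's internal CFL buffer stride; this is not the libaom implementation itself.

#include <stdint.h>

/* Average each 2x2 luma block into one Q3 output sample: the 4-pixel sum
 * shifted left by 1 equals (sum / 4) * 8. Sketch only. */
static void luma_subsample_420_sketch(const uint8_t *input, int input_stride,
                                      uint16_t *output_q3, int output_stride,
                                      int width, int height) {
  for (int j = 0; j < height; j += 2) {
    for (int i = 0; i < width; i += 2) {
      const int bot = i + input_stride;
      output_q3[i >> 1] =
          (uint16_t) ((input[i] + input[i + 1] + input[bot] + input[bot + 1]) << 1);
    }
    input += input_stride << 1;
    output_q3 += output_stride;
  }
}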
/external/libaom/libaom/test/
av1_fwd_txfm2d_test.cc
255 int input_stride = 64; in AV1FwdTxfm2dMatchTest() local
261 input[r * input_stride + c] = (1 << bd) - 1; in AV1FwdTxfm2dMatchTest()
267 input[r * input_stride + c] = rnd.Rand16() % (1 << bd); in AV1FwdTxfm2dMatchTest()
275 ref_func(input, ref_output, input_stride, (TX_TYPE)tx_type, bd); in AV1FwdTxfm2dMatchTest()
276 target_func(input, output, input_stride, &param); in AV1FwdTxfm2dMatchTest()
312 int input_stride = 64; in AV1FwdTxfm2dSpeedTest() local
317 input[r * input_stride + c] = rnd.Rand16() % (1 << bd); in AV1FwdTxfm2dSpeedTest()
330 ref_func(input, ref_output, input_stride, (TX_TYPE)tx_type, bd); in AV1FwdTxfm2dSpeedTest()
338 target_func(input, output, input_stride, &param); in AV1FwdTxfm2dSpeedTest()
444 int input_stride = 64; in AV1HighbdFwdTxfm2dMatchTest() local
[all …]
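In these transform tests, the input block lives in a fixed-width buffer, so input_stride is 64 regardless of the transform size and element (r, c) is addressed as r * input_stride + c. A hedged sketch of the fill loop; the 64x64 buffer size is inferred from the stride, and rand() stands in for the test's random generator.

#include <stdint.h>
#include <stdlib.h>

/* Fill a rows x cols transform block inside a 64-wide buffer with values in
 * [0, 2^bd). Illustrative only. */
static void fill_txfm_block(int16_t input[64 * 64], int rows, int cols, int bd) {
  const int input_stride = 64; /* fixed buffer width, independent of tx size */
  for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
      input[r * input_stride + c] = (int16_t) (rand() % (1 << bd));
    }
  }
}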
