/external/XNNPACK/test/ |
D | x32-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | x16-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | x64-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | x8-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | x24-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | xx-transpose.cc | 23 .output_stride(2) in TEST() 36 .output_stride(i * 7) in TEST() 49 .output_stride(1) in TEST() 61 .output_stride(2) in TEST() 74 .output_stride(2) in TEST() 86 .output_stride(7) in TEST() 98 .output_stride(i) in TEST() 111 .output_stride(i) in TEST() 125 .output_stride(i) in TEST() 138 .output_stride(1) in TEST() [all …]
|
D | transpose-microkernel-tester.h |
      54 inline TransposeMicrokernelTester& output_stride(size_t output_stride) { in output_stride() function
      55 this->output_stride_ = output_stride; in output_stride()
      59 inline size_t output_stride() const { return this->output_stride_; } in output_stride() function
      98 std::vector<uint8_t> output(output_stride() * block_width() * output_element_stride()); in Test()
      106 output_stride() * output_element_stride(), in Test()
      117 &output[output_element_stride() * (r + c * output_stride())], in Test()
      126 …std::vector<uint64_t> input(input_stride() * output_stride() + XNN_EXTRA_BYTES / sizeof(uint64_t)); in Test()
      127 std::vector<uint64_t> output(input_stride() * output_stride()); in Test()
      136 output_stride() * sizeof(uint64_t), in Test()
      143 ASSERT_EQ(input[c + r * input_stride()], output[r + c * output_stride()]) in Test()
      [all …]
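A minimal sketch (not copied from the source) of the two things this tester header centers on: the chainable output_stride() setter/getter pair at lines 54-59, and the row/column index math the transpose check at line 143 relies on. Class and function names below are simplified stand-ins, not the tester's real API:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-in for TransposeMicrokernelTester: output_stride() is both
    // a chainable setter and a getter, mirroring the overload pair above.
    class TransposeTesterSketch {
     public:
      TransposeTesterSketch& output_stride(size_t output_stride) {
        assert(output_stride != 0);
        this->output_stride_ = output_stride;
        return *this;  // returning *this is what makes the fluent chaining work
      }
      size_t output_stride() const { return this->output_stride_; }

     private:
      size_t output_stride_ = 1;
    };

    // Reference check in the same spirit as line 143: element (r, c) of the input
    // must land at output[r + c * output_stride] after a transpose.
    bool TransposedCorrectly(const std::vector<uint32_t>& input, size_t input_stride,
                             const std::vector<uint32_t>& output, size_t output_stride,
                             size_t rows, size_t cols) {
      for (size_t r = 0; r < rows; r++) {
        for (size_t c = 0; c < cols; c++) {
          if (input[c + r * input_stride] != output[r + c * output_stride]) {
            return false;
          }
        }
      }
      return true;
    }

The chainable setter is why the TEST() bodies in the x8/x16/x24/x32/x64/xx transpose tests above read as a single chained expression that ends in .Test().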
|
D | fill-microkernel-tester.h |
      45 inline FillMicrokernelTester& output_stride(size_t output_stride) { in output_stride() function
      46 assert(output_stride != 0); in output_stride()
      47 this->output_stride_ = output_stride; in output_stride()
      51 inline size_t output_stride() const { in output_stride() function
      69 ASSERT_GE(output_stride(), channels()); in Test()
      75 std::vector<uint8_t> output((rows() - 1) * output_stride() + channels()); in Test()
      90 output_stride() * sizeof(uint8_t), in Test()
      96 …ASSERT_EQ(uint32_t(output[i * output_stride() + c]), uint32_t(fill_pattern[c % fill_pattern.size()… in Test()
      100 …tput value 0x" << std::hex << std::setw(8) << std::setfill('0') << output[i * output_stride() + c]; in Test()
      104 for (size_t c = channels(); c < output_stride(); c++) { in Test()
      [all …]
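The buffer sizing at line 75 and the check at line 96 encode the strided-output convention the fill tester verifies: the output holds (rows - 1) * output_stride + channels bytes, each row gets the fill pattern in its first channels bytes, and bytes past channels up to output_stride must stay untouched. A small sketch of a matching reference fill, with made-up names:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical reference fill: writes `pattern` (repeated) into the first
    // `channels` bytes of each of `rows` rows, where consecutive rows start
    // `output_stride` bytes apart. Bytes in [channels, output_stride) of every
    // row are deliberately left untouched, which is what the tester checks.
    void ReferenceFill(std::vector<uint8_t>& output, size_t rows, size_t channels,
                       size_t output_stride, const std::vector<uint8_t>& pattern) {
      assert(rows != 0);
      assert(output_stride >= channels);
      assert(output.size() >= (rows - 1) * output_stride + channels);
      for (size_t i = 0; i < rows; i++) {
        for (size_t c = 0; c < channels; c++) {
          output[i * output_stride + c] = pattern[c % pattern.size()];
        }
      }
    }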
|
D | transpose-operator-tester.h |
      22 const size_t* output_stride, in reference_index() argument
      29 const size_t idx = pos / output_stride[j]; in reference_index()
      30 pos = pos % output_stride[j]; in reference_index()
      67 std::vector<size_t> output_stride(input.size(), 1); in TestX8()
      70 output_stride[i - 1] = output_stride[i] * shape_[perm()[i]]; in TestX8()
      97 …const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num… in TestX8()
      107 std::vector<size_t> output_stride(input.size(), 1); in TestRunX8()
      110 output_stride[i - 1] = output_stride[i] * shape_[perm()[i]]; in TestRunX8()
      126 …const size_t in_idx = reference_index(input_stride.data(), output_stride.data(), perm_.data(), num… in TestRunX8()
      136 std::vector<size_t> output_stride(input.size(), 1); in TestX16()
      [all …]
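Lines 29-30 and 70 capture the operator tester's two stride computations: row-major output strides derived from the permuted shape, and a flat output index mapped back to a flat input index one dimension at a time. A standalone sketch of that arithmetic (helper names are illustrative; the real functions take raw pointers and a dimension count):

    #include <cstddef>
    #include <vector>

    // Row-major strides of the transposed tensor: the innermost stride is 1 and
    // each outer stride is the inner stride times the permuted extent, matching
    // output_stride[i - 1] = output_stride[i] * shape[perm[i]] at line 70.
    std::vector<size_t> OutputStrides(const std::vector<size_t>& shape,
                                      const std::vector<size_t>& perm) {
      std::vector<size_t> output_stride(shape.size(), 1);
      if (shape.empty()) {
        return output_stride;
      }
      for (size_t i = shape.size() - 1; i > 0; i--) {
        output_stride[i - 1] = output_stride[i] * shape[perm[i]];
      }
      return output_stride;
    }

    // Maps a flat index into the transposed output back to the flat index of the
    // same element in the input (the role reference_index plays above): peel off
    // one output coordinate per dimension with div/mod, then weight it by the
    // stride of the corresponding input dimension perm[j].
    size_t ReferenceIndex(const std::vector<size_t>& input_stride,
                          const std::vector<size_t>& output_stride,
                          const std::vector<size_t>& perm, size_t output_pos) {
      size_t pos = output_pos;
      size_t input_pos = 0;
      for (size_t j = 0; j < output_stride.size(); j++) {
        const size_t idx = pos / output_stride[j];  // coordinate along output dim j
        pos = pos % output_stride[j];
        input_pos += idx * input_stride[perm[j]];   // same coordinate in the input
      }
      return input_pos;
    }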
|
D | pad-microkernel-tester.h |
      82 inline PadMicrokernelTester& output_stride(size_t output_stride) { in output_stride() function
      83 assert(output_stride != 0); in output_stride()
      84 this->output_stride_ = output_stride; in output_stride()
      88 inline size_t output_stride() const { in output_stride() function
      112 …t8_t> output((pre_padding() + input_channels() + post_padding()) + (rows() - 1) * output_stride()); in Test()
      128 output.data(), output_stride() * sizeof(uint8_t), in Test()
      135 uint32_t(output[i * output_stride() + l]), in Test()
      141 << uint32_t(output[i * output_stride() + l]); in Test()
      145 uint32_t(output[i * output_stride() + pre_padding() + c]), in Test()
      151 << uint32_t(output[i * output_stride() + pre_padding() + c]); in Test()
      [all …]
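The index expressions at lines 135 and 145 imply the pad tester's row layout: each output row starts at i * output_stride and holds pre_padding fill bytes, then the copied input channels, then post_padding fill bytes. A sketch of a check for one such row, assuming a single fill byte for simplicity (the real tester uses a multi-byte fill value):

    #include <cstddef>
    #include <cstdint>

    // Checks one padded output row against the layout described above:
    // [pre_padding x fill][input_channels copied from input_row][post_padding x fill].
    bool PaddedRowMatches(const uint8_t* output_row, const uint8_t* input_row,
                          size_t pre_padding, size_t input_channels,
                          size_t post_padding, uint8_t fill_value) {
      for (size_t l = 0; l < pre_padding; l++) {
        if (output_row[l] != fill_value) return false;
      }
      for (size_t c = 0; c < input_channels; c++) {
        if (output_row[pre_padding + c] != input_row[c]) return false;
      }
      for (size_t r = 0; r < post_padding; r++) {
        if (output_row[pre_padding + input_channels + r] != fill_value) return false;
      }
      return true;
    }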
|
D | clamp-operator-tester.h |
      53 inline ClampOperatorTester& output_stride(size_t output_stride) { in output_stride() argument
      54 assert(output_stride != 0); in output_stride()
      55 this->output_stride_ = output_stride; in output_stride()
      59 inline size_t output_stride() const { in output_stride() function
      125 std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); in TestF16()
      147 channels(), input_stride(), output_stride(), in TestF16()
      172 ASSERT_LE(fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_max) in TestF16()
      174 ASSERT_GE(fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_min) in TestF16()
      176 …ASSERT_NEAR(fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + … in TestF16()
      194 std::vector<float> output((batch_size() - 1) * output_stride() + channels()); in TestF32()
      [all …]
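The clamp tester, and the hardswish, copy, negate, convert and pooling testers listed below, all share one NC comparison convention: the dense reference result is indexed as ref[i * channels + c] while the strided operator output is indexed as out[i * output_stride + c]. A self-contained sketch of that comparison (names and tolerance handling are illustrative):

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Compares a densely packed reference against a strided operator output:
    // row i of the output starts at i * output_stride, and only its first
    // `channels` entries are meaningful; anything beyond that is padding.
    bool StridedOutputMatches(const std::vector<float>& output_ref, size_t channels,
                              const std::vector<float>& output, size_t output_stride,
                              size_t batch_size, float tolerance) {
      for (size_t i = 0; i < batch_size; i++) {
        for (size_t c = 0; c < channels; c++) {
          const float expected = output_ref[i * channels + c];
          const float actual = output[i * output_stride + c];
          if (std::fabs(expected - actual) > tolerance) {
            return false;
          }
        }
      }
      return true;
    }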
|
D | hardswish-operator-tester.h |
      49 inline HardSwishOperatorTester& output_stride(size_t output_stride) { in output_stride() argument
      50 assert(output_stride != 0); in output_stride()
      51 this->output_stride_ = output_stride; in output_stride()
      55 inline size_t output_stride() const { in output_stride() function
      90 std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); in TestF16()
      110 channels(), input_stride(), output_stride(), in TestF16()
      134 …ASSERT_NEAR(fp16_ieee_to_fp32_value(output[i * output_stride() + c]), output_ref[i * channels() + … in TestF16()
      148 std::vector<float> output((batch_size() - 1) * output_stride() + channels()); in TestF32()
      169 channels(), input_stride(), output_stride(), in TestF32()
      189 …put_ref[i * channels() + c], output[i * output_stride() + c], std::max(1.0e-7f, std::abs(output[i … in TestF32()
|
D | channel-shuffle-nc.cc | 162 .output_stride(513) in TEST() 174 .output_stride(513) in TEST() 186 .output_stride(513) in TEST() 199 .output_stride(1111) in TEST() 213 .output_stride(513) in TEST() 226 .output_stride(513) in TEST() 239 .output_stride(513) in TEST() 253 .output_stride(1111) in TEST() 408 .output_stride(513) in TEST() 420 .output_stride(513) in TEST() [all …]
|
D | copy-operator-tester.h |
      48 inline CopyOperatorTester& output_stride(size_t output_stride) { in output_stride() function
      49 assert(output_stride != 0); in output_stride()
      50 this->output_stride_ = output_stride; in output_stride()
      54 inline size_t output_stride() const { in output_stride() function
      90 std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); in TestX8()
      109 channels(), input_stride(), output_stride(), in TestX8()
      129 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestX8()
      143 std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); in TestX16()
      162 channels(), input_stride(), output_stride(), in TestX16()
      182 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestX16()
      [all …]
|
D | negate-operator-tester.h |
      49 inline NegateOperatorTester& output_stride(size_t output_stride) { in output_stride() function
      50 assert(output_stride != 0); in output_stride()
      51 this->output_stride_ = output_stride; in output_stride()
      55 inline size_t output_stride() const { in output_stride() function
      90 std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); in TestF16()
      108 channels(), input_stride(), output_stride(), in TestF16()
      132 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestF16()
      146 std::vector<float> output((batch_size() - 1) * output_stride() + channels()); in TestF32()
      165 channels(), input_stride(), output_stride(), in TestF32()
      185 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestF32()
|
D | channel-shuffle-operator-tester.h |
      65 inline ChannelShuffleOperatorTester& output_stride(size_t output_stride) { in output_stride() function
      66 assert(output_stride != 0); in output_stride()
      67 this->output_stride_ = output_stride; in output_stride()
      71 inline size_t output_stride() const { in output_stride() function
      106 std::vector<uint8_t> output((batch_size() - 1) * output_stride() + channels()); in TestX8()
      118 input_stride(), output_stride(), in TestX8()
      140 int32_t(output[i * output_stride() + c * groups() + g])) in TestX8()
      154 std::vector<uint32_t> output((batch_size() - 1) * output_stride() + channels()); in TestX32()
      166 input_stride(), output_stride(), in TestX32()
      188 output[i * output_stride() + c * groups() + g]) in TestX32()
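The output index i * output_stride + c * groups + g at lines 140 and 188 is the destination side of the usual channel shuffle: input channel g * group_channels + c moves to output channel c * groups + g. The input-side index is not shown in the excerpt, so the sketch below assumes that standard rearrangement (names are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Reference channel shuffle over strided NC buffers, assuming the standard
    // group/channel interleave: input channel (g * group_channels + c) is copied
    // to output channel (c * groups + g) within each batch row.
    void ReferenceChannelShuffle(const std::vector<uint8_t>& input, size_t input_stride,
                                 std::vector<uint8_t>& output, size_t output_stride,
                                 size_t batch_size, size_t groups, size_t group_channels) {
      for (size_t i = 0; i < batch_size; i++) {
        for (size_t g = 0; g < groups; g++) {
          for (size_t c = 0; c < group_channels; c++) {
            output[i * output_stride + c * groups + g] =
                input[i * input_stride + g * group_channels + c];
          }
        }
      }
    }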
|
D | convert-operator-tester.h |
      50 inline ConvertOperatorTester& output_stride(size_t output_stride) { in output_stride() function
      51 assert(output_stride != 0); in output_stride()
      52 this->output_stride_ = output_stride; in output_stride()
      56 inline size_t output_stride() const { in output_stride() function
      129 std::vector<float> output((batch_size() - 1) * output_stride() + channels()); in TestF16toF32()
      148 channels(), input_stride(), output_stride(), in TestF16toF32()
      168 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestF16toF32()
      182 std::vector<uint16_t> output((batch_size() - 1) * output_stride() + channels()); in TestF32toF16()
      201 channels(), input_stride(), output_stride(), in TestF32toF16()
      221 ASSERT_EQ(output_ref[i * channels() + c], output[i * output_stride() + c]) in TestF32toF16()
      [all …]
|
D | global-average-pooling-operator-tester.h |
      63 inline GlobalAveragePoolingOperatorTester& output_stride(size_t output_stride) { in output_stride() argument
      64 assert(output_stride != 0); in output_stride()
      65 this->output_stride_ = output_stride; in output_stride()
      69 inline size_t output_stride() const { in output_stride() function
      162 std::vector<uint8_t> output(batch_size() * output_stride()); in TestNWCxQU8()
      187 channels(), input_stride(), output_stride(), in TestNWCxQU8()
      214 ASSERT_LE(uint32_t(output[i * output_stride() + c]), uint32_t(qmax())); in TestNWCxQU8()
      215 ASSERT_GE(uint32_t(output[i * output_stride() + c]), uint32_t(qmin())); in TestNWCxQU8()
      216 …ASSERT_NEAR(float(int32_t(output[i * output_stride() + c])), output_ref[i * channels() + c], 0.80f) in TestNWCxQU8()
      231 std::vector<int8_t> output(batch_size() * output_stride()); in TestNWCxQS8()
      [all …]
|
D | sigmoid-nc.cc | 50 .output_stride(117) in TEST() 62 .output_stride(117) in TEST() 105 .output_stride(117) in TEST() 117 .output_stride(117) in TEST() 208 .output_stride(117) in TEST() 268 .output_stride(117) in TEST() 280 .output_stride(117) in TEST() 293 .output_stride(117) in TEST() 307 .output_stride(117) in TEST() 322 .output_stride(117) in TEST() [all …]
|
/external/XNNPACK/src/operators/ |
D | unary-elementwise-nc.c |
      25 size_t output_stride, in create_unary_elementwise_nc() argument
      63 if (output_stride < channels) { in create_unary_elementwise_nc()
      67 xnn_operator_type_to_string(operator_type), output_stride, channels); in create_unary_elementwise_nc()
      81 unary_elementwise_op->output_pixel_stride = output_stride; in create_unary_elementwise_nc()
      129 const size_t output_stride = unary_elementwise_op->output_pixel_stride; in setup_unary_elementwise_nc() local
      133 if ((((input_stride ^ channels) | (output_stride ^ channels)) == 0) || batch_size == 1) { in setup_unary_elementwise_nc()
      157 .y_stride = output_stride << log2_output_size, in setup_unary_elementwise_nc()
      176 size_t output_stride, in xnn_create_clamp_nc_f16() argument
      224 channels, input_stride, output_stride, flags, in xnn_create_clamp_nc_f16()
      234 size_t output_stride, in xnn_create_clamp_nc_f32() argument
      [all …]
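Two uses of output_stride stand out in this file: creation fails when output_stride < channels (line 63), and setup takes a contiguous path when both strides equal channels or when batch_size == 1; the XOR/OR expression at line 133 is a branch-free way of testing the former condition on both strides at once. A sketch of both checks, with error handling reduced to a return code and hypothetical names:

    #include <cstddef>

    enum status_sketch { status_sketch_success, status_sketch_invalid_parameter };

    // Creation-time validation: a row of `channels` elements cannot fit into an
    // output whose rows are only `output_stride` elements apart.
    status_sketch validate_output_stride(size_t channels, size_t output_stride) {
      if (output_stride < channels) {
        return status_sketch_invalid_parameter;
      }
      return status_sketch_success;
    }

    // Setup-time fast path: (x ^ channels) is zero exactly when x == channels, so
    // OR-ing both XORs is zero only if input and output are both densely packed;
    // a single row (batch_size == 1) is trivially contiguous as well.
    bool can_use_contiguous_path(size_t channels, size_t input_stride,
                                 size_t output_stride, size_t batch_size) {
      return (((input_stride ^ channels) | (output_stride ^ channels)) == 0) ||
             batch_size == 1;
    }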
|
D | lut-elementwise-nc.c |
      23 size_t output_stride, in create_lut_elementwise_nc() argument
      63 if (output_stride < channels) { in create_lut_elementwise_nc()
      67 xnn_operator_type_to_string(operator_type), output_stride, channels); in create_lut_elementwise_nc()
      123 lut_elementwise_op->output_pixel_stride = output_stride; in create_lut_elementwise_nc()
      146 size_t output_stride, in xnn_create_elu_nc_qs8() argument
      165 channels, input_stride, output_stride, in xnn_create_elu_nc_qs8()
      181 size_t output_stride, in xnn_create_sigmoid_nc_qs8() argument
      206 channels, input_stride, output_stride, in xnn_create_sigmoid_nc_qs8()
      218 size_t output_stride, in xnn_create_sigmoid_nc_qu8() argument
      243 channels, input_stride, output_stride, in xnn_create_sigmoid_nc_qu8()
      [all …]
|
/external/XNNPACK/src/ |
D | normalization.c |
      15 const size_t* output_stride, in can_dimension_be_removed() argument
      27 if (output_stride != NULL && perm[dim] > 0) { in can_dimension_be_removed()
      28 if (output_stride[perm[dim] - 1] != output_stride[perm[dim]] * shape[dim]) { in can_dimension_be_removed()
      40 size_t* output_stride, in remove_dimension() argument
      52 if (output_stride != NULL) { in remove_dimension()
      54 output_stride[j] = output_stride[j + 1]; in remove_dimension()
      72 const size_t* output_stride, in xnn_normalize_transpose_permutation() argument
      89 if (output_stride != NULL) { in xnn_normalize_transpose_permutation()
      90 memcpy(normalized_output_stride, output_stride, num_dims * sizeof(size_t)); in xnn_normalize_transpose_permutation()
      158 if (output_stride == NULL) { in xnn_normalize_transpose_permutation()
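The output_stride comparison at line 28 is the standard coalescing test used when normalizing a transpose: an outer dimension and its inner neighbour describe one contiguous run, and can be folded into a single dimension, exactly when the outer stride equals the inner stride times the inner extent. A generic sketch of that test (not the file's actual helper, which also consults the permutation and input strides):

    #include <cstddef>

    // Two adjacent dimensions (outer, inner) of a strided tensor can be coalesced
    // into one dimension exactly when stepping the outer index by one is the same
    // as stepping past a whole inner row. For a row-major 2x3x4 tensor with
    // strides {12, 4, 1}: dims 1 and 2 coalesce (4 == 1 * 4) and dims 0 and 1
    // coalesce (12 == 4 * 3), so the whole tensor is one contiguous run.
    bool dimensions_are_coalescable(size_t outer_stride, size_t inner_stride,
                                    size_t inner_extent) {
      return outer_stride == inner_stride * inner_extent;
    }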
|
/external/XNNPACK/src/f32-spmm/gen/ |
D | 8x4-minmax-neonfma.c |
      25 size_t output_stride, in xnn_f32_spmm_minmax_ukernel_8x4__neonfma() argument
      34 size_t output_decrement = output_stride * nc - 8 * sizeof(float); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      89 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      92 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      95 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      98 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      129 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      174 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      176 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      178 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_8x4__neonfma()
      [all …]
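In these SpMM microkernels output_stride is a byte stride between output channels: after writing one channel's block of rows the kernel advances output by output_stride bytes, and output_decrement = output_stride * nc - MR * sizeof(float) (line 34, with MR = 8 here) rewinds the pointer to the next row block of channel 0 once all nc channels are done. The 4x4 and 12x4 variants below differ only in MR. A simplified scalar sketch of that pointer walk (a hypothetical helper, not the NEON kernel):

    #include <cstddef>
    #include <cstdint>

    // Writes an mr-row by nc-channel tile into `output`, where consecutive output
    // channels are `output_stride` bytes apart. Returns a pointer to the next
    // mr-row block of channel 0, which is where the SpMM kernels leave `output`
    // after subtracting output_decrement.
    float* write_tile_strided(const float* tile,   // mr values per channel, channel-major
                              float* output, size_t output_stride /* bytes */,
                              size_t mr, size_t nc) {
      const size_t output_decrement = output_stride * nc - mr * sizeof(float);
      for (size_t channel = 0; channel < nc; channel++) {
        for (size_t m = 0; m < mr; m++) {
          output[m] = tile[channel * mr + m];  // mr consecutive rows of this channel
        }
        // Step to the same mr rows of the next output channel.
        output = (float*) ((std::uintptr_t) output + output_stride);
      }
      // output is now nc * output_stride bytes past where it started; stepping
      // back by output_decrement lands mr floats after the starting point.
      return (float*) ((std::uintptr_t) output - output_decrement);
    }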
|
D | 4x4-minmax-neonfma.c |
      25 size_t output_stride, in xnn_f32_spmm_minmax_ukernel_4x4__neonfma() argument
      34 size_t output_decrement = output_stride * nc - 4 * sizeof(float); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      71 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      73 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      75 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      77 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      102 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      147 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      149 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      151 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_4x4__neonfma()
      [all …]
|
D | 12x4-minmax-neonfma.c |
      25 size_t output_stride, in xnn_f32_spmm_minmax_ukernel_12x4__neonfma() argument
      34 size_t output_decrement = output_stride * nc - 12 * sizeof(float); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      107 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      111 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      115 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      119 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      156 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      219 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      222 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      225 output = (float*restrict) ((uintptr_t) output + output_stride); in xnn_f32_spmm_minmax_ukernel_12x4__neonfma()
      [all …]
|