/external/XNNPACK/test/ |
D | qs8-vaddc-minmax.cc |
    102  TEST(QS8_VADDC_MINMAX__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QS8_VADDC_MINMAX__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QS8_VADDC_MINMAX__NEON_LD64_X24, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    519  TEST(QS8_VADDC_MINMAX__NEON_LD64_X32, a_scale) {  in TEST() argument
    522    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    658  TEST(QS8_VADDC_MINMAX__NEON_LD128_X16, a_scale) {  in TEST() argument
    661    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
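All of the QS8/QU8 vadd/vaddc/vmul/vmulc hits in this listing come from the same GoogleTest pattern: each per-kernel TEST sweeps a_scale geometrically from 0.1 to 10 and re-runs the microkernel tester at every step. Below is a minimal sketch of one such case; only the a_scale loop is taken verbatim from the hits, while TEST_REQUIRES_ARM_NEON, batch_size(), Test(), the ukernel symbol and the params-init symbol are illustrative assumptions about the surrounding XNNPACK test scaffolding.

#include <gtest/gtest.h>
#include "vaddc-microkernel-tester.h"  // tester header listed further down

TEST(QS8_VADDC_MINMAX__NEON_LD64_X8, a_scale) {
  TEST_REQUIRES_ARM_NEON;  // assumption: NEON kernels are gated on NEON support
  // Loop taken from the hits: geometric sweep of the input quantization scale.
  for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {
    VAddCMicrokernelTester()
      .batch_size(8)       // assumed setter, not shown in these hits
      .a_scale(a_scale)    // setter shown at vaddc-microkernel-tester.h:49
      .Test(xnn_qs8_vaddc_minmax_ukernel__neon_ld64_x8,
            xnn_init_qs8_add_minmax_neon_params);  // assumed symbols
  }
}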
D | qs8-vmulc-minmax-fp32.cc |
    102  TEST(QS8_VMULC_MINMAX_FP32__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QS8_VMULC_MINMAX_FP32__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QS8_VMULC_MINMAX_FP32__NEON_LD128_X16, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    519  TEST(QS8_VMULC_MINMAX_FP32__NEONV8_LD64_X8, a_scale) {  in TEST() argument
    522    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    658  TEST(QS8_VMULC_MINMAX_FP32__NEONV8_LD64_X16, a_scale) {  in TEST() argument
    661    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | qu8-vmulc-minmax-fp32.cc |
    102  TEST(QU8_VMULC_MINMAX_FP32__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QU8_VMULC_MINMAX_FP32__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QU8_VMULC_MINMAX_FP32__NEON_LD128_X16, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    519  TEST(QU8_VMULC_MINMAX_FP32__NEONV8_LD64_X8, a_scale) {  in TEST() argument
    522    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    658  TEST(QU8_VMULC_MINMAX_FP32__NEONV8_LD64_X16, a_scale) {  in TEST() argument
    661    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | qu8-vaddc-minmax.cc |
    102  TEST(QU8_VADDC_MINMAX__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QU8_VADDC_MINMAX__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QU8_VADDC_MINMAX__NEON_LD64_X32, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    519  TEST(QU8_VADDC_MINMAX__NEON_LD128_X16, a_scale) {  in TEST() argument
    522    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    658  TEST(QU8_VADDC_MINMAX__SSE2_MUL16_LD64_X8, a_scale) {  in TEST() argument
    661    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | qs8-vadd-minmax.cc |
    123  TEST(QS8_VADD_MINMAX__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QS8_VADD_MINMAX__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QS8_VADD_MINMAX__NEON_LD64_X24, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    603  TEST(QS8_VADD_MINMAX__NEON_LD64_X32, a_scale) {  in TEST() argument
    606    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    763  TEST(QS8_VADD_MINMAX__NEON_LD128_X16, a_scale) {  in TEST() argument
    766    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | qu8-vadd-minmax.cc |
    123  TEST(QU8_VADD_MINMAX__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QU8_VADD_MINMAX__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QU8_VADD_MINMAX__NEON_LD64_X32, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    603  TEST(QU8_VADD_MINMAX__NEON_LD128_X16, a_scale) {  in TEST() argument
    606    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    763  TEST(QU8_VADD_MINMAX__SSE2_MUL16_LD64_X8, a_scale) {  in TEST() argument
    766    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | vaddc-microkernel-tester.h |
    49   inline VAddCMicrokernelTester& a_scale(float a_scale) {  in a_scale() argument
    56   inline float a_scale() const {  in a_scale() function
|
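The four *-microkernel-tester.h hits (vaddc above, vadd/vmulc/vmul further down) all expose a_scale through the same fluent setter/getter pair. The sketch below illustrates that accessor pattern; the signatures match the hits, while the a_scale_ member, its 1.0f default, and the assert are assumptions added for the example.

#include <cassert>

class VAddCMicrokernelTester {
 public:
  // Setter returns *this so configuration calls can be chained (cf. line 49 above).
  inline VAddCMicrokernelTester& a_scale(float a_scale) {
    assert(a_scale > 0.0f);   // assumed validity check
    this->a_scale_ = a_scale;
    return *this;
  }

  // Getter with the same name, distinguished by const and arity (cf. line 56 above).
  inline float a_scale() const {
    return this->a_scale_;
  }

 private:
  float a_scale_ = 1.0f;      // assumed member name and default
};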
D | qu8-vmul-minmax-fp32.cc |
    123  TEST(QU8_VMUL_MINMAX_FP32__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QU8_VMUL_MINMAX_FP32__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QU8_VMUL_MINMAX_FP32__NEON_LD128_X16, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    603  TEST(QU8_VMUL_MINMAX_FP32__NEONV8_LD64_X8, a_scale) {  in TEST() argument
    606    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    763  TEST(QU8_VMUL_MINMAX_FP32__NEONV8_LD64_X16, a_scale) {  in TEST() argument
    766    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | qs8-vmul-minmax-fp32.cc |
    123  TEST(QS8_VMUL_MINMAX_FP32__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QS8_VMUL_MINMAX_FP32__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QS8_VMUL_MINMAX_FP32__NEON_LD128_X16, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    603  TEST(QS8_VMUL_MINMAX_FP32__NEONV8_LD64_X8, a_scale) {  in TEST() argument
    606    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    763  TEST(QS8_VMUL_MINMAX_FP32__NEONV8_LD64_X16, a_scale) {  in TEST() argument
    766    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    [all …]
|
D | vadd-microkernel-tester.h |
    58   inline VAddMicrokernelTester& a_scale(float a_scale) {  in a_scale() function
    65   inline float a_scale() const {  in a_scale() function
|
D | vmulc-microkernel-tester.h |
    46   inline VMulCMicrokernelTester& a_scale(float a_scale) {  in a_scale() function
    53   inline float a_scale() const {  in a_scale() function
|
D | qu8-vmulc-minmax-rndnu.cc |
    102  TEST(QU8_VMULC_MINMAX_RNDNU__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QU8_VMULC_MINMAX_RNDNU__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QU8_VMULC_MINMAX_RNDNU__NEON_LD128_X16, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
|
D | qs8-vmulc-minmax-rndnu.cc |
    102  TEST(QS8_VMULC_MINMAX_RNDNU__NEON_LD64_X8, a_scale) {  in TEST() argument
    105    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    241  TEST(QS8_VMULC_MINMAX_RNDNU__NEON_LD64_X16, a_scale) {  in TEST() argument
    244    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    380  TEST(QS8_VMULC_MINMAX_RNDNU__NEON_LD128_X16, a_scale) {  in TEST() argument
    383    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
|
D | vmul-microkernel-tester.h |
    55   inline VMulMicrokernelTester& a_scale(float a_scale) {  in a_scale() argument
    62   inline float a_scale() const {  in a_scale() function
|
D | qu8-vmul-minmax-rndnu.cc |
    123  TEST(QU8_VMUL_MINMAX_RNDNU__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QU8_VMUL_MINMAX_RNDNU__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QU8_VMUL_MINMAX_RNDNU__NEON_LD128_X16, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
|
D | qs8-vmul-minmax-rndnu.cc |
    123  TEST(QS8_VMUL_MINMAX_RNDNU__NEON_LD64_X8, a_scale) {  in TEST() argument
    126    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    283  TEST(QS8_VMUL_MINMAX_RNDNU__NEON_LD64_X16, a_scale) {  in TEST() argument
    286    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
    443  TEST(QS8_VMUL_MINMAX_RNDNU__NEON_LD128_X16, a_scale) {  in TEST() argument
    446    for (float a_scale = 0.1f; a_scale <= 10.0f; a_scale *= 3.14f) {  in TEST() local
|
/external/libyuv/files/source/ |
D | row_common.cc |
    858  const uint32_t a_scale = REPEAT8(value >> 24);  in ARGBShadeRow_C() local
    893  const uint32_t a_scale = src_argb1[3];  in ARGBMultiplyRow_C() local
|
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | row_common.cc |
    858  const uint32_t a_scale = REPEAT8(value >> 24);  in ARGBShadeRow_C() local
    893  const uint32_t a_scale = src_argb1[3];  in ARGBMultiplyRow_C() local
|
/external/libaom/libaom/third_party/libyuv/source/ |
D | row_common.cc |
    1127  const uint32_t a_scale = REPEAT8(value >> 24);  in ARGBShadeRow_C() local
    1162  const uint32_t a_scale = src_argb1[3];  in ARGBMultiplyRow_C() local
|
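In the three libyuv copies of row_common.cc above, a_scale is the alpha multiplier: ARGBShadeRow_C derives it once per row from the top byte of a packed 32-bit value (REPEAT8(value >> 24)), while ARGBMultiplyRow_C reads it per pixel from the second source's alpha byte (src_argb1[3]). Below is a small self-contained sketch of the multiply-row idea; the mul8 helper and its rounding are illustrative and do not reproduce libyuv's exact fixed-point macros.

#include <cstdint>

// Illustrative channel multiply: scale 8-bit value v by 8-bit factor s,
// renormalizing to 0..255 (libyuv uses its own REPEAT8/SHADE fixed-point macros).
static inline uint8_t mul8(uint32_t v, uint32_t s) {
  return static_cast<uint8_t>((v * s + 127) / 255);
}

// ARGBMultiplyRow_C-style loop: every channel of src_argb0 is scaled by the
// matching channel of src_argb1, so a_scale = src_argb1[3] is this pixel's
// alpha factor (the hits at lines 893 and 1162 above).
void ARGBMultiplyRow_Sketch(const uint8_t* src_argb0, const uint8_t* src_argb1,
                            uint8_t* dst_argb, int width) {
  for (int i = 0; i < width; ++i) {
    const uint32_t a_scale = src_argb1[3];
    dst_argb[0] = mul8(src_argb0[0], src_argb1[0]);  // B
    dst_argb[1] = mul8(src_argb0[1], src_argb1[1]);  // G
    dst_argb[2] = mul8(src_argb0[2], src_argb1[2]);  // R
    dst_argb[3] = mul8(src_argb0[3], a_scale);       // A
    src_argb0 += 4;
    src_argb1 += 4;
    dst_argb += 4;
  }
}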