| /external/XNNPACK/src/f32-avgpool/ |
| D | 9p8x-minmax-sse-c4.c | 106, 182, 281, 310 | const __m128 vsum67 = _mm_add_ps(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4() | local |
|
| D | 9p8x-minmax-wasmsimd-arm-c4.c | 106, 183, 283, 312 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() | local |
|
| D | 9p8x-minmax-wasmsimd-x86-c4.c | 106, 183, 283, 312 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() | local |
|
| D | 9p8x-minmax-neon-c4.c | 97, 165, 256, 284 | const float32x4_t vsum67 = vaddq_f32(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4() | local |
|
| D | 9p8x-minmax-scalar-c1.c | 97, 166, 256 | const float vsum67 = vi6 + vi7; | in xnn_f32_avgpool_minmax_ukernel_9p8x__scalar_c1() | local |
|
| D | 9p8x-minmax-wasm-c1.c | 97, 166, 256 | const float vsum67 = vi6 + vi7; | in xnn_f32_avgpool_minmax_ukernel_9p8x__wasm_c1() | local |
|
| D | 9x-minmax-wasmsimd-arm-c4.c | 130, 159 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_arm_c4() | local |
|
| D | 9x-minmax-wasmsimd-x86-c4.c | 130, 159 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9x__wasmsimd_x86_c4() | local |
|
| D | 9x-minmax-sse-c4.c | 130, 158 | const __m128 vsum67 = _mm_add_ps(vi6, vi7); | in xnn_f32_avgpool_minmax_ukernel_9x__sse_c4() | local |
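
All of the f32 hits above are the same step of a 9-input reduction: the pooled values are summed pairwise (vsum67 covers inputs 6 and 7) before scaling and clamping. Below is a minimal scalar sketch of that reduction, assuming the usual pairwise-tree shape; only vsum67, vi6, and vi7 come from the listing, every other name is illustrative rather than taken from the XNNPACK sources.

// Hedged sketch of the 9-input reduction the vsum67 hits above belong to.
// Only vsum67, vi6, and vi7 appear in the listing; the other names are
// illustrative, not taken from the XNNPACK sources.
#include <stdio.h>

// Average nine already-gathered inputs for one channel and clamp the
// result to [vmin, vmax], as the minmax kernels do.
static float average9(const float vi[9], float vscale, float vmin, float vmax) {
  // Pairwise partial sums keep the dependency chain short.
  const float vsum01 = vi[0] + vi[1];
  const float vsum23 = vi[2] + vi[3];
  const float vsum45 = vi[4] + vi[5];
  const float vsum67 = vi[6] + vi[7];  // the line the cross-reference matches
  const float vsum0123 = vsum01 + vsum23;
  const float vsum4567 = vsum45 + vsum67;
  const float vsum = vsum0123 + vsum4567 + vi[8];

  // Scale by the reciprocal of the pooling-window size, then clamp.
  float vout = vsum * vscale;
  vout = vout < vmin ? vmin : vout;
  vout = vout > vmax ? vmax : vout;
  return vout;
}

int main(void) {
  const float vi[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  printf("%f\n", average9(vi, 1.0f / 9.0f, 0.0f, 6.0f));  // prints 5.000000
  return 0;
}

The SIMD variants in the listing do the same thing four lanes at a time, with _mm_add_ps (SSE), vaddq_f32 (NEON), or wasm_f32x4_add (WASM SIMD) standing in for the scalar additions.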
|
| /external/XNNPACK/src/f16-pavgpool/ |
| D | 9p8x-minmax-neonfp16arith-c8.c | 97, 165, 257, 285 | const float16x8_t vsum67 = vaddq_f16(vi6, vi7); | in xnn_f16_pavgpool_minmax_ukernel_9p8x__neonfp16arith_c8() | local |
|
| D | 9p8x-minmax-avx2-c8.c | 108, 185, 288, 317 | …const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_NO_EXC)); | in xnn_f16_pavgpool_minmax_ukernel_9p8x__avx2_c8() | local |
|
| D | 9x-minmax-neonfp16arith-c8.c | 123, 151 | const float16x8_t vsum67 = vaddq_f16(vi6, vi7); | in xnn_f16_pavgpool_minmax_ukernel_9x__neonfp16arith_c8() | local |
|
| D | 9x-minmax-avx2-c8.c | 135, 164 | …const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_NO_EXC)); | in xnn_f16_pavgpool_minmax_ukernel_9x__avx2_c8() | local |
|
| /external/XNNPACK/src/f16-avgpool/ |
| D | 9p8x-minmax-neonfp16arith-c8.c | 97, 165, 256, 284 | const float16x8_t vsum67 = vaddq_f16(vi6, vi7); | in xnn_f16_avgpool_minmax_ukernel_9p8x__neonfp16arith_c8() | local |
|
| D | 9p8x-minmax-f16c-c8.c | 108, 185, 286, 315 | …const __m256 vsum67 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_NO_EXC)); | in xnn_f16_avgpool_minmax_ukernel_9p8x__f16c_c8() | local |
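
In the f16 kernels the same pair sum appears in two flavors: the neonfp16arith rows add natively in fp16 with vaddq_f16, while the avx2/f16c rows have no fp16 add and instead widen to fp32, add, and immediately round back through fp16 with _mm256_cvtps_ph/_mm256_cvtph_ps so the partial sum keeps fp16 precision. Below is a minimal sketch of that round-trip; the fp32 test data and load path are illustrative, only the vsum67 expression itself is taken from the listing above.

// Hedged sketch of the fp16 round-trip in the avx2/f16c rows above. The
// loads below use fp32 test data for simplicity; only the vsum67 expression
// itself is taken from the listing. Build with: cc -mavx -mf16c demo.c
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  const float a[8] = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f};
  const float b[8] = {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f};

  const __m256 vi6 = _mm256_loadu_ps(a);
  const __m256 vi7 = _mm256_loadu_ps(b);

  // Add in fp32, then round through fp16 and widen back, so the partial
  // sum carries only fp16 precision, matching the native vaddq_f16 path.
  const __m256 vsum67 =
      _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_add_ps(vi6, vi7), _MM_FROUND_NO_EXC));

  float out[8];
  _mm256_storeu_ps(out, vsum67);
  printf("%f\n", out[0]);  // ~1.099609: 1.1 rounded to fp16 precision
  return 0;
}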
|
| /external/XNNPACK/src/f32-pavgpool/ |
| D | 9p8x-minmax-sse-c4.c | 106, 182, 284, 313 | const __m128 vsum67 = _mm_add_ps(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4() | local |
|
| D | 9p8x-minmax-neon-c4.c | 97, 165, 257, 285 | const float32x4_t vsum67 = vaddq_f32(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4() | local |
|
| D | 9p8x-minmax-wasmsimd-arm-c4.c | 106, 183, 286, 315 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() | local |
|
| D | 9p8x-minmax-wasmsimd-x86-c4.c | 106, 183, 286, 315 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() | local |
|
| D | 9p8x-minmax-scalar-c1.c | 97, 166, 258 | const float vsum67 = vi6 + vi7; | in xnn_f32_pavgpool_minmax_ukernel_9p8x__scalar_c1() | local |
|
| D | 9p8x-minmax-wasm-c1.c | 97, 166, 258 | const float vsum67 = vi6 + vi7; | in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasm_c1() | local |
|
| D | 9x-minmax-sse-c4.c | 133, 161 | const __m128 vsum67 = _mm_add_ps(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9x__sse_c4() | local |
|
| D | 9x-minmax-wasmsimd-x86-c4.c | 133, 162 | const v128_t vsum67 = wasm_f32x4_add(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9x__wasmsimd_x86_c4() | local |
|
| D | 9x-minmax-neon-c4.c | 123, 151 | const float32x4_t vsum67 = vaddq_f32(vi6, vi7); | in xnn_f32_pavgpool_minmax_ukernel_9x__neon_c4() | local |
|
| /external/XNNPACK/src/qu8-avgpool/ |
| D | 9p8x-minmax-scalar-c1.c | 104, 175, 267 | const uint32_t vsum67 = vi6 + vi7; | in xnn_qu8_avgpool_minmax_ukernel_9p8x__scalar_c1() | local |
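
The qu8 kernel accumulates its uint8 inputs in uint32, so vsum67 here is an integer pair sum that cannot overflow for a 9-element window. A minimal sketch follows; only the widening accumulation pattern reflects the matched line, and the rounding division at the end merely stands in for the kernel's fixed-point requantization, whose parameters do not appear in the listing.

// Hedged sketch of the widening accumulation in the qu8 row above: uint8
// inputs are summed in uint32, so vsum67 is an integer pair sum. The
// rounding division at the end stands in for the kernel's fixed-point
// requantization, whose parameters are not shown in the listing.
#include <stdint.h>
#include <stdio.h>

int main(void) {
  const uint8_t vi[9] = {10, 20, 30, 40, 50, 60, 70, 80, 90};

  const uint32_t vsum01 = (uint32_t) vi[0] + vi[1];
  const uint32_t vsum23 = (uint32_t) vi[2] + vi[3];
  const uint32_t vsum45 = (uint32_t) vi[4] + vi[5];
  const uint32_t vsum67 = (uint32_t) vi[6] + vi[7];  // the matched line
  const uint32_t vsum = vsum01 + vsum23 + vsum45 + vsum67 + vi[8];

  // Illustrative rounding average over the 9-element window.
  const uint32_t vout = (vsum + 9 / 2) / 9;
  printf("%u\n", vout);  // prints 50
  return 0;
}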
|