/external/libvpx/libvpx/vpx_dsp/x86/

  variance_sse2.c
     26  __m128i vsum = _mm_setzero_si128();  in vpx_get_mb_ss_sse2() local
     56  static INLINE void variance_final_128_pel_sse2(__m128i vsse, __m128i vsum,  in variance_final_128_pel_sse2()
     68  static INLINE void variance_final_256_pel_sse2(__m128i vsse, __m128i vsum,  in variance_final_256_pel_sse2()
     80  static INLINE void variance_final_512_pel_sse2(__m128i vsse, __m128i vsum,  in variance_final_512_pel_sse2()
    220  __m128i vsse, vsum;  in vpx_get8x8var_sse2() local
    228  __m128i vsse, vsum;  in vpx_get16x16var_sse2() local
    236  __m128i vsse, vsum;  in vpx_variance4x4_sse2() local
    246  __m128i vsse, vsum;  in vpx_variance4x8_sse2() local
    256  __m128i vsse, vsum;  in vpx_variance8x4_sse2() local
    266  __m128i vsse, vsum;  in vpx_variance8x8_sse2() local
    [all …]

  variance_avx2.c
     62  __m128i vsum,  in variance_final_from_32bit_sum_avx2()
     81  __m256i vsum,  in variance_final_from_16bit_sum_avx2()
    125  __m256i *const vsum) {  in variance16_avx2()
    140  __m256i *const vsum) {  in variance32_avx2()
    155  __m256i *const vsum) {  in variance64_avx2()
    170  __m256i vsse, vsum;  in vpx_get16x16var_avx2() local
    616  __m256i vsse, vsum;  in vpx_variance16x8_avx2() local
    626  __m256i vsse, vsum;  in vpx_variance16x16_avx2() local
    636  __m256i vsse, vsum;  in vpx_variance16x32_avx2() local
    646  __m256i vsse, vsum;  in vpx_variance32x16_avx2() local
    [all …]
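Each of these kernels carries two accumulators, vsse (sum of squared differences) and vsum (signed sum of differences), and hands them to a variance_final_* helper once the rows are consumed; the libaom variance_avx2.c listing further down is organized the same way. Below is a hedged SSE2 sketch of that final step, assuming both accumulators already hold 32-bit lanes; the helper name, the num_pixels parameter, and the division (the real helpers shift by a per-block-size constant) are illustrative, not the library's exact code.

    #include <emmintrin.h>
    #include <stdint.h>

    /* Horizontal add of the four 32-bit lanes of an SSE2 register. */
    static int hsum_epi32(__m128i v) {
      v = _mm_add_epi32(v, _mm_srli_si128(v, 8));  /* fold upper 64 bits onto lower */
      v = _mm_add_epi32(v, _mm_srli_si128(v, 4));  /* fold the remaining lane */
      return _mm_cvtsi128_si32(v);
    }

    /* Hypothetical final step: variance = SSE - sum^2 / N.  Assumes vsse and
     * vsum were already widened to 32-bit lanes by the row loops above. */
    static unsigned int variance_final_sketch(__m128i vsse, __m128i vsum,
                                              int num_pixels, int *sum) {
      const unsigned int sse = (unsigned int)hsum_epi32(vsse);
      *sum = hsum_epi32(vsum);
      return sse - (unsigned int)(((int64_t)*sum * *sum) / num_pixels);
    }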
/external/XNNPACK/src/f32-gavgpool-cw/

  neon-x4.c
     76  const float32x4_t vsum = vpaddq_f32(vsum01, vsum23);  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
     82  const float32x4_t vsum = vcombine_f32(vpadd_f32(vget_low_f32(vsum01), vget_high_f32(vsum01)),  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local
    114  float32x2_t vsum = vadd_f32(vget_low_f32(vsum0), vget_high_f32(vsum0));  in xnn_f32_gavgpool_cw_ukernel__neon_x4() local

  wasmsimd-arm-x4.c
     78  …const v128_t vsum = wasm_f32x4_add(wasm_v32x4_shuffle(vsum01, vsum23, 0, 2, 4, 6), wasm_v32x4_shuf…  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local
     94  v128_t vsum = wasm_f64x2_splat(0.0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4() local

  sse-x4.c
     78  const __m128 vsum = _mm_add_ps(_mm_movelh_ps(vsum01, vsum23), _mm_movehl_ps(vsum23, vsum01));  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local
     94  __m128 vsum = _mm_setzero_ps();  in xnn_f32_gavgpool_cw_ukernel__sse_x4() local

  wasmsimd-x86-x4.c
     78  …const v128_t vsum = wasm_f32x4_add(wasm_v32x4_shuffle(vsum01, vsum23, 0, 2, 4, 6), wasm_v32x4_shuf…  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
     96  v128_t vsum = wasm_f64x2_splat(0.0);  in xnn_f32_gavgpool_cw_ukernel__wasmsimd_x86_x4() local
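The x4 channelwise variants keep one accumulator per channel (vsum0…vsum3, four partial sums each) and then fold them so every output lane holds a single channel's total; the sse-x4.c hit at line 78 is exactly that fold. A minimal sketch of the 4x4 reduction under those assumptions, with the surrounding load loop and the final scale/clamp omitted:

    #include <xmmintrin.h>

    /* With vsum0 = {a0,a1,a2,a3}, vsum1 = {b0,...}, vsum2 = {c0,...},
     * vsum3 = {d0,...}, reduce to { sum(a), sum(b), sum(c), sum(d) }. */
    static __m128 reduce4x4_sketch(__m128 vsum0, __m128 vsum1,
                                   __m128 vsum2, __m128 vsum3) {
      /* Interleave and add: vsum01 = { a0+a2, b0+b2, a1+a3, b1+b3 }. */
      const __m128 vsum01 = _mm_add_ps(_mm_unpacklo_ps(vsum0, vsum1),
                                       _mm_unpackhi_ps(vsum0, vsum1));
      const __m128 vsum23 = _mm_add_ps(_mm_unpacklo_ps(vsum2, vsum3),
                                       _mm_unpackhi_ps(vsum2, vsum3));
      /* Pair the low and high halves so each lane ends up with one channel's sum. */
      return _mm_add_ps(_mm_movelh_ps(vsum01, vsum23),
                        _mm_movehl_ps(vsum23, vsum01));
    }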
/external/XNNPACK/src/u8-lut32norm/

  scalar.c
     23  uint32_t vsum = 0;  in compute_sum() local
     39  const uint32_t vsum = compute_sum(n, x, t);  in xnn_u8_lut32norm_ukernel__scalar() local
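The lut32norm kernel first sums the 32-bit table entries selected by each input byte and then uses that total as the normalization denominator. A plausible scalar shape for the compute_sum() helper hit above, not a verified copy of it (assumes n is nonzero; pointer names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Sum t[x[i]] over n input bytes. */
    static uint32_t compute_sum_sketch(size_t n, const uint8_t *x, const uint32_t *t) {
      uint32_t vsum = 0;
      do {
        vsum += t[(size_t)*x++];
        n -= 1;
      } while (n != 0);
      return vsum;
    }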
/external/XNNPACK/src/f32-gavgpool/

  7p7x-minmax-neon-c4.c
     54  const float32x4_t vsum = vaddq_f32(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
     87  const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
    141  const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local
    169  const float32x4_t vsum = vaddq_f32(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__neon_c4() local

  7p7x-minmax-wasmsimd-arm-c4.c
     61  const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
    102  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
    164  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local
    193  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_arm_c4() local

  7p7x-minmax-sse-c4.c
     61  const __m128 vsum = _mm_add_ps(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
    101  const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
    163  const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local
    192  const __m128 vsum = _mm_add_ps(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__sse_c4() local

  7p7x-minmax-wasmsimd-x86-c4.c
     61  const v128_t vsum = wasm_f32x4_add(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
    102  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
    164  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local
    193  const v128_t vsum = wasm_f32x4_add(vsum0123, vsum456a);  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasmsimd_x86_c4() local

  7p7x-minmax-scalar-c1.c
     52  const float vsum = vsum016 + vsum2345;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1() local
     86  const float vsum = vsum0123 + vsum456a;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1() local
    140  const float vsum = vsum0123 + vsum456a;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__scalar_c1() local

  7p7x-minmax-wasm-c1.c
     52  const float vsum = vsum016 + vsum2345;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1() local
     86  const float vsum = vsum0123 + vsum456a;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1() local
    140  const float vsum = vsum0123 + vsum456a;  in xnn_f32_gavgpool_minmax_ukernel_7p7x__wasm_c1() local

  7x-minmax-sse-c4.c
     78  const __m128 vsum = _mm_add_ps(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4() local
    105  const __m128 vsum = _mm_add_ps(vsum016, vsum2345);  in xnn_f32_gavgpool_minmax_ukernel_7x__sse_c4() local
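The 7p7x/7x names indicate that each pass folds up to seven pooled rows; the digits in the temporaries spell out which rows they cover (vsum016 covers rows 0, 1 and 6, vsum2345 covers rows 2 through 5, and vsum456a mixes rows with the running accumulator on later passes). The f16 and qu8 listings below use the same tree. A hedged scalar sketch of one output element of the first pass:

    /* One element of a 7-row average-pooling pass: pairwise adds keep the
     * dependency chain short.  Mirrors the naming in the scalar hits above. */
    static float sum7_sketch(float i0, float i1, float i2, float i3,
                             float i4, float i5, float i6) {
      const float vsum01 = i0 + i1;
      const float vsum23 = i2 + i3;
      const float vsum45 = i4 + i5;
      const float vsum016 = vsum01 + i6;
      const float vsum2345 = vsum23 + vsum45;
      const float vsum = vsum016 + vsum2345;
      return vsum;  /* the caller scales by 1/pooling_size and clamps to min/max */
    }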
/external/XNNPACK/src/f16-gavgpool/

  7p7x-minmax-neonfp16arith-c8.c
     55  const float16x8_t vsum = vaddq_f16(vsum016, vsum2345);  in xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8() local
     88  const float16x8_t vsum = vaddq_f16(vsum0123, vsum456a);  in xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8() local
    142  const float16x8_t vsum = vaddq_f16(vsum0123, vsum456a);  in xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8() local
    170  const float16x8_t vsum = vaddq_f16(vsum0123, vsum456a);  in xnn_f16_gavgpool_minmax_ukernel_7p7x__neonfp16arith_c8() local
/external/libaom/libaom/aom_dsp/x86/

  variance_avx2.c
     48  static INLINE int variance_final_from_32bit_sum_avx2(__m256i vsse, __m128i vsum,  in variance_final_from_32bit_sum_avx2()
     65  static INLINE int variance_final_512_avx2(__m256i vsse, __m256i vsum,  in variance_final_512_avx2()
     75  static INLINE int variance_final_1024_avx2(__m256i vsse, __m256i vsum,  in variance_final_1024_avx2()
     93  static INLINE int variance_final_2048_avx2(__m256i vsse, __m256i vsum,  in variance_final_2048_avx2()
    124  __m256i *const vsum) {  in variance16_avx2()
    137  __m256i *const vsum) {  in variance32_avx2()
    150  __m256i *const vsum) {  in variance64_avx2()
    164  __m256i *const vsum) {  in variance128_avx2()
/external/XNNPACK/src/qu8-gavgpool/

  7p7x-minmax-scalar-c1.c
     56  const uint32_t vsum = vsum016 + vsum2345;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__scalar_c1() local
     90  const uint32_t vsum = vsum016 + vsum2345;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__scalar_c1() local
    150  const uint32_t vsum = vsum016 + vsum2345;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__scalar_c1() local
/external/XNNPACK/src/f32-pavgpool/

  9p8x-minmax-sse-c4.c
    110  const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4() local
    186  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4() local
    288  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4() local
    317  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__sse_c4() local

  9p8x-minmax-wasmsimd-arm-c4.c
    110  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    187  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    290  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    319  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local

  9p8x-minmax-neon-c4.c
    101  const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4() local
    169  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4() local
    261  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4() local
    289  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__neon_c4() local

  9p8x-minmax-wasmsimd-x86-c4.c
    110  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    187  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    290  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    321  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_pavgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
/external/XNNPACK/src/f32-avgpool/

  9p8x-minmax-neon-c4.c
    101  const float32x4_t vsum = vaddq_f32(vsum2345, vsum01678);  in xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4() local
    169  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4() local
    260  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4() local
    288  const float32x4_t vsum = vaddq_f32(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__neon_c4() local

  9p8x-minmax-wasmsimd-arm-c4.c
    110  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    187  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    287  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local
    316  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_arm_c4() local

  9p8x-minmax-sse-c4.c
    110  const __m128 vsum = _mm_add_ps(vsum2345, vsum01678);  in xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4() local
    186  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4() local
    285  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4() local
    314  const __m128 vsum = _mm_add_ps(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__sse_c4() local

  9p8x-minmax-wasmsimd-x86-c4.c
    110  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum01678);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    187  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    287  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
    318  const v128_t vsum = wasm_f32x4_add(vsum2345, vsum0167a);  in xnn_f32_avgpool_minmax_ukernel_9p8x__wasmsimd_x86_c4() local
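The 9p8x avgpool and pavgpool kernels above follow the same pairwise scheme for a nine-row first pass: vsum2345 folds rows 2 through 5, vsum01678 folds rows 0, 1, 6, 7 and 8, and vsum joins the two halves (vsum0167a on later passes swaps row 8 for the buffered accumulator). A scalar sketch of that first-pass tree, with hypothetical row values; the intermediate vsum018 name is my own inference, not a hit from the listing:

    /* First 9-row pass of a 9p8x average-pooling kernel, as suggested by the
     * temporary names above; later passes replace i8 with the accumulator
     * buffered by the previous pass. */
    static float sum9_sketch(float i0, float i1, float i2, float i3, float i4,
                             float i5, float i6, float i7, float i8) {
      const float vsum01 = i0 + i1;
      const float vsum23 = i2 + i3;
      const float vsum45 = i4 + i5;
      const float vsum67 = i6 + i7;
      const float vsum018 = vsum01 + i8;
      const float vsum2345 = vsum23 + vsum45;
      const float vsum01678 = vsum018 + vsum67;
      const float vsum = vsum2345 + vsum01678;
      return vsum;
    }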