/external/XNNPACK/src/f16-dwconv2d-chw/gen/
D | 5x5p2-minmax-neonfp16arith-5x4.c
    299  const float16x4_t vi8x6789 = vext_f16(vi8x4567, vi8x89AB, 2);  in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
    554  const float16x4_t vi8x6789 = vext_f16(vi8x4567, vi8x89AB, 2);  in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
    786  const float16x4_t vi8x6789 = vext_f16(vi8x5678, vzero, 1);  in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
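All three f16 matches above are the same lane-extraction idiom: the shifted window vi8x6789 (input columns 6..9) is built from two adjacent 4-lane registers, with a zero vector standing in for columns past the row at the right edge. A minimal standalone sketch of that idiom follows; the helper names are illustrative (not XNNPACK code) and it assumes an AArch64 toolchain with FP16 vector arithmetic (e.g. -march=armv8.2-a+fp16).

#include <arm_neon.h>

// Interior columns: vext_f16(a, b, 2) concatenates a:b and takes four lanes
// starting at index 2, i.e. { a[2], a[3], b[0], b[1] } == columns 6, 7, 8, 9.
static float16x4_t shift_window_f16(float16x4_t vi8x4567, float16x4_t vi8x89AB) {
  return vext_f16(vi8x4567, vi8x89AB, 2);
}

// Right edge: the lane past vi8x5678 is taken from an all-zero vector,
// so the window's last lane is zero.
static float16x4_t shift_window_f16_edge(float16x4_t vi8x5678, float16x4_t vzero) {
  return vext_f16(vi8x5678, vzero, 1);
}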
/external/XNNPACK/src/f32-dwconv2d-chw/gen/
D | 5x5p2-minmax-wasmsimd-x86-splat-5x4.c
    302  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
    556  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
    787  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
D | 5x5p2-minmax-wasmsimd-arm-splat-5x4.c
    302  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
    556  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
    787  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
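Both wasmsimd splat variants (and the loadsplat variants further down) compute vi8x6789 with the same v32x4 shuffle: lane indices 0-3 address the first operand and 4-7 the second, so (2, 3, 4, 5) picks { a[2], a[3], b[0], b[1] }, i.e. columns 6..9. A minimal sketch with illustrative helper names, assuming a WASM SIMD toolchain (e.g. clang --target=wasm32 -msimd128):

#include <wasm_simd128.h>

// Interior columns: lanes 2 and 3 of vi8x4567 plus lanes 0 and 1 of vi8x89AB,
// i.e. columns 6, 7, 8, 9.
static v128_t shift_window_wasm(v128_t vi8x4567, v128_t vi8x89AB) {
  return wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);
}

// Right edge: the lane past vi8x5678 comes from an all-zero vector,
// so the window's last lane is zero.
static v128_t shift_window_wasm_edge(v128_t vi8x5678, v128_t vzero) {
  return wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);
}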
D | 5x5p2-minmax-neon-5x4.c
    301  const float32x4_t vi8x6789 = vextq_f32(vi8x4567, vi8x89AB, 2);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
    556  const float32x4_t vi8x6789 = vextq_f32(vi8x4567, vi8x89AB, 2);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
    788  const float32x4_t vi8x6789 = vextq_f32(vi8x5678, vzero, 1);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
D | 5x5p2-minmax-neonfma-5x4.c
    301  const float32x4_t vi8x6789 = vextq_f32(vi8x4567, vi8x89AB, 2);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
    556  const float32x4_t vi8x6789 = vextq_f32(vi8x4567, vi8x89AB, 2);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
    788  const float32x4_t vi8x6789 = vextq_f32(vi8x5678, vzero, 1);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
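The neon and neonfma f32 kernels use the same extraction as the f16 kernel, only on 128-bit registers: vextq_f32(a, b, 2) returns { a[2], a[3], b[0], b[1] }. A minimal sketch with illustrative helper names:

#include <arm_neon.h>

// Interior columns: { vi8x4567[2], vi8x4567[3], vi8x89AB[0], vi8x89AB[1] },
// i.e. columns 6, 7, 8, 9.
static float32x4_t shift_window_neon(float32x4_t vi8x4567, float32x4_t vi8x89AB) {
  return vextq_f32(vi8x4567, vi8x89AB, 2);
}

// Right edge: shift by one lane and take the final lane from an all-zero
// vector, so the window's last lane is zero.
static float32x4_t shift_window_neon_edge(float32x4_t vi8x5678, float32x4_t vzero) {
  return vextq_f32(vi8x5678, vzero, 1);
}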
D | 5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c
    328  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
    582  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
    813  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
D | 5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c
    328  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
    582  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
    813  const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4);  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
D | 5x5p2-minmax-sse-5x4.c
    337  const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vi8x89AB, _MM_SHUFFLE(1, 0, 2, 1));  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
    590  const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vi8x89AB, _MM_SHUFFLE(1, 0, 2, 1));  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
    820  const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
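The sse kernel builds the same window with _mm_shuffle_ps, whose two low result lanes come from the first operand and two high lanes from the second; _MM_SHUFFLE(1, 0, 2, 1) therefore selects { vi8x5678[1], vi8x5678[2], vi8x89AB[0], vi8x89AB[1] }, i.e. columns 6..9. A minimal sketch with illustrative helper names:

#include <xmmintrin.h>

// Interior columns: low lanes from vi8x5678 (indices 1 and 2), high lanes
// from vi8x89AB (indices 0 and 1), giving columns 6, 7, 8, 9.
static __m128 shift_window_sse(__m128 vi8x5678, __m128 vi8x89AB) {
  return _mm_shuffle_ps(vi8x5678, vi8x89AB, _MM_SHUFFLE(1, 0, 2, 1));
}

// Right edge: the second operand is an all-zero vector, so the two high
// lanes of the window are zero.
static __m128 shift_window_sse_edge(__m128 vi8x5678, __m128 vzero) {
  return _mm_shuffle_ps(vi8x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1));
}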