/external/XNNPACK/src/f32-dwconv2d-chw/gen/
D | 3x3p1-minmax-wasmsimd-arm-splat-1x4.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4() local 128 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4() local
D | 3x3p1-minmax-neonfma-1x4.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4() local 128 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4() local
D | 3x3p1-minmax-neon-1x4.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4() local 128 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4() local
D | 3x3p1-minmax-wasmsimd-x86-splat-1x4.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4() local 128 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4() local
D | 3x3p1-minmax-wasmsimd-x86-splat-1x4-acc2.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2() local 129 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc2() local
D | 3x3p1-minmax-wasmsimd-arm-splat-1x4-acc2.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2() local 129 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc2() local
D | 3x3p1-minmax-neonfma-1x4-acc2.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc2() local 129 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc2() local
D | 3x3p1-minmax-neon-1x4-acc2.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2() local 129 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc2() local
D | 3x3p1-minmax-wasmsimd-arm-splat-1x4-acc3.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3() local 130 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc3() local
D | 3x3p1-minmax-neon-1x4-acc3.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc3() local 130 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc3() local
D | 3x3p1-minmax-wasmsimd-x86-splat-1x4-acc3.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3() local 130 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc3() local
D | 3x3p1-minmax-neonfma-1x4-acc3.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc3() local 130 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc3() local
D | 3x3p1-minmax-wasmsimd-arm-splat-1x4-acc4.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4() local 131 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_splat_1x4_acc4() local
D | 3x3p1-minmax-wasmsimd-x86-splat-1x4-acc4.c | 78 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4() local 131 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_splat_1x4_acc4() local
D | 3x3p1-minmax-ssse3-1x4.c | 86 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4() local 130 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4() local
D | 3x3p1-minmax-neonfma-1x4-acc4.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc4() local 131 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neonfma_1x4_acc4() local
D | 3x3p1-minmax-neon-1x4-acc4.c | 77 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc4() local 131 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_1x4_acc4() local
D | 3x3p1-minmax-ssse3-1x4-acc3.c | 86 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc3() local 132 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc3() local
D | 3x3p1-minmax-wasmsimd-x86-loadsplat-1x4.c | 91 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4() local 134 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4() local
D | 3x3p1-minmax-ssse3-1x4-acc2.c | 86 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc2() local 131 …const __m128 vi0x3456 = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(vi0x4567), _mm_castps_si… in xnn_f32_dwconv2d_chw_ukernel_3x3p1__ssse3_1x4_acc2() local
D | 3x3p1-minmax-wasmsimd-arm-loadsplat-1x4.c | 91 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4() local 134 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4() local
D | 3x3p1-minmax-neon-2x4.c | 90 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4() local 159 const float32x4_t vi0x3456 = vextq_f32(vi0x0123, vi0x4567, 3); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__neon_2x4() local
D | 3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c | 91 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3() local 136 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc3() local
D | 3x3p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c | 91 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2() local 135 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_arm_loadsplat_1x4_acc2() local
D | 3x3p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c | 91 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2() local 135 const v128_t vi0x3456 = wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd_x86_loadsplat_1x4_acc2() local
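All of the hits above are the same data-movement step expressed with different intrinsics: the 3x3, padding-1 CHW kernels hold two adjacent groups of four input columns in the registers vi0x0123 and vi0x4567, then splice out the window shifted left by one lane, vi0x3456 = {x3, x4, x5, x6}, which supplies the left-neighbor inputs for the four output columns whose center taps come from vi0x4567. The NEON variants do it with vextq_f32, the WASM SIMD variants with wasm_v32x4_shuffle, and the SSSE3 variants with a byte-level _mm_alignr_epi8 wrapped in float/integer casts. Each file is listed twice because the same statement appears a second time further down the function, presumably in the column-remainder path. The sketch below is not XNNPACK code; it is a minimal, self-contained illustration of the three intrinsics, assuming the target defines __ARM_NEON, __wasm_simd128__, or __SSSE3__, and the helper names (load4, store4, make_vi0x3456) are invented for the example.

#include <stdio.h>

#if defined(__ARM_NEON)
  #include <arm_neon.h>
  typedef float32x4_t vf4;
  static vf4 load4(const float* p) { return vld1q_f32(p); }
  static void store4(float* p, vf4 v) { vst1q_f32(p, v); }
  static vf4 make_vi0x3456(vf4 vi0x0123, vf4 vi0x4567) {
    /* vextq_f32(a, b, 3) == { a[3], b[0], b[1], b[2] } */
    return vextq_f32(vi0x0123, vi0x4567, 3);
  }
#elif defined(__wasm_simd128__)
  #include <wasm_simd128.h>
  typedef v128_t vf4;
  static vf4 load4(const float* p) { return wasm_v128_load(p); }
  static void store4(float* p, vf4 v) { wasm_v128_store(p, v); }
  static vf4 make_vi0x3456(vf4 vi0x0123, vf4 vi0x4567) {
    /* Shuffle indices 4..7 pick lanes from the second operand. */
    return wasm_v32x4_shuffle(vi0x0123, vi0x4567, 3, 4, 5, 6);
  }
#elif defined(__SSSE3__)
  #include <tmmintrin.h>
  typedef __m128 vf4;
  static vf4 load4(const float* p) { return _mm_loadu_ps(p); }
  static void store4(float* p, vf4 v) { _mm_storeu_ps(p, v); }
  static vf4 make_vi0x3456(vf4 vi0x0123, vf4 vi0x4567) {
    /* Treat both registers as bytes, concatenate vi0x4567:vi0x0123,
       and shift right by 12 bytes (3 floats). */
    return _mm_castsi128_ps(_mm_alignr_epi8(
        _mm_castps_si128(vi0x4567), _mm_castps_si128(vi0x0123), 12));
  }
#else
  #error "This sketch assumes NEON, WASM SIMD, or SSSE3."
#endif

int main(void) {
  const float row[8] = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f};
  float out[4];
  const vf4 vi0x0123 = load4(row);      /* input columns 0..3 */
  const vf4 vi0x4567 = load4(row + 4);  /* input columns 4..7 */
  store4(out, make_vi0x3456(vi0x0123, vi0x4567));
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* prints: 3 4 5 6 */
  return 0;
}

On every target the program prints 3 4 5 6, the input row shifted left by one element, which is the alignment a padding-1 3x3 filter needs for its left tap while vi0x4567 feeds the center tap.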