
Searched refs:vi2x89AB (Results 1 – 25 of 327) sorted by relevance
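
These hits follow XNNPACK's register-naming convention: vi2x89AB is the SIMD register holding input row 2, columns 8 through B (hexadecimal column indices). In the stride-2 3x3s2p1 kernels below, two adjacent registers are shuffled into even- and odd-column vectors before the multiply-accumulates. The following minimal sketch (not XNNPACK source; the helper name is hypothetical) shows that deinterleave with WAsm SIMD intrinsics; recent wasm_simd128.h headers spell the shuffle wasm_i32x4_shuffle, while the generated files below use the older wasm_v32x4_shuffle name.

  #include <wasm_simd128.h>

  /* Sketch: split columns 8..F of input row i2 into even columns {8,A,C,E}
     and odd columns {9,B,D,F}, as the 3x3s2p1 hits below do. */
  static void deinterleave_row2(const float* i2, v128_t* veven, v128_t* vodd) {
    const v128_t vi2x89AB = wasm_v128_load(i2);      /* columns 8 9 A B */
    const v128_t vi2xCDEF = wasm_v128_load(i2 + 4);  /* columns C D E F */
    *veven = wasm_i32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6);  /* 8 A C E */
    *vodd  = wasm_i32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7);  /* 9 B D F */
  }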


/external/XNNPACK/src/f32-dwconv2d-chw/gen/
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc2.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2()
128 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2() local
135 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2()
136 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc2()
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc2.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2()
138 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2() local
145 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2()
146 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc2()
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc2.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2()
128 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2() local
135 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2()
136 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc2()
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc4.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4()
130 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4() local
137 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4()
138 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc4()
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4()
137 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4() local
144 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4()
145 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4()
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc2.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2()
138 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2() local
145 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2()
146 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc2()
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4()
137 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4() local
144 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4()
145 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4()
3x3s2p1-minmax-wasmsimd-arm-splat-1x4.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4()
127 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4() local
134 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4()
135 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4()
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc3.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3()
129 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3() local
136 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3()
137 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc3()
3x3s2p1-minmax-wasmsimd-x86-splat-1x4.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4()
127 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4() local
134 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4()
135 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4()
3x3s2p1-minmax-wasmsimd-x86-splat-1x4-acc4.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4()
130 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4() local
137 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4()
138 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_splat_1x4_acc4()
3x3s2p1-minmax-wasmsimd-arm-splat-1x4-acc3.c
76 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3() local
84 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3()
85 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3()
129 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3() local
136 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3()
137 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_splat_1x4_acc3()
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc3.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3()
139 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3() local
146 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3()
147 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc3()
3x3s2p1-minmax-sse-1x4.c
80 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4() local
88 const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4()
89 const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4()
130 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4() local
137 const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4()
138 const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4()
3x3s2p1-minmax-sse-1x4-acc3.c
80 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3() local
88 const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3()
89 const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3()
132 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3() local
139 const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3()
140 const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc3()
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc4.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4()
140 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4() local
147 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4()
148 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc4()
3x3s2p1-minmax-wasmsimd-arm-loadsplat-1x4-acc3.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3()
139 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3() local
146 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3()
147 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_arm_loadsplat_1x4_acc3()
3x3s2p1-minmax-wasmsimd-x86-loadsplat-1x4-acc4.c
86 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4() local
94 const v128_t vi2x8ACE = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4()
95 const v128_t vi2x9BDF = wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4()
140 const v128_t vi2x89AB = wasm_v128_load(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4() local
147 const v128_t vi2x8ACE = wasm_v128_and(vmask_even, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 0, 2, 4, 6)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4()
148 const v128_t vi2x9BDF = wasm_v128_and(vmask_odd, wasm_v32x4_shuffle(vi2x89AB, vi2xCDEF, 1, 3, 5, 7)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__wasmsimd_x86_loadsplat_1x4_acc4()
3x3s2p1-minmax-sse-1x4-acc2.c
80 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2() local
88 const __m128 vi2x8ACE = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2()
89 const __m128 vi2x9BDF = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1)); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2()
131 const __m128 vi2x89AB = _mm_loadu_ps(i2); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2() local
138 const __m128 vi2x8ACE = _mm_and_ps(vmask_even, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2()
139 const __m128 vi2x9BDF = _mm_and_ps(vmask_odd, _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1))); in xnn_f32_dwconv2d_chw_ukernel_3x3s2p1__sse_1x4_acc2()
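
The SSE hits above perform the same even/odd split with _mm_shuffle_ps, which takes its low two result lanes from the first operand and its high two from the second. A minimal sketch under the same assumptions (hypothetical helper name, not XNNPACK source):

  #include <xmmintrin.h>

  /* Sketch: _MM_SHUFFLE(2, 0, 2, 0) selects lanes 0 and 2 of each operand,
     i.e. even columns {8,A,C,E}; (3, 1, 3, 1) selects the odd columns. */
  static void deinterleave_row2_sse(const float* i2, __m128* veven, __m128* vodd) {
    const __m128 vi2x89AB = _mm_loadu_ps(i2);      /* columns 8 9 A B */
    const __m128 vi2xCDEF = _mm_loadu_ps(i2 + 4);  /* columns C D E F */
    *veven = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(2, 0, 2, 0));
    *vodd  = _mm_shuffle_ps(vi2x89AB, vi2xCDEF, _MM_SHUFFLE(3, 1, 3, 1));
  }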
5x5p2-minmax-neonfma-1x4.c
82 const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4() local
135 const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
153 const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
154 vi2x4567 = vi2x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
183 float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4() local
189 vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB))); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
242 const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
260 const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
261 vi2x4567 = vi2x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4()
5x5p2-minmax-neonfma-1x4-acc2.c
82 const float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2() local
135 const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
153 const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
154 vi2x4567 = vi2x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
184 float32x4_t vi2x89AB = vld1q_f32(i2); i2 += 4; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2() local
190 vi2x89AB = vreinterpretq_f32_u32(vandq_u32(vmask, vreinterpretq_u32_f32(vi2x89AB))); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
243 const float32x4_t vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
261 const float32x4_t vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
262 vi2x4567 = vi2x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_1x4_acc2()
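
The 5x5p2 kernels are stride-1, so vi2x89AB is not split into even/odd lanes; instead vextq_f32 concatenates it with the preceding register to form shifted column windows for the 5-wide filter taps. A minimal sketch (hypothetical helper name):

  #include <arm_neon.h>

  /* Sketch: vextq_f32(a, b, n) returns lanes n..n+3 of the 8-lane
     concatenation a:b, so registers holding columns 4..7 and 8..B yield the
     windows 5..8 and 6..9 used by the hits above. */
  static void shifted_windows(float32x4_t vi2x4567, float32x4_t vi2x89AB,
                              float32x4_t* vi2x5678, float32x4_t* vi2x6789) {
    *vi2x5678 = vextq_f32(vi2x4567, vi2x89AB, 1);  /* columns 5 6 7 8 */
    *vi2x6789 = vextq_f32(vi2x4567, vi2x89AB, 2);  /* columns 6 7 8 9 */
  }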
/external/XNNPACK/src/f16-dwconv2d-chw/gen/
5x5p2-minmax-neonfp16arith-1x4-acc4.c
80 const float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4() local
133 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
151 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
152 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
184 float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4() local
190 vi2x89AB = vreinterpret_f16_u16(vand_u16(vmask, vreinterpret_u16_f16(vi2x89AB))); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
243 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
261 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
262 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc4()
5x5p2-minmax-neonfp16arith-1x4.c
80 const float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4() local
133 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
151 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
152 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
181 float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4() local
187 vi2x89AB = vreinterpret_f16_u16(vand_u16(vmask, vreinterpret_u16_f16(vi2x89AB))); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
240 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
258 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
259 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4()
5x5p2-minmax-neonfp16arith-1x4-acc2.c
80 const float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2() local
133 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
151 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
152 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
182 float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2() local
188 vi2x89AB = vreinterpret_f16_u16(vand_u16(vmask, vreinterpret_u16_f16(vi2x89AB))); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
241 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
259 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
260 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc2()
5x5p2-minmax-neonfp16arith-1x4-acc3.c
80 const float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3() local
133 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
151 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
152 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
183 float16x4_t vi2x89AB = vld1_f16(i2); i2 += 4; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3() local
189 vi2x89AB = vreinterpret_f16_u16(vand_u16(vmask, vreinterpret_u16_f16(vi2x89AB))); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
242 const float16x4_t vi2x5678 = vext_f16(vi2x4567, vi2x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
260 const float16x4_t vi2x6789 = vext_f16(vi2x4567, vi2x89AB, 2); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
261 vi2x4567 = vi2x89AB; in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_1x4_acc3()
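
In the remainder paths of the f16 kernels above, the tail load is masked rather than shuffled: the half-precision lanes are reinterpreted as u16, ANDed with vmask to zero columns past the end of the row, and reinterpreted back. A sketch assuming a target with FP16 vector support (e.g. -march=armv8.2-a+fp16); the helper name is hypothetical:

  #include <arm_neon.h>

  /* Sketch: zero the out-of-bounds columns so they contribute nothing to
     the convolution accumulators in the final column group. */
  static float16x4_t mask_tail(uint16x4_t vmask, float16x4_t vi2x89AB) {
    return vreinterpret_f16_u16(vand_u16(vmask, vreinterpret_u16_f16(vi2x89AB)));
  }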
