Searched refs:vi8x5678 (Results 1 – 8 of 8) sorted by relevance

/external/XNNPACK/src/f32-dwconv2d-chw/gen/
5x5p2-minmax-sse-5x4.c:301 const __m128 vi8x5678 = _mm_shuffle_ps(vi8x8567, vi8x8567, _MM_SHUFFLE(0, 3, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
327 vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
337 const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vi8x89AB, _MM_SHUFFLE(1, 0, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
554 const __m128 vi8x5678 = _mm_shuffle_ps(vi8x8567, vi8x8567, _MM_SHUFFLE(0, 3, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
580 vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
590 const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vi8x89AB, _MM_SHUFFLE(1, 0, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
784 const __m128 vi8x5678 = _mm_shuffle_ps(vi8x8567, vi8x8567, _MM_SHUFFLE(0, 3, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4() local
810 vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
820 const __m128 vi8x6789 = _mm_shuffle_ps(vi8x5678, vzero, _MM_SHUFFLE(1, 0, 2, 1)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__sse_5x4()
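
The lines above are from XNNPACK's generated 5x5 (padding 2) depthwise-convolution CHW micro-kernel for SSE: vi8x5678 is input row 8 shifted left by one element, built by rotating the intermediate vector vi8x8567, and it is then multiplied by filter tap vk43 and accumulated into vo4p0. A minimal standalone sketch of that pattern follows; it assumes vi8x8567 is formed with _mm_move_ss (that step is not visible in the matches) and uses a placeholder weight value.

// SSE sketch of the shift-by-one window pattern (illustration only).
#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  const float row8[8] = {4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f};
  const __m128 vi8x4567 = _mm_loadu_ps(row8);      // lanes {4, 5, 6, 7}
  const __m128 vi8x89AB = _mm_loadu_ps(row8 + 4);  // lanes {8, 9, 10, 11}

  // Assumption: replace lane 0 of {4,5,6,7} with lane 0 of {8,9,10,11} -> {8,5,6,7}.
  const __m128 vi8x8567 = _mm_move_ss(vi8x4567, vi8x89AB);
  // Rotate lanes left by one, as in the matched line: {8,5,6,7} -> {5,6,7,8}.
  const __m128 vi8x5678 = _mm_shuffle_ps(vi8x8567, vi8x8567, _MM_SHUFFLE(0, 3, 2, 1));

  // One filter-tap accumulation, mirroring the matched vo4p0 update.
  const __m128 vk43 = _mm_set1_ps(0.5f);  // placeholder weight
  __m128 vo4p0 = _mm_setzero_ps();
  vo4p0 = _mm_add_ps(vo4p0, _mm_mul_ps(vi8x5678, vk43));

  float out[4];
  _mm_storeu_ps(out, vo4p0);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 2.5 3 3.5 4
  return 0;
}
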
5x5p2-minmax-wasmsimd-x86-loadsplat-5x4.c:280 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
310 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4()
534 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
564 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4()
773 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4() local
803 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4()
813 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_loadsplat_5x4()
5x5p2-minmax-wasmsimd-arm-loadsplat-5x4.c:280 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
310 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4()
534 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
564 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4()
773 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4() local
803 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43)); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4()
813 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_loadsplat_5x4()
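
In the two wasmsimd loadsplat kernels above, the same shifted window is produced directly with wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4), which selects lanes 1–4 of the 8-lane concatenation of the two registers; the weight vk43 is already a pre-splatted vector. A minimal sketch of that pattern, with a placeholder weight and an assumed clang/wasm32 toolchain built with -msimd128:

// WASM SIMD sketch of the shift-by-one window pattern (illustration only).
#include <stdio.h>
#include <wasm_simd128.h>

int main(void) {
  const float row8[8] = {4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f};
  const v128_t vi8x4567 = wasm_v128_load(row8);      // lanes {4, 5, 6, 7}
  const v128_t vi8x89AB = wasm_v128_load(row8 + 4);  // lanes {8, 9, 10, 11}

  // Lanes 1..4 of the concatenation {4,5,6,7,8,9,10,11} -> {5,6,7,8}.
  const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4);

  // One filter-tap accumulation, as in the matched vo4p0 update.
  const v128_t vk43 = wasm_f32x4_splat(0.5f);  // placeholder weight
  v128_t vo4p0 = wasm_f32x4_splat(0.0f);
  vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, vk43));

  float out[4];
  wasm_v128_store(out, vo4p0);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 2.5 3 3.5 4
  return 0;
}
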
5x5p2-minmax-neon-5x4.c:253 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vi8x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
283 vo4p0 = vmlaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4()
508 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vi8x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
538 vo4p0 = vmlaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4()
748 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vzero, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4() local
778 vo4p0 = vmlaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4()
788 const float32x4_t vi8x6789 = vextq_f32(vi8x5678, vzero, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neon_5x4()
5x5p2-minmax-neonfma-5x4.c:253 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vi8x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
283 vo4p0 = vfmaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4()
508 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vi8x89AB, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
538 vo4p0 = vfmaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4()
748 const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vzero, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4() local
778 vo4p0 = vfmaq_lane_f32(vo4p0, vi8x5678, vwOP, 0); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4()
788 const float32x4_t vi8x6789 = vextq_f32(vi8x5678, vzero, 1); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__neonfma_5x4()
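
The neon and neonfma kernels above build the shifted window with vextq_f32 and fold in the tap with a lane-broadcast multiply-accumulate (vmlaq_lane_f32 in the plain NEON variant, vfmaq_lane_f32 in the FMA variant). A minimal sketch, assuming an AArch64 or FMA-capable NEON target and a placeholder weight:

// NEON sketch of the shift-by-one window pattern (illustration only).
#include <stdio.h>
#include <arm_neon.h>

int main(void) {
  const float row8[8] = {4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f};
  const float32x4_t vi8x4567 = vld1q_f32(row8);      // {4, 5, 6, 7}
  const float32x4_t vi8x89AB = vld1q_f32(row8 + 4);  // {8, 9, 10, 11}

  // Extract starting at lane 1 of vi8x4567, filling from vi8x89AB -> {5, 6, 7, 8}.
  const float32x4_t vi8x5678 = vextq_f32(vi8x4567, vi8x89AB, 1);

  // 2-lane weight vector, as in the kernel; lane 0 supplies the tap (placeholder value).
  const float32x2_t vwOP = vdup_n_f32(0.5f);
  float32x4_t vo4p0 = vdupq_n_f32(0.0f);
  // The neonfma variant uses vfmaq_lane_f32; the plain neon variant uses vmlaq_lane_f32.
  vo4p0 = vfmaq_lane_f32(vo4p0, vi8x5678, vwOP, 0);

  float out[4];
  vst1q_f32(out, vo4p0);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 2.5 3 3.5 4
  return 0;
}
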
5x5p2-minmax-wasmsimd-x86-splat-5x4.c:254 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
284 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4()
508 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
538 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4()
747 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4() local
777 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4()
787 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_x86_splat_5x4()
5x5p2-minmax-wasmsimd-arm-splat-5x4.c:254 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
284 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
508 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
538 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
747 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4() local
777 …vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x5678, wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0))… in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
787 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x5678, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
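
The *-splat variants above differ from the loadsplat ones only in how the tap reaches the multiply: it is broadcast in-register from lane 0 of the packed weight vector vwOP via wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0) rather than being pre-splatted. A small sketch of just that broadcast, with placeholder weight values:

// WASM SIMD sketch of the in-register weight broadcast (illustration only).
#include <stdio.h>
#include <wasm_simd128.h>

int main(void) {
  // Packed weights; only lane 0 is used for this tap (placeholder values).
  const float packed[4] = {0.5f, -1.0f, 2.0f, 3.0f};
  const v128_t vwOP = wasm_v128_load(packed);

  // Broadcast lane 0 across all four lanes: {0.5, 0.5, 0.5, 0.5}.
  const v128_t vk = wasm_v32x4_shuffle(vwOP, vwOP, 0, 0, 0, 0);

  float out[4];
  wasm_v128_store(out, vk);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);
  return 0;
}
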
/external/XNNPACK/src/f16-dwconv2d-chw/gen/
5x5p2-minmax-neonfp16arith-5x4.c:251 const float16x4_t vi8x5678 = vext_f16(vi8x4567, vi8x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
281 vo4p0 = vfma_lane_f16(vo4p0, vi8x5678, vwOP, 0); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4()
506 const float16x4_t vi8x5678 = vext_f16(vi8x4567, vi8x89AB, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
536 vo4p0 = vfma_lane_f16(vo4p0, vi8x5678, vwOP, 0); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4()
746 const float16x4_t vi8x5678 = vext_f16(vi8x4567, vzero, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4() local
776 vo4p0 = vfma_lane_f16(vo4p0, vi8x5678, vwOP, 0); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4()
786 const float16x4_t vi8x6789 = vext_f16(vi8x5678, vzero, 1); in xnn_f16_dwconv2d_chw_ukernel_5x5p2__neonfp16arith_5x4()
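
The half-precision kernel above applies the same idea with 4-lane FP16 vectors: vext_f16 shifts the window and vfma_lane_f16 accumulates one tap. A minimal sketch, assuming an ARMv8.2-A FP16 target (e.g. -march=armv8.2-a+fp16) and placeholder weights:

// FP16 NEON sketch of the shift-by-one window pattern (illustration only).
#include <stdio.h>
#include <arm_neon.h>

int main(void) {
  const float16_t row8[8] = {4, 5, 6, 7, 8, 9, 10, 11};
  const float16x4_t vi8x4567 = vld1_f16(row8);      // {4, 5, 6, 7}
  const float16x4_t vi8x89AB = vld1_f16(row8 + 4);  // {8, 9, 10, 11}

  // Shift the window by one element: {5, 6, 7, 8}.
  const float16x4_t vi8x5678 = vext_f16(vi8x4567, vi8x89AB, 1);

  // 4-lane weight vector, as in the kernel; lane 0 supplies the tap (placeholder value).
  const float16x4_t vwOP = vdup_n_f16((float16_t) 0.5f);
  float16x4_t vo4p0 = vdup_n_f16((float16_t) 0.0f);
  vo4p0 = vfma_lane_f16(vo4p0, vi8x5678, vwOP, 0);

  // Widen to f32 for printing.
  const float32x4_t vout = vcvt_f32_f16(vo4p0);
  printf("%g %g %g %g\n", vgetq_lane_f32(vout, 0), vgetq_lane_f32(vout, 1),
         vgetq_lane_f32(vout, 2), vgetq_lane_f32(vout, 3));  // 2.5 3 3.5 4
  return 0;
}
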