
Lines Matching refs:vi8x4567

109     v128_t vi8x4567 = wasm_v128_load(i8); i8 += 4;  in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()  local
157 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3))); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
167 const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
215 const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
216 vi8x0123 = vi8x4567; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
256 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
304 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
305 vi8x4567 = vi8x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
411 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3))); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
421 const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
469 const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
470 vi8x0123 = vi8x4567; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
510 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
558 const v128_t vi8x6789 = wasm_v32x4_shuffle(vi8x4567, vi8x89AB, 2, 3, 4, 5); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
559 vi8x4567 = vi8x89AB; in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
628 vi8x4567 = wasm_v128_and(vmask, vi8x4567); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
658 vo4p0 = wasm_f32x4_add(vo4p0, wasm_f32x4_mul(vi8x4567, wasm_v32x4_shuffle(vwKLMN, vwKLMN, 3, 3, 3, 3))); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
668 const v128_t vi8x3456 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 3, 4, 5, 6); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
708 const v128_t vi8x2345 = wasm_v32x4_shuffle(vi8x0123, vi8x4567, 2, 3, 4, 5); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
748 const v128_t vi8x5678 = wasm_v32x4_shuffle(vi8x4567, vzero, 1, 2, 3, 4); in xnn_f32_dwconv2d_chw_ukernel_5x5p2__wasmsimd_arm_splat_5x4()
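The matches above trace the life of vi8x4567 through one input row of the 5x4-output-row kernel: it is loaded (line 109), multiplied by a splatted weight lane, combined with the neighbouring quads vi8x0123 and vi8x89AB to form the shifted windows vi8x2345, vi8x3456, vi8x5678, and vi8x6789, rotated forward for the next column block (lines 216/305), and masked for the remainder columns (line 628). Below is a minimal sketch of that sliding-window pattern, not the XNNPACK source itself: the helper name accumulate_row_5tap and the plain-float weight array are assumptions, and wasm_f32x4_splat stands in for the kernel's wasm_v32x4_shuffle(vw, vw, k, k, k, k) weight broadcasts.

#include <wasm_simd128.h>

/* Hypothetical helper: accumulate one input row's five horizontal taps for
 * four output pixels.  row_prev holds pixels 0..3 (vi8x0123), row_curr holds
 * pixels 4..7 (vi8x4567), row_next holds pixels 8..11 (vi8x89AB), and w[0..4]
 * are the five filter taps of this row. */
static inline v128_t accumulate_row_5tap(v128_t acc,
                                         v128_t row_prev,
                                         v128_t row_curr,
                                         v128_t row_next,
                                         const float w[5]) {
  /* Shifted windows around the four center pixels 4..7: lanes 0..3 come from
   * the first operand, lanes 4..7 from the second. */
  const v128_t x2345 = wasm_v32x4_shuffle(row_prev, row_curr, 2, 3, 4, 5);
  const v128_t x3456 = wasm_v32x4_shuffle(row_prev, row_curr, 3, 4, 5, 6);
  const v128_t x5678 = wasm_v32x4_shuffle(row_curr, row_next, 1, 2, 3, 4);
  const v128_t x6789 = wasm_v32x4_shuffle(row_curr, row_next, 2, 3, 4, 5);

  /* One multiply-add per tap; the splat broadcasts a scalar weight across the
   * vector, as the shuffle-based weight broadcasts do in the kernel. */
  acc = wasm_f32x4_add(acc, wasm_f32x4_mul(x2345,    wasm_f32x4_splat(w[0])));
  acc = wasm_f32x4_add(acc, wasm_f32x4_mul(x3456,    wasm_f32x4_splat(w[1])));
  acc = wasm_f32x4_add(acc, wasm_f32x4_mul(row_curr, wasm_f32x4_splat(w[2])));
  acc = wasm_f32x4_add(acc, wasm_f32x4_mul(x5678,    wasm_f32x4_splat(w[3])));
  acc = wasm_f32x4_add(acc, wasm_f32x4_mul(x6789,    wasm_f32x4_splat(w[4])));
  return acc;
}

After each column block the registers are rotated (row_prev = row_curr; row_curr = row_next;) so only one new quad has to be loaded per iteration, which is exactly what lines 216/305 and 470/559 of the listing do for vi8x0123 and vi8x4567.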