/external/XNNPACK/src/f32-gemm/gen/
4x8-minmax-neonfma-dup-ld64.c  (xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld64)
    83: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
    87: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
    91: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8-minmax-neon-dup-ld64.c  (xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld64)
    83: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
    87: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
    91: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
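All of the dup-ld64 hits in this listing share one inner-loop shape: a 64-bit load pulls two elements of row 3 of packed A, lane 0 is broadcast across a 128-bit register (the va3c0 local this search matched), and the broadcast is accumulated against two four-wide columns of packed B. The neonfma and plain neon files differ only in fused (vfmaq_f32) versus separate (vmlaq_f32) multiply-accumulate. A minimal sketch of one such step, where row3_dup_ld64_step, a3, and w are hypothetical names standing in for the generated kernels' pointer bookkeeping:

    #include <arm_neon.h>

    /* One c0 step of the dup-ld64 pattern, row 3 only; the generated kernels
       unroll this across rows 0..3 (or 0..5 for 6x8) and lanes c0/c1. */
    static inline void row3_dup_ld64_step(const float* a3, const float* w,
                                          float32x4_t* vacc3x0123,
                                          float32x4_t* vacc3x4567) {
      const float32x2_t va3 = vld1_f32(a3);              /* ld64: two A elements */
      const float32x4_t vb0123c0 = vld1q_f32(w);         /* B columns 0..3 */
      const float32x4_t vb4567c0 = vld1q_f32(w + 4);     /* B columns 4..7 */
      const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);  /* broadcast lane 0 */
      *vacc3x0123 = vfmaq_f32(*vacc3x0123, va3c0, vb0123c0);  /* vmlaq_f32 in the non-fma files */
      *vacc3x4567 = vfmaq_f32(*vacc3x4567, va3c0, vb4567c0);
    }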
4x8-wasmsimd-splat.c  (xnn_f32_gemm_ukernel_4x8__wasmsimd_splat)
    84: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    92: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
    96: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
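The wasmsimd splat variants express the same broadcast with a four-lane shuffle, and because WebAssembly SIMD has no fused multiply-add, the accumulation is an explicit mul followed by add. The same hypothetical step as above, in WAsm intrinsics:

    #include <wasm_simd128.h>

    /* Row-3 step of the splat pattern; the relu/minmax variants in this
       listing differ only in the clamping applied after the main loop,
       not in these statements. */
    static inline void row3_splat_step(const float* a3, const float* w,
                                       v128_t* vacc3x0123, v128_t* vacc3x4567) {
      const v128_t va3 = wasm_v128_load(a3);
      const v128_t vb0123c0 = wasm_v128_load(w);
      const v128_t vb4567c0 = wasm_v128_load(w + 4);
      const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);  /* splat lane 0 */
      *vacc3x0123 = wasm_f32x4_add(*vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
      *vacc3x4567 = wasm_f32x4_add(*vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
    }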
4x8-minmax-neon-dup-ld128.c  (xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld128)
    84: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
    88: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
    92: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
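The ld128 files differ from their ld64 counterparts only in how A is loaded: one 128-bit load pulls four elements of row 3 at once, so the c0 broadcast must first narrow to the low 64-bit half, since vdupq_lane_f32 takes a 64-bit vector. Sketched with the same hypothetical a3:

    const float32x4_t va3 = vld1q_f32(a3);                           /* ld128: four A elements */
    const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);  /* broadcast element 0 */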
4x8-relu-wasmsimd-splat.c  (xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat)
    84: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    92: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
    96: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x8-minmax-neonfma-dup-ld128.c  (xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld128)
    84: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
    88: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
    92: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x2-minmax-neonfma-lane-ld64.c  (xnn_f32_gemm_minmax_ukernel_4x2__neonfma_lane_ld64)
    84: const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
    88: vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);
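The 4x2 lane-ld64 kernel above stays entirely in 64-bit vectors: with only two output columns per row, both the broadcast (vdup_lane_f32) and the accumulate (vfma_f32) operate on float32x2_t. A sketch of the corresponding row-3 step, same hypothetical names:

    /* Row-3 c0 step of the 4x2 variant: one two-wide accumulator per row. */
    static inline void row3_4x2_step(const float* a3, const float* w,
                                     float32x2_t* vacc3x01) {
      const float32x2_t va3 = vld1_f32(a3);            /* two A elements */
      const float32x2_t vb01c0 = vld1_f32(w);          /* B columns 0..1 */
      const float32x2_t va3c0 = vdup_lane_f32(va3, 0); /* broadcast lane 0 */
      *vacc3x01 = vfma_f32(*vacc3x01, va3c0, vb01c0);
    }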
4x8-minmax-wasmsimd-arm-splat.c  (xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat)
    86: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    94: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
    98: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x8-minmax-wasmsimd-x86-splat.c  (xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat)
    84: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    92: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
    96: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

6x8-minmax-neon-dup-ld64.c  (xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64)
   101: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
   107: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
   113: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
/external/XNNPACK/src/f32-igemm/gen/
4x8-minmax-neon-dup-ld64.c  (xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld64)
   105: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
   109: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
   113: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);
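The f32-igemm hits below repeat the same four patterns as the f32-gemm section; the matching statements simply sit about twenty lines deeper in each file because indirect GEMM carries extra per-iteration setup. The difference, roughly (hypothetical and simplified, not the kernels' real signatures), is that an igemm kernel fetches each row pointer of A from an indirection buffer prepared by the caller instead of striding through a flat matrix:

    /* gemm: row 3 of A lives at a fixed stride from the base pointer. */
    const float* a3_gemm = a_base + 3 * a_stride;
    /* igemm: row 3 comes through an indirection buffer of row pointers. */
    const float* a3_igemm = a_ind[3];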
4x8-minmax-neonfma-dup-ld64.c  (xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld64)
   105: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
   109: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
   113: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8-relu-wasmsimd-splat.c  (xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat)
   106: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
   114: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
   118: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x8-wasmsimd-splat.c  (xnn_f32_igemm_ukernel_4x8__wasmsimd_splat)
   106: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
   114: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
   118: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x2-minmax-neonfma-lane-ld64.c  (xnn_f32_igemm_minmax_ukernel_4x2__neonfma_lane_ld64)
   106: const float32x2_t va3c0 = vdup_lane_f32(va3, 0);
   110: vacc3x01 = vfma_f32(vacc3x01, va3c0, vb01c0);

4x8-minmax-neonfma-dup-ld128.c  (xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld128)
   106: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
   110: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
   114: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8-minmax-neon-dup-ld128.c  (xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld128)
   106: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
   110: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
   114: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8-minmax-wasmsimd-arm-splat.c  (xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat)
   108: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
   116: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
   120: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x8-minmax-wasmsimd-x86-splat.c  (xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat)
   106: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
   114: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
   118: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));
/external/XNNPACK/src/f32-gemm/gen-inc/
4x8inc-minmax-neon-dup-ld64.c  (xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld64)
    85: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
    89: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
    93: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8inc-minmax-neonfma-dup-ld64.c  (xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld64)
    85: const float32x4_t va3c0 = vdupq_lane_f32(va3, 0);
    89: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
    93: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8inc-minmax-neon-dup-ld128.c  (xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld128)
    86: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
    90: vacc3x0123 = vmlaq_f32(vacc3x0123, va3c0, vb0123c0);
    94: vacc3x4567 = vmlaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8inc-minmax-neonfma-dup-ld128.c  (xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld128)
    86: const float32x4_t va3c0 = vdupq_lane_f32(vget_low_f32(va3), 0);
    90: vacc3x0123 = vfmaq_f32(vacc3x0123, va3c0, vb0123c0);
    94: vacc3x4567 = vfmaq_f32(vacc3x4567, va3c0, vb4567c0);

4x8inc-minmax-wasmsimd-arm-splat.c  (xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat)
    88: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    96: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
   100: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));

4x8inc-minmax-wasmsimd-x86-splat.c  (xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat)
    86: const v128_t va3c0 = wasm_v32x4_shuffle(va3, va3, 0, 0, 0, 0);
    94: vacc3x0123 = wasm_f32x4_add(vacc3x0123, wasm_f32x4_mul(va3c0, vb0123c0));
    98: vacc3x4567 = wasm_f32x4_add(vacc3x4567, wasm_f32x4_mul(va3c0, vb4567c0));