/external/XNNPACK/src/f32-gemm/gen-inc/

D | 4x8-neonfma-dup-ld128.c | in xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128():
    117  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    121  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    125  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-psimd-splat.c | in xnn_f32_gemminc_ukernel_4x8__psimd_splat():
    117  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    125  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    129  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-neon-dup-ld128.c | in xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128():
    117  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    121  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    125  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-psimd-splat.c | in xnn_f32_gemminc_ukernel_6x8__psimd_splat():
    149  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    159  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    165  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neon-dup-ld128.c | in xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128():
    147  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    153  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    159  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neonfma-dup-ld128.c | in xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128():
    147  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    153  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    159  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);
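All of the dup-ld128 hits above share one step: element 2 of a 128-bit A-load for row 2 is broadcast into va2c2 and multiplied against two 4-wide B blocks for channel c2. The sketch below is an illustration only, not XNNPACK source; the helper name row2_step_c2 is made up, and it needs an ARM NEON target to compile.

/* Minimal sketch (not XNNPACK code) of the dup-ld128 step for row 2, channel c2. */
#include <arm_neon.h>

static void row2_step_c2(
    float32x4_t va2,          /* 4 A-values of row 2 (one 128-bit load) */
    float32x4_t vb0123c2,     /* B columns 0..3 for channel c2 */
    float32x4_t vb4567c2,     /* B columns 4..7 for channel c2 */
    float32x4_t* vacc2x0123,  /* accumulators for row 2, columns 0..3 */
    float32x4_t* vacc2x4567)  /* accumulators for row 2, columns 4..7 */
{
  /* Broadcast element 2 of va2, i.e. lane 0 of its high half. */
  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);
#if defined(__aarch64__) || defined(__ARM_FEATURE_FMA)
  /* neonfma variants: fused multiply-add. */
  *vacc2x0123 = vfmaq_f32(*vacc2x0123, va2c2, vb0123c2);
  *vacc2x4567 = vfmaq_f32(*vacc2x4567, va2c2, vb4567c2);
#else
  /* plain neon variants: separate multiply and add. */
  *vacc2x0123 = vmlaq_f32(*vacc2x0123, va2c2, vb0123c2);
  *vacc2x4567 = vmlaq_f32(*vacc2x4567, va2c2, vb4567c2);
#endif
}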
/external/XNNPACK/src/f32-gemm/gen/

D | 4x8-psimd-splat.c | in xnn_f32_gemm_ukernel_4x8__psimd_splat():
    115  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    123  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    127  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-neonfma-dup-ld128.c | in xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128():
    115  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    119  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    123  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-neon-dup-ld128.c | in xnn_f32_gemm_ukernel_4x8__neon_dup_ld128():
    115  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    119  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    123  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-psimd-splat.c | in xnn_f32_gemm_ukernel_6x8__psimd_splat():
    147  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    157  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    163  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neonfma-dup-ld128.c | in xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128():
    145  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    151  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    157  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neon-dup-ld128.c | in xnn_f32_gemm_ukernel_6x8__neon_dup_ld128():
    145  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    151  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    157  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
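The psimd-splat hits take the same shape without NEON intrinsics: psimd_splat2_f32(va2) broadcasts element 2 of a 4-float vector and psimd_qfma_f32 accumulates acc + a*b per lane. The scalar sketch below only models what those two calls compute; row2_step_c2_scalar is a made-up name, not psimd or XNNPACK API.

/* Scalar model (illustration only) of the psimd splat + qfma step for row 2, channel c2. */
#include <stddef.h>

static void row2_step_c2_scalar(
    const float va2[4],       /* 4 A-values of row 2 */
    const float vb0123c2[4],  /* B columns 0..3 for channel c2 */
    const float vb4567c2[4],  /* B columns 4..7 for channel c2 */
    float vacc2x0123[4],      /* accumulators for row 2, columns 0..3 */
    float vacc2x4567[4])      /* accumulators for row 2, columns 4..7 */
{
  const float va2c2 = va2[2];  /* "splat2": every lane gets element 2 */
  for (size_t i = 0; i < 4; i++) {
    vacc2x0123[i] += va2c2 * vb0123c2[i];  /* qfma: acc + a*b, lane-wise */
    vacc2x4567[i] += va2c2 * vb4567c2[i];
  }
}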
/external/XNNPACK/src/f32-igemm/gen/

D | 4x8-psimd-splat.c | in xnn_f32_igemm_ukernel_4x8__psimd_splat():
    137  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    145  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    149  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-neonfma-dup-ld128.c | in xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128():
    137  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    141  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    145  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 4x8-neon-dup-ld128.c | in xnn_f32_igemm_ukernel_4x8__neon_dup_ld128():
    137  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    141  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    145  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neonfma-dup-ld128.c | in xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld128():
    173  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    179  vacc2x0123 = vfmaq_f32(vacc2x0123, va2c2, vb0123c2);
    185  vacc2x4567 = vfmaq_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-psimd-splat.c | in xnn_f32_igemm_ukernel_6x8__psimd_splat():
    175  const psimd_f32 va2c2 = psimd_splat2_f32(va2);  (local)
    185  vacc2x0123 = psimd_qfma_f32(vacc2x0123, va2c2, vb0123c2);
    191  vacc2x4567 = psimd_qfma_f32(vacc2x4567, va2c2, vb4567c2);

D | 6x8-neon-dup-ld128.c | in xnn_f32_igemm_ukernel_6x8__neon_dup_ld128():
    173  const float32x4_t va2c2 = vdupq_lane_f32(vget_high_f32(va2), 0);  (local)
    179  vacc2x0123 = vmlaq_f32(vacc2x0123, va2c2, vb0123c2);
    185  vacc2x4567 = vmlaq_f32(vacc2x4567, va2c2, vb4567c2);
/external/XNNPACK/src/f16-gemm/gen/

D | 4x8-neonfp16arith-ld64.c | in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64():
    119  const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);  (local)
    124  vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);

D | 6x8-neonfp16arith-ld64.c | in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64():
    149  const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);  (local)
    156  vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);

D | 8x8-neonfp16arith-ld64.c | in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64():
    179  const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);  (local)
    188  vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
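The f16 ld64 hits differ only in width: a 64-bit load yields four half-precision A-values, vdupq_lane_f16(va2, 2) broadcasts lane 2 across eight lanes, and a single vfmaq_f16 updates the 8-wide accumulator for row 2. A minimal sketch, assuming an ARMv8.2-A target with the +fp16 extension; row2_step_c2_f16 is a made-up helper name, not XNNPACK source.

/* Minimal sketch (not XNNPACK code) of the f16 ld64 step for row 2, channel c2. */
#include <arm_neon.h>

static float16x8_t row2_step_c2_f16(
    float16x4_t va2,             /* 4 A-values of row 2 (one 64-bit load) */
    float16x8_t vb01234567c2,    /* B columns 0..7 for channel c2 */
    float16x8_t vacc2x01234567)  /* accumulator for row 2, columns 0..7 */
{
  /* Broadcast element 2 of the 4-lane load across all 8 lanes. */
  const float16x8_t va2c2 = vdupq_lane_f16(va2, 2);
  /* Fused multiply-add against the 8-wide B block. */
  return vfmaq_f16(vacc2x01234567, va2c2, vb01234567c2);
}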