/external/XNNPACK/src/f32-gemm/gen/ |
D | 1x8-psimd-splat.c |
      72  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemm_ukernel_1x8__psimd_splat() local
      77  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_1x8__psimd_splat()
      78  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_1x8__psimd_splat()
|
D | 4x8-psimd-splat.c |
     129  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemm_ukernel_4x8__psimd_splat() local
     137  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_4x8__psimd_splat()
     141  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_4x8__psimd_splat()
|
D | 4x8-neonfma-dup-ld128.c |
     129  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128() local
     133  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128()
     137  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_4x8__neonfma_dup_ld128()
|
D | 4x8-neon-dup-ld128.c |
     129  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemm_ukernel_4x8__neon_dup_ld128() local
     133  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_4x8__neon_dup_ld128()
     137  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_4x8__neon_dup_ld128()
|
D | 6x8-psimd-splat.c |
     167  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemm_ukernel_6x8__psimd_splat() local
     177  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_6x8__psimd_splat()
     183  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_6x8__psimd_splat()
|
D | 6x8-neonfma-dup-ld128.c |
     165  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128() local
     171  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128()
     177  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_6x8__neonfma_dup_ld128()
|
D | 6x8-neon-dup-ld128.c |
     165  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemm_ukernel_6x8__neon_dup_ld128() local
     171  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemm_ukernel_6x8__neon_dup_ld128()
     177  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemm_ukernel_6x8__neon_dup_ld128()
|
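All of the *-psimd-splat.c matches in this directory (and in the gen-inc and igemm directories below) are the same inner-loop step: lane 3 of the row-0 activation vector va0 is broadcast with psimd_splat3_f32 and multiply-accumulated into both halves of the row-0 accumulator with psimd_qfma_f32. A minimal sketch of that step, assuming <psimd.h> is available; the helper name f32_splat_c3_step is hypothetical and not part of XNNPACK:

    #include <psimd.h>

    /* Hypothetical helper, not XNNPACK code: the c3 step of the
     * *-psimd-splat micro-kernels listed above, for output row 0. */
    static void f32_splat_c3_step(
        psimd_f32 va0,          /* 4 fp32 activations for row 0 */
        psimd_f32 vb0123c3,     /* packed weights, output columns 0-3 */
        psimd_f32 vb4567c3,     /* packed weights, output columns 4-7 */
        psimd_f32* vacc0x0123,  /* accumulator, columns 0-3 */
        psimd_f32* vacc0x4567)  /* accumulator, columns 4-7 */
    {
      const psimd_f32 va0c3 = psimd_splat3_f32(va0);               /* broadcast lane 3 */
      *vacc0x0123 = psimd_qfma_f32(*vacc0x0123, va0c3, vb0123c3);  /* acc += a * b     */
      *vacc0x4567 = psimd_qfma_f32(*vacc0x4567, va0c3, vb4567c3);
    }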
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 1x8-psimd-splat.c |
      74  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemminc_ukernel_1x8__psimd_splat() local
      79  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_1x8__psimd_splat()
      80  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_1x8__psimd_splat()
|
D | 4x8-neonfma-dup-ld128.c |
     131  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128() local
     135  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128()
     139  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_4x8__neonfma_dup_ld128()
|
D | 4x8-psimd-splat.c |
     131  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemminc_ukernel_4x8__psimd_splat() local
     139  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_4x8__psimd_splat()
     143  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_4x8__psimd_splat()
|
D | 4x8-neon-dup-ld128.c |
     131  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128() local
     135  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128()
     139  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_4x8__neon_dup_ld128()
|
D | 6x8-psimd-splat.c |
     169  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_gemminc_ukernel_6x8__psimd_splat() local
     179  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_6x8__psimd_splat()
     185  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_6x8__psimd_splat()
|
D | 6x8-neon-dup-ld128.c |
     167  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128() local
     173  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128()
     179  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_6x8__neon_dup_ld128()
|
D | 6x8-neonfma-dup-ld128.c |
     167  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128() local
     173  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128()
     179  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_gemminc_ukernel_6x8__neonfma_dup_ld128()
|
/external/XNNPACK/src/f32-igemm/gen/ |
D | 1x8-psimd-splat.c |
      85  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_igemm_ukernel_1x8__psimd_splat() local
      90  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_1x8__psimd_splat()
      91  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_1x8__psimd_splat()
|
D | 4x8-psimd-splat.c |
     151  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_igemm_ukernel_4x8__psimd_splat() local
     159  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_4x8__psimd_splat()
     163  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_4x8__psimd_splat()
|
D | 4x8-neonfma-dup-ld128.c |
     151  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128() local
     155  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128()
     159  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_4x8__neonfma_dup_ld128()
|
D | 4x8-neon-dup-ld128.c |
     151  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_igemm_ukernel_4x8__neon_dup_ld128() local
     155  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_4x8__neon_dup_ld128()
     159  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_4x8__neon_dup_ld128()
|
D | 6x8-neonfma-dup-ld128.c |
     193  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld128() local
     199  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld128()
     205  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_6x8__neonfma_dup_ld128()
|
D | 6x8-psimd-splat.c |
     195  const psimd_f32 va0c3 = psimd_splat3_f32(va0);  in xnn_f32_igemm_ukernel_6x8__psimd_splat() local
     205  vacc0x0123 = psimd_qfma_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_6x8__psimd_splat()
     211  vacc0x4567 = psimd_qfma_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_6x8__psimd_splat()
|
D | 6x8-neon-dup-ld128.c |
     193  const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);  in xnn_f32_igemm_ukernel_6x8__neon_dup_ld128() local
     199  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c3, vb0123c3);  in xnn_f32_igemm_ukernel_6x8__neon_dup_ld128()
     205  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c3, vb4567c3);  in xnn_f32_igemm_ukernel_6x8__neon_dup_ld128()
|
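The *-neon-dup-ld128.c and *-neonfma-dup-ld128.c matches across the three f32 directories above are the same step expressed with ARM NEON intrinsics: the ld128 kernels load the row-0 activations 128 bits at a time, so lane 3 is reached as lane 1 of the vector's high half, and the broadcast lane is accumulated with vfmaq_f32 in the neonfma kernels and vmlaq_f32 in the plain neon kernels. A sketch of that step; the helper name and the feature-macro dispatch are illustrative assumptions, not XNNPACK code:

    #include <arm_neon.h>

    /* Hypothetical helper: the c3 step of the dup-ld128 kernels for output row 0. */
    static void f32_dup_ld128_c3_step(
        float32x4_t va0,             /* 4 fp32 activations for row 0 (one 128-bit load) */
        float32x4_t vb0123c3,        /* packed weights, output columns 0-3 */
        float32x4_t vb4567c3,        /* packed weights, output columns 4-7 */
        float32x4_t* vacc0x0123,
        float32x4_t* vacc0x4567)
    {
      /* Lane 3 of va0 is lane 1 of its high 64-bit half. */
      const float32x4_t va0c3 = vdupq_lane_f32(vget_high_f32(va0), 1);
    #if defined(__ARM_FEATURE_FMA)
      *vacc0x0123 = vfmaq_f32(*vacc0x0123, va0c3, vb0123c3);  /* fused, as in the neonfma kernels */
      *vacc0x4567 = vfmaq_f32(*vacc0x4567, va0c3, vb4567c3);
    #else
      *vacc0x0123 = vmlaq_f32(*vacc0x0123, va0c3, vb0123c3);  /* unfused, as in the plain neon kernels */
      *vacc0x4567 = vmlaq_f32(*vacc0x4567, va0c3, vb4567c3);
    #endif
    }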
/external/XNNPACK/src/f16-gemm/gen/ |
D | 4x8-neonfp16arith-ld64.c |
     135  const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);  in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64() local
     140  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);  in xnn_f16_gemm_ukernel_4x8__neonfp16arith_ld64()
|
D | 6x8-neonfp16arith-ld64.c |
     171  const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);  in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64() local
     178  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);  in xnn_f16_gemm_ukernel_6x8__neonfp16arith_ld64()
|
D | 8x8-neonfp16arith-ld64.c |
     207  const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);  in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64() local
     216  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c3, vb01234567c3);  in xnn_f16_gemm_ukernel_8x8__neonfp16arith_ld64()
|
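The f16 matches follow the same scheme in half precision: the ld64 kernels are assumed to load four fp16 activations per row (one 64-bit load), so lane 3 of that vector is broadcast across all eight lanes and fused-multiply-accumulated against a single 8-wide weight vector. A sketch under those assumptions; the helper name is hypothetical, and building it requires ARMv8.2-A FP16 vector arithmetic:

    #include <arm_neon.h>  /* build with FP16 arithmetic enabled, e.g. -march=armv8.2-a+fp16 */

    /* Hypothetical helper: the c3 step of the neonfp16arith-ld64 kernels for output row 0. */
    static void f16_ld64_c3_step(
        float16x4_t va0,              /* 4 fp16 activations for row 0 (one 64-bit load) */
        float16x8_t vb01234567c3,     /* packed weights, output columns 0-7 */
        float16x8_t* vacc0x01234567)  /* accumulator, columns 0-7 */
    {
      const float16x8_t va0c3 = vdupq_lane_f16(va0, 3);  /* broadcast lane 3 to all 8 lanes */
      *vacc0x01234567 = vfmaq_f16(*vacc0x01234567, va0c3, vb01234567c3);
    }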