/external/XNNPACK/src/f32-gemm/gen/

6x8-minmax-neonfma-dup-ld128.c, in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128():
  170  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  176  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
  182  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8-minmax-wasmsimd-arm-splat.c, in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat():
  174  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  184  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  190  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));

6x8-minmax-neon-dup-ld128.c, in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128():
  170  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  176  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
  182  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8-minmax-wasmsimd-x86-splat.c, in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat():
  172  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  182  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  188  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
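All four f32 GEMM hits above are the same column-3 step of a broadcast-and-accumulate inner loop. Below is a minimal sketch of that step for the NEON variants, using a hypothetical helper (not the XNNPACK API): with ld128 the A row is loaded four floats at a time, so lane 3 lives in the high 64-bit half, and the neonfma and plain neon kernels differ only in fused (vfmaq_f32) versus non-fused (vmlaq_f32) multiply-accumulate.

#include <arm_neon.h>

/* Hypothetical helper illustrating the shared step: broadcast lane 3 of the
 * row-5 A vector and accumulate it against both 4-wide halves of the 8-wide
 * B column. Not part of XNNPACK; names mirror the kernel locals above. */
static inline void f32_row5_c3_step(
    float32x4_t va5,                               /* 4 A elements of row 5 */
    float32x4_t vb0123c3, float32x4_t vb4567c3,    /* B columns 0..7, k-step 3 */
    float32x4_t* vacc5x0123, float32x4_t* vacc5x4567)
{
  /* vdupq_lane_f32 indexes a 64-bit half, so lane 3 of the 128-bit load is
   * lane 1 of vget_high_f32(va5). */
  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);
#if defined(__ARM_FEATURE_FMA)
  *vacc5x0123 = vfmaq_f32(*vacc5x0123, va5c3, vb0123c3);  /* neonfma kernels */
  *vacc5x4567 = vfmaq_f32(*vacc5x4567, va5c3, vb4567c3);
#else
  *vacc5x0123 = vmlaq_f32(*vacc5x0123, va5c3, vb0123c3);  /* plain neon kernels */
  *vacc5x4567 = vmlaq_f32(*vacc5x4567, va5c3, vb4567c3);
#endif
}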
/external/XNNPACK/src/f32-igemm/gen/

6x8-minmax-neon-dup-ld128.c, in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128():
  198  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  204  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
  210  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8-minmax-neonfma-dup-ld128.c, in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128():
  198  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  204  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
  210  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8-minmax-wasmsimd-arm-splat.c, in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat():
  202  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  212  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  218  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));

6x8-minmax-wasmsimd-x86-splat.c, in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat():
  200  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  210  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  216  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
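The wasmsimd hits in both listings splat the lane with a self-shuffle instead, because WebAssembly SIMD has neither a lane-broadcast form of multiply-accumulate nor a fused multiply-add. A sketch under the same assumptions (hypothetical helper, not the XNNPACK API):

#include <wasm_simd128.h>

/* Hypothetical helper: the same row-5, column-3 step in WebAssembly SIMD.
 * The splat is a 4-lane self-shuffle and the accumulate is an explicit
 * multiply followed by an add. */
static inline void f32_row5_c3_step_wasm(
    v128_t va5, v128_t vb0123c3, v128_t vb4567c3,
    v128_t* vacc5x0123, v128_t* vacc5x4567)
{
  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);
  *vacc5x0123 = wasm_f32x4_add(*vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  *vacc5x4567 = wasm_f32x4_add(*vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
}

Note that the arm- and x86-tuned splat variants show identical lines here; whatever distinguishes them (most likely the lowering of the min/max output clamp) sits outside the region these hits match.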
/external/XNNPACK/src/f32-gemm/gen-inc/

6x8inc-minmax-neon-dup-ld128.c, in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128():
  172  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  178  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c3, vb0123c3);
  184  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8inc-minmax-neonfma-dup-ld128.c, in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128():
  172  const float32x4_t va5c3 = vdupq_lane_f32(vget_high_f32(va5), 1);  (local declaration)
  178  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c3, vb0123c3);
  184  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c3, vb4567c3);

6x8inc-minmax-wasmsimd-x86-splat.c, in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat():
  174  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  184  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  190  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));

6x8inc-minmax-wasmsimd-arm-splat.c, in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat():
  176  const v128_t va5c3 = wasm_v32x4_shuffle(va5, va5, 3, 3, 3, 3);  (local declaration)
  186  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c3, vb0123c3));
  192  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c3, vb4567c3));
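The gen-inc listing repeats the same inner loop because, across the gemm, gemminc, and igemm families, only the setup around it differs. As a hedged summary (inferred from the kernel naming conventions rather than from the lines above): gemm initializes the accumulators from the bias packed ahead of the weights, gemminc resumes from a caller-supplied buffer of partial sums, and igemm reads the A rows through an indirection pointer array. A two-line illustration with assumed, abbreviated signatures (the real XNNPACK kernels take many more parameters):

#include <arm_neon.h>

/* Illustration only; not the XNNPACK signatures. */
float32x4_t acc_init_gemm(const float* w)      { return vld1q_f32(w);   }  /* bias from packed weights */
float32x4_t acc_init_gemminc(const float* acc) { return vld1q_f32(acc); }  /* resume partial sums */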
/external/XNNPACK/src/f16-gemm/gen/

6x16-minmax-neonfp16arith-ld64.c, in xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64():
  230  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  237  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  243  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

6x8-minmax-neonfp16arith-ld64.c, in xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64():
  178  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  185  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);

8x16-minmax-neonfp16arith-ld64.c, in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64():
  282  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  291  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  299  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

8x8-minmax-neonfp16arith-ld64.c, in xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64():
  214  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  223  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
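The f16 kernels (here and in the igemm and gen-inc listings that follow) repeat the same shape in half precision. A sketch, assuming a hypothetical helper and an ARMv8.2-A toolchain with the FP16 arithmetic extension (not the XNNPACK API): with ld64 the A row arrives four half-precision elements (64 bits) at a time, so lane 3 can be broadcast to all eight lanes directly, without the vget_high split the f32 ld128 kernels need.

#include <arm_neon.h>  /* requires __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

/* Hypothetical helper: row-5, column-3 step of the fp16 kernels above. */
static inline void f16_row5_c3_step(
    float16x4_t va5,                                     /* 4 halfs of row 5 (ld64) */
    float16x8_t vb01234567c3, float16x8_t vb89ABCDEFc3,  /* B columns, k-step 3 */
    float16x8_t* vacc5x01234567, float16x8_t* vacc5x89ABCDEF)
{
  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);
  *vacc5x01234567 = vfmaq_f16(*vacc5x01234567, va5c3, vb01234567c3);
  /* Only the x16 kernels keep this second accumulator for columns 8..15;
   * the 6x8/8x8 hits stop after the first. */
  *vacc5x89ABCDEF = vfmaq_f16(*vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);
}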
/external/XNNPACK/src/f16-igemm/gen/

6x16-minmax-neonfp16arith-ld64.c, in xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64():
  256  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  263  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  269  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

6x8-minmax-neonfp16arith-ld64.c, in xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64():
  204  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  211  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);

8x16-minmax-neonfp16arith-ld64.c, in xnn_f16_igemm_minmax_ukernel_8x16__neonfp16arith_ld64():
  314  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  323  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  331  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

8x8-minmax-neonfp16arith-ld64.c, in xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64():
  246  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  255  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
/external/XNNPACK/src/f16-gemm/gen-inc/

6x16inc-minmax-neonfp16arith-ld64.c, in xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64():
  232  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  239  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  245  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

6x8inc-minmax-neonfp16arith-ld64.c, in xnn_f16_gemminc_minmax_ukernel_6x8__neonfp16arith_ld64():
  180  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  187  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);

8x16inc-minmax-neonfp16arith-ld64.c, in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64():
  284  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  293  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);
  301  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c3, vb89ABCDEFc3);

8x8inc-minmax-neonfp16arith-ld64.c, in xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64():
  216  const float16x8_t va5c3 = vdupq_lane_f16(va5, 3);  (local declaration)
  225  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c3, vb01234567c3);