/external/XNNPACK/src/f32-gemm/gen/ |
D | 6x8-minmax-neon-dup-ld64.c |
    103  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64() local
    109  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64()
    115  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64()
|
D | 6x8-minmax-neonfma-dup-ld64.c |
    103  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld64() local
    109  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld64()
    115  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld64()
|
D | 6x8-minmax-neonfma-dup-ld128.c |
    104  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128() local
    110  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128()
    116  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128()
|
D | 6x8-minmax-wasmsimd-arm-splat.c |
    108  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat() local
    118  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat()
    124  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat()
|
D | 6x8-minmax-neon-dup-ld128.c |
    104  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128() local
    110  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128()
    116  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128()
|
D | 6x8-minmax-wasmsimd-x86-splat.c |
    106  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat() local
    116  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat()
    122  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat()
|
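All of the f32 hits above are the same inner-loop step: the "dup" kernels load a piece of A row 5, broadcast lane 0 into va5c0, and multiply-accumulate it against an 8-wide column block of packed B, using vfmaq_f32 on neonfma targets and vmlaq_f32 on plain NEON. Below is a minimal sketch of that step under the ld64 load scheme; the function name and arguments are illustrative, not XNNPACK's actual code.

```c
#include <arm_neon.h>

/* Sketch of the "dup-ld64" inner-loop step for A row 5, k step c0.
   Names and the free-standing function are illustrative only. */
static inline void f32_gemm_row5_step_c0(
    const float* restrict a5,    /* two floats of A row 5 */
    const float* restrict w,     /* packed B: 8 floats per k step */
    float32x4_t* vacc5x0123,
    float32x4_t* vacc5x4567)
{
  const float32x2_t va5 = vld1_f32(a5);              /* ld64: 64-bit load of A */
  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  /* the "dup": broadcast lane 0 */
  const float32x4_t vb0123c0 = vld1q_f32(w);         /* B columns 0..3 */
  const float32x4_t vb4567c0 = vld1q_f32(w + 4);     /* B columns 4..7 */
#if defined(__ARM_FEATURE_FMA)
  /* neonfma kernels: fused multiply-add */
  *vacc5x0123 = vfmaq_f32(*vacc5x0123, va5c0, vb0123c0);
  *vacc5x4567 = vfmaq_f32(*vacc5x4567, va5c0, vb4567c0);
#else
  /* plain neon kernels: non-fused multiply-accumulate */
  *vacc5x0123 = vmlaq_f32(*vacc5x0123, va5c0, vb0123c0);
  *vacc5x4567 = vmlaq_f32(*vacc5x4567, va5c0, vb4567c0);
#endif
}
```

The ld128 variants load four A elements at once and broadcast with vdupq_lane_f32(vget_low_f32(va5), 0) instead, which is why their matches sit one line lower in those files.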
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 6x8inc-minmax-neon-dup-ld64.c |
    105  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld64() local
    111  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld64()
    117  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld64()
|
D | 6x8inc-minmax-neonfma-dup-ld64.c |
    105  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld64() local
    111  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld64()
    117  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld64()
|
D | 6x8inc-minmax-neon-dup-ld128.c |
    106  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128() local
    112  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128()
    118  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128()
|
D | 6x8inc-minmax-neonfma-dup-ld128.c |
    106  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128() local
    112  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128()
    118  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128()
|
D | 6x8inc-minmax-wasmsimd-x86-splat.c |
    108  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat() local
    118  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat()
    124  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat()
|
D | 6x8inc-minmax-wasmsimd-arm-splat.c |
    110  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat() local
    120  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat()
    126  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat()
|
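The gen-inc ("gemminc") files repeat the same va5c0 body; they differ only in the prologue, which initializes the accumulators from a caller-supplied acc buffer of partial sums instead of from packed bias, shifting each match down by two lines relative to the plain gemm files. A hedged sketch of that initialization for row 5, assuming a row-major 6x8 tile layout in acc (the layout is an assumption for illustration; only the init-from-acc point reflects the kernels):

```c
#include <arm_neon.h>

/* Sketch of a gemminc-style prologue for row 5. The row-major 6x8 tile
   layout of `acc` is assumed here, not taken from the kernels. */
static inline void f32_gemminc_init_row5(
    const float* restrict acc,   /* partial sums from an earlier K-split pass */
    float32x4_t* vacc5x0123,
    float32x4_t* vacc5x4567)
{
  *vacc5x0123 = vld1q_f32(acc + 5 * 8);      /* row 5, columns 0..3 */
  *vacc5x4567 = vld1q_f32(acc + 5 * 8 + 4);  /* row 5, columns 4..7 */
}
```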
/external/XNNPACK/src/f32-igemm/gen/ |
D | 6x8-minmax-neonfma-dup-ld64.c |
    131  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld64() local
    137  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld64()
    143  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld64()
|
D | 6x8-minmax-neon-dup-ld64.c |
    131  const float32x4_t va5c0 = vdupq_lane_f32(va5, 0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld64() local
    137  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld64()
    143  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld64()
|
D | 6x8-minmax-neon-dup-ld128.c |
    132  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128() local
    138  vacc5x0123 = vmlaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128()
    144  vacc5x4567 = vmlaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128()
|
D | 6x8-minmax-neonfma-dup-ld128.c |
    132  const float32x4_t va5c0 = vdupq_lane_f32(vget_low_f32(va5), 0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128() local
    138  vacc5x0123 = vfmaq_f32(vacc5x0123, va5c0, vb0123c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128()
    144  vacc5x4567 = vfmaq_f32(vacc5x4567, va5c0, vb4567c0);  in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128()
|
D | 6x8-minmax-wasmsimd-arm-splat.c |
    136  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat() local
    146  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat()
    152  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat()
|
D | 6x8-minmax-wasmsimd-x86-splat.c |
    134  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat() local
    144  vacc5x0123 = wasm_f32x4_add(vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat()
    150  vacc5x4567 = wasm_f32x4_add(vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));  in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat()
|
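The igemm variants gather A rows through an indirection buffer rather than a single strided pointer, which accounts for their later line numbers, but the inner step is the same one sketched earlier. The wasmsimd "splat" files in this and the preceding groups do that broadcast without a dup instruction or a fused multiply-add: a self-shuffle splats lane 0, then a separate multiply and add accumulate. A sketch, with illustrative names:

```c
#include <wasm_simd128.h>

/* Sketch of the wasmsimd "splat" step for A row 5, k step c0.
   Names are illustrative; only the splat + mul/add shape matches the hits. */
static inline void f32_gemm_wasm_row5_step_c0(
    v128_t va5,                  /* four floats of A row 5 */
    const float* restrict w,     /* packed B: 8 floats per k step */
    v128_t* vacc5x0123,
    v128_t* vacc5x4567)
{
  const v128_t va5c0 = wasm_v32x4_shuffle(va5, va5, 0, 0, 0, 0);  /* splat lane 0 */
  const v128_t vb0123c0 = wasm_v128_load(w);
  const v128_t vb4567c0 = wasm_v128_load(w + 4);
  /* WASM SIMD128 has no FMA here: multiply, then add */
  *vacc5x0123 = wasm_f32x4_add(*vacc5x0123, wasm_f32x4_mul(va5c0, vb0123c0));
  *vacc5x4567 = wasm_f32x4_add(*vacc5x4567, wasm_f32x4_mul(va5c0, vb4567c0));
}
```

The arm/x86 suffix on these files reflects which min/max lowering clamps the output tile, not any difference in this splat step.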
/external/XNNPACK/src/f16-gemm/gen/ |
D | 6x16-minmax-neonfp16arith-ld64.c |
    119  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64() local
    126  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64()
    132  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);  in xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64()
|
D | 6x8-minmax-neonfp16arith-ld64.c |
    106  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64() local
    113  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_gemm_minmax_ukernel_6x8__neonfp16arith_ld64()
|
/external/XNNPACK/src/f16-igemm/gen/ |
D | 6x16-minmax-neonfp16arith-ld64.c |
    145  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64() local
    152  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64()
    158  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);  in xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64()
|
D | 6x8-minmax-neonfp16arith-ld64.c |
    132  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64() local
    139  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_igemm_minmax_ukernel_6x8__neonfp16arith_ld64()
|
/external/XNNPACK/src/f16-gemm/gen-inc/ |
D | 6x16inc-minmax-neonfp16arith-ld64.c |
    121  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64() local
    128  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64()
    134  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);  in xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64()
|
D | 6x8inc-minmax-neonfp16arith-ld64.c |
    108  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_gemminc_minmax_ukernel_6x8__neonfp16arith_ld64() local
    115  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_gemminc_minmax_ukernel_6x8__neonfp16arith_ld64()
|
D | 8x16inc-minmax-neonfp16arith-ld64.c |
    143  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64() local
    152  vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);  in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64()
    160  vacc5x89ABCDEF = vfmaq_f16(vacc5x89ABCDEF, va5c0, vb89ABCDEFc0);  in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64()
|
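The f16 hits follow the same scheme in half precision: vld1_f16 pulls four halves (the ld64 load), vdupq_lane_f16 broadcasts lane 0 across eight lanes, and vfmaq_f16 accumulates against one 8-wide block of B (the 6x8 kernels) or two (the ...x01234567 and ...x89ABCDEF accumulators of the 6x16 and 8x16 kernels). A sketch of one step, assuming a toolchain with ARMv8.2 FP16 vector arithmetic (e.g. -march=armv8.2-a+fp16); names are illustrative:

```c
#include <arm_neon.h>  /* requires __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

/* Sketch of the f16 ld64 step for A row 5, k step c0 (illustrative names). */
static inline float16x8_t f16_gemm_row5_step_c0(
    float16x8_t vacc5x01234567,
    const float16_t* restrict a5,  /* four halves of A row 5 */
    const float16_t* restrict w)   /* packed B: 8 halves per column block */
{
  const float16x4_t va5 = vld1_f16(a5);              /* ld64: 64-bit load */
  const float16x8_t va5c0 = vdupq_lane_f16(va5, 0);  /* broadcast lane 0 */
  const float16x8_t vb01234567c0 = vld1q_f16(w);     /* B columns 0..7 */
  return vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0);
}
```

The 16-wide kernels simply repeat the vfmaq_f16 with a second load (vb89ABCDEFc0) for columns 8..15.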