/external/XNNPACK/src/f32-gemm/gen/ |
D | 5x8-relu-wasmsimd-splat.c | in xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat():
    133  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    142  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    147  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-wasmsimd-splat.c | in xnn_f32_gemm_ukernel_5x8__wasmsimd_splat():
    133  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    142  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    147  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat():
    135  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    144  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    149  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat():
    133  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    142  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    147  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8-minmax-neonfma-dup-ld128.c | in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128():
    147  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    153  vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
    159  vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat():
    151  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    161  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    167  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8-minmax-neon-dup-ld128.c | in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128():
    147  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    153  vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
    159  vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_x86_splat():
    149  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    159  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    165  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
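Every wasmsimd-splat hit above is the same two-step pattern: broadcast ("splat") lane 2 of the row-4 A register, then multiply-accumulate it against an 8-column panel of B. The sketch below is illustrative only, not XNNPACK source: splat_c2_step and its pointer parameters are hypothetical names for this example, and it assumes a clang/emscripten build with -msimd128.

    #include <wasm_simd128.h>

    /* One "c2" step for A row 4 against B columns 0-7, mirroring the
       va4c2/vb0123c2/vb4567c2 names in the listing above. */
    static void splat_c2_step(const float* a4, const float* b_c2,
                              v128_t* vacc4x0123, v128_t* vacc4x4567) {
      const v128_t va4 = wasm_v128_load(a4);  /* four consecutive k-values of A row 4 */
      /* The "splat": copy lane 2 of va4 into all four lanes. */
      const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);
      const v128_t vb0123c2 = wasm_v128_load(b_c2);      /* B columns 0-3 at k = c2 */
      const v128_t vb4567c2 = wasm_v128_load(b_c2 + 4);  /* B columns 4-7 at k = c2 */
      /* Baseline WAsm SIMD has no fused multiply-add, hence separate mul + add. */
      *vacc4x0123 = wasm_f32x4_add(*vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
      *vacc4x4567 = wasm_f32x4_add(*vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
    }

The arm- and x86-suffixed variants share this accumulate step; they differ later, in how the min/max output clamping is lowered on each target.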
/external/XNNPACK/src/f32-igemm/gen/ |
D | 5x8-wasmsimd-splat.c | in xnn_f32_igemm_ukernel_5x8__wasmsimd_splat():
    158  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    167  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    172  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-relu-wasmsimd-splat.c | in xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat():
    158  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    167  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    172  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat():
    158  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    167  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    172  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat():
    160  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    169  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    174  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8-minmax-neon-dup-ld128.c | in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128():
    175  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    181  vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
    187  vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8-minmax-neonfma-dup-ld128.c | in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128():
    175  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    181  vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
    187  vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_arm_splat():
    179  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    189  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    195  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_igemm_minmax_ukernel_6x8__wasmsimd_x86_splat():
    177  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    187  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    193  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
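The neon/neonfma dup-ld128 entries above are the NEON counterpart of the same step. With a 128-bit A load, lane 2 sits in lane 0 of the high half, hence vdupq_lane_f32(vget_high_f32(va4), 0). A minimal sketch under the same caveats (hypothetical function name, not XNNPACK source); the only difference between the neon and neonfma kernels at these lines is vmlaq_f32 (separate multiply and add, two roundings) versus vfmaq_f32 (fused, one rounding).

    #include <arm_neon.h>

    static void dup_c2_step(const float* a4, const float* b_c2,
                            float32x4_t* vacc4x0123, float32x4_t* vacc4x4567) {
      const float32x4_t va4 = vld1q_f32(a4);  /* ld128: four k-values of A row 4 */
      /* Broadcast lane 2 = lane 0 of the high 64-bit half. */
      const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);
      const float32x4_t vb0123c2 = vld1q_f32(b_c2);      /* B columns 0-3 at k = c2 */
      const float32x4_t vb4567c2 = vld1q_f32(b_c2 + 4);  /* B columns 4-7 at k = c2 */
    #if defined(__ARM_FEATURE_FMA)
      /* neonfma kernels: fused multiply-add. */
      *vacc4x0123 = vfmaq_f32(*vacc4x0123, va4c2, vb0123c2);
      *vacc4x4567 = vfmaq_f32(*vacc4x4567, va4c2, vb4567c2);
    #else
      /* plain neon kernels: multiply-accumulate without fusion. */
      *vacc4x0123 = vmlaq_f32(*vacc4x0123, va4c2, vb0123c2);
      *vacc4x4567 = vmlaq_f32(*vacc4x4567, va4c2, vb4567c2);
    #endif
    }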
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 5x8inc-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat():
    137  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    146  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    151  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 5x8inc-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat():
    135  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    144  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    149  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8inc-minmax-neon-dup-ld128.c | in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128():
    149  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    155  vacc4x0123 = vmlaq_f32(vacc4x0123, va4c2, vb0123c2);
    161  vacc4x4567 = vmlaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8inc-minmax-neonfma-dup-ld128.c | in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128():
    149  const float32x4_t va4c2 = vdupq_lane_f32(vget_high_f32(va4), 0);   (local definition)
    155  vacc4x0123 = vfmaq_f32(vacc4x0123, va4c2, vb0123c2);
    161  vacc4x4567 = vfmaq_f32(vacc4x4567, va4c2, vb4567c2);
|
D | 6x8inc-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat():
    151  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    161  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    167  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
D | 6x8inc-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat():
    153  const v128_t va4c2 = wasm_v32x4_shuffle(va4, va4, 2, 2, 2, 2);   (local definition)
    163  vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c2, vb0123c2));
    169  vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c2, vb4567c2));
|
/external/XNNPACK/src/f16-gemm/gen/ |
D | 6x16-minmax-neonfp16arith-ld64.c | in xnn_f16_gemm_minmax_ukernel_6x16__neonfp16arith_ld64():
    192  const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);   (local definition)
    199  vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
    205  vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
|
/external/XNNPACK/src/f16-igemm/gen/ |
D | 6x16-minmax-neonfp16arith-ld64.c | in xnn_f16_igemm_minmax_ukernel_6x16__neonfp16arith_ld64():
    218  const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);   (local definition)
    225  vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
    231  vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
|
/external/XNNPACK/src/f16-gemm/gen-inc/ |
D | 6x16inc-minmax-neonfp16arith-ld64.c | in xnn_f16_gemminc_minmax_ukernel_6x16__neonfp16arith_ld64():
    194  const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);   (local definition)
    201  vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c2, vb01234567c2);
    207  vacc4x89ABCDEF = vfmaq_f16(vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
|
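The f16 ld64 kernels load only 64 bits of A at a time (four half-precision values), so lane 2 can be duplicated straight into a 128-bit register with vdupq_lane_f16, and each row keeps two float16x8_t accumulators covering the 16 output columns. A sketch under the same caveats (hypothetical function name, not XNNPACK source), assuming a compiler with ARMv8.2-A FP16 arithmetic (e.g. -march=armv8.2-a+fp16):

    #include <arm_neon.h>

    static void f16_dup_c2_step(const float16_t* a4, const float16_t* b_c2,
                                float16x8_t* vacc4x01234567,
                                float16x8_t* vacc4x89ABCDEF) {
      const float16x4_t va4 = vld1_f16(a4);              /* ld64: four halfs of A row 4 */
      const float16x8_t va4c2 = vdupq_lane_f16(va4, 2);  /* broadcast lane 2 */
      const float16x8_t vb01234567c2 = vld1q_f16(b_c2);      /* B columns 0-7 at k = c2 */
      const float16x8_t vb89ABCDEFc2 = vld1q_f16(b_c2 + 8);  /* B columns 8-15 at k = c2 */
      *vacc4x01234567 = vfmaq_f16(*vacc4x01234567, va4c2, vb01234567c2);
      *vacc4x89ABCDEF = vfmaq_f16(*vacc4x89ABCDEF, va4c2, vb89ABCDEFc2);
    }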