/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 1x8inc-minmax-neon-dup-ld64.c | in xnn_f32_gemminc_minmax_ukernel_1x8__neon_dup_ld64():
    61  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    62  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
    63  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8inc-minmax-neonfma-dup-ld64.c | in xnn_f32_gemminc_minmax_ukernel_1x8__neonfma_dup_ld64():
    61  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    62  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
    63  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8inc-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_arm_splat():
    62  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    67  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    68  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8inc-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemminc_minmax_ukernel_1x8__wasmsimd_x86_splat():
    60  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    65  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    66  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
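All of the dup-ld64 hits above share the same inner-loop shape: two consecutive A elements are pulled in with one 64-bit load (float32x2_t), each lane is broadcast across a quad register with vdupq_lane_f32, and the broadcast value is multiplied into two accumulators covering packed-B columns 0..3 and 4..7 (vmlaq_f32 on plain NEON, vfmaq_f32 on NEON FMA). The sketch below is only an illustration of that pattern, not the generated XNNPACK kernel: the function name, the plain-pointer interface, the even-kc requirement, the weight packing order, and the zero-initialized accumulators are all assumptions made here for brevity.

#include <stddef.h>
#include <arm_neon.h>

// Sketch of the f32 "dup-ld64" inner loop seen in the kernels listed above.
// Assumed (not XNNPACK's interface): kc is a multiple of 2, w holds 8 packed
// floats per k step (columns 0..3 then 4..7), accumulators start at zero.
void gemm_1x8_dup_ld64_sketch(size_t kc, const float* a, const float* w, float* c) {
  float32x4_t vacc0x0123 = vdupq_n_f32(0.0f);
  float32x4_t vacc0x4567 = vdupq_n_f32(0.0f);

  for (size_t k = 0; k < kc; k += 2) {
    // One 64-bit load brings in two A elements.
    const float32x2_t va0 = vld1_f32(a); a += 2;

    // k-step 0: broadcast lane 0 of va0 and accumulate against B columns 0..7.
    const float32x4_t vb0123c0 = vld1q_f32(w); w += 4;
    const float32x4_t vb4567c0 = vld1q_f32(w); w += 4;
    const float32x4_t va0c0 = vdupq_lane_f32(va0, 0);
    vacc0x0123 = vmlaq_f32(vacc0x0123, va0c0, vb0123c0);  // vfmaq_f32 on NEON FMA
    vacc0x4567 = vmlaq_f32(vacc0x4567, va0c0, vb4567c0);

    // k-step 1: broadcast lane 1 (this is the va0c1 highlighted in the listing).
    const float32x4_t vb0123c1 = vld1q_f32(w); w += 4;
    const float32x4_t vb4567c1 = vld1q_f32(w); w += 4;
    const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);
    vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
    vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
  }

  vst1q_f32(c, vacc0x0123);
  vst1q_f32(c + 4, vacc0x4567);
}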
/external/XNNPACK/src/f32-gemm/gen/ |
D | 1x8-minmax-neonfma-dup-ld64.c | in xnn_f32_gemm_minmax_ukernel_1x8__neonfma_dup_ld64():
    59  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    60  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
    61  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8-minmax-neon-dup-ld64.c | in xnn_f32_gemm_minmax_ukernel_1x8__neon_dup_ld64():
    59  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    60  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
    61  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8-wasmsimd-splat.c | in xnn_f32_gemm_ukernel_1x8__wasmsimd_splat():
    58  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    63  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    64  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-relu-wasmsimd-splat.c | in xnn_f32_gemm_relu_ukernel_1x8__wasmsimd_splat():
    58  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    63  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    64  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_arm_splat():
    60  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    65  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    66  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemm_minmax_ukernel_1x8__wasmsimd_x86_splat():
    58  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    63  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    64  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 4x8-minmax-neonfma-dup-ld64.c | in xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld64():
    95  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    99  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
   103  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
|
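The wasmsimd splat hits follow the same structure, but baseline WASM SIMD has no fused multiply-add, so the broadcast is done with wasm_v32x4_shuffle and the accumulation with an explicit wasm_f32x4_mul followed by wasm_f32x4_add. The matched inner-loop lines are identical between the -arm- and -x86- splat variants; they differ only elsewhere in those files. Below is a minimal sketch of one k-step, again with a made-up helper signature rather than the real kernel interface:

#include <wasm_simd128.h>

// Sketch of one "splat" step from the wasmsimd kernels listed above.
// Assumed: a points at four A elements and w at 8 packed B floats for this
// k step (columns 0..3 then 4..7); the accumulators are passed by pointer.
void gemm_1x8_wasmsimd_splat_step(const float* a, const float* w,
                                  v128_t* vacc0x0123, v128_t* vacc0x4567) {
  const v128_t va0 = wasm_v128_load(a);  // a[0..3]

  // Splat lane 1 of va0 into all four lanes (the va0c1 highlighted above).
  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);

  // Packed B for this k step: columns 0..3 and 4..7.
  const v128_t vb0123c1 = wasm_v128_load(w);
  const v128_t vb4567c1 = wasm_v128_load(w + 4);

  // Multiply-then-add stands in for the fused MLA/FMA of the NEON variants.
  *vacc0x0123 = wasm_f32x4_add(*vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
  *vacc0x4567 = wasm_f32x4_add(*vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
}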
/external/XNNPACK/src/f32-igemm/gen/ |
D | 1x8-minmax-neonfma-dup-ld64.c | in xnn_f32_igemm_minmax_ukernel_1x8__neonfma_dup_ld64():
    72  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    73  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
    74  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8-minmax-neon-dup-ld64.c | in xnn_f32_igemm_minmax_ukernel_1x8__neon_dup_ld64():
    72  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
    73  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
    74  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 1x8-wasmsimd-splat.c | in xnn_f32_igemm_ukernel_1x8__wasmsimd_splat():
    71  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    76  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    77  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-relu-wasmsimd-splat.c | in xnn_f32_igemm_relu_ukernel_1x8__wasmsimd_splat():
    71  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    76  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    77  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_x86_splat():
    71  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    76  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    77  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 1x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_igemm_minmax_ukernel_1x8__wasmsimd_arm_splat():
    73  const v128_t va0c1 = wasm_v32x4_shuffle(va0, va0, 1, 1, 1, 1);  (local)
    78  vacc0x0123 = wasm_f32x4_add(vacc0x0123, wasm_f32x4_mul(va0c1, vb0123c1));
    79  vacc0x4567 = wasm_f32x4_add(vacc0x4567, wasm_f32x4_mul(va0c1, vb4567c1));
|
D | 4x8-minmax-neon-dup-ld64.c | in xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld64():
   117  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
   121  vacc0x0123 = vmlaq_f32(vacc0x0123, va0c1, vb0123c1);
   125  vacc0x4567 = vmlaq_f32(vacc0x4567, va0c1, vb4567c1);
|
D | 4x8-minmax-neonfma-dup-ld64.c | in xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld64():
   117  const float32x4_t va0c1 = vdupq_lane_f32(va0, 1);  (local)
   121  vacc0x0123 = vfmaq_f32(vacc0x0123, va0c1, vb0123c1);
   125  vacc0x4567 = vfmaq_f32(vacc0x4567, va0c1, vb4567c1);
|
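The 4x8 entries (4x8-minmax-neon-dup-ld64.c and 4x8-minmax-neonfma-dup-ld64.c, in both the gemm and igemm listings) apply the same lane-1 broadcast once per A row, which is why their matches sit further apart (lines 117/121/125): the generated code is fully unrolled over four rows and two k-steps. The sketch below compresses that unrolling into loops to show the structure; the signature, the element strides, the zero accumulators, and the assumption of contiguous A rows (the igemm variants instead read A through an indirection buffer of row pointers) are simplifications made here, not XNNPACK's interface.

#include <stddef.h>
#include <arm_neon.h>

// Loop-based sketch of the 4x8 dup-ld64 structure; the real kernels keep a
// named register per row (va0c1..va3c1, vacc0x0123..vacc3x4567).
void gemm_4x8_dup_ld64_sketch(size_t kc, const float* a, size_t a_stride,
                              const float* w, float* c, size_t c_stride) {
  float32x4_t vacc_lo[4], vacc_hi[4];
  const float* a_row[4];
  for (int m = 0; m < 4; m++) {
    vacc_lo[m] = vdupq_n_f32(0.0f);
    vacc_hi[m] = vdupq_n_f32(0.0f);
    a_row[m] = a + m * a_stride;  // stride in elements, an assumption
  }

  for (size_t k = 0; k < kc; k += 2) {
    // One 64-bit load per row of A.
    float32x2_t va[4];
    for (int m = 0; m < 4; m++) { va[m] = vld1_f32(a_row[m]); a_row[m] += 2; }

    for (int step = 0; step < 2; step++) {
      const float32x4_t vb_lo = vld1q_f32(w); w += 4;  // B columns 0..3
      const float32x4_t vb_hi = vld1q_f32(w); w += 4;  // B columns 4..7
      for (int m = 0; m < 4; m++) {
        // Broadcast lane `step` of this row's A pair and accumulate.
        const float32x4_t va_c = (step == 0) ? vdupq_lane_f32(va[m], 0)
                                             : vdupq_lane_f32(va[m], 1);
        vacc_lo[m] = vfmaq_f32(vacc_lo[m], va_c, vb_lo);  // vmlaq_f32 without FMA
        vacc_hi[m] = vfmaq_f32(vacc_hi[m], va_c, vb_hi);
      }
    }
  }

  for (int m = 0; m < 4; m++) {
    vst1q_f32(c + m * c_stride, vacc_lo[m]);
    vst1q_f32(c + m * c_stride + 4, vacc_hi[m]);
  }
}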
/external/XNNPACK/src/f16-gemm/gen/ |
D | 1x16-minmax-neonfp16arith-ld64.c | in xnn_f16_gemm_minmax_ukernel_1x16__neonfp16arith_ld64():
    71  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    73  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
    74  vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
|
D | 1x8-minmax-neonfp16arith-ld64.c | in xnn_f16_gemm_minmax_ukernel_1x8__neonfp16arith_ld64():
    65  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    67  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
|
/external/XNNPACK/src/f16-gemm/gen-inc/ |
D | 1x16inc-minmax-neonfp16arith-ld64.c | in xnn_f16_gemminc_minmax_ukernel_1x16__neonfp16arith_ld64():
    73  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    75  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
    76  vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
|
D | 1x8inc-minmax-neonfp16arith-ld64.c | in xnn_f16_gemminc_minmax_ukernel_1x8__neonfp16arith_ld64():
    67  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    69  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
|
/external/XNNPACK/src/f16-igemm/gen/ |
D | 1x16-minmax-neonfp16arith-ld64.c | in xnn_f16_igemm_minmax_ukernel_1x16__neonfp16arith_ld64():
    82  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    84  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
    85  vacc0x89ABCDEF = vfmaq_f16(vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
|
D | 1x8-minmax-neonfp16arith-ld64.c | in xnn_f16_igemm_minmax_ukernel_1x8__neonfp16arith_ld64():
    76  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);  (local)
    78  vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c1, vb01234567c1);
|
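The f16 hits are the half-precision analogue of the ld64 pattern: four half-precision A elements come in with one 64-bit load (float16x4_t), each lane is broadcast with vdupq_lane_f16, and vfmaq_f16 accumulates against eight packed B columns per register (one accumulator register for the 1x8 kernels, two for the 1x16 kernels). Below is a sketch of just the lane-1 step matched above; the helper name and signature are assumptions, and it needs an fp16-arithmetic target (e.g. -march=armv8.2-a+fp16):

#include <arm_neon.h>

// Sketch of the lane-1 step from the f16 neonfp16arith-ld64 kernels above.
// Assumed: a points at four A halves, w at 16 packed B halves for this k step
// (columns 0..7 then 8..15); the accumulators are passed by pointer.
void gemm_1x16_fp16_ld64_step(const float16_t* a, const float16_t* w,
                              float16x8_t* vacc0x01234567,
                              float16x8_t* vacc0x89ABCDEF) {
  const float16x4_t va0 = vld1_f16(a);  // a[0..3], one 64-bit load

  // Broadcast lane 1 of va0 (the va0c1 highlighted in the listing).
  const float16x8_t va0c1 = vdupq_lane_f16(va0, 1);

  // Packed B for this k step: columns 0..7 and 8..15.
  const float16x8_t vb01234567c1 = vld1q_f16(w);
  const float16x8_t vb89ABCDEFc1 = vld1q_f16(w + 8);

  *vacc0x01234567 = vfmaq_f16(*vacc0x01234567, va0c1, vb01234567c1);
  *vacc0x89ABCDEF = vfmaq_f16(*vacc0x89ABCDEF, va0c1, vb89ABCDEFc1);
}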