Cross-reference of the local variable va1c2: each entry marks its definition (D) and the two accumulation statements that use it, with source line numbers from the generated files.

/external/XNNPACK/src/f32-gemm/gen-inc/

D | 3x8inc-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_splat():
    102  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    109  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    112  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 3x8inc-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_splat():
    100  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    107  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    110  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8inc-minmax-neon-dup-ld128.c | in xnn_f32_gemminc_minmax_ukernel_4x8__neon_dup_ld128():
    116  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    120  vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
    124  vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8inc-minmax-neonfma-dup-ld128.c | in xnn_f32_gemminc_minmax_ukernel_4x8__neonfma_dup_ld128():
    116  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    120  vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
    124  vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8inc-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_arm_splat():
    118  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    126  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    130  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8inc-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemminc_minmax_ukernel_4x8__wasmsimd_x86_splat():
    116  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    124  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    128  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

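All of the wasmsimd "*-splat" entries in this listing repeat one pattern: wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2) broadcasts float lane 2 of the A-row vector into every lane, and the result feeds a multiply-add spelled as add(acc, mul(...)), since core WebAssembly SIMD has no fused multiply-add instruction. A minimal sketch of that step pulled out as a standalone helper (the helper name and pointer-parameter shape are illustrative, not from the generated files):

    #include <wasm_simd128.h>

    /* One "c2" step of the splat scheme: broadcast lane 2 of the A row,
     * then accumulate against two packed B vectors (8 output columns). */
    static inline void splat_c2_step(v128_t va1,
                                     v128_t vb0123c2, v128_t vb4567c2,
                                     v128_t *vacc1x0123, v128_t *vacc1x4567)
    {
      const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);
      *vacc1x0123 = wasm_f32x4_add(*vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
      *vacc1x4567 = wasm_f32x4_add(*vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));
    }

Note that the -arm- and -x86- variants list identical inner-loop lines; the suffixes concern how the final min/max clamp is lowered on each target, which does not show up in this step.
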
/external/XNNPACK/src/f32-gemm/gen/

D | 3x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_splat():
    100  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    107  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    110  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 3x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_splat():
    98  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    105  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    108  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-wasmsimd-splat.c | in xnn_f32_gemm_ukernel_4x8__wasmsimd_splat():
    114  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    122  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    126  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-minmax-neon-dup-ld128.c | in xnn_f32_gemm_minmax_ukernel_4x8__neon_dup_ld128():
    114  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    118  vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
    122  vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8-relu-wasmsimd-splat.c | in xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_splat():
    114  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    122  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    126  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-minmax-neonfma-dup-ld128.c | in xnn_f32_gemm_minmax_ukernel_4x8__neonfma_dup_ld128():
    114  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    118  vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
    122  vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_arm_splat():
    116  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    124  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    128  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_gemm_minmax_ukernel_4x8__wasmsimd_x86_splat():
    114  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    122  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    126  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 5x8-relu-wasmsimd-splat.c | in xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat():
    130  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    139  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    144  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 5x8-wasmsimd-splat.c | in xnn_f32_gemm_ukernel_5x8__wasmsimd_splat():
    130  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    139  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    144  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

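The neon-dup-ld128 and neonfma-dup-ld128 entries are the same step in NEON terms: va1 holds four consecutive floats of one A row (loaded 128 bits at a time, hence "ld128"), vget_high_f32 takes lanes 2-3, and vdupq_lane_f32(..., 0) broadcasts lane 2. The only difference between the two variants is the accumulate: vmlaq_f32 is a non-fused multiply-accumulate (the product is rounded before the add), while the neonfma files use the fused vfmaq_f32. A sketch covering both, with the same caveat that the helper itself is illustrative:

    #include <arm_neon.h>

    /* The "c2" step in the NEON dup scheme; lane 2 of va1 sits at
     * index 0 of the high half. */
    static inline void dup_c2_step(float32x4_t va1,
                                   float32x4_t vb0123c2, float32x4_t vb4567c2,
                                   float32x4_t *vacc1x0123, float32x4_t *vacc1x4567)
    {
      const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);
    #if defined(__ARM_FEATURE_FMA)
      /* neonfma variants: fused multiply-add, no intermediate rounding. */
      *vacc1x0123 = vfmaq_f32(*vacc1x0123, va1c2, vb0123c2);
      *vacc1x4567 = vfmaq_f32(*vacc1x4567, va1c2, vb4567c2);
    #else
      /* plain neon variants: multiply-accumulate with a rounded product. */
      *vacc1x0123 = vmlaq_f32(*vacc1x0123, va1c2, vb0123c2);
      *vacc1x4567 = vmlaq_f32(*vacc1x4567, va1c2, vb4567c2);
    #endif
    }
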
/external/XNNPACK/src/f32-igemm/gen/

D | 3x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_arm_splat():
    119  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    126  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    129  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 3x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_splat():
    117  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    124  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    127  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-relu-wasmsimd-splat.c | in xnn_f32_igemm_relu_ukernel_4x8__wasmsimd_splat():
    136  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    144  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    148  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-wasmsimd-splat.c | in xnn_f32_igemm_ukernel_4x8__wasmsimd_splat():
    136  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    144  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    148  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-minmax-neonfma-dup-ld128.c | in xnn_f32_igemm_minmax_ukernel_4x8__neonfma_dup_ld128():
    136  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    140  vacc1x0123 = vfmaq_f32(vacc1x0123, va1c2, vb0123c2);
    144  vacc1x4567 = vfmaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8-minmax-neon-dup-ld128.c | in xnn_f32_igemm_minmax_ukernel_4x8__neon_dup_ld128():
    136  const float32x4_t va1c2 = vdupq_lane_f32(vget_high_f32(va1), 0);  (local definition)
    140  vacc1x0123 = vmlaq_f32(vacc1x0123, va1c2, vb0123c2);
    144  vacc1x4567 = vmlaq_f32(vacc1x4567, va1c2, vb4567c2);

D | 4x8-minmax-wasmsimd-arm-splat.c | in xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_arm_splat():
    138  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    146  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    150  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 4x8-minmax-wasmsimd-x86-splat.c | in xnn_f32_igemm_minmax_ukernel_4x8__wasmsimd_x86_splat():
    136  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    144  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    148  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

D | 5x8-wasmsimd-splat.c | in xnn_f32_igemm_ukernel_5x8__wasmsimd_splat():
    155  const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  (local definition)
    164  vacc1x0123 = wasm_f32x4_add(vacc1x0123, wasm_f32x4_mul(va1c2, vb0123c2));
    169  vacc1x4567 = wasm_f32x4_add(vacc1x4567, wasm_f32x4_mul(va1c2, vb4567c2));

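Across every file listed, the c2 lines are one of four sibling steps per 128-bit A load: the generators emit va1c0 through va1c3, splatting each lane against successive 8-float blocks of packed weights. A reconstruction of one row's main k-loop in the wasmsimd splat scheme follows; the loop framing and the 8-floats-per-k packed-weights stride are inferred from the pattern, not copied from the generated sources:

    #include <stddef.h>
    #include <wasm_simd128.h>

    /* Illustrative k-loop for a single A row; kc counts bytes, as in
     * XNNPACK microkernels.  Real 3x8/4x8/5x8 kernels run 3-5 rows over
     * the same weight loads, and handle the kc % 4 remainder separately. */
    static void row1_splat_kloop(size_t kc, const float *a1, const float *w,
                                 v128_t *vacc1x0123, v128_t *vacc1x4567)
    {
      for (; kc >= 4 * sizeof(float); kc -= 4 * sizeof(float)) {
        const v128_t va1 = wasm_v128_load(a1);
        a1 += 4;

        const v128_t va1c0 = wasm_v32x4_shuffle(va1, va1, 0, 0, 0, 0);
        const v128_t va1c1 = wasm_v32x4_shuffle(va1, va1, 1, 1, 1, 1);
        const v128_t va1c2 = wasm_v32x4_shuffle(va1, va1, 2, 2, 2, 2);  /* the lines indexed above */
        const v128_t va1c3 = wasm_v32x4_shuffle(va1, va1, 3, 3, 3, 3);

        *vacc1x0123 = wasm_f32x4_add(*vacc1x0123, wasm_f32x4_mul(va1c0, wasm_v128_load(w +  0)));
        *vacc1x4567 = wasm_f32x4_add(*vacc1x4567, wasm_f32x4_mul(va1c0, wasm_v128_load(w +  4)));
        *vacc1x0123 = wasm_f32x4_add(*vacc1x0123, wasm_f32x4_mul(va1c1, wasm_v128_load(w +  8)));
        *vacc1x4567 = wasm_f32x4_add(*vacc1x4567, wasm_f32x4_mul(va1c1, wasm_v128_load(w + 12)));
        *vacc1x0123 = wasm_f32x4_add(*vacc1x0123, wasm_f32x4_mul(va1c2, wasm_v128_load(w + 16)));
        *vacc1x4567 = wasm_f32x4_add(*vacc1x4567, wasm_f32x4_mul(va1c2, wasm_v128_load(w + 20)));
        *vacc1x0123 = wasm_f32x4_add(*vacc1x0123, wasm_f32x4_mul(va1c3, wasm_v128_load(w + 24)));
        *vacc1x4567 = wasm_f32x4_add(*vacc1x4567, wasm_f32x4_mul(va1c3, wasm_v128_load(w + 28)));
        w += 32;
      }
    }

The three directories differ only in how A and the accumulators arrive: f32-gemm/gen kernels start the accumulators from packed bias, f32-gemm/gen-inc ("gemminc") kernels resume from a caller-supplied accumulator buffer, and f32-igemm kernels read A rows through an indirection-pointer array instead of a flat stride.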