/external/XNNPACK/src/f32-gemm/gen/

6x8-minmax-neon-dup-ld64.c  (in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld64)
  102: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  108: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  114: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);

5x8-relu-wasmsimd-splat.c  (in xnn_f32_gemm_relu_ukernel_5x8__wasmsimd_splat)
  95: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  104: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  109: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8-minmax-neonfma-dup-ld64.c  (in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld64)
  102: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  108: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  114: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);

5x8-wasmsimd-splat.c  (in xnn_f32_gemm_ukernel_5x8__wasmsimd_splat)
  95: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  104: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  109: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

5x8-minmax-wasmsimd-arm-splat.c  (in xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_arm_splat)
  97: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  106: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  111: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

5x8-minmax-wasmsimd-x86-splat.c  (in xnn_f32_gemm_minmax_ukernel_5x8__wasmsimd_x86_splat)
  95: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  104: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  109: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8-minmax-neonfma-dup-ld128.c  (in xnn_f32_gemm_minmax_ukernel_6x8__neonfma_dup_ld128)
  103: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  109: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  115: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8-minmax-wasmsimd-arm-splat.c  (in xnn_f32_gemm_minmax_ukernel_6x8__wasmsimd_arm_splat)
  107: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  117: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  123: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8-minmax-neon-dup-ld128.c  (in xnn_f32_gemm_minmax_ukernel_6x8__neon_dup_ld128)
  103: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  109: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  115: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);
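Every hit in this directory is the same k-loop step: lane 0 of the row-4 activation vector is broadcast into va4c0, then multiply-accumulated against the two packed-B vectors that cover output columns 0..3 and 4..7. Below is a minimal sketch of the NEON "dup-ld64" form of that step, assuming only <arm_neon.h>; the helper name and the packed-B layout are illustrative, not XNNPACK's.

#include <arm_neon.h>

// Sketch: one k-step of the "dup-ld64" pattern for a single A row against an
// 8-wide B panel. Two A values arrive in one 64-bit load; each lane is
// broadcast with vdupq_lane_f32 and multiply-accumulated into the two
// 4-wide accumulators (hypothetical helper, not an XNNPACK function).
static void gemm_row_dup_ld64_step(
    const float* a,          // 2 activations for this row (one ld64 load)
    const float* w,          // 16 packed B floats: k-step 0 then k-step 1, 8 each
    float32x4_t* vacc0123,
    float32x4_t* vacc4567)
{
  const float32x2_t va = vld1_f32(a);               // a[0], a[1]

  const float32x4_t vb0123c0 = vld1q_f32(w + 0);
  const float32x4_t vb4567c0 = vld1q_f32(w + 4);
  const float32x4_t vb0123c1 = vld1q_f32(w + 8);
  const float32x4_t vb4567c1 = vld1q_f32(w + 12);

  const float32x4_t vac0 = vdupq_lane_f32(va, 0);   // broadcast a[0] -- the "va4c0" step
  *vacc0123 = vmlaq_f32(*vacc0123, vac0, vb0123c0);
  *vacc4567 = vmlaq_f32(*vacc4567, vac0, vb4567c0);

  const float32x4_t vac1 = vdupq_lane_f32(va, 1);   // broadcast a[1]
  *vacc0123 = vmlaq_f32(*vacc0123, vac1, vb0123c1);
  *vacc4567 = vmlaq_f32(*vacc4567, vac1, vb4567c1);
}

The only difference between the neon-dup-* and neonfma-dup-* hits is the multiply-accumulate itself: vmlaq_f32 (separate multiply and add) versus vfmaq_f32 (fused, requiring VFPv4/ARMv8).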
/external/XNNPACK/src/f32-gemm/gen-inc/

6x8inc-minmax-neon-dup-ld64.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld64)
  104: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  110: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  116: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8inc-minmax-neonfma-dup-ld64.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld64)
  104: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  110: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  116: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);

5x8inc-minmax-wasmsimd-arm-splat.c  (in xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_arm_splat)
  99: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  108: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  113: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

5x8inc-minmax-wasmsimd-x86-splat.c  (in xnn_f32_gemminc_minmax_ukernel_5x8__wasmsimd_x86_splat)
  97: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  106: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  111: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8inc-minmax-neon-dup-ld128.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__neon_dup_ld128)
  105: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  111: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  117: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8inc-minmax-neonfma-dup-ld128.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__neonfma_dup_ld128)
  105: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  111: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  117: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8inc-minmax-wasmsimd-x86-splat.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_x86_splat)
  107: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  117: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  123: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8inc-minmax-wasmsimd-arm-splat.c  (in xnn_f32_gemminc_minmax_ukernel_6x8__wasmsimd_arm_splat)
  109: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  119: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  125: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
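The gen-inc/ kernels (xnn_f32_gemminc_*) hit the identical statements; as far as these hits go they differ from the gen/ kernels only in starting the accumulators from a caller-supplied partial-sum buffer instead of the bias. The wasmsimd "splat" variants in both directories replace the lane broadcast with a self-shuffle, since WASM SIMD 128 has no dedicated lane-broadcast instruction and no fused multiply-add in the core proposal. A sketch of that step, assuming <wasm_simd128.h> and a -msimd128 build; the function name is illustrative, and newer Emscripten headers spell the shuffle wasm_i32x4_shuffle.

#include <wasm_simd128.h>

// Sketch: broadcast lane 0 of the row-4 activation vector by shuffling it
// against itself, then emulate the FMA with separate mul and add -- the same
// three statements the splat hits above show (hypothetical helper).
static inline void splat_madd_step(
    v128_t va4,            // 4 activations for row 4
    v128_t vb0123c0,       // packed B, output columns 0..3, k-step 0
    v128_t vb4567c0,       // packed B, output columns 4..7, k-step 0
    v128_t* vacc4x0123,
    v128_t* vacc4x4567)
{
  const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);
  *vacc4x0123 = wasm_f32x4_add(*vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  *vacc4x4567 = wasm_f32x4_add(*vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));
}

The arm/x86 suffixes on the minmax splat kernels select clamping sequences tuned to each target's instruction lowering; the va4c0 statements themselves are identical across the two.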
/external/XNNPACK/src/f32-igemm/gen/

5x8-wasmsimd-splat.c  (in xnn_f32_igemm_ukernel_5x8__wasmsimd_splat)
  120: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  129: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  134: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8-minmax-neonfma-dup-ld64.c  (in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld64)
  130: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  136: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  142: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8-minmax-neon-dup-ld64.c  (in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld64)
  130: const float32x4_t va4c0 = vdupq_lane_f32(va4, 0);   [local declaration]
  136: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  142: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);

5x8-relu-wasmsimd-splat.c  (in xnn_f32_igemm_relu_ukernel_5x8__wasmsimd_splat)
  120: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  129: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  134: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

5x8-minmax-wasmsimd-x86-splat.c  (in xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_x86_splat)
  120: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  129: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  134: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

5x8-minmax-wasmsimd-arm-splat.c  (in xnn_f32_igemm_minmax_ukernel_5x8__wasmsimd_arm_splat)
  122: const v128_t va4c0 = wasm_v32x4_shuffle(va4, va4, 0, 0, 0, 0);   [local declaration]
  131: vacc4x0123 = wasm_f32x4_add(vacc4x0123, wasm_f32x4_mul(va4c0, vb0123c0));
  136: vacc4x4567 = wasm_f32x4_add(vacc4x4567, wasm_f32x4_mul(va4c0, vb4567c0));

6x8-minmax-neon-dup-ld128.c  (in xnn_f32_igemm_minmax_ukernel_6x8__neon_dup_ld128)
  131: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  137: vacc4x0123 = vmlaq_f32(vacc4x0123, va4c0, vb0123c0);
  143: vacc4x4567 = vmlaq_f32(vacc4x4567, va4c0, vb4567c0);

6x8-minmax-neonfma-dup-ld128.c  (in xnn_f32_igemm_minmax_ukernel_6x8__neonfma_dup_ld128)
  131: const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);   [local declaration]
  137: vacc4x0123 = vfmaq_f32(vacc4x0123, va4c0, vb0123c0);
  143: vacc4x4567 = vfmaq_f32(vacc4x4567, va4c0, vb4567c0);
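The igemm variants compute the same inner product but read their A rows through an indirection buffer of row pointers (indirect GEMM, as used for convolution), which is why the matching statements sit at higher line numbers. The ld128 hits here and above also show the second dup flavor: four A values are loaded at once, and vget_low_f32/vget_high_f32 select the 64-bit half whose lane is broadcast. A sketch of the first of its four k-steps, assuming <arm_neon.h> and vfmaq_f32 support; the helper name and layout are illustrative, not XNNPACK's.

#include <arm_neon.h>

// Sketch: the "dup-ld128" variant. One 128-bit load brings in 4 A values;
// k-step 0 broadcasts lane 0 of the low half -- the exact va4c0 expression
// in the ld128 hits above (hypothetical helper, not an XNNPACK function).
static void dup_ld128_first_kstep(
    const float* a4,          // 4 activations for row 4
    const float* w,           // packed B for k-step 0
    float32x4_t* vacc4x0123,
    float32x4_t* vacc4x4567)
{
  const float32x4_t va4 = vld1q_f32(a4);

  const float32x4_t vb0123c0 = vld1q_f32(w + 0);
  const float32x4_t vb4567c0 = vld1q_f32(w + 4);

  const float32x4_t va4c0 = vdupq_lane_f32(vget_low_f32(va4), 0);
  *vacc4x0123 = vfmaq_f32(*vacc4x0123, va4c0, vb0123c0);
  *vacc4x4567 = vfmaq_f32(*vacc4x4567, va4c0, vb4567c0);

  // k-steps 1..3 repeat with lane 1 of vget_low_f32(va4), then lanes 0 and 1
  // of vget_high_f32(va4), each against its own pair of B vectors.
}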