/external/XNNPACK/src/f32-prelu/gen/

  wasmsimd-minmax-4x8.c
     86  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    134  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    170  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local

  wasmsimd-minmax-4x16.c
    104  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    176  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    212  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local

  wasmsimd-bitselect-4x8.c
     85  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    133  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    169  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local

  wasmsimd-minmax-4x4.c
     77  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local
    113  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local

  neon-4x8.c
     80  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local
    123  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local
    154  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local

  wasmsimd-bitselect-4x4.c
     76  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local
    112  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local

  neon-4x4.c
     71  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x4() local
    102  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x4() local

  neon-4x16.c
     98  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local
    165  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local
    196  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local

  wasmsimd-bitselect-4x16.c
    103  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    175  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    211  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local

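Two PReLU formulations show up in these hits. The minmax kernels isolate the non-negative part with wasm_i32x4_max(vi2x0123, vzero), a signed-integer max on the raw IEEE-754 bits, which works because every negative float is also negative as a signed 32-bit integer; the negative part (taken with the matching integer min) is then scaled by the per-channel slope. The bitselect and NEON kernels instead multiply by the slope unconditionally, which is the wasm_f32x4_mul / vmulq_f32 seen above, and select the original value back for non-negative lanes. A minimal scalar sketch of both, with illustrative names rather than XNNPACK's API:

  #include <stddef.h>

  /* Scalar sketch only; the listed kernels do the same computation four
     lanes at a time with v128_t / float32x4_t. */
  static void prelu_minmax_style(const float* x, const float* w, float* y, size_t n) {
    for (size_t i = 0; i < n; i++) {
      const float pos = x[i] > 0.0f ? x[i] : 0.0f;  /* wasm_i32x4_max(vi, vzero) */
      const float neg = x[i] < 0.0f ? x[i] : 0.0f;  /* wasm_i32x4_min(vi, vzero) */
      y[i] = pos + neg * w[i];                      /* scale only the negative part */
    }
  }

  static void prelu_select_style(const float* x, const float* w, float* y, size_t n) {
    for (size_t i = 0; i < n; i++) {
      const float scaled = x[i] * w[i];             /* the mul hit above */
      y[i] = x[i] < 0.0f ? scaled : x[i];           /* select on the sign of x */
    }
  }
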
/external/XNNPACK/src/bf16-gemm/gen/

  3x4c8-minmax-neonbf16-bfdot.c
    144  float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23); in xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfdot() local
    161  float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3)); in xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfdot() local

  3x4c8-minmax-neonbf16-bfmlal.c
    169  float32x4_t vacc2x0123 = vpaddq_f32(vacc2x01, vacc2x23); in xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfmlal() local
    186  float32x4_t vacc2x0123 = vcombine_f32(vpadd_f32(vsum2x0, vsum2x1), vpadd_f32(vsum2x2, vsum2x3)); in xnn_bf16_gemm_minmax_ukernel_3x4c8__neonbf16_bfmlal() local

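Both hits in each bf16 kernel are the end-of-loop reduction for output row 2: after the K loop each output column owns a 4-lane partial accumulator, and the vpaddq_f32 / vpadd_f32 chains collapse those into one float32x4_t whose lanes are columns 0..3. A scalar sketch of the same reduction, with illustrative names:

  #include <stddef.h>

  /* acc2[j] stands for the 4-lane partial accumulator of row 2, column j;
     the pairwise-add chain is just a horizontal sum of its lanes. */
  static void reduce_row2(const float acc2[4][4], float vacc2x0123[4]) {
    for (size_t j = 0; j < 4; j++) {
      vacc2x0123[j] = acc2[j][0] + acc2[j][1] + acc2[j][2] + acc2[j][3];
    }
  }
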
/external/XNNPACK/src/f32-gemm/gen/

  3x8-wasmrelaxedsimd-fma-loadsplat.c
     58  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat() local

  3x8-wasmsimd-loadsplat.c
     58  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_ukernel_3x8__wasmsimd_loadsplat() local

  3x8-relu-wasmrelaxedsimd-fma-loadsplat.c
     58  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_relu_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat() local

  3x8-relu-wasmsimd-loadsplat.c
     58  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_relu_ukernel_3x8__wasmsimd_loadsplat() local

  3x8-minmax-wasmrelaxedsimd-loadsplat.c
     60  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_loadsplat() local

  3x8-minmax-wasmrelaxedsimd-fma-loadsplat.c
     60  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmrelaxedsimd_fma_loadsplat() local

  3x8-minmax-wasmsimd-x86-loadsplat.c
     60  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat() local

  3x8-minmax-sse-load1.c
     58  __m128 vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__sse_load1() local

  3x8-minmax-wasmsimd-arm-loadsplat.c
     60  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat() local

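Every hit in this group is the same tile setup: the loadsplat/load1 kernels seed row 0's accumulators from the packed-weights stream, which begins with the per-column bias, and rows 1 and 2 simply copy them, since every output row of the 3x8 tile starts from the same bias. A sketch of that pattern with illustrative names, not the generated code:

  #include <stddef.h>

  /* w points into the packed weights; its first 8 floats are the per-column
     bias for this 3x8 tile. */
  static void init_3x8_accumulators(const float* w, float vacc[3][8]) {
    for (size_t j = 0; j < 8; j++) {
      vacc[0][j] = w[j];        /* bias load into row 0 (vacc0x0123 / vacc0x4567) */
      vacc[1][j] = vacc[0][j];  /* vacc1x0123 = vacc0x0123; */
      vacc[2][j] = vacc[0][j];  /* the listed hit: vacc2x0123 = vacc0x0123; */
    }
  }
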
/external/XNNPACK/src/f32-ppmm/gen/

  4x8-minmax-wasmsimd-arm-splat.c
     55  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat() local

/external/XNNPACK/src/f32-gemm/gen-inc/

  3x8inc-minmax-sse-load1.c
     60  __m128 vacc2x0123 = _mm_load_ps(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1() local

  3x8inc-minmax-wasmrelaxedsimd-loadsplat.c
     62  v128_t vacc2x0123 = wasm_v128_load(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__wasmrelaxedsimd_loadsplat() local

  3x8inc-minmax-wasmsimd-x86-loadsplat.c
     62  v128_t vacc2x0123 = wasm_v128_load(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat() local

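The gen-inc (GEMMINC) variants differ from the GEMM kernels above only in how the accumulators start: they are reloaded from a caller-supplied acc buffer of partial results instead of the bias. With the 3x8 tile stored row-major, row 2 / columns 0..3 begin at acc + 16, which is exactly what these hits load. A sketch with illustrative names:

  #include <stddef.h>

  /* acc holds partial results for the 3x8 tile in row-major order, so row 2,
     columns 0..3 begin at acc + 2*8 == acc + 16 (the _mm_load_ps(acc + 16)
     and wasm_v128_load(acc + 16) hits). */
  static void init_3x8_accumulators_inc(const float* acc, float vacc[3][8]) {
    for (size_t i = 0; i < 3; i++) {
      for (size_t j = 0; j < 8; j++) {
        vacc[i][j] = acc[i * 8 + j];
      }
    }
  }
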
/external/XNNPACK/src/f32-igemm/gen/

  3x8-wasmsimd-loadsplat.c
     58  v128_t vacc2x0123 = vacc0x0123; in xnn_f32_igemm_ukernel_3x8__wasmsimd_loadsplat() local