/external/XNNPACK/src/f32-prelu/gen/ |
D | wasmsimd-minmax-4x8.c |
     90  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    136  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    172  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
|
D | wasmsimd-minmax-4x16.c |
    112  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    178  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    214  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
|
D | wasmsimd-bitselect-4x8.c |
     89  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    135  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    171  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
|
D | wasmsimd-minmax-4x4.c |
     79  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local
    115  v128_t vacc3x0123 = wasm_i32x4_max(vi3x0123, vzero);  in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local
|
D | neon-4x8.c |
     84  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x8() local
    125  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x8() local
    156  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x8() local
|
D | wasmsimd-bitselect-4x4.c |
     78  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local
    114  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local
|
D | neon-4x4.c |
     73  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x4() local
    104  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x4() local
|
D | neon-4x16.c |
    106  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x16() local
    167  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x16() local
    198  float32x4_t vacc3x0123 = vmulq_f32(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__neon_4x16() local
|
D | wasmsimd-bitselect-4x16.c |
    111  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    177  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    213  v128_t vacc3x0123 = wasm_f32x4_mul(vi3x0123, vw0123);  in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
|
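The f32-prelu hits above fall into two code-generation strategies: the wasmsimd "minmax" kernels split each input vector into its non-negative and negative parts with integer max/min against zero, while the "bitselect" and NEON kernels multiply by the slope unconditionally and then select per lane on the sign of the input. Below is a minimal single-vector sketch of both ideas, not the generated 4-row kernel bodies; the helper names are illustrative and the code assumes a WebAssembly build with -msimd128.

#include <wasm_simd128.h>

// "minmax" flavor: signed-integer max/min against zero split the float lanes
// into their non-negative and negative parts (the sign bit makes negative
// floats compare below zero as i32), then only the negative part is scaled.
static v128_t prelu_minmax(v128_t vi, v128_t vw) {
  const v128_t vzero = wasm_i32x4_splat(0);
  const v128_t vacc = wasm_i32x4_max(vi, vzero);  // x where x >= 0, else 0
  const v128_t vneg = wasm_i32x4_min(vi, vzero);  // x where x < 0, else 0
  return wasm_f32x4_add(vacc, wasm_f32x4_mul(vneg, vw));
}

// "bitselect" flavor: compute slope*x unconditionally, then pick x or slope*x
// per lane using a mask derived from the sign bit of x.
static v128_t prelu_bitselect(v128_t vi, v128_t vw) {
  const v128_t vacc = wasm_f32x4_mul(vi, vw);   // slope * x
  const v128_t vmask = wasm_i32x4_shr(vi, 31);  // all-ones lanes where x < 0
  return wasm_v128_bitselect(vacc, vi, vmask);  // x < 0 ? slope*x : x
}
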
/external/XNNPACK/src/f32-ppmm/gen/ |
D | 4x8-minmax-wasmsimd-arm-splat.c | 57 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat() local
|
D | 4x8-minmax-sse.c | 55 __m128 vacc3x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__sse() local
|
D | 4x8-minmax-neon.c | 56 float32x4_t vacc3x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__neon() local
|
D | 4x8-minmax-wasmsimd-x86-splat.c | 57 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat() local
|
D | 4x8-minmax-neonfma.c | 56 float32x4_t vacc3x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__neonfma() local
|
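Every f32-ppmm hit declares vacc3x0123 as a copy of vacc0x0123: row 0's accumulators are seeded from the bias vectors at the head of the packed weights, and rows 1-3 start from the same values. A minimal sketch of that prologue for an SSE 4x8 tile, assuming a packed-weights pointer w whose first two vectors hold the bias (function and parameter names are illustrative, not the micro-kernel's own prologue):

#include <xmmintrin.h>

// Seed a 4-row x 8-column accumulator tile from the bias at the head of the
// packed weights; every row starts from the same two bias vectors.
static void init_acc_4x8(const float* w, __m128 vacc[4][2]) {
  const __m128 vbias0123 = _mm_loadu_ps(w);      // bias for columns 0..3
  const __m128 vbias4567 = _mm_loadu_ps(w + 4);  // bias for columns 4..7
  for (int row = 0; row < 4; row++) {
    vacc[row][0] = vbias0123;
    vacc[row][1] = vbias4567;
  }
}
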
/external/XNNPACK/src/f32-gemm/gen/ |
D | 4x8-wasmrelaxedsimd-fma-loadsplat.c | 66 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_gemm_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat() local
|
D | 4x8-wasmsimd-loadsplat.c | 66 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_gemm_ukernel_4x8__wasmsimd_loadsplat() local
|
D | 4x8-minmax-avx-broadcast.c | 117 __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); in xnn_f32_gemm_minmax_ukernel_4x8__avx_broadcast() local
|
D | 4x8-minmax-fma3-broadcast.c | 117 __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); in xnn_f32_gemm_minmax_ukernel_4x8__fma3_broadcast() local
|
D | 4x8-relu-wasmsimd-loadsplat.c | 66 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_gemm_relu_ukernel_4x8__wasmsimd_loadsplat() local
|
D | 4x8-relu-wasmrelaxedsimd-fma-loadsplat.c | 66 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_gemm_relu_ukernel_4x8__wasmrelaxedsimd_fma_loadsplat() local
|
D | 4x8-minmax-sse-load1.c | 66 __m128 vacc3x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_4x8__sse_load1() local
|
D | 4x8-minmax-wasmrelaxedsimd-loadsplat.c | 68 v128_t vacc3x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_4x8__wasmrelaxedsimd_loadsplat() local
|
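The f32-gemm loadsplat/wasmsimd/sse kernels seed vacc3x0123 from vacc0x0123 exactly as in the ppmm sketch above. The AVX and FMA3 broadcast kernels instead keep 8-wide accumulators (vacc3x01234567) and only narrow them to __m128 halves when a tile has fewer than 8 columns left to store, which is where the _mm256_castps256_ps128 declarations come from. A sketch of that remainder store for one row, assuming nc (0..7) is the number of columns remaining; an illustrative function, not the generated tail:

#include <immintrin.h>
#include <stddef.h>

// Store the leftmost nc lanes (nc < 8) of an 8-wide accumulator to row pointer c3.
static void store_row_tail(float* c3, __m256 vacc3x01234567, size_t nc) {
  __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567);   // low 4 lanes
  if (nc & 4) {
    _mm_storeu_ps(c3, vacc3x0123);                              // 4 floats
    vacc3x0123 = _mm256_extractf128_ps(vacc3x01234567, 1);      // continue with high 4 lanes
    c3 += 4;
  }
  if (nc & 2) {
    _mm_storel_pi((__m64*) c3, vacc3x0123);                     // 2 floats
    vacc3x0123 = _mm_movehl_ps(vacc3x0123, vacc3x0123);         // shift remaining lanes down
    c3 += 2;
  }
  if (nc & 1) {
    _mm_store_ss(c3, vacc3x0123);                               // last float
  }
}
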
/external/XNNPACK/src/bf16-gemm/gen/ |
D | 4x4c8-minmax-neonbf16-bfmlal.c |
    204  float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);  in xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfmlal() local
    226  float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));  in xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfmlal() local
|
D | 4x4c8-minmax-neonbf16-bfdot.c |
    171  float32x4_t vacc3x0123 = vpaddq_f32(vacc3x01, vacc3x23);  in xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfdot() local
    193  float32x4_t vacc3x0123 = vcombine_f32(vpadd_f32(vsum3x0, vsum3x1), vpadd_f32(vsum3x2, vsum3x3));  in xnn_bf16_gemm_minmax_ukernel_4x4c8__neonbf16_bfdot() local
|
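The bf16 c8 kernels keep one partial-sum vector per (row, column) pair over 8-element chunks and reduce them to a single float32x4_t per row at the end: a path that pairwise-adds full registers with vpaddq_f32, and a fallback that halves each accumulator and recombines with vpadd_f32/vcombine_f32. A sketch of both reductions for one row (names are illustrative; the generated kernel selects one path per target):

#include <arm_neon.h>

#if defined(__aarch64__)
// AArch64: pairwise-add full 128-bit registers until one lane per column remains.
static float32x4_t reduce4_a64(float32x4_t vacc_c0, float32x4_t vacc_c1,
                               float32x4_t vacc_c2, float32x4_t vacc_c3) {
  const float32x4_t vacc01 = vpaddq_f32(vacc_c0, vacc_c1);
  const float32x4_t vacc23 = vpaddq_f32(vacc_c2, vacc_c3);
  return vpaddq_f32(vacc01, vacc23);  // {sum(c0), sum(c1), sum(c2), sum(c3)}
}
#endif

// Portable NEON: fold each accumulator to 64 bits, then pair and recombine.
static float32x4_t reduce4(float32x4_t vacc_c0, float32x4_t vacc_c1,
                           float32x4_t vacc_c2, float32x4_t vacc_c3) {
  const float32x2_t vsum0 = vadd_f32(vget_low_f32(vacc_c0), vget_high_f32(vacc_c0));
  const float32x2_t vsum1 = vadd_f32(vget_low_f32(vacc_c1), vget_high_f32(vacc_c1));
  const float32x2_t vsum2 = vadd_f32(vget_low_f32(vacc_c2), vget_high_f32(vacc_c2));
  const float32x2_t vsum3 = vadd_f32(vget_low_f32(vacc_c3), vget_high_f32(vacc_c3));
  return vcombine_f32(vpadd_f32(vsum0, vsum1), vpadd_f32(vsum2, vsum3));
}
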
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 4x8inc-minmax-fma3-broadcast.c | 119 __m128 vacc3x0123 = _mm256_castps256_ps128(vacc3x01234567); in xnn_f32_gemminc_minmax_ukernel_4x8__fma3_broadcast() local
|
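The gen-inc hit is the accumulating flavor of the FMA3 kernel above: instead of seeding the accumulators from the bias in the packed weights, it is assumed here to resume from a caller-provided buffer of partial sums (acc), with the same 256-to-128-bit narrowing in the column tail. A rough sketch of that seeding under that assumption (illustrative names and layout):

#include <immintrin.h>

// Resume a 4-row x 8-column tile from previously computed partial sums.
static void init_acc_4x8_inc(const float* acc, __m256 vacc[4]) {
  for (int row = 0; row < 4; row++) {
    vacc[row] = _mm256_loadu_ps(acc + row * 8);  // pick up where the last pass stopped
  }
}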