/external/XNNPACK/src/f32-prelu/gen/ |
D | wasmsimd-minmax-4x8.c |
     86  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    134  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
    170  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x8() local
|
D | wasmsimd-minmax-4x16.c |
    104  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    176  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
    212  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x16() local
|
D | wasmsimd-bitselect-4x8.c |
     85  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    133  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
    169  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x8() local
|
D | neon-4x8.c |
     80  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local
    123  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local
    154  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x8() local
|
D | wasmsimd-minmax-4x4.c |
     77  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local
    113  v128_t vacc2x0123 = wasm_i32x4_max(vi2x0123, vzero); in xnn_f32_prelu_ukernel__wasmsimd_minmax_4x4() local
|
D | wasmsimd-bitselect-4x4.c |
     76  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local
    112  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x4() local
|
D | neon-4x4.c |
     71  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x4() local
    102  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x4() local
|
D | wasmsimd-bitselect-4x16.c |
    103  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    175  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
    211  v128_t vacc2x0123 = wasm_f32x4_mul(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__wasmsimd_bitselect_4x16() local
|
D | neon-4x16.c |
     98  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local
    165  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local
    196  float32x4_t vacc2x0123 = vmulq_f32(vi2x0123, vw0123); in xnn_f32_prelu_ukernel__neon_4x16() local
|
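All of the hits above are the row-2 accumulator of the f32 PReLU micro-kernels. The wasmsimd-minmax variants take the positive part with an integer max against zero (which works on IEEE-754 bit patterns), while the bitselect and NEON variants start from input * slope and later select between that product and the raw input by sign. Below is a minimal scalar sketch of the per-element computation these lanes hold; the function name and loop shape are assumptions for illustration, not the generated XNNPACK code.

    #include <stddef.h>

    /* PReLU: out = x for x >= 0, out = x * slope for x < 0.
     * The "minmax" kernels appear to build this as max(x, 0) + min(x, 0) * slope;
     * the "bitselect"/NEON kernels compute x * slope and select on the sign. */
    static void prelu_row_sketch(size_t n, const float* x, const float* slope, float* out) {
      for (size_t c = 0; c < n; c++) {
        const float vx = x[c];
        const float vpos = vx > 0.0f ? vx : 0.0f;  /* role of wasm_i32x4_max(vi, vzero) */
        const float vneg = vx < 0.0f ? vx : 0.0f;  /* role of wasm_i32x4_min(vi, vzero) */
        out[c] = vpos + vneg * slope[c];
      }
    }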
/external/XNNPACK/src/f32-gemm/gen/ |
D | 3x8-minmax-wasmsimd-arm-loadsplat.c | 60 v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_arm_loadsplat() local
|
D | 3x8-minmax-sse-load1.c | 58 __m128 vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__sse_load1() local
|
D | 3x8-minmax-wasmsimd-x86-loadsplat.c | 58 v128_t vacc2x0123 = vacc0x0123; in xnn_f32_gemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat() local
|
D | 3x16-minmax-avx-broadcast.c | 131 __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); in xnn_f32_gemm_minmax_ukernel_3x16__avx_broadcast() local
|
D | 3x16-minmax-fma3-broadcast.c | 131 __m128 vacc2x0123 = _mm256_castps256_ps128(vacc2x01234567); in xnn_f32_gemm_minmax_ukernel_3x16__fma3_broadcast() local
|
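In the f32 GEMM kernels the hit is the accumulator initialization: the loadsplat/load1 variants copy row 0's bias-loaded accumulator into row 2 (vacc2x0123 = vacc0x0123), and the avx/fma3 broadcast variants split the 256-bit row accumulator into 128-bit halves in the column-remainder path (_mm256_castps256_ps128). The sketch below shows a scalar 3-row tile with bias-initialized accumulators; MR/NR, the packed-weight layout, and all names are assumptions for illustration, not the generated code. The f32-ppmm and f32-igemm hits further down show the same row-copy initialization; only the input addressing differs (pre-packed A panels and indirect input pointers, respectively).

    #include <stddef.h>

    #define MR 3   /* rows of the accumulator tile, as in the 3x8 kernels above */
    #define NR 4   /* columns per 128-bit vector register                       */

    /* Every accumulator row starts from the same per-column bias, which is why
     * row 2 is simply a copy of row 0 at the top of the kernel. */
    static void gemm_tile_sketch(size_t kc, const float* a, const float* w,
                                 float* c, size_t c_stride) {
      float acc[MR][NR];
      for (size_t m = 0; m < MR; m++) {
        for (size_t n = 0; n < NR; n++) {
          acc[m][n] = w[n];                /* bias; acc[2][*] == acc[0][*] here */
        }
      }
      const float* wk = w + NR;            /* assumed: packed weights follow the bias */
      for (size_t k = 0; k < kc; k++) {
        for (size_t m = 0; m < MR; m++) {
          for (size_t n = 0; n < NR; n++) {
            acc[m][n] += a[m * kc + k] * wk[k * NR + n];
          }
        }
      }
      for (size_t m = 0; m < MR; m++) {
        for (size_t n = 0; n < NR; n++) {
          c[m * c_stride + n] = acc[m][n];
        }
      }
    }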
/external/XNNPACK/src/f32-ppmm/gen/ |
D | 4x8-minmax-sse.c | 53 __m128 vacc2x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__sse() local
|
D | 4x8-minmax-wasmsimd-arm-splat.c | 55 v128_t vacc2x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_arm_splat() local
|
D | 4x8-minmax-wasmsimd-x86-splat.c | 53 v128_t vacc2x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__wasmsimd_x86_splat() local
|
D | 4x8-minmax-neon.c | 54 float32x4_t vacc2x0123 = vacc0x0123; in xnn_f32_ppmm_minmax_ukernel_4x8__neon() local
|
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 3x8inc-minmax-wasmsimd-x86-loadsplat.c | 60 v128_t vacc2x0123 = wasm_v128_load(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_x86_loadsplat() local
|
D | 3x8inc-minmax-wasmsimd-arm-loadsplat.c | 62 v128_t vacc2x0123 = wasm_v128_load(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__wasmsimd_arm_loadsplat() local
|
D | 3x8inc-minmax-sse-load1.c | 60 __m128 vacc2x0123 = _mm_load_ps(acc + 16); in xnn_f32_gemminc_minmax_ukernel_3x8__sse_load1() local
|
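The gen-inc (gemminc) variants differ only in initialization: instead of the bias they reload a packed partial-result buffer, so accumulation can resume after the K dimension has been split. The acc + 16 offset matches row 2 of a 3x8 tile, with rows 0 and 1 occupying the first 16 floats. A small sketch of that init follows; the buffer layout and names are assumptions.

    /* Resume-from-buffer init as in the 3x8 gemminc kernels above:
     * row 0 at acc + 0, row 1 at acc + 8, row 2 at acc + 16 (assumed layout). */
    static void gemminc_init_sketch(const float* acc, float vacc[3][8]) {
      for (int m = 0; m < 3; m++) {
        for (int n = 0; n < 8; n++) {
          vacc[m][n] = acc[m * 8 + n];   /* m == 2, n == 0..3 is the vacc2x0123 load */
        }
      }
    }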
/external/XNNPACK/src/qs8-gemm/gen/ |
D | 3x8c4s2-minmax-rndnu-neon-mull.c |
    144  int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
    161  int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); in xnn_qs8_gemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
|
/external/XNNPACK/src/qs8-igemm/gen/ |
D | 3x8c4s2-minmax-rndnu-neon-mull.c |
    162  int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
    179  int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); in xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull() local
|
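The two qs8 hits (the gemm and igemm flavors of the same 3x8c4s2 rndnu kernel) are the reduction step: the widening int8 multiplies leave two int32 partial sums per output column, held pairwise in vacc2x01 and vacc2x23, and a pairwise add folds them into one int32 per column, via vpaddq_s32 on AArch64 or vpadd_s32 plus vcombine_s32 on ARMv7. A scalar sketch of that fold is below; the lane layout is an assumption.

    #include <stdint.h>

    /* Pairwise reduction: vpaddq_s32(a, b) yields {a0+a1, a2+a3, b0+b1, b2+b3}. */
    static void pairwise_reduce_sketch(const int32_t vacc01[4], const int32_t vacc23[4],
                                       int32_t vacc0123[4]) {
      vacc0123[0] = vacc01[0] + vacc01[1];   /* column 0 */
      vacc0123[1] = vacc01[2] + vacc01[3];   /* column 1 */
      vacc0123[2] = vacc23[0] + vacc23[1];   /* column 2 */
      vacc0123[3] = vacc23[2] + vacc23[3];   /* column 3 */
    }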
/external/XNNPACK/src/f32-igemm/gen/ |
D | 3x8-minmax-sse-load1.c | 58 __m128 vacc2x0123 = vacc0x0123; in xnn_f32_igemm_minmax_ukernel_3x8__sse_load1() local
|
D | 3x8-minmax-wasmsimd-x86-loadsplat.c | 58 v128_t vacc2x0123 = vacc0x0123; in xnn_f32_igemm_minmax_ukernel_3x8__wasmsimd_x86_loadsplat() local
|