| /external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
| D | sse2-rr2-p5-x8-acc2.c | 75 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8_acc2() local
|
| D | sse2-rr2-p5-x8.c | 74 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x8() local
|
| D | neonfma-rr1-p5-x8.c | 60 float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8() local
|
| D | neonfma-rr1-p5-x8-acc2.c | 61 float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8_acc2() local
|
| D | neon-rr2-p5-x8.c | 64 float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8() local
|
| D | neon-rr2-p5-x8-acc2.c | 65 float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8_acc2() local
|
| D | wasmsimd-rr2-p5-x8-acc2.c | 75 v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vc5, vt4567)); in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8_acc2() local
|
| D | wasmsimd-rr2-p5-x8.c | 74 v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vc5, vt4567)); in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x8() local
|
| D | sse2-rr2-p5-x12.c | 81 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12() local
|
| D | neonfma-rr1-p5-x12-acc3.c | 68 float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc3() local
|
| D | wasmsimd-rr2-p5-x12.c | 81 v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vc5, vt4567)); in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_rr2_p5_x12() local
|
| D | neonfma-rr1-p5-x12-acc2.c | 67 float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12_acc2() local
|
| D | neonfma-rr1-p5-x12.c | 66 float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x12() local
|
| D | sse2-rr2-p5-x12-acc2.c | 82 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc2() local
|
| D | neon-rr2-p5-x12-acc2.c | 72 float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12_acc2() local
|
| D | neon-rr2-p5-x12.c | 71 float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567); in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x12() local
|
| D | sse2-rr2-p5-x12-acc3.c | 83 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_raddstoreexpminusmax_ukernel__sse2_rr2_p5_x12_acc3() local
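
All of the hits above are the same operation: the first Horner step, vp = c5*t + c4, of the degree-5 polynomial that the rr1/rr2-p5 exp(x - max) kernels evaluate on lanes 4..7 (hence the name vp4567); only the intrinsic spelling differs per target. A minimal scalar sketch of that Horner chain, using plain Taylor coefficients as placeholders rather than XNNPACK's actual minimax constants (which do not appear in these search results), might look like:

    #include <math.h>
    #include <stdio.h>

    /* Scalar sketch of the Horner chain these kernels vectorize.  The
     * coefficients are plain Taylor values (1/120, 1/24, ...) used as
     * placeholders, not the real XNNPACK constants. */
    static float exp_p5_sketch(float t) {
      const float c5 = 1.0f / 120.0f;
      const float c4 = 1.0f / 24.0f;
      const float c3 = 1.0f / 6.0f;
      const float c2 = 0.5f;
      const float c1 = 1.0f;
      float p = c5 * t + c4;   /* the "vp4567 = c5*vt4567 + c4" step in every hit above */
      p = p * t + c3;
      p = p * t + c2;
      p = p * t + c1;
      return 1.0f + p * t;     /* exp(t) ~= 1 + t*(c1 + t*(c2 + ...)) for small t */
    }

    int main(void) {
      const float t = 0.25f;
      printf("sketch %f vs expf %f\n", exp_p5_sketch(t), expf(t));
      return 0;
    }

The remaining kernel steps (range reduction that produces t and the final scaling by a power of two) are not shown in these hits.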
|
| /external/XNNPACK/src/f32-velu/gen/ |
| D | velu-sse41-rr2-p6-x8.c | 68 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5); in xnn_f32_velu_ukernel__sse41_rr2_p6_x8() local
|
| D | velu-wasmsimd-arm-rr2-p6-x8.c | 68 v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5); in xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8() local
|
| D | velu-sse2-rr2-p6-x8.c | 68 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5); in xnn_f32_velu_ukernel__sse2_rr2_p6_x8() local
|
| D | velu-neonfma-rr1-p6-x8.c | 62 float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567); in xnn_f32_velu_ukernel__neonfma_rr1_p6_x8() local
|
| D | velu-wasmsimd-x86-rr2-p6-x8.c | 74 v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5); in xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8() local
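
The velu hits differ only in degree: the ELU p6 variants start the chain one coefficient higher, vp = c6*t + c5. The snippet below contrasts the two spellings of that step seen above, a fused vfmaq_f32 versus a separate multiply and add; the coefficient values are placeholders, and which branch compiles depends on the target:

    #include <stdio.h>
    #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
      #include <arm_neon.h>
    #elif defined(__SSE2__)
      #include <emmintrin.h>
    #endif

    int main(void) {
      float out[4] = {0.0f, 0.0f, 0.0f, 0.0f};
    #if defined(__ARM_NEON) && defined(__ARM_FEATURE_FMA)
      /* Fused form from the neonfma rows: one rounding for c6*t + c5. */
      const float32x4_t vc6 = vdupq_n_f32(1.0f / 720.0f);  /* placeholder coefficient */
      const float32x4_t vc5 = vdupq_n_f32(1.0f / 120.0f);  /* placeholder coefficient */
      const float32x4_t vt  = vdupq_n_f32(0.25f);
      const float32x4_t vp  = vfmaq_f32(vc5, vc6, vt);
      vst1q_f32(out, vp);
    #elif defined(__SSE2__)
      /* Unfused form from the sse2/sse41 rows: separate multiply and add. */
      const __m128 vc6 = _mm_set1_ps(1.0f / 720.0f);       /* placeholder coefficient */
      const __m128 vc5 = _mm_set1_ps(1.0f / 120.0f);       /* placeholder coefficient */
      const __m128 vt  = _mm_set1_ps(0.25f);
      const __m128 vp  = _mm_add_ps(_mm_mul_ps(vc6, vt), vc5);
      _mm_storeu_ps(out, vp);
    #endif
      printf("vp = {%f, %f, %f, %f}\n", out[0], out[1], out[2], out[3]);
      return 0;
    }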
|
| /external/XNNPACK/src/f32-vsigmoid/gen/ |
| D | vsigmoid-sse41-rr2-p5-div-x8.c | 62 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x8() local
|
| D | vsigmoid-wasmsimd-rr2-p5-div-x8.c | 62 v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt4567, vc5)); in xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x8() local
|
| D | vsigmoid-sse2-rr2-p5-div-x8.c | 62 __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc5, vt4567), vc4); in xnn_f32_vsigmoid_ukernel__sse2_rr2_p5_div_x8() local
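
The vsigmoid p5-div hits reuse the same c5*t + c4 step (the wasmsimd row merely swaps the multiply's operand order), and the file names suggest the polynomial exp approximation is followed by a single division to form the sigmoid. A rough scalar sketch of that assumed structure, with expf standing in for the p5 polynomial, not the exact XNNPACK algorithm:

    #include <math.h>
    #include <stdio.h>

    /* Hypothetical "p5-div"-shaped sigmoid: evaluate exp on the non-positive
     * half of the input, then reconstruct sigmoid with one division. */
    static float sigmoid_sketch(float x) {
      const float z = x < 0.0f ? x : -x;   /* z <= 0 keeps the exp from overflowing */
      const float e = expf(z);             /* stand-in for the c5*t + c4 ... polynomial */
      const float y = e / (e + 1.0f);      /* sigmoid(z) for z <= 0 */
      return x < 0.0f ? y : 1.0f - y;      /* mirror back for positive inputs */
    }

    int main(void) {
      for (float x = -2.0f; x <= 2.0f; x += 1.0f) {
        printf("sigmoid(%+.1f) ~= %f\n", x, sigmoid_sketch(x));
      }
      return 0;
    }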
|