/external/XNNPACK/src/f32-velu/gen/

D | velu-neonfma-rr1-p6-x8.c, all hits in xnn_f32_velu_ukernel__neonfma_rr1_p6_x8():
    62  float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);   (local definition)
    65  vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
    68  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    71  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    74  vp4567 = vmulq_f32(vp4567, vt4567);
    82  vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
    85  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-neon-rr2-p6-x8.c, all hits in xnn_f32_velu_ukernel__neon_rr2_p6_x8():
    66  float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);   (local definition)
    69  vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
    72  vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    75  vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    78  vp4567 = vmulq_f32(vp4567, vt4567);
    86  vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
    89  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-sse41-rr2-p6-x8.c, all hits in xnn_f32_velu_ukernel__sse41_rr2_p6_x8():
    68  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    71  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    74  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    77  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    80  vp4567 = _mm_mul_ps(vp4567, vt4567);
    88  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
    91  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-wasmsimd-x86-rr2-p6-x8.c, all hits in xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x8():
    74  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    77  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
    80  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    83  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    86  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
    94  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
    97  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-wasmsimd-arm-rr2-p6-x8.c, all hits in xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x8():
    68  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    71  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
    74  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    77  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    80  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
    88  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
    91  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-sse2-rr2-p6-x8.c, all hits in xnn_f32_velu_ukernel__sse2_rr2_p6_x8():
    68  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    71  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    74  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    77  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    80  vp4567 = _mm_mul_ps(vp4567, vt4567);
    88  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
    91  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-neon-rr2-p6-x12.c, all hits in xnn_f32_velu_ukernel__neon_rr2_p6_x12():
    73  float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);   (local definition)
    77  vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
    81  vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    85  vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    89  vp4567 = vmulq_f32(vp4567, vt4567);
   100  vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
   104  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-neonfma-rr1-p6-x12.c, all hits in xnn_f32_velu_ukernel__neonfma_rr1_p6_x12():
    68  float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);   (local definition)
    72  vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
    76  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    80  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    84  vp4567 = vmulq_f32(vp4567, vt4567);
    95  vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
    99  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-wasmsimd-arm-rr2-p6-x12.c, all hits in xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x12():
    75  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    79  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
    83  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    87  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
    91  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
   102  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
   106  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-sse41-rr2-p6-x12.c, all hits in xnn_f32_velu_ukernel__sse41_rr2_p6_x12():
    75  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    79  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    83  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    87  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    91  vp4567 = _mm_mul_ps(vp4567, vt4567);
   102  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
   106  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-wasmsimd-x86-rr2-p6-x12.c, all hits in xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x12():
    83  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    88  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
    92  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    96  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
   100  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
   111  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
   115  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-sse2-rr2-p6-x12.c, all hits in xnn_f32_velu_ukernel__sse2_rr2_p6_x12():
    75  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    79  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    83  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    87  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
    91  vp4567 = _mm_mul_ps(vp4567, vt4567);
   102  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
   106  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-neonfma-rr1-p6-x16.c, all hits in xnn_f32_velu_ukernel__neonfma_rr1_p6_x16():
    74  float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);   (local definition)
    79  vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
    84  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    89  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    94  vp4567 = vmulq_f32(vp4567, vt4567);
   108  vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
   113  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-neon-rr2-p6-x16.c, all hits in xnn_f32_velu_ukernel__neon_rr2_p6_x16():
    80  float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);   (local definition)
    85  vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
    90  vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    95  vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
   100  vp4567 = vmulq_f32(vp4567, vt4567);
   114  vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
   119  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-sse41-rr2-p6-x16.c, all hits in xnn_f32_velu_ukernel__sse41_rr2_p6_x16():
    82  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    87  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    92  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    97  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
   102  vp4567 = _mm_mul_ps(vp4567, vt4567);
   116  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
   121  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-wasmsimd-arm-rr2-p6-x16.c, all hits in xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x16():
    82  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    87  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
    92  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
    97  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
   102  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
   116  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
   121  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-wasmsimd-x86-rr2-p6-x16.c, all hits in xnn_f32_velu_ukernel__wasmsimd_x86_rr2_p6_x16():
    92  v128_t vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt4567), vc5);   (local definition)
    99  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc4);
   104  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc3);
   109  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vc2);
   114  vp4567 = wasm_f32x4_mul(vp4567, vt4567);
   128  vp4567 = wasm_f32x4_add(wasm_f32x4_mul(vp4567, vt4567), vt4567);
   133  const v128_t ve4567 = wasm_f32x4_mul(wasm_f32x4_add(vp4567, vs4567), valpha);

D | velu-sse41-rr2-p6-x20.c, all hits in xnn_f32_velu_ukernel__sse41_rr2_p6_x20():
    89  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    95  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
   101  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
   107  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
   113  vp4567 = _mm_mul_ps(vp4567, vt4567);
   130  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
   136  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-neonfma-rr1-p6-x20.c, all hits in xnn_f32_velu_ukernel__neonfma_rr1_p6_x20():
    80  float32x4_t vp4567 = vfmaq_f32(vc5, vc6, vt4567);   (local definition)
    86  vp4567 = vfmaq_f32(vc4, vp4567, vt4567);
    92  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    98  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
   104  vp4567 = vmulq_f32(vp4567, vt4567);
   121  vp4567 = vfmaq_f32(vt4567, vp4567, vt4567);
   127  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

D | velu-sse2-rr2-p6-x16.c, all hits in xnn_f32_velu_ukernel__sse2_rr2_p6_x16():
    82  __m128 vp4567 = _mm_add_ps(_mm_mul_ps(vc6, vt4567), vc5);   (local definition)
    87  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc4);
    92  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc3);
    97  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vc2);
   102  vp4567 = _mm_mul_ps(vp4567, vt4567);
   116  vp4567 = _mm_add_ps(_mm_mul_ps(vp4567, vt4567), vt4567);
   121  const __m128 ve4567 = _mm_mul_ps(_mm_add_ps(vp4567, vs4567), valpha);

D | velu-neon-rr2-p6-x20.c, all hits in xnn_f32_velu_ukernel__neon_rr2_p6_x20():
    87  float32x4_t vp4567 = vmlaq_f32(vc5, vc6, vt4567);   (local definition)
    93  vp4567 = vmlaq_f32(vc4, vp4567, vt4567);
    99  vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
   105  vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
   111  vp4567 = vmulq_f32(vp4567, vt4567);
   128  vp4567 = vmlaq_f32(vt4567, vp4567, vt4567);
   134  const float32x4_t ve4567 = vmulq_f32(vaddq_f32(vp4567, vs4567), valpha);

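All 21 entries above are one and the same computation: a degree-6 polynomial approximation of exp(t) - 1 for the ELU negative branch, evaluated in Horner form and spelled with each ISA's multiply-add idiom (vfmaq_f32 on NEON+FMA, vmlaq_f32 on plain NEON, _mm_mul_ps/_mm_add_ps pairs on SSE2/SSE4.1, wasm_f32x4_mul/wasm_f32x4_add on WAsm SIMD); only the source line numbers and the unroll width (x8/x12/x16/x20) differ. The scalar sketch below models what one 4-lane block computes. It is a sketch, not the kernel: Taylor coefficients stand in for XNNPACK's minimax set, saturation handling is omitted, and the "t *= s; s -= 1" steps between the fifth and sixth hits are inferred from the full sources, since this listing only shows lines that mention vp4567.

#include <math.h>

/* Scalar model of the rr1/rr2-p6 ELU negative branch.  Hit numbers refer to
 * velu-neonfma-rr1-p6-x8.c above.  Assumed: Taylor coefficients instead of
 * XNNPACK's minimax ones; the re-scaling steps are inferred from the full
 * kernel because their lines never mention vp4567. */
static float elu_negative_p6(float z, float alpha) {
  const float log2e = 1.442695041f, ln2 = 0.6931471806f;
  const float c2 = 1.0f/2, c3 = 1.0f/6, c4 = 1.0f/24, c5 = 1.0f/120, c6 = 1.0f/720;
  const float n = rintf(z * log2e);  /* range reduction: z = n*ln2 + t */
  float s = ldexpf(1.0f, (int)n);    /* s = 2^n */
  float t = z - n * ln2;             /* |t| <= ln2/2 */
  float p = c6 * t + c5;             /* hit 62: vfmaq_f32(vc5, vc6, vt) is vc5 + vc6*vt */
  p = p * t + c4;                    /* hit 65 */
  p = p * t + c3;                    /* hit 68 */
  p = p * t + c2;                    /* hit 71 */
  p = p * t;                         /* hit 74: p = t*poly(t) */
  t *= s;                            /* inferred; this line never mentions vp4567 */
  s -= 1.0f;                         /* likewise */
  p = p * t + t;                     /* hit 82: p = s*(t + t^2*poly(t)) in the original t */
  return (p + s) * alpha;            /* hit 85: p + s ~= exp(z) - 1 */
}

The identical chain also runs on vp0123 (and on vp89AB and beyond in the wider variants); the grep simply shows the vp4567 lane group.
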
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | neonfma-rr1-p5-x8.c, all hits in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8():
    60  float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);   (local definition)
    63  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    66  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    69  vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    75  float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);

D | neonfma-rr1-p5-x8-acc2.c, all hits in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x8_acc2():
    61  float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);   (local definition)
    64  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    67  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    70  vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    76  float32x4_t vf4567 = vfmaq_f32(vs4567, vp4567, vt4567);

D | neon-rr2-p5-x8-acc2.c, all hits in xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x8_acc2():
    65  float32x4_t vp4567 = vmlaq_f32(vc4, vc5, vt4567);   (local definition)
    68  vp4567 = vmlaq_f32(vc3, vp4567, vt4567);
    71  vp4567 = vmlaq_f32(vc2, vp4567, vt4567);
    74  vp4567 = vmlaq_f32(vc1, vp4567, vt4567);
    80  float32x4_t vf4567 = vmlaq_f32(vs4567, vp4567, vt4567);

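The three entries above are the degree-5 analogue used by the softmax exp pass: here the polynomial approximates exp(t) itself (t comes from reducing z = x - max, so z <= 0), and the final hit folds the 2^n scale back in with one last multiply-add, vf = vs + vp*vt. A scalar sketch under the same caveats as the ELU one (Taylor stand-in coefficients, the t *= s re-scale inferred from the full kernel):

#include <math.h>

/* Scalar model of the rr1/rr2-p5 exp core.  Hit numbers refer to
 * neonfma-rr1-p5-x8.c above; the acc2/neon variants shift by a few lines. */
static float exp_p5(float z) {
  const float log2e = 1.442695041f, ln2 = 0.6931471806f;
  const float c1 = 1.0f, c2 = 1.0f/2, c3 = 1.0f/6, c4 = 1.0f/24, c5 = 1.0f/120;
  const float n = rintf(z * log2e);
  const float s = ldexpf(1.0f, (int)n);
  float t = z - n * ln2;
  float p = c5 * t + c4;   /* hit 60: vfmaq_f32(vc4, vc5, vt) */
  p = p * t + c3;          /* hit 63 */
  p = p * t + c2;          /* hit 66 */
  p = p * t + c1;          /* hit 69 */
  t *= s;                  /* inferred; not in the listing */
  return p * t + s;        /* hit 75: vf = s*(1 + t*poly(t)) ~= exp(z) */
}

The raddstoreexpminusmax kernels then add vf4567 into one or more running accumulators (hence the -acc2 variant); those lines never mention vp4567, so they do not appear here.
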
/external/XNNPACK/src/f32-vsigmoid/gen/ |
D | vsigmoid-neonfma-rr1-p5-div-x8.c, all hits in xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_div_x8():
    57  float32x4_t vp4567 = vfmaq_f32(vc4, vc5, vt4567);   (local definition)
    60  vp4567 = vfmaq_f32(vc3, vp4567, vt4567);
    63  vp4567 = vfmaq_f32(vc2, vp4567, vt4567);
    66  vp4567 = vfmaq_f32(vc1, vp4567, vt4567);
    72  const float32x4_t ve4567 = vfmaq_f32(vs4567, vp4567, vt4567);

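The sigmoid kernel reuses the same p5 exp core; ve4567 at hit 72 is exp(z), and the -div suffix names the division that follows it. In the sketch below, the z = -|x| framing, the quotient, and the sign-based reflection are assumptions drawn from the kernel's name and the family's usual structure, not from the five listed lines.

#include <math.h>

/* Scalar model of vsigmoid rr1-p5-div.  Hits 57-72 above cover only the
 * polynomial; everything after the exp value here is assumed structure. */
static float sigmoid_p5_div(float x) {
  const float log2e = 1.442695041f, ln2 = 0.6931471806f;
  const float c1 = 1.0f, c2 = 1.0f/2, c3 = 1.0f/6, c4 = 1.0f/24, c5 = 1.0f/120;
  const float z = -fabsf(x);       /* evaluate exp on the non-positive side */
  const float n = rintf(z * log2e);
  const float s = ldexpf(1.0f, (int)n);
  float t = z - n * ln2;
  float p = c5 * t + c4;           /* hit 57 */
  p = p * t + c3;                  /* hit 60 */
  p = p * t + c2;                  /* hit 63 */
  p = p * t + c1;                  /* hit 66 */
  t *= s;
  const float e = p * t + s;       /* hit 72: ve = vs + vp*vt ~= exp(z) */
  const float f = e / (e + 1.0f);  /* the "div" in the kernel name */
  return x > 0.0f ? 1.0f - f : f;  /* reflect for positive inputs */
}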