/external/XNNPACK/src/f32-raddexpminusmax/gen/ |
D | avx2-p5-x72-acc3.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72_acc3():
     78  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     90  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    101  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    113  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    123  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x72.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x72():
     76  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     88  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
     99  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    111  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    121  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80-acc2.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc2():
     79  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     92  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    104  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    117  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    128  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80():
     78  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     91  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    103  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    116  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    127  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80-acc5.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x80_acc5():
     82  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     95  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    107  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    120  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    131  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x96.c | in xnn_f32_raddexpminusmax_ukernel__avx2_p5_x96():
     82  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     97  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    111  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    126  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    139  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
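All of the f32-raddexpminusmax matches above are the same five statements: compute n = round(x*log2(e)) with the magic-bias trick, turn the low bits of vn8 into the scale 2^n by shifting them into the exponent field, and reduce to t = x - n*ln(2) with a two-word ln(2). The x72/x80/x96 suffixes come from unrolling this per-vector pattern 9, 10, or 12 times per loop iteration; vn8 is simply the ninth vector's copy. Below is a minimal one-vector sketch of that pattern; the helper name exp_p5_one_vector and the constant values (magic bias, ln(2) split, Taylor coefficients in place of the kernels' minimax set) are illustrative assumptions, not the generated code itself.

    #include <immintrin.h>

    // One 8-lane slice of the vn/vs/vt pattern listed above. Only the structure is
    // taken from the listing; the constants are stand-ins for the kernels' own.
    static inline __m256 exp_p5_one_vector(__m256 vx) {
      const __m256 vmagic_bias   = _mm256_set1_ps(0x1.8000FEp23f);   // 2^23 + 2^22 + 127: rounding bias with the IEEE exponent bias pre-added
      const __m256 vlog2e        = _mm256_set1_ps(0x1.715476p+0f);   // log2(e)
      const __m256 vminus_ln2_hi = _mm256_set1_ps(-0x1.62E400p-1f);  // -ln(2), high word (assumed split)
      const __m256 vminus_ln2_lo = _mm256_set1_ps(-0x1.7F7D1Cp-20f); // -ln(2), low word (assumed split)

      // n = round(x * log2(e)); the magic bias leaves n in the low mantissa bits of vn.
      __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);
      // s = 2^n: those low bits, shifted by 23, land directly in the exponent field.
      const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
      // Recover n as an ordinary float, then t = x - n*ln(2) via the two-word reduction.
      vn = _mm256_sub_ps(vn, vmagic_bias);
      __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2_hi, vx);
      vt = _mm256_fmadd_ps(vn, vminus_ln2_lo, vt);

      // Degree-5 polynomial with Taylor coefficients (the kernels use minimax ones);
      // exp(x) ~ s * (1 + t*p(t)).
      __m256 vp = _mm256_fmadd_ps(_mm256_set1_ps(1.0f/120), vt, _mm256_set1_ps(1.0f/24));
      vp = _mm256_fmadd_ps(vp, vt, _mm256_set1_ps(1.0f/6));
      vp = _mm256_fmadd_ps(vp, vt, _mm256_set1_ps(0.5f));
      vp = _mm256_fmadd_ps(vp, vt, _mm256_set1_ps(1.0f));
      vt = _mm256_mul_ps(vt, vs);
      return _mm256_fmadd_ps(vt, vp, vs);
    }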
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
D | avx2-p5-x72.c | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x72():
     78  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     90  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    101  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    113  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    123  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80.c | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80():
     80  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     93  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    105  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    118  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    129  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x88.c | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x88():
     82  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     96  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    109  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    123  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    135  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
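The f32-vscaleexpminusmax kernels run the identical vn8/vs8/vt8 steps; the difference is what happens to the result: each exponential is multiplied by a caller-supplied scale and stored, and nothing is accumulated. A hedged driver sketch, reusing the hypothetical exp_p5_one_vector() helper above (element counts are simplified and the partial-tail handling of the real kernels is omitted):

    // Hypothetical driver for the vscale flavor: exponentiate each block of 8 inputs,
    // multiply by the scale, store.
    static void vscaleexpminusmax_sketch(size_t n, const float* x, float* y,
                                         float x_max, float scale) {
      const __m256 vx_max = _mm256_set1_ps(x_max);
      const __m256 vscale = _mm256_set1_ps(scale);
      for (; n >= 8; n -= 8, x += 8, y += 8) {
        const __m256 vx = _mm256_sub_ps(_mm256_loadu_ps(x), vx_max);  // x - max (softmax-style shift)
        const __m256 vf = exp_p5_one_vector(vx);                      // exp(x - max)
        _mm256_storeu_ps(y, _mm256_mul_ps(vf, vscale));               // scale and store
      }
      // the generated kernels additionally handle a partial final vector; omitted here
    }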
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | avx2-p5-x72-acc3.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x72_acc3():
     79  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     91  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    102  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    114  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    124  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x72.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x72():
     77  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     89  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    100  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    112  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    122  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80():
     79  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     92  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    104  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    117  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    128  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80-acc5.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80_acc5():
     83  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     96  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    108  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    121  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    132  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x80-acc2.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80_acc2():
     80  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
     93  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    105  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    118  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    129  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x96-acc6.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x96_acc6():
     88  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
    103  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    117  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    132  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    145  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
D | avx2-p5-x96-acc3.c | in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x96_acc3():
     85  __m256 vn8 = _mm256_fmadd_ps(vx8, vlog2e, vmagic_bias);  local
    100  const __m256 vs8 = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn8), 23));
    114  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    129  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_hi, vx8);
    142  vt8 = _mm256_fmadd_ps(vn8, vminus_ln2_lo, vt8);
|
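The f32-raddstoreexpminusmax kernels both store each exp(x - max) and sum it, and the acc2/acc3/acc5/acc6 suffixes above indicate how many independent accumulators the unrolled loop keeps, so consecutive vector adds do not serialize on a single register. A sketch of the acc3 shape, again leaning on the hypothetical exp_p5_one_vector() helper (tail handling omitted):

    // acc3-style accumulation: three independent running sums break the floating-point
    // add dependency chain across iterations and are folded together once at the end.
    static float raddstore_acc3_sketch(size_t n, const float* x, float* y, float x_max) {
      const __m256 vx_max = _mm256_set1_ps(x_max);
      __m256 vacc0 = _mm256_setzero_ps();
      __m256 vacc1 = _mm256_setzero_ps();
      __m256 vacc2 = _mm256_setzero_ps();
      for (; n >= 24; n -= 24, x += 24, y += 24) {  // 3 vectors of 8 per iteration
        const __m256 vf0 = exp_p5_one_vector(_mm256_sub_ps(_mm256_loadu_ps(x +  0), vx_max));
        const __m256 vf1 = exp_p5_one_vector(_mm256_sub_ps(_mm256_loadu_ps(x +  8), vx_max));
        const __m256 vf2 = exp_p5_one_vector(_mm256_sub_ps(_mm256_loadu_ps(x + 16), vx_max));
        _mm256_storeu_ps(y +  0, vf0);              // "store" half of raddstore
        _mm256_storeu_ps(y +  8, vf1);
        _mm256_storeu_ps(y + 16, vf2);
        vacc0 = _mm256_add_ps(vacc0, vf0);          // "radd" half: per-accumulator sums
        vacc1 = _mm256_add_ps(vacc1, vf1);
        vacc2 = _mm256_add_ps(vacc2, vf2);
      }
      const __m256 vacc = _mm256_add_ps(_mm256_add_ps(vacc0, vacc1), vacc2);  // fold accumulators
      // horizontal sum of the folded vector (partial tail omitted)
      __m128 vsum = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
      vsum = _mm_add_ps(vsum, _mm_movehl_ps(vsum, vsum));
      vsum = _mm_add_ss(vsum, _mm_movehdup_ps(vsum));
      return _mm_cvtss_f32(vsum);
    }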
/external/XNNPACK/src/f32-raddextexp/gen/ |
D | avx512f-p5-scalef-x144.c | in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144():
     64  const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);  local
     76  __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
     86  vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    157  vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    168  const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
|
D | avx512f-p5-scalef-x144-acc3.c | in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x144_acc3():
     68  const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);  local
     80  __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
     90  vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    161  vmax_e2 = _mm512_max_ps(vmax_e2, vn8);
    174  const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e2);
|
D | avx512f-p5-scalef-x160.c | in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160():
     65  const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);  local
     78  __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
     89  vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    166  vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    178  const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
|
D | avx512f-p5-scalef-x160-acc2.c | in xnn_f32_raddextexp_ukernel__avx512f_p5_scalef_x160_acc2():
     67  const __m512 vn8 = _mm512_roundscale_ps(_mm512_mul_ps(vx8, vlog2e), 0);  local
     80  __m512 vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_hi, vx8);
     91  vt8 = _mm512_fmadd_ps(vn8, vminus_ln2_lo, vt8);
    168  vmax_e0 = _mm512_max_ps(vmax_e0, vn8);
    181  const __m512 vdelta_e8 = _mm512_sub_ps(vn8, vmax_e0);
|
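The AVX-512 f32-raddextexp kernels differ from the AVX2 ones in both halves of the computation visible above: vn8 comes from _mm512_roundscale_ps rather than the magic-bias trick, and n is never folded into a 2^n scale up front. Instead it travels alongside the accumulated mantissas as an extended exponent, and the vmax_e*/vdelta_e* lines rebase everything to a common exponent through scalef so the running sum cannot overflow or flush to zero. The sketch below shows one accumulation step under those assumptions; names, constants, and the Taylor polynomial are illustrative, not the kernels' exact code.

    #include <immintrin.h>

    // One accumulation step in the raddextexp style: the running sum is the pair
    // (vaccv, vacce), meaning vaccv * 2^vacce.
    static void raddextexp_step_sketch(__m512 vx, __m512* vaccv, __m512* vacce) {
      const __m512 vlog2e        = _mm512_set1_ps(0x1.715476p+0f);
      const __m512 vminus_ln2_hi = _mm512_set1_ps(-0x1.62E400p-1f);   // assumed ln(2) split
      const __m512 vminus_ln2_lo = _mm512_set1_ps(-0x1.7F7D1Cp-20f);

      // n = round(x * log2(e)) directly via roundscale; no magic-bias trick needed.
      const __m512 vn = _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
      __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2_hi, vx);   // t = x - n*ln(2), hi word
      vt = _mm512_fmadd_ps(vn, vminus_ln2_lo, vt);          // ... minus the lo word

      // p(t) ~ exp(t) on the reduced range (degree-5 Taylor as a placeholder).
      __m512 vp = _mm512_fmadd_ps(_mm512_set1_ps(1.0f/120), vt, _mm512_set1_ps(1.0f/24));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(1.0f/6));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(0.5f));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(1.0f));
      vp = _mm512_fmadd_ps(vp, vt, _mm512_set1_ps(1.0f));

      // Rebase the running sum and the new term p*2^n to the common exponent
      // max(vacce, vn); both deltas are <= 0, so scalef only ever scales down.
      const __m512 vmax_e      = _mm512_max_ps(*vacce, vn);
      const __m512 vdelta_acce = _mm512_sub_ps(*vacce, vmax_e);
      const __m512 vdelta_e    = _mm512_sub_ps(vn, vmax_e);
      *vaccv = _mm512_add_ps(_mm512_scalef_ps(*vaccv, vdelta_acce),
                             _mm512_scalef_ps(vp, vdelta_e));
      *vacce = vmax_e;
    }

A natural starting state for the pair is vaccv = 0 with vacce very negative (for example -infinity), so the first step simply adopts vn as the exponent.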
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-avx2-rr1-lut4-p4-perm-x72.c | in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x72():
     73  __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);  local
     99  const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 21);
    100  const __m256i vl8 = _mm256_castps_si256(_mm256_permutevar_ps(vtable, _mm256_castps_si256(vn8)));
    101  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    120  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
|
D | velu-avx2-rr1-lut16-p3-gather-x72.c | in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x72():
     72  __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);  local
     90  const __m256i vidx8 = _mm256_and_si256(_mm256_castps_si256(vn8), vindex_mask);
    109  const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 19);
    110  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    129  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
|
D | velu-avx2-rr1-lut8-p4-perm-x72.c | in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x72():
     72  __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);  local
     98  const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
     99  const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
    100  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    119  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
|
D | velu-avx2-rr1-lut8-p4-perm-x80.c | in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x80():
     74  __m256 vn8 = _mm256_fmadd_ps(vz8, vlog2e, vmagic_bias);  local
    101  const __m256i ven8 = _mm256_slli_epi32(_mm256_castps_si256(vn8), 20);
    102  const __m256i vl8 = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn8));
    103  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    125  __m256 vt8 = _mm256_fmadd_ps(vn8, vminus_ln2, vz8);
|
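In the f32-velu LUT kernels, vn8 does double duty: its low bits index a small table of fractional powers of two (the permutevar/gather that produces vl8), while the remaining bits, shifted left by 23 minus the number of index bits (21, 20, 19 for lut4, lut8, lut16 above), are added to the table entry so the integer part of n lands in its exponent field (ven8); rr1 means a single-word ln(2) reduction suffices because the table shrinks the reduced range. Below is a scalar sketch of the lut8 case; the exact magic-bias bookkeeping of the generated code is replaced by lrintf, and the names are illustrative.

    #include <math.h>
    #include <stdint.h>
    #include <string.h>

    // Split n = round(8 * z * log2(e)) / 8 into an integer part and a 3-bit fractional
    // part: the fraction selects a precomputed 2^(j/8), and the integer part is added
    // straight into that entry's IEEE exponent field.
    static float exp2_lut8_sketch(float z_times_log2e) {
      static const float table[8] = {   // 2^(j/8), j = 0..7 (rounded to float)
        0x1.000000p+0f, 0x1.172B84p+0f, 0x1.306FE0p+0f, 0x1.4BFDAEp+0f,
        0x1.6A09E6p+0f, 0x1.8ACE54p+0f, 0x1.AE89FAp+0f, 0x1.D5818Ep+0f,
      };
      const int m = (int) lrintf(z_times_log2e * 8.0f);  // n in eighths
      const int j = m & 7;                               // fractional part -> table index
      const int n_int = (m - j) / 8;                     // integer part of n
      uint32_t bits;
      memcpy(&bits, &table[j], sizeof bits);
      bits += (uint32_t) n_int << 23;                    // scale table[j] by 2^n_int via the exponent field
      float s;
      memcpy(&s, &bits, sizeof s);
      return s;                                          // s = 2^(n_int + j/8) = 2^n
    }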
/external/XNNPACK/src/f32-sigmoid/gen/ |
D | avx-rr2-p5-div-x72.c | in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x72():
     71  __m256 vn8 = _mm256_add_ps(_mm256_mul_ps(vz8, vlog2e), vmagic_bias);  local
     97  …_m128 vs_lo8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn8)), 23));
     98  …128 vs_hi8 = _mm_castsi128_ps(_mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn8, 1)), 23));
    109  vn8 = _mm256_sub_ps(vn8, vmagic_bias);
    119  __m256 vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_hi), vz8);
    129  vt8 = _mm256_add_ps(_mm256_mul_ps(vn8, vminus_ln2_lo), vt8);
|
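The plain-AVX sigmoid kernel above has neither FMA nor 256-bit integer shifts, so every fused multiply-add becomes a mul+add pair (lines 71, 119, 129; the rr2 two-word ln(2) split recovers the lost accuracy), and the shift-by-23 that builds 2^n runs separately on the two 128-bit halves of vn8 (lines 97-98) before they are stitched back together. A sketch of just that half-by-half step, with the surrounding sigmoid math elided:

    #include <immintrin.h>

    // Build s = 2^n on plain AVX: no _mm256_slli_epi32, so shift each 128-bit half of
    // vn separately and reassemble. vn is assumed to already hold n in its low mantissa
    // bits via the same magic-bias trick as the AVX2 kernels.
    static inline __m256 exp2n_scale_avx_sketch(__m256 vn) {
      const __m128 vs_lo = _mm_castsi128_ps(
          _mm_slli_epi32(_mm_castps_si128(_mm256_castps256_ps128(vn)), 23));
      const __m128 vs_hi = _mm_castsi128_ps(
          _mm_slli_epi32(_mm_castps_si128(_mm256_extractf128_ps(vn, 1)), 23));
      return _mm256_insertf128_ps(_mm256_castps128_ps256(vs_lo), vs_hi, 1);
    }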