/external/XNNPACK/src/xnnpack/ |
D | microparams.h | 1422 uint16_t minus_ln2; member 1435 XNN_ALIGN(32) float minus_ln2[8]; 1510 float minus_ln2; member 1524 float minus_ln2; member 1617 XNN_ALIGN(32) float minus_ln2[8]; 1630 XNN_ALIGN(32) float minus_ln2[8]; 1644 XNN_ALIGN(32) float minus_ln2[8]; 1657 XNN_ALIGN(32) float minus_ln2[8]; 1672 float minus_ln2; member 1684 float minus_ln2; member [all …]
|
/external/XNNPACK/src/f32-vsigmoid/gen/ |
D | vsigmoid-avx512f-rr1-p5-scalef-div-x16.c | 29 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16()
|
D | vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x16.c | 31 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16()
|
D | vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x16.c | 31 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16()
|
D | vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x16.c | 29 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16()
|
D | vsigmoid-avx2-rr1-p5-div-x8.c | 29 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x8()
|
D | vsigmoid-avx2-rr1-p5-nr2fma-x8.c | 29 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr2fma_x8()
|
D | vsigmoid-avx2-rr1-p5-nr1fma-x8.c | 29 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_nr1fma_x8()
|
D | vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x32.c | 31 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x32()
|
D | vsigmoid-avx512f-rr1-p5-scalef-div-x32.c | 29 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p5.minus_ln2); in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32()
|
/external/XNNPACK/src/f16-vsigmoid/gen/ |
D | vsigmoid-avx2-rr1-p2-div-x8.c | 30 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2); in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x8()
|
D | vsigmoid-avx2-rr1-p2-rcp-x8.c | 30 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2); in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x8()
|
D | vsigmoid-avx2-rr1-p2-div-x16.c | 30 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2); in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x16()
|
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-avx512f-rr1-lut16-p3-perm-x16.c | 34 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_lut16_p3.minus_ln2); in xnn_f32_velu_ukernel__avx512f_rr1_lut16_p3_perm_x16()
|
D | velu-avx2-rr1-lut8-p4-perm-x8.c | 33 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut8_p4.minus_ln2); in xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x8()
|
D | velu-avx2-rr1-p6-x8.c | 32 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2); in xnn_f32_velu_ukernel__avx2_rr1_p6_x8()
|
D | velu-avx512f-rr1-p6-x16.c | 34 const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x16()
|
D | velu-avx2-rr1-lut16-p3-gather-x8.c | 35 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut16_p3.minus_ln2); in xnn_f32_velu_ukernel__avx2_rr1_lut16_p3_gather_x8()
|
D | velu-avx2-rr1-lut4-p4-perm-x8.c | 33 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_lut4_p4.minus_ln2); in xnn_f32_velu_ukernel__avx2_rr1_lut4_p4_perm_x8()
|
D | velu-neonfma-rr1-p6-x4.c | 35 const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p6.minus_ln2); in xnn_f32_velu_ukernel__neonfma_rr1_p6_x4()
|
D | velu-neonfma-rr1-lut16-p3-x4.c | 38 const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut16_p3.minus_ln2); in xnn_f32_velu_ukernel__neonfma_rr1_lut16_p3_x4()
|
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | neonfma-rr1-p5-x4.c | 31 const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_p5.minus_ln2); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_p5_x4()
|
D | neonfma-rr1-lut64-p2-x4.c | 34 const float32x4_t vminus_ln2 = vld1q_dup_f32(&params->neonfma_rr1_lut64_p2.minus_ln2); in xnn_f32_raddstoreexpminusmax_ukernel__neonfma_rr1_lut64_p2_x4()
|
/external/XNNPACK/src/f16-velu/gen/ |
D | velu-neonfp16arith-rr1-p3-x8.c | 31 …t16x8_t vminus_ln2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr1_p3.minus_ln2)); in xnn_f16_velu_ukernel__neonfp16arith_rr1_p3_x8()
|
D | velu-avx2-rr1-p3-x8.c | 31 const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p3.minus_ln2); in xnn_f16_velu_ukernel__avx2_rr1_p3_x8()
|