| /external/XNNPACK/src/f32-vscaleexpminusmax/gen/ | 
| D | avx512f-p5-scalef-x80.c | 62     __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80()  local
  | 
| D | avx512f-p5-scalef-x96.c | 64     __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96()  local
  | 
| D | avx2-p5-x40.c | 66     __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);  in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x40()  local
  | 
| D | avx512f-p5-scalef-x112.c | 66     __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x112()  local
  | 
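These hits are the range-reduction step of exp(x - max): n = round(x * log2(e)), r = x - n * ln(2), so exp(x) = 2^n * exp(r). The AVX512 kernels round with _mm512_roundscale_ps; the AVX2 kernel uses the magic-bias trick, adding 1.5*2^23 so rounding happens for free in the low mantissa bits. A minimal scalar sketch of the idiom (illustrative names and constants, not the generated code), with expf standing in for the degree-5 (p5) polynomial:

    #include <math.h>

    float exp_via_magic_bias(float vx) {
      const float vlog2e      = 0x1.715476p+0f;   /* log2(e) */
      const float vmagic_bias = 0x1.8000FEp23f;   /* 1.5*2^23: snaps the sum to an integer */
      const float vminus_ln2  = -0x1.62E430p-1f;  /* -ln(2) */

      float vn = vx * vlog2e + vmagic_bias;   /* integer part lands in low mantissa bits */
      vn -= vmagic_bias;                      /* vn == round(vx * log2(e)) */
      const float vr = vx + vn * vminus_ln2;  /* reduced argument, |vr| <= ~ln(2)/2 */
      return ldexpf(expf(vr), (int) vn);      /* 2^n * exp(r) */
    }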
| /external/XNNPACK/src/f32-vscaleextexp/gen/ | 
| D | avx512f-p5-scalef-x80.c | 56     const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80()  local
  | 
| D | avx512f-p5-scalef-x96.c | 57     const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96()  local
  | 
| D | avx2-p5-x40.c | 62     const __m256 vn4 = _mm256_round_ps(_mm256_mul_ps(vx4, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);  in xnn_f32_vscaleextexp_ukernel__avx2_p5_x40()  local
  | 
| D | avx512f-p5-scalef-x112.c | 58     const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x112()  local
  | 
| D | avx512f-p5-scalef-x128.c | 59     const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x128()  local
  | 
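In the vscaleextexp kernels the result stays in extended (mantissa, exponent) form, so these lines only produce the rounded exponent n: roundscale on AVX512, _mm256_round_ps on AVX2. A sketch of the AVX512F step (compile with -mavx512f; names are illustrative):

    #include <immintrin.h>

    __m512 round_n(__m512 vx) {
      const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
      /* imm8 = 0: round each lane to the nearest integral value
       * (ties to even), with no scaling applied. */
      return _mm512_roundscale_ps(_mm512_mul_ps(vx, vlog2e), 0);
    }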
| /external/XNNPACK/src/f32-velu/gen/ | 
| D | velu-wasm-rr2-p6-x5.c | 59     float vn4 = vz4 * vlog2e + vmagic_bias;  in xnn_f32_velu_ukernel__wasm_rr2_p6_x5()  local
  | 
| D | velu-scalar-rr2-p6-x5.c | 59     float vn4 = vz4 * vlog2e + vmagic_bias;  in xnn_f32_velu_ukernel__scalar_rr2_p6_x5()  local
  | 
| D | velu-wasm-rr2-p6-x6.c | 61     float vn4 = vz4 * vlog2e + vmagic_bias;  in xnn_f32_velu_ukernel__wasm_rr2_p6_x6()  local
  | 
| D | velu-avx512f-rr1-p6-x80.c | 59     __m512 vn4 = _mm512_fmadd_ps(vz4, vlog2e, vmagic_bias);  in xnn_f32_velu_ukernel__avx512f_rr1_p6_x80()  local
  | 
| D | velu-avx2-rr1-p6-x40.c | 57     __m256 vn4 = _mm256_fmadd_ps(vz4, vlog2e, vmagic_bias);  in xnn_f32_velu_ukernel__avx2_rr1_p6_x40()  local
  | 
| D | velu-scalar-rr2-p6-x6.c | 61     float vn4 = vz4 * vlog2e + vmagic_bias;  in xnn_f32_velu_ukernel__scalar_rr2_p6_x6()  local
  | 
| D | velu-wasm-rr2-lut16-p3-x5.c | 59     float vn4 = vz4 * vlog2e + vmagic_bias;  in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x5()  local
  | 
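The ELU kernels run the same reduction on the pre-scaled negative input z, then evaluate expm1 rather than exp. rr2 in the name means ln(2) is applied in two pieces (hi + lo) for a more accurate reduced argument; the rr1 variants (avx512f, avx2) fold it into one FMA constant, and lut16-p3 swaps the degree-6 polynomial for a 16-entry table plus a cubic. A scalar sketch of the rr2 negative branch (illustrative constants; expf stands in for the p6 polynomial):

    #include <math.h>

    float elu_neg_branch(float vz, float alpha) {
      const float vlog2e        = 0x1.715476p+0f;
      const float vmagic_bias   = 0x1.8000FEp23f;
      const float vminus_ln2_hi = -0x1.62E440p-1f;  /* hi part of -ln(2) */
      const float vminus_ln2_lo = 0x1.0105C6p-21f;  /* lo correction term */

      float vn = vz * vlog2e + vmagic_bias;  /* the line indexed above */
      vn -= vmagic_bias;                     /* round(vz * log2(e)) */
      float vt = vz + vn * vminus_ln2_hi;    /* two-step (rr2) reduction */
      vt += vn * vminus_ln2_lo;

      return alpha * (ldexpf(expf(vt), (int) vn) - 1.0f);  /* alpha * expm1(vz) */
    }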
| /external/XNNPACK/src/f16-raddstoreexpminusmax/gen/ | 
| D | avx2-rr1-p2-x40.c | 57     __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40()  local
  | 
| D | avx2-rr1-p2-x40-acc2.c | 58     __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc2()  local
  | 
| D | avx2-rr1-p2-x40-acc5.c | 61     __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x40_acc5()  local
  | 
| D | avx2-rr1-p2-x48-acc2.c | 60     __m256 vn4 = _mm256_fmadd_ps(vx4, vlog2e, vmagic_bias);  in xnn_f16_raddstoreexpminusmax_ukernel__avx2_rr1_p2_x48_acc2()  local
  | 
| D | neonfp16arith-rr2-p2-x40.c | 57     float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);  in xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x40()  local
  | 
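The f16 raddstore kernels compute softmax's two outputs in one pass: they store each exp(x[i] - max) and accumulate their sum. The avx2-rr1-p2 variants upconvert the half floats and work in f32; neonfp16arith stays in f16 via vfmaq_f16. The -acc2/-acc5 suffixes mean 2 or 5 independent accumulators. A scalar sketch of the overall pattern (float as a stand-in for f16, expf for the p2 polynomial):

    #include <math.h>
    #include <stddef.h>

    void raddstore_exp_minus_max(size_t n, const float* x, float max,
                                 float* out, float* sum) {
      float acc = 0.0f;  /* -accN variants split this into N partial sums */
      for (size_t i = 0; i < n; i++) {
        const float e = expf(x[i] - max);  /* via the vn reduction indexed above */
        out[i] = e;                        /* store each term... */
        acc += e;                          /* ...while accumulating the denominator */
      }
      *sum = acc;
    }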
| /external/XNNPACK/src/f32-vsigmoid/gen/ | 
| D | vsigmoid-avx512f-rr1-p5-scalef-div-x80.c | 55     __m512 vn4 = _mm512_mul_ps(vz4, vlog2e);  in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x80()  local
  | 
| D | vsigmoid-avx512f-rr1-p5-scalef-div-x96.c | 57     __m512 vn4 = _mm512_mul_ps(vz4, vlog2e);  in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x96()  local
  | 
| D | vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x80.c | 55     __m512 vn4 = _mm512_mul_ps(vz4, vlog2e);  in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x80()  local
  | 
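Note that these sigmoid hits multiply without rounding: the scalef kernels rely on _mm512_scalef_ps, which computes a * 2^floor(b) in hardware, so vn = z * log2(e) can be applied as the exponent directly with no separate roundscale. A sketch of that step (AVX512F; vp would be the p5 polynomial value):

    #include <immintrin.h>

    __m512 apply_exponent(__m512 vp, __m512 vz) {
      const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
      const __m512 vn = _mm512_mul_ps(vz, vlog2e);  /* unrounded, as in the hits above */
      return _mm512_scalef_ps(vp, vn);              /* vp * 2^floor(vn) */
    }

The div variants then finish with a division, e / (e + 1) plus a sign fixup; the nr1fma variants replace the division with one Newton-Raphson reciprocal step.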
| /external/XNNPACK/src/f32-raddexpminusmax/gen/ | 
| D | avx512f-p5-scalef-x128-acc2.c | 68     const __m512 vn4 = _mm512_roundscale_ps(_mm512_mul_ps(vx4, vlog2e), 0);  in xnn_f32_raddexpminusmax_ukernel__avx512f_p5_scalef_x128_acc2()  local
  |
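Here the same reduction feeds a sum-only kernel (no per-element store): x128 means 128 floats per loop iteration, and acc2 means two partial sums that break the floating-point add dependency chain. A scalar sketch of the acc2 idea (expf stands in for the p5-scalef evaluation):

    #include <math.h>
    #include <stddef.h>

    float radd_exp_minus_max(size_t n, const float* x, float max) {
      float acc0 = 0.0f, acc1 = 0.0f;  /* independent chains can overlap in the pipeline */
      size_t i = 0;
      for (; i + 1 < n; i += 2) {      /* the real kernel strides by 128 floats */
        acc0 += expf(x[i + 0] - max);
        acc1 += expf(x[i + 1] - max);
      }
      if (i < n) acc0 += expf(x[i] - max);
      return acc0 + acc1;              /* fold the partial sums once at the end */
    }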