/external/XNNPACK/src/f32-vscaleextexp/gen/

| D | avx512f-p5-scalef-x16.c | 57 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16() | local |
| D | avx512f-p5-scalef-x32.c | 61 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() | local |
| D | avx2-p5-x8.c | 63 | __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x8() | local |
| D | avx512f-p5-scalef-x48.c | 65 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x48() | local |
| D | avx2-p5-x16.c | 67 | __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x16() | local |

/external/XNNPACK/src/f32-vscaleexpminusmax/gen/

| D | avx512f-p5-scalef-x16.c | 59 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16() | local |
| D | avx2-p5-x8.c | 70 | __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8() | local |
| D | avx512f-p5-scalef-x32.c | 64 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() | local |
| D | avx2-p5-x16.c | 77 | __m256 vp0 = _mm256_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() | local |
| D | avx512f-p5-scalef-x48.c | 69 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x48() | local |

/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/

| D | scalar-rr2-p5-x2.c | 76 | float vp0 = vc5 * vt0 + vc4; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2() | local |
| D | scalar-rr2-p5-x2-acc2.c | 77 | float vp0 = vc5 * vt0 + vc4; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_p5_x2_acc2() | local |
| D | scalar-rr2-lut64-p2-x2-acc2.c | 94 | float vp0 = vt0 * vc2; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2_acc2() | local |
| D | scalar-rr2-lut64-p2-x2.c | 93 | float vp0 = vt0 * vc2; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_rr2_lut64_p2_x2() | local |
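
The p5 rows above, like the fmadd rows in the two directories before them, are the opening Horner step of a degree-5 polynomial that approximates exp on the reduced argument vt0. A minimal scalar sketch of that evaluation is below; eval_exp_p5 is a hypothetical helper (not an XNNPACK function) and the coefficients are plain Taylor terms used for illustration, not the fitted constants in the generated kernels.

```c
#include <math.h>
#include <stdio.h>

/* Hypothetical helper, not XNNPACK code: evaluate p(t) ~= exp(t) for small |t|
 * by Horner's rule.  The first statement mirrors the "vp0 = vc5 * vt0 + vc4"
 * definitions listed above; the coefficients here are Taylor terms. */
static float eval_exp_p5(float vt) {
  const float vc5 = 1.0f / 120.0f;
  const float vc4 = 1.0f / 24.0f;
  const float vc3 = 1.0f / 6.0f;
  const float vc2 = 0.5f;
  const float vc1 = 1.0f;

  float vp = vc5 * vt + vc4;   /* the "vp0" step from the listing */
  vp = vp * vt + vc3;
  vp = vp * vt + vc2;
  vp = vp * vt + vc1;
  return vp * vt + 1.0f;       /* 1 + t + t^2/2 + t^3/6 + t^4/24 + t^5/120 */
}

int main(void) {
  /* On the reduced range |t| <= ln(2)/2 the polynomial tracks expf() closely. */
  for (int i = -2; i <= 2; i++) {
    const float t = 0.17f * (float) i;
    printf("t=%+.2f  p(t)=%.6f  expf(t)=%.6f\n", t, eval_exp_p5(t), expf(t));
  }
  return 0;
}
```
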
/external/XNNPACK/src/f32-velu/gen/

| D | velu-scalar-rr2-p6-x2.c | 72 | float vp0 = vc6 * vt0 + vc5; | in xnn_f32_velu_ukernel__scalar_rr2_p6_x2() | local |
| D | velu-wasm-rr2-p6-x2.c | 64 | float vp0 = vc6 * vt0 + vc5; | in xnn_f32_velu_ukernel__wasm_rr2_p6_x2() | local |
| D | velu-scalar-rr2-lut16-p3-x2.c | 75 | float vp0 = vc3 * vt0 + vc2; | in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x2() | local |
| D | velu-wasm-rr2-lut16-p3-x2.c | 67 | float vp0 = vc3 * vt0 + vc2; | in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x2() | local |
| D | velu-avx2-rr1-p6-x16.c | 58 | __m256 vp0 = _mm256_fmadd_ps(vc6, vt0, vc5); | in xnn_f32_velu_ukernel__avx2_rr1_p6_x16() | local |
| D | velu-scalar-rr2-p6-x3.c | 83 | float vp0 = vc6 * vt0 + vc5; | in xnn_f32_velu_ukernel__scalar_rr2_p6_x3() | local |

/external/XNNPACK/src/f32-vsigmoid/gen/

| D | vsigmoid-scalar-rr2-p5-div-x2.c | 61 | float vp0 = vt0 * vc5 + vc4; | in xnn_f32_vsigmoid_ukernel__scalar_rr2_p5_div_x2() | local |
| D | vsigmoid-scalar-rr2-lut64-p2-div-x2.c | 66 | float vp0 = vt0 * vc2; | in xnn_f32_vsigmoid_ukernel__scalar_rr2_lut64_p2_div_x2() | local |
| D | vsigmoid-scalar-rr2-lut2048-p1-div-x2.c | 66 | const float vp0 = vt0 * vc1; | in xnn_f32_vsigmoid_ukernel__scalar_rr2_lut2048_p1_div_x2() | local |
| D | vsigmoid-avx512f-rr1-p5-scalef-div-x32.c | 54 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32() | local |
| D | vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x32.c | 54 | __m512 vp0 = _mm512_fmadd_ps(vc5, vt0, vc4); | in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x32() | local |
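
The lut64-p2, lut16-p3, and lut2048-p1 variants in this listing take a different route: most of the exponential comes from a small table of powers of two, so only a low-degree polynomial in vt0 remains, which is why their vp0 lines involve far fewer coefficients (for example, a bare vt0 * vc2 in the lut64-p2 kernels). A hedged sketch of the lut64-p2 idea follows; exp_lut64_p2 is a hypothetical helper and c2 is an illustrative Taylor value, not the constant used in the generated kernels.

```c
#include <math.h>

/* Hypothetical sketch, not XNNPACK code: exp(x) via a 64-entry table of
 * 2^(k/64) plus a degree-2 polynomial on the residual t, so that
 * exp(x) ~= 2^n * 2^(k/64) * (1 + t + c2*t^2). */
static float exp_lut64_p2(float x) {
  static float tab[64];
  static int initialized = 0;
  if (!initialized) {
    for (int k = 0; k < 64; k++) tab[k] = exp2f((float) k / 64.0f);
    initialized = 1;
  }
  /* Reduce: x = m * ln(2)/64 + t, with m the nearest integer multiple. */
  const float ln2 = 0.6931472f;
  const float m = rintf(x * (64.0f / ln2));
  const float t = x - m * (ln2 / 64.0f);
  const int mi = (int) m;
  const int k = mi & 63;              /* low 6 bits select the table entry */
  const int n = (mi - k) / 64;        /* remaining integer power of two */
  const float s = ldexpf(tab[k], n);  /* s = 2^n * 2^(k/64) */

  const float c2 = 0.5f;              /* illustrative coefficient */
  float p = t * c2;                   /* the "vp0 = vt0 * vc2" step above */
  p = p * t + t;                      /* p = t + c2*t^2 */
  return p * s + s;                   /* s * (1 + t + c2*t^2) ~= exp(x) */
}
```

With the residual bounded by |t| <= ln(2)/128, a quadratic already comes close to single-precision accuracy, which is the trade these lut kernels make against the plain p5 polynomial.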