/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
D | wasmsimd-p5-x4.c          | 190, 203 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x4() | local
D | wasmsimd-p5-x8.c          | 206, 219 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x8() | local
D | wasmsimd-p5-x8-acc2.c     | 209, 222 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x8_acc2() | local
D | wasmsimd-p5-x12.c         | 222, 235 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x12() | local
D | wasmsimd-p5-x12-acc2.c    | 225, 238 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x12_acc2() | local
D | wasmsimd-p5-x12-acc3.c    | 227, 240 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x12_acc3() | local
D | wasmsimd-p5-x16.c         | 238, 251 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x16() | local
D | wasmsimd-p5-x16-acc2.c    | 241, 254 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x16_acc2() | local
D | wasmsimd-p5-x16-acc4.c    | 245, 258 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x16_acc4() | local
D | wasmsimd-p5-x20-acc2.c    | 257, 270 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x20_acc2() | local
D | wasmsimd-p5-x20.c         | 254, 267 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x20() | local
D | wasmsimd-p5-x20-acc5.c    | 263, 276 | const float vf0 = wasm_f32x4_extract_lane(vf, 0); | in xnn_f32_raddstoreexpminusmax_ukernel__wasmsimd_p5_x20_acc5() | local
D | scalar-p5-x2.c            | 97       | float vf0 = vt0 * vp0 + vs0; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() | local
D | scalar-p5-x2-acc2.c       | 98       | float vf0 = vt0 * vp0 + vs0; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() | local
D | scalar-lut64-p2-x2.c      | 107      | float vf0 = vp0 * vs0 + vs0; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() | local
D | scalar-lut64-p2-x2-acc2.c | 108      | float vf0 = vp0 * vs0 + vs0; | in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() | local
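Every definition above is the final reconstruction of one exponential: vf0 holds exp(x - max) for a single element (the WASM SIMD kernels extract it lane by lane in their tail loops, the scalar kernels build it with an FMA-style step such as vt0 * vp0 + vs0). A minimal scalar reference of what these kernels compute, with libm expf() standing in for the p5 / lut64-p2 approximations (the function name is illustrative, not an XNNPACK symbol):

    #include <math.h>
    #include <stddef.h>

    /* Reference contract of f32-raddstoreexpminusmax: store f[i] = exp(x[i] - max)
     * and return the sum of the stored values (the "radd" accumulator). */
    static float raddstoreexpminusmax_ref(size_t n, const float* x, float* f, float max) {
      float sum = 0.0f;
      for (size_t i = 0; i < n; i++) {
        const float vf = expf(x[i] - max);  /* kernels: vf0 = vt0 * vp0 + vs0 */
        f[i] = vf;
        sum += vf;
      }
      return sum;
    }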
/external/XNNPACK/src/f32-vscaleextexp/gen/
D | avx512f-p5-scalef-x16.c | 73 | __m512 vf0 = _mm512_mul_ps(vp0, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x16() | local
D | avx2-p5-x8.c            | 79 | __m256 vf0 = _mm256_mul_ps(vp0, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx2_p5_x8() | local
D | avx512f-p5-scalef-x32.c | 82 | __m512 vf0 = _mm512_mul_ps(vp0, vscalev); | in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x32() | local
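In the f32-vscaleextexp kernels the listed line multiplies the polynomial approximation of exp(r) by the caller-supplied scale (vf0 = vp0 * vscalev) before the power-of-two part of the exponent is applied (via _mm512_scalef_ps on AVX-512). A hedged scalar sketch of that ordering, using expf() and ldexpf() in place of the vector polynomial and scalef; the name and the way the scale is passed are illustrative, not the XNNPACK API:

    #include <math.h>
    #include <stddef.h>

    /* Sketch: y[i] ~= scale * exp(x[i]), computed as (poly(r) * scale) * 2^n
     * with x = n*ln2 + r, so the scale is folded in before the exponent step. */
    static void vscaleextexp_sketch(size_t n, const float* x, float* y, float scale) {
      for (size_t i = 0; i < n; i++) {
        const float vn = rintf(x[i] * 1.442695041f);  /* n = round(x / ln2) */
        const float vr = x[i] - vn * 0.6931471806f;   /* reduced argument r */
        const float vp = expf(vr);                    /* stands in for the p5 polynomial */
        const float vf = vp * scale;                  /* the vf0 = vp0 * vscalev step above */
        y[i] = ldexpf(vf, (int) vn);                  /* apply 2^n, like scalef */
      }
    }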
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
D | avx512f-p5-scalef-x16.c | 72 | __m512 vf0 = _mm512_scalef_ps(vp0, vn0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x16() | local
D | avx2-p5-x8.c            | 84 | __m256 vf0 = _mm256_fmadd_ps(vt0, vp0, vs0); | in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x8() | local
D | avx512f-p5-scalef-x32.c | 82 | __m512 vf0 = _mm512_scalef_ps(vp0, vn0); | in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x32() | local
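The f32-vscaleexpminusmax kernels end the same way but on shifted inputs: the AVX-512 variants reconstruct with vf0 = _mm512_scalef_ps(vp0, vn0), i.e. vp0 * 2^vn0, while the AVX2 variant uses the FMA vt0 * vp0 + vs0. A scalar reference of what this kernel family computes, with expf() in place of the approximations (the name and signature below are illustrative):

    #include <math.h>
    #include <stddef.h>

    /* Reference: y[i] = scale * exp(x[i] - max); the listed lines are the
     * per-element reconstruction of the exp() value before scaling/storing. */
    static void vscaleexpminusmax_ref(size_t n, const float* x, float* y,
                                      float scale, float max) {
      for (size_t i = 0; i < n; i++) {
        y[i] = scale * expf(x[i] - max);
      }
    }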
/external/XNNPACK/src/f32-sigmoid/gen/
D | scalar-p5-div-x2.c              | 83 | float vf0 = ve0 / vd0; | in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2() | local
D | scalar-lut64-p2-div-x2.c        | 79 | float vf0 = vy0 / vd0; | in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2() | local
D | avx512f-rr1-p5-scalef-div-x32.c | 75 | __m512 vf0 = _mm512_div_ps(ve0, vd0); | in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32() | local
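The f32-sigmoid kernels all finish with the division shown: vf0 = ve0 / vd0 (or vy0 / vd0), with ve the exponential term and vd = ve + 1. A scalar reference of that reconstruction, using expf() in place of the p5 / lut64-p2 approximations and the common abs/mirror arrangement for range (illustrative, not the XNNPACK source):

    #include <math.h>
    #include <stddef.h>

    /* Reference: y[i] = sigmoid(x[i]) = 1 / (1 + exp(-x[i])), evaluated as
     * e / (e + 1) with e = exp(-|x|) and mirrored back for positive inputs. */
    static void sigmoid_ref(size_t n, const float* x, float* y) {
      for (size_t i = 0; i < n; i++) {
        const float ve = expf(-fabsf(x[i]));  /* ve0 / vy0 in the kernels */
        const float vd = ve + 1.0f;           /* vd0 */
        const float vf = ve / vd;             /* the vf0 = ve0 / vd0 step above */
        y[i] = (x[i] > 0.0f) ? (1.0f - vf) : vf;
      }
    }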