/external/rust/crates/memchr/src/x86/ |
D | sse42.rs | 31 let vn3 = _mm_set1_epi8(n3 as i8); in memchr3() localVariable
|
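The sse42.rs hit above broadcasts the third needle byte into an SSE register. A minimal, hedged sketch (in C, not the crate's actual Rust code) of the technique that line belongs to: broadcast each of the three needle bytes with _mm_set1_epi8, compare a 16-byte chunk against all of them, OR the comparison masks, and use a movemask to locate the first match. The helper name and test string here are illustrative only.

/* Sketch of SSE-style memchr3 over one 16-byte chunk; compile with SSE2. */
#include <emmintrin.h>
#include <stdio.h>

/* Returns the offset of the first byte equal to n1, n2 or n3 within a
 * 16-byte chunk, or -1 if none of them occurs. */
static int memchr3_chunk(const unsigned char *chunk,
                         unsigned char n1, unsigned char n2, unsigned char n3) {
  const __m128i vn1 = _mm_set1_epi8((char)n1);   /* needle bytes, repeated */
  const __m128i vn2 = _mm_set1_epi8((char)n2);
  const __m128i vn3 = _mm_set1_epi8((char)n3);
  const __m128i hay = _mm_loadu_si128((const __m128i *)chunk);
  const __m128i eq = _mm_or_si128(
      _mm_or_si128(_mm_cmpeq_epi8(hay, vn1), _mm_cmpeq_epi8(hay, vn2)),
      _mm_cmpeq_epi8(hay, vn3));
  const int mask = _mm_movemask_epi8(eq);        /* one bit per matching byte */
  return mask ? __builtin_ctz(mask) : -1;        /* GCC/Clang intrinsic */
}

int main(void) {
  const char *s = "the quick brown fox!";
  printf("%d\n", memchr3_chunk((const unsigned char *)s, 'q', 'z', '!'));
  return 0;
}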
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/ |
D | scalar-p5-x4.c | 64 float vn3 = vx3 * vlog2e + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4() local
|
D | scalar-p5-x4-acc2.c | 65 float vn3 = vx3 * vlog2e + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc2() local
|
D | scalar-p5-x4-acc4.c | 67 float vn3 = vx3 * vlog2e + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x4_acc4() local
|
D | scalar-lut64-p2-x4.c | 67 float vn3 = vx3 * vlog2e_x64 + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4() local
|
D | scalar-lut64-p2-x4-acc2.c | 68 float vn3 = vx3 * vlog2e_x64 + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc2() local
|
D | scalar-lut64-p2-x4-acc4.c | 70 float vn3 = vx3 * vlog2e_x64 + vmagic_bias; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x4_acc4() local
|
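All of the scalar hits above compute vn3 = vx3 * vlog2e + vmagic_bias, which is the "magic bias" rounding trick: adding a constant around 1.5 * 2^23 forces the fractional bits of x * log2(e) off the end of the float mantissa, so subtracting the bias back yields round-to-nearest without an explicit rounding instruction. The sketch below demonstrates only that rounding step; the constants are the textbook values, not necessarily the exact literals in the generated files, and it assumes the default round-to-nearest FP mode.

/* Magic-bias round-to-nearest sketch, checked against rintf(). */
#include <math.h>
#include <stdio.h>

int main(void) {
  const float vlog2e = 0x1.715476p+0f;        /* log2(e) */
  const float vmagic_bias = 0x1.800000p+23f;  /* 1.5 * 2^23 (textbook value) */

  const float vx3 = -2.3f;                    /* sample input element */
  float vn3 = vx3 * vlog2e + vmagic_bias;     /* rounding happens in this add */
  vn3 -= vmagic_bias;                         /* vn3 == round(vx3 * log2(e)) */

  printf("magic-bias n = %.0f, rintf n = %.0f\n", vn3, rintf(vx3 * vlog2e));
  return 0;
}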
/external/XNNPACK/src/f32-vscaleextexp/gen/ |
D | avx512f-p5-scalef-x64.c | 54 const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x64() local
|
D | avx2-p5-x32.c | 60 …const __m256 vn3 = _mm256_round_ps(_mm256_mul_ps(vx3, vlog2e), _MM_FROUND_TO_NEAREST_INT | _MM_FRO… in xnn_f32_vscaleextexp_ukernel__avx2_p5_x32() local
|
D | avx512f-p5-scalef-x80.c | 55 const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x80() local
|
D | avx512f-p5-scalef-x96.c | 56 const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x96() local
|
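The AVX-512 hits above take a different route than the scalar magic-bias kernels: n = round(x * log2(e)) is produced directly with _mm512_roundscale_ps (imm8 = 0 means round to an integral value, nearest-even), and the 2^n factor is applied later with _mm512_scalef_ps, so no exponent-field bit twiddling is needed. A minimal, hedged sketch of just those two steps (not the generated kernel; the lane value is illustrative), compiled with -mavx512f:

/* Round-then-scalef sketch for the AVX-512 exp kernels. */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  const __m512 vlog2e = _mm512_set1_ps(0x1.715476p+0f);
  const __m512 vx3 = _mm512_set1_ps(-2.3f);     /* sample lane values */

  /* n = round-to-nearest(x * log2(e)); imm8 = 0 -> integral, nearest-even. */
  const __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0);

  /* scalef multiplies by 2^n; the kernels use this to rescale the result. */
  const __m512 vs3 = _mm512_scalef_ps(_mm512_set1_ps(1.0f), vn3);

  float n[16], s[16];
  _mm512_storeu_ps(n, vn3);
  _mm512_storeu_ps(s, vs3);
  printf("n = %.0f, 2^n = %g\n", n[0], s[0]);
  return 0;
}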
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/ |
D | avx512f-p5-scalef-x64.c | 59 __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x64() local
|
D | avx512f-p5-scalef-x80.c | 61 __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x80() local
|
D | avx2-p5-x32.c | 63 __m256 vn3 = _mm256_fmadd_ps(vx3, vlog2e, vmagic_bias); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x32() local
|
D | avx512f-p5-scalef-x96.c | 63 __m512 vn3 = _mm512_roundscale_ps(_mm512_mul_ps(vx3, vlog2e), 0); in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x96() local
|
/external/XNNPACK/src/f32-velu/gen/ |
D | velu-scalar-rr2-p6-x4.c | 58 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__scalar_rr2_p6_x4() local
|
D | velu-wasm-rr2-p6-x4.c | 58 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__wasm_rr2_p6_x4() local
|
D | velu-scalar-rr2-p6-x5.c | 60 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__scalar_rr2_p6_x5() local
|
D | velu-scalar-rr2-lut16-p3-x4.c | 58 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4() local
|
D | velu-wasm-rr2-p6-x5.c | 60 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__wasm_rr2_p6_x5() local
|
D | velu-avx512f-rr1-p6-x64.c | 57 __m512 vn3 = _mm512_fmadd_ps(vz3, vlog2e, vmagic_bias); in xnn_f32_velu_ukernel__avx512f_rr1_p6_x64() local
|
D | velu-wasm-rr2-lut16-p3-x4.c | 58 float vn3 = vz3 * vlog2e + vmagic_bias; in xnn_f32_velu_ukernel__wasm_rr2_lut16_p3_x4() local
|
/external/XNNPACK/src/f32-sigmoid/gen/ |
D | scalar-p5-div-x4.c | 54 float vn3 = vz3 * vminus_log2e + vmagic_bias; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
|
D | avx512f-rr1-p5-scalef-div-x64.c | 52 __m512 vn3 = _mm512_mul_ps(vz3, vlog2e); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x64() local
|
/external/rust/crates/memchr/src/ |
D | fallback.rs | 129 let vn3 = repeat_byte(n3); in memchr3() localVariable
D | fallback.rs | 249 let vn3 = repeat_byte(n3); in memrchr3() localVariable
|
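The fallback.rs hits use repeat_byte(n3) to broadcast the needle across a machine word for a word-at-a-time (SWAR) scan. A hedged sketch (in C, not the crate's Rust code) of that idea: XOR the haystack word with the broadcast needle, then use the classic "has zero byte" bit trick to detect whether any byte matched. The buffer and names below are illustrative only.

/* SWAR byte-broadcast and zero-byte detection sketch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LO 0x0101010101010101ULL
#define HI 0x8080808080808080ULL

/* Broadcast one byte to every byte of a 64-bit word. */
static uint64_t repeat_byte(uint8_t b) { return (uint64_t)b * LO; }

/* Nonzero iff some byte of x is zero (Bit Twiddling Hacks "haszero"). */
static uint64_t contains_zero_byte(uint64_t x) { return (x - LO) & ~x & HI; }

int main(void) {
  const uint8_t hay[8] = { 'a', 'b', 'c', 'q', 'd', 'e', 'f', 'g' };
  uint64_t word;
  memcpy(&word, hay, sizeof word);

  const uint64_t vn3 = repeat_byte('q');              /* broadcast needle */
  const uint64_t hit = contains_zero_byte(word ^ vn3); /* matched byte -> 0 */
  printf("match in word: %s\n", hit ? "yes" : "no");
  return 0;
}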