/external/vixl/src/aarch64/
  assembler-aarch64.cc
     314   void Assembler::NEONTable(const VRegister& vd,         in NEONTable()
     326   void Assembler::tbl(const VRegister& vd,               in tbl()
     334   void Assembler::tbl(const VRegister& vd,               in tbl()
     346   void Assembler::tbl(const VRegister& vd,               in tbl()
     359   void Assembler::tbl(const VRegister& vd,               in tbl()
     373   void Assembler::tbx(const VRegister& vd,               in tbx()
     381   void Assembler::tbx(const VRegister& vd,               in tbx()
     393   void Assembler::tbx(const VRegister& vd,               in tbx()
     406   void Assembler::tbx(const VRegister& vd,               in tbx()
    2380   void Assembler::NEON3DifferentL(const VRegister& vd,   in NEON3DifferentL()
           [all …]
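
  These tbl/tbx overloads emit the AArch64 TBL/TBX table-lookup instructions, with vd as the
  destination register. As a rough reference for the instruction semantics (not VIXL's API),
  here is a scalar sketch of a single-register, 16-byte lookup; the function name and layout
  are illustrative only:

    #include <cstddef>
    #include <cstdint>

    // Scalar model of a one-register AArch64 TBL/TBX on 16-byte vectors.
    // TBL zeroes lanes whose index is out of range; TBX keeps the prior
    // destination byte instead.
    void tbl_tbx_model(uint8_t dst[16], const uint8_t table[16],
                       const uint8_t indices[16], bool is_tbx) {
      for (size_t i = 0; i < 16; ++i) {
        const uint8_t idx = indices[i];
        if (idx < 16) {
          dst[i] = table[idx];
        } else if (!is_tbx) {
          dst[i] = 0;  // TBL: out-of-range index selects zero.
        }              // TBX: out-of-range index leaves dst[i] unchanged.
      }
    }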
|
/external/clang/test/PCH/
  cxx1y-variable-templates.cpp
      60   template<typename T> constexpr T vd = T(10);   variable
      61   template<typename T> T* vd<T*> = new T();      variable
      74   template<typename T> T vd = T(10);             variable
     111   template<typename T> T* vd<T*> = new T();      variable
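
  This PCH test exercises C++14 variable templates and their partial specializations. A minimal
  standalone illustration of the non-constexpr pair shown above (the main() usage is added here
  for demonstration):

    // C++14 variable template with a partial specialization for pointer types,
    // mirroring the declarations in the snippets.
    template <typename T> T vd = T(10);
    template <typename T> T* vd<T*> = new T();  // pointee is value-initialized

    int main() {
      int a = vd<int>;    // primary template: 10
      int* p = vd<int*>;  // partial specialization: points at an int holding 0
      return a + *p;      // 10
    }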
|
/external/eigen/bench/
  bench_norm.cpp
     251   VectorXd vd = VectorXd::Ones(s) * yd;      in check_accuracy()       local
     266   VectorXd vd(s);                            in check_accuracy_var()   local
     333   VectorXd vd = VectorXd::Random(s1) * y;    in main()                 local
     349   VectorXd vd = VectorXd::Random(512) * y;   in main()                 local
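
  bench_norm.cpp builds vectors like these to compare Euclidean-norm implementations for speed
  and robustness. A minimal sketch of that kind of accuracy check, using only the standard Eigen
  dense API (the specific constant and scale are illustrative, not the benchmark's values):

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main() {
      using Eigen::VectorXd;
      // A vector of identical entries has exact norm |y| * sqrt(n), which makes
      // underflow-prone implementations easy to spot.
      const int n = 1000;
      const double y = 1e-170;              // y*y underflows to zero in double
      VectorXd v = VectorXd::Ones(n) * y;
      const double exact = std::abs(y) * std::sqrt(double(n));

      std::cout << "exact        : " << exact << "\n"
                << "norm()       : " << v.norm() << "\n"        // naive sqrt of sum of squares
                << "stableNorm() : " << v.stableNorm() << "\n"  // rescaled to avoid under/overflow
                << "blueNorm()   : " << v.blueNorm() << "\n";   // Blue's algorithm
      return 0;
    }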
|
/external/XNNPACK/src/f16-ibilinear-chw/
  neonfp16arith.c.in
     115   const float16x4_t vd = vsub_f16(vr, vl);   variable
     156   const float16x4_t vd = vsub_f16(vr, vl);   variable
     207   const float16x4_t vd = vsub_f16(vr, vl);   variable
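
  In the ibilinear kernels (here and in the f32-ibilinear entry further down), vd holds a
  difference between neighboring samples that is then fed into a fused multiply-add. The exact
  roles of vl/vr and vb/vt are inferred from the names; a scalar sketch of the underlying
  bilinear interpolation pattern:

    // Scalar bilinear interpolation of one output pixel from its four neighbors.
    // The SIMD kernels do the same thing lane-by-lane: form a delta
    // (right - left, or bottom - top) and lerp with alpha via an FMA.
    float bilinear(float tl, float tr, float bl, float br,
                   float alpha_h, float alpha_v) {
      const float top    = tl + (tr - tl) * alpha_h;  // horizontal lerp, top row
      const float bottom = bl + (br - bl) * alpha_h;  // horizontal lerp, bottom row
      return top + (bottom - top) * alpha_v;          // vertical lerp between rows
    }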
|
/external/clang/lib/Analysis/
  UninitializedValues.cpp
      36   static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {          in isTrackedVar()
      72   const VarDecl *vd = *I;                                                       in computeMap()   local
     135   const VarDecl *vd) {                                                          in getValue()
     196   ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {        in operator []()
     268   const VarDecl *vd;                                                            member in __anonbed60ebd0411::FindVarResult
     271   FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}   in FindVarResult()
     518   bool isTrackedVar(const VarDecl *vd) {                                        in isTrackedVar()
     526   UninitUse getUninitUse(const Expr *ex, const VarDecl *vd, Value v) {          in getUninitUse()
     677   void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {        in reportUse()
     695   const VarDecl *vd = I.getVariable();                                          in VisitBlockExpr()   local
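
  UninitializedValues.cpp implements the CFG-based dataflow analysis behind clang's
  -Wuninitialized family of warnings; vd here is simply the tracked VarDecl. A small example of
  the kind of path-sensitive bug the analysis is built to catch (the function name is made up
  for illustration):

    // With -Wuninitialized / -Wsometimes-uninitialized, clang can flag the read
    // of 'x' below: on the path where 'cond' is false, 'x' is never assigned.
    int maybe_uninit(bool cond) {
      int x;      // tracked local VarDecl with no initializer
      if (cond) {
        x = 1;
      }
      return x;   // use of a possibly-uninitialized variable
    }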
|
/external/XNNPACK/src/f32-vsigmoid/
  wasmsimd-rr2-p5-div.c.in
     117   const v128_t vd = wasm_f32x4_add(ve, vone);   variable
     145   const v128_t vd = wasm_f32x4_add(ve, vone);   variable
|
  sse-rr2-p5-div.c.in
     128   __m128 vd = _mm_add_ps(ve, vone);   variable
     163   __m128 vd = _mm_add_ps(ve, vone);   variable
|
  scalar-rr2-lut2048-p1-div.c.in
     108   const float vd = vy + vone;   variable
     139   const float vd = vy + vone;   variable
     169   const float vd = vy + vone;   variable
|
  scalar-rr2-p5-div.c.in
     118   const float vd = ve + vone;   variable
     152   const float vd = ve + vone;   variable
     185   const float vd = ve + vone;   variable
|
  scalar-rr2-lut64-p2-div.c.in
     112   const float vd = vy + vone;   variable
     145   const float vd = vy + vone;   variable
     177   const float vd = vy + vone;   variable
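
  Across these vsigmoid templates, vd is the sigmoid denominator formed right after the
  exponential ('vd = ve + vone' / 'vd = vy + vone'). A scalar sketch of the math only; the real
  kernels replace std::exp with range reduction plus a polynomial or table (roughly what the
  rr/p/lut parts of the file names refer to), and the sign-handling detail below is a
  reconstruction, not lifted from these files:

    #include <cmath>

    // Reference sigmoid in the shape these kernels use: evaluate e = exp(-|x|),
    // form d = e + 1 (the role 'vd' plays above), divide, and reflect for
    // positive inputs.
    float sigmoid_ref(float x) {
      const float e = std::exp(-std::fabs(x));
      const float d = e + 1.0f;        // 'vd = ve + vone'
      const float f = e / d;           // sigmoid(-|x|)
      return x > 0.0f ? 1.0f - f : f;  // sigmoid(x) = 1 - sigmoid(-x)
    }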
|
/external/XNNPACK/src/f32-vsigmoid/gen/
  vsigmoid-avx512f-rr1-p5-scalef-div-x16.c
      54   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16()   local
      85   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_div_x16()   local
|
  vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-div-x16.c
      54   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16()   local
      86   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_div_x16()   local
|
  vsigmoid-avx512f-rr1-p5-scalef-nr1fma-x16.c
      54   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16()   local
      88   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_p5_scalef_nr1fma_x16()   local
|
  vsigmoid-avx512f-rr1-lut16-p3-perm-scalef-nr1fma-x16.c
      54   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16()   local
      89   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr1_lut16_p3_perm_scalef_nr1fma_x16()   local
|
  vsigmoid-avx512f-rr2-lut32-p2-perm2-scalef-div-x16.c
      56   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x16()   local
      88   const __m512 vd = _mm512_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx512f_rr2_lut32_p2_perm2_scalef_div_x16()   local
|
  vsigmoid-wasmsimd-rr2-p5-div-x4.c
      58   const v128_t vd = wasm_f32x4_add(ve, vone);   in xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x4()   local
      86   const v128_t vd = wasm_f32x4_add(ve, vone);   in xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x4()   local
|
  vsigmoid-sse41-rr2-p5-div-x4.c
      59   __m128 vd = _mm_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x4()   local
      90   __m128 vd = _mm_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__sse41_rr2_p5_div_x4()   local
|
  vsigmoid-neonfma-rr1-p5-div-x4.c
      54   const float32x4_t vd = vaddq_f32(ve, vone);   in xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_div_x4()   local
      80   const float32x4_t vd = vaddq_f32(ve, vone);   in xnn_f32_vsigmoid_ukernel__neonfma_rr1_p5_div_x4()   local
|
  vsigmoid-avx2-rr1-p5-div-x8.c
      58   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x8()   local
      90   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f32_vsigmoid_ukernel__avx2_rr1_p5_div_x8()   local
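
  The generated kernels in this directory differ mainly in how they take 1/vd at the end: the
  -div- variants divide directly, while the -nr1fma- variants start from a hardware reciprocal
  estimate (e.g. AVX-512's rcp14) and refine it with one Newton-Raphson step expressed as FMAs.
  A scalar sketch of that single refinement step, with r0 standing in for the estimate:

    // One Newton-Raphson iteration for r ~= 1/d:
    //   r1 = r0 * (2 - d*r0) = r0 + r0 * (1 - d*r0),
    // which maps onto one negated FMA followed by one FMA.
    float refine_reciprocal(float d, float r0) {
      const float err = 1.0f - d * r0;  // residual of the estimate
      return r0 + r0 * err;             // quadratically more accurate reciprocal
    }

  Because the error roughly squares each iteration, a ~14-bit starting estimate reaches about
  single-precision accuracy after one step, which is why a single "nr1fma" pass is enough before
  multiplying by the numerator.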
|
/external/ComputeLibrary/src/core/NEON/
  NEAsymm.inl
      26   inline qasymm8x16_t vmlaq_qasymm8(qasymm8x16_t vd, float32x4_t vs, float32x4_t vo)
      59   inline qasymm8x16_signed_t vmlaq_qasymm8_signed(qasymm8x16_signed_t vd, float32x4_t vs, float32x4_t…
|
  SVEAsymm.inl
      27   inline svuint8_t svmla_qasymm8_z(svbool_t pg, svuint8_t vd, svfloat32_t vs, svfloat32_t vo)
      66   inline svint8_t svmla_qasymm8_signed_z(svbool_t pg, svint8_t vd, svfloat32_t vs, svfloat32_t vo)
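
  NEAsymm.inl and SVEAsymm.inl provide multiply-accumulate helpers over quantized asymmetric
  8-bit vectors, with vd as the quantized input. The scalar sketch below follows only what the
  signatures suggest (a per-lane scale vs and offset vo applied in float, then saturated back to
  8 bits); treat the semantics as an assumption, not ComputeLibrary's documented behaviour:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    // Assumed per-lane behaviour of a vmlaq_qasymm8-style helper:
    // y = round(x * scale + offset), clamped to the unsigned 8-bit range.
    void mla_qasymm8_model(uint8_t* lanes, size_t n, float scale, float offset) {
      for (size_t i = 0; i < n; ++i) {
        const float y = std::round(static_cast<float>(lanes[i]) * scale + offset);
        lanes[i] = static_cast<uint8_t>(std::min(255.0f, std::max(0.0f, y)));
      }
    }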
|
/external/XNNPACK/src/f16-vsigmoid/gen/
  vsigmoid-avx2-rr1-p2-div-x8.c
      54   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x8()   local
      80   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_div_x8()   local
|
  vsigmoid-neonfp16arith-rr2-p2-div-x8.c
      52   const float16x8_t vd = vaddq_f16(ve, vone);   in xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_div_x8()   local
      76   const float16x8_t vd = vaddq_f16(ve, vone);   in xnn_f16_vsigmoid_ukernel__neonfp16arith_rr2_p2_div_x8()   local
|
  vsigmoid-avx2-rr1-p2-rcp-x8.c
      54   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x8()   local
      81   const __m256 vd = _mm256_add_ps(ve, vone);   in xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_rcp_x8()   local
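
  Note that the f16 avx2 kernels show an __m256 (single-precision) vd even though the data is
  half precision: the natural reading is that inputs are widened, computed on as f32, and
  narrowed back, which on AVX2 is done with the F16C conversions. That widening scheme is an
  assumption about these particular files; a minimal round-trip using those intrinsics:

    #include <immintrin.h>

    // Widen 8 half-precision values to f32, do one add in single precision,
    // and narrow back to f16 with round-to-nearest-even.  Requires F16C/AVX.
    __m128i add_one_f16x8(__m128i vh) {
      const __m256 vf   = _mm256_cvtph_ps(vh);                      // f16 -> f32
      const __m256 vsum = _mm256_add_ps(vf, _mm256_set1_ps(1.0f));  // f32 math
      return _mm256_cvtps_ph(vsum, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }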
|
/external/XNNPACK/src/f32-ibilinear/gen/
  wasmsimd-c4.c
      56   const v128_t vd = wasm_f32x4_sub(vb, vt);   in xnn_f32_ibilinear_ukernel__wasmsimd_c4()   local
      72   const v128_t vd = wasm_f32x4_sub(vb, vt);   in xnn_f32_ibilinear_ukernel__wasmsimd_c4()   local
|