/external/clang/include/clang/Sema/ScopeInfo.h
  453: QualType CaptureType, Expr *Cpy)  in Capture() argument
  456: Cpy, !Var ? Cap_VLA : Block ? Cap_Block : ByRef ? Cap_ByRef  in Capture()
  462: QualType CaptureType, Expr *Cpy, const bool ByCopy)  in Capture() argument
  465: InitExprAndCaptureKind(Cpy, ByCopy ? Cap_ByCopy : Cap_ByRef),  in Capture()
  540: QualType CaptureType, Expr *Cpy) {  in addCapture() argument
  542: EllipsisLoc, CaptureType, Cpy));  in addCapture()
  557: Expr *Cpy, bool ByCopy);
  878: Expr *Cpy,  in addThisCapture() argument
  881: Cpy, ByCopy));  in addThisCapture()
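The ScopeInfo.h hits are the Sema-side capture records: each Capture constructor takes the copy/initialization expression as Expr *Cpy and folds it into InitExprAndCaptureKind together with a capture kind picked by the chained conditional at line 456. A minimal sketch of that selection step, reusing the enumerator names visible above but with an otherwise hypothetical helper (enum values are only consistent within this sketch, not with clang's own enum):

    #include <cstdio>

    // Capture kinds as named in the snippet above.
    enum CaptureKind { Cap_ByCopy, Cap_ByRef, Cap_Block, Cap_VLA };

    // Mirrors ScopeInfo.h:456: no captured variable means a VLA capture,
    // otherwise a block capture, otherwise by-reference, otherwise by-copy.
    static CaptureKind selectCaptureKind(bool HasVar, bool Block, bool ByRef) {
      return !HasVar ? Cap_VLA : Block ? Cap_Block : ByRef ? Cap_ByRef : Cap_ByCopy;
    }

    int main() {
      std::printf("%d\n", selectCaptureKind(/*HasVar=*/true, /*Block=*/false,
                                            /*ByRef=*/true));  // 1 == Cap_ByRef here
    }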
/external/swiftshader/third_party/llvm-16.0/llvm/lib/CodeGen/MachineFunction.cpp
  1013: [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {  in salvageCopySSAImpl() argument
  1016: if (Cpy.isCopy()) {  in salvageCopySSAImpl()
  1017: OldReg = Cpy.getOperand(0).getReg();  in salvageCopySSAImpl()
  1018: NewReg = Cpy.getOperand(1).getReg();  in salvageCopySSAImpl()
  1019: SubReg = Cpy.getOperand(1).getSubReg();  in salvageCopySSAImpl()
  1020: } else if (Cpy.isSubregToReg()) {  in salvageCopySSAImpl()
  1021: OldReg = Cpy.getOperand(0).getReg();  in salvageCopySSAImpl()
  1022: NewReg = Cpy.getOperand(2).getReg();  in salvageCopySSAImpl()
  1023: SubReg = Cpy.getOperand(3).getImm();  in salvageCopySSAImpl()
  1025: auto CopyDetails = *TII.isCopyInstr(Cpy);  in salvageCopySSAImpl()
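These MachineFunction.cpp hits are a helper lambda inside salvageCopySSAImpl() that, given a copy-like instruction, reports which (register, sub-register) the value was copied from so debug-info salvaging can keep walking the SSA def chain: operand 1 for a plain COPY, operands 2/3 for SUBREG_TO_REG, and TII.isCopyInstr() as the target-specific fallback. A simplified, self-contained model of that operand unwrapping (MiniInstr and copySource are invented for illustration; the COPY case's sub-register index is omitted here):

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Toy stand-in for a MachineInstr that is either a COPY or a SUBREG_TO_REG;
    // operands are positional, matching the layouts the lambda at 1013 reads.
    struct MiniInstr {
      enum Kind { Copy, SubregToReg } kind;
      std::vector<uint64_t> operands;
    };

    // COPY:           dst = op0, source register = op1
    // SUBREG_TO_REG:  dst = op0, source register = op2, sub-register index = op3
    static std::pair<uint64_t, unsigned> copySource(const MiniInstr &Cpy) {
      if (Cpy.kind == MiniInstr::Copy)
        return {Cpy.operands[1], /*SubReg=*/0u};
      assert(Cpy.kind == MiniInstr::SubregToReg);
      return {Cpy.operands[2], static_cast<unsigned>(Cpy.operands[3])};
    }

    int main() {
      MiniInstr Sub{MiniInstr::SubregToReg, {/*dst*/10, /*imm*/0, /*src*/11, /*subidx*/1}};
      auto [Reg, Idx] = copySource(Sub);
      return (Reg == 11 && Idx == 1) ? 0 : 1;
    }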
/external/vixl/src/aarch64/macro-assembler-sve-aarch64.cc
  363: void MacroAssembler::Cpy(const ZRegister& zd,  in Cpy() function in vixl::aarch64::MacroAssembler
  441: Cpy(zd, pg, FPToRawbitsWithSize(zd.GetLaneSizeInBits(), imm));  in Fcpy()
  458: Cpy(zd, pg, FPToRawbitsWithSize(zd.GetLaneSizeInBits(), imm));  in Fcpy()
  475: Cpy(zd, pg, FPToRawbitsWithSize(zd.GetLaneSizeInBits(), imm));  in Fcpy()
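The Fcpy() calls above show the macro-assembler's handling of floating-point immediates: the value is reinterpreted as raw bits of the destination lane size (FPToRawbitsWithSize) and then handed to the integer Cpy() path. A rough, self-contained sketch of that conversion for 32- and 64-bit lanes (the real VIXL helper also handles half precision):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Hypothetical analogue of FPToRawbitsWithSize() for 32/64-bit lanes:
    // reinterpret the immediate as an integer of the lane width so it can be
    // broadcast through the integer CPY encoding.
    static uint64_t fp_to_rawbits(unsigned lane_size_in_bits, double imm) {
      if (lane_size_in_bits == 64) {
        uint64_t bits;
        std::memcpy(&bits, &imm, sizeof(bits));
        return bits;
      }
      float f = static_cast<float>(imm);  // 32-bit lanes: narrow first
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits;
    }

    int main() {
      std::printf("%016llx\n", (unsigned long long)fp_to_rawbits(64, 1.0));  // 3ff0000000000000
      std::printf("%016llx\n", (unsigned long long)fp_to_rawbits(32, 1.0));  // 000000003f800000
    }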
/external/vixl/src/aarch64/macro-assembler-aarch64.h
  4107: void Cpy(const ZRegister& zd, const PRegister& pg, IntegerOperand imm);
  4108: void Cpy(const ZRegister& zd, const PRegisterM& pg, const Register& rn) {  in Cpy() function
  4113: void Cpy(const ZRegister& zd, const PRegisterM& pg, const VRegister& vn) {  in Cpy() function
  5441: Cpy(zd, pg, imm);  in Mov()
  7841: V(Cpy, cpy, ) \
/external/vixl/test/aarch64/test-assembler-sve-aarch64.cc
  6440: __ Cpy(z0.VnB(), pg, w0);  in TEST_SVE() local
  6441: __ Cpy(z1.VnH(), pg, x1);  // X registers are accepted for small lanes.  in TEST_SVE() local
  6442: __ Cpy(z2.VnS(), pg, w2);  in TEST_SVE() local
  6443: __ Cpy(z3.VnD(), pg, x3);  in TEST_SVE() local
  6446: __ Cpy(z4.VnB(), pg, b28);  in TEST_SVE() local
  6447: __ Cpy(z5.VnH(), pg, h29);  in TEST_SVE() local
  6448: __ Cpy(z6.VnS(), pg, s30);  in TEST_SVE() local
  6449: __ Cpy(z7.VnD(), pg, d31);  in TEST_SVE() local
  6454: __ Cpy(z16.VnB(), pg, sp);  in TEST_SVE() local
  6455: __ Cpy(z17.VnH(), pg, wsp);  in TEST_SVE() local
  [all …]
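Semantically, the predicated register form exercised above copies the low lane-sized bits of the scalar into every destination lane whose governing predicate bit is set and leaves the other lanes untouched (the header listed earlier takes a PRegisterM, i.e. a merging predicate, for this overload). A plain-C++ model of the byte-lane case; the 16-lane width and predicate pattern are arbitrary choices for illustration, since SVE vector lengths are implementation-defined:

    #include <array>
    #include <cstdint>
    #include <cstdio>

    // Model of Cpy(zd.VnB(), pg, wn) with a merging predicate: active byte lanes
    // receive the low 8 bits of the scalar, inactive lanes keep their old value.
    static void cpy_vnb_merging(std::array<uint8_t, 16>& zd,
                                const std::array<bool, 16>& pg, uint32_t wn) {
      for (size_t i = 0; i < zd.size(); ++i) {
        if (pg[i]) zd[i] = static_cast<uint8_t>(wn & 0xff);
      }
    }

    int main() {
      std::array<uint8_t, 16> z0{};  // all lanes start at zero
      std::array<bool, 16> pg{};
      pg[0] = pg[2] = pg[4] = true;  // arbitrary predicate pattern
      cpy_vnb_merging(z0, pg, 0x12345678);
      std::printf("%02x %02x %02x\n", z0[0], z0[1], z0[2]);  // 78 00 78
    }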
/external/vixl/test/aarch64/test-simulator-aarch64.cc
  5878: __ Cpy(x15, ip0, ip1);  in TEST() local
/external/vixl/test/aarch64/test-assembler-aarch64.cc
  14530: __ Cpy(x5, x4, x6);  in TEST() local
  14538: __ Cpy(x8, x7, x9);  in TEST() local
/external/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
  1140: Value *Cpy = EntryIRB.CreateMemCpy(  in getShadow() local
  1143: DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");  in getShadow()
  1144: (void)Cpy;  in getShadow()
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
  1635: Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,  in getShadow() local
  1637: LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");  in getShadow()
  1638: (void)Cpy;  in getShadow()
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
  1910: Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,  in getShadow() local
  1912: LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");  in getShadow()
  1913: (void)Cpy;  in getShadow()
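All three MemorySanitizer copies above use the same pattern in getShadow(): for a byval argument, the shadow of the caller-side object is itself copied into the freshly set-up shadow slot with IRBuilder::CreateMemCpy, and the returned value is only kept for the debug print. A minimal, self-contained sketch of that IRBuilder pattern; the function name, size, and alignments are invented for illustration, and all of MSan's surrounding bookkeeping is omitted:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("byval_shadow_copy", Ctx);

      // void copy_shadow(i8* dst_shadow, i8* src_shadow)
      Type *I8Ptr = PointerType::getUnqual(Type::getInt8Ty(Ctx));
      FunctionType *FnTy =
          FunctionType::get(Type::getVoidTy(Ctx), {I8Ptr, I8Ptr}, false);
      Function *F =
          Function::Create(FnTy, Function::ExternalLinkage, "copy_shadow", &M);
      IRBuilder<> EntryIRB(BasicBlock::Create(Ctx, "entry", F));

      auto AI = F->arg_begin();
      Value *DstShadow = &*AI++;
      Value *SrcShadow = &*AI;

      // Same shape as the ByValCpy above: memcpy the argument's shadow bytes
      // into the entry-block shadow slot, then discard the returned Value.
      Value *Cpy = EntryIRB.CreateMemCpy(DstShadow, MaybeAlign(8),
                                         SrcShadow, MaybeAlign(8), /*Size=*/64);
      (void)Cpy;
      EntryIRB.CreateRetVoid();

      M.print(outs(), nullptr);
    }

Linking against LLVM is assumed; the exact CreateMemCpy overload set differs slightly between the llvm-10.0 and llvm-16.0 trees listed above.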
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/SVEInstrFormats.td
  159: def SVECpyImmOperand8 : SVEShiftedImmOperand<8, "Cpy", "isSVECpyImm<int8_t>">;
  160: def SVECpyImmOperand16 : SVEShiftedImmOperand<16, "Cpy", "isSVECpyImm<int16_t>">;
  161: def SVECpyImmOperand32 : SVEShiftedImmOperand<32, "Cpy", "isSVECpyImm<int32_t>">;
  162: def SVECpyImmOperand64 : SVEShiftedImmOperand<64, "Cpy", "isSVECpyImm<int64_t>">;
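The SVECpyImmOperand* records wire the immediate form of CPY to a per-element-size predicate, isSVECpyImm<T>: the architectural encoding is a signed 8-bit immediate with an optional left shift by 8, so a value is accepted when either form fits the element type. A rough stand-alone approximation of that check (a sketch of the semantics only, not the backend's exact code, which lives in the AArch64 utility headers):

    #include <cstdint>
    #include <cstdio>

    // Approximation of isSVECpyImm<T>: CPY (immediate) encodes imm8 with an
    // optional LSL #8, so accept values representable either way in T.
    template <typename T>
    static bool looksLikeSVECpyImm(int64_t Imm) {
      if (static_cast<int64_t>(static_cast<T>(Imm)) != Imm)
        return false;                                    // must fit the element type
      bool IsImm8 = Imm >= -128 && Imm <= 127;           // plain signed 8-bit
      bool IsImm8Lsl8 = (Imm % 256 == 0) &&
                        Imm >= -128 * 256 && Imm <= 127 * 256;  // imm8 << 8
      return IsImm8 || IsImm8Lsl8;
    }

    int main() {
      std::printf("%d %d %d\n",
                  looksLikeSVECpyImm<int32_t>(127),      // 1
                  looksLikeSVECpyImm<int32_t>(0x1200),   // 1 (0x12 << 8)
                  looksLikeSVECpyImm<int32_t>(0x1234));  // 0
    }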
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  4183: SDValue Cpy = DAG.getMemcpy(  in LowerCall() local
  4189: MemOpChains.push_back(Cpy);  in LowerCall()
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/AArch64/SVEInstrFormats.td
  183: def SVECpyImmOperand8 : SVEShiftedImmOperand<8, "Cpy", "isSVECpyImm<int8_t>">;
  184: def SVECpyImmOperand16 : SVEShiftedImmOperand<16, "Cpy", "isSVECpyImm<int16_t>">;
  185: def SVECpyImmOperand32 : SVEShiftedImmOperand<32, "Cpy", "isSVECpyImm<int32_t>">;
  186: def SVECpyImmOperand64 : SVEShiftedImmOperand<64, "Cpy", "isSVECpyImm<int64_t>">;
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  7505: SDValue Cpy = DAG.getMemcpy(  in LowerCall() local
  7511: MemOpChains.push_back(Cpy);  in LowerCall()
/external/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
  3138: SDValue Cpy = DAG.getMemcpy(  in LowerCall() local
  3144: MemOpChains.push_back(Cpy);  in LowerCall()
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
  2867: SDValue Cpy = DAG.getMemcpy(  in LowerCall() local
  2873: MemOpChains.push_back(Cpy);  in LowerCall()
/external/swiftshader/third_party/llvm-16.0/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
  3291: SDValue Cpy =  in LowerCall() local
  3298: MemOpChains.push_back(Cpy);  in LowerCall()