/external/llvm/lib/IR/ |
D | Instruction.cpp |
      380  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))  in haveSameSpecialState()
      381  return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&  in haveSameSpecialState()
      382  CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&  in haveSameSpecialState()
      384  cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&  in haveSameSpecialState()
      386  cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&  in haveSameSpecialState()
      387  CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();  in haveSameSpecialState()
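
All of the Instruction.cpp copies in this listing apply the same pattern: when haveSameSpecialState() meets a cmpxchg, it compares every flag and ordering that plain operand equality misses. A compact restatement of that check, assuming both instructions were already shown to be cmpxchg; A and B are placeholder names, and the older /external/llvm copy still spells the last accessor getSynchScope():

```cpp
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: the "special state" of a cmpxchg that operand-wise equality misses.
// A and B are placeholders for the two AtomicCmpXchgInst being compared.
static bool sameCmpXchgSpecialState(const AtomicCmpXchgInst *A,
                                    const AtomicCmpXchgInst *B) {
  return A->isVolatile() == B->isVolatile() &&
         A->isWeak() == B->isWeak() &&
         A->getSuccessOrdering() == B->getSuccessOrdering() &&
         A->getFailureOrdering() == B->getFailureOrdering() &&
         A->getSyncScopeID() == B->getSyncScopeID(); // getSynchScope() in older trees
}
```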
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | AtomicExpandPass.cpp |
       92  void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
       94  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
       96  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
      102  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
      104  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
      116  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);
      150  static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {  in getAtomicOpSize()
      182  static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {  in getAtomicOpAlign()
      223  auto CASI = dyn_cast<AtomicCmpXchgInst>(I);  in runOnFunction()
      443  AtomicCmpXchgInst::getStrongestFailureOrdering(Order));  in expandAtomicLoadToCmpXchg()
      [all …]
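
The declarations at lines 92-116 are AtomicExpandPass's cmpxchg-specific entry points, and line 223 is where runOnFunction() picks the instruction up. A hedged, simplified sketch of that dispatch step; the member names match the declarations above, but the helper sizeAndAlignAreSupported() and the loop body are assumptions, not the pass's full logic:

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical, simplified dispatch modelled on the runOnFunction() match at
// line 223. The real pass also handles loads, stores and atomicrmw, and uses
// getAtomicOpSize()/getAtomicOpAlign() before choosing a strategy.
struct CmpXchgExpanderSketch {
  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);    // declared at line 102
  void expandAtomicCASToLibcall(AtomicCmpXchgInst *CI);  // declared at line 116
  bool sizeAndAlignAreSupported(AtomicCmpXchgInst *CI);  // stand-in for the real checks

  bool processAtomics(SmallVectorImpl<Instruction *> &AtomicInsts) {
    bool MadeChange = false;
    for (Instruction *I : AtomicInsts) {
      auto *CASI = dyn_cast<AtomicCmpXchgInst>(I);
      if (!CASI)
        continue;                        // other atomic kinds elided in this sketch
      if (sizeAndAlignAreSupported(CASI))
        MadeChange |= tryExpandAtomicCmpXchg(CASI);
      else
        expandAtomicCASToLibcall(CASI);  // too wide or misaligned: emit a __atomic_* call
    }
    return MadeChange;
  }
};
```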
|
/external/llvm/lib/Transforms/Scalar/ |
D | LowerAtomic.cpp |
       25  static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {  in LowerAtomicCmpXchgInst()
      119  else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))  in runOnBasicBlock()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | LowerAtomic.cpp |
       24  static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {  in LowerAtomicCmpXchgInst()
      124  else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))  in runOnBasicBlock()
|
/external/llvm-project/llvm/lib/Transforms/Scalar/ |
D | LowerAtomic.cpp |
       24  static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {  in LowerAtomicCmpXchgInst()
      123  else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&Inst))  in runOnBasicBlock()
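
The three LowerAtomic.cpp copies above implement LowerAtomicCmpXchgInst() the same way: in a single-threaded setting a cmpxchg degenerates into a load, a compare, a conditional store of the new value, and a {old value, success} struct result. A sketch of that lowering, reconstructed from the snippets (the CreateLoad overload and some details differ between the copies listed):

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: replace a cmpxchg with plain, non-atomic IR. Only correct when no
// other thread can observe the memory, which is the premise of LowerAtomic.
static bool lowerCmpXchgSketch(AtomicCmpXchgInst *CXI) {
  IRBuilder<> Builder(CXI);
  Value *Ptr = CXI->getPointerOperand();
  Value *Cmp = CXI->getCompareOperand();
  Value *Val = CXI->getNewValOperand();

  Value *Orig = Builder.CreateLoad(Val->getType(), Ptr); // current contents
  Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);        // matches the expected value?
  Value *Res = Builder.CreateSelect(Equal, Val, Orig);   // store the new value only on match
  Builder.CreateStore(Res, Ptr);

  // cmpxchg yields { original value, i1 success }.
  Value *Ret = UndefValue::get(CXI->getType());
  Ret = Builder.CreateInsertValue(Ret, Orig, 0);
  Ret = Builder.CreateInsertValue(Ret, Equal, 1);

  CXI->replaceAllUsesWith(Ret);
  CXI->eraseFromParent();
  return true;
}
```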
|
/external/llvm-project/llvm/lib/CodeGen/ |
D | AtomicExpandPass.cpp |
       92  bool expandPartwordCmpXchg(AtomicCmpXchgInst *I);
       94  void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
       96  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
      102  bool tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI);
      104  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
      116  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);
      150  static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {  in getAtomicOpSize()
      191  auto CASI = dyn_cast<AtomicCmpXchgInst>(I);  in runOnFunction()
      411  AtomicCmpXchgInst::getStrongestFailureOrdering(Order));  in expandAtomicLoadToCmpXchg()
      484  AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));  in createCmpXchgInstFun()
      [all …]
|
/external/llvm/lib/CodeGen/ |
D | AtomicExpandPass.cpp |
       70  void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
       72  AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
       79  bool expandAtomicCmpXchg(AtomicCmpXchgInst *CI);
       91  void expandAtomicCASToLibcall(AtomicCmpXchgInst *I);
      125  unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {  in getAtomicOpSize()
      157  unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {  in getAtomicOpAlign()
      195  auto CASI = dyn_cast<AtomicCmpXchgInst>(I);  in runOnFunction()
      422  AtomicCmpXchgInst::getStrongestFailureOrdering(Order));  in expandAtomicLoadToCmpXchg()
      483  AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));  in createCmpXchgInstFun()
      727  void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {  in expandPartwordCmpXchg()
      [all …]
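
Lines 422 and 483 lean on AtomicCmpXchgInst::getStrongestFailureOrdering(), which maps a desired success ordering to the strongest ordering a failed exchange may legally carry. A sketch of the surrounding pattern, assuming the pre-LLVM-13 CreateAtomicCmpXchg signature (newer releases also take an explicit alignment); Addr, Loaded, NewVal and MemOpOrder are placeholder names:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include <utility>
using namespace llvm;

// Sketch of emitting a cmpxchg and splitting its aggregate result, in the
// spirit of createCmpXchgInstFun()/expandAtomicLoadToCmpXchg() above.
static std::pair<Value *, Value *>
emitCmpXchg(IRBuilder<> &Builder, Value *Addr, Value *Loaded, Value *NewVal,
            AtomicOrdering MemOpOrder) {
  Value *Pair = Builder.CreateAtomicCmpXchg(
      Addr, Loaded, NewVal, MemOpOrder,
      AtomicCmpXchgInst::getStrongestFailureOrdering(MemOpOrder));
  Value *OldVal = Builder.CreateExtractValue(Pair, 0, "oldval");   // value seen in memory
  Value *Success = Builder.CreateExtractValue(Pair, 1, "success"); // i1: exchange happened
  return {OldVal, Success};
}
```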
|
/external/llvm/lib/Transforms/Instrumentation/ |
D | BoundsChecking.cpp |
      182  if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<AtomicCmpXchgInst>(I) ||  in runOnFunction()
      197  } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(Inst)) {  in runOnFunction()
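
For bounds checking, a cmpxchg is just another memory access: the instrumented pointer is its pointer operand, and the access width comes from the type of the value being exchanged. A small hypothetical helper illustrating that; the helper name and the choice of the new-value operand for sizing are assumptions for illustration, not the pass's exact code:

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include <cstdint>
#include <utility>
using namespace llvm;

// Hypothetical helper: which pointer a cmpxchg dereferences and how many
// bytes it touches -- the two facts a bounds-checking pass needs.
static std::pair<Value *, uint64_t>
cmpXchgAccess(AtomicCmpXchgInst *AI, const DataLayout &DL) {
  Type *AccessTy = AI->getNewValOperand()->getType(); // same type as the compare operand
  return {AI->getPointerOperand(), DL.getTypeStoreSize(AccessTy)};
}
```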
|
/external/llvm/include/llvm/Analysis/ |
D | MemoryLocation.h |
       68  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
       77  else if (auto *I = dyn_cast<AtomicCmpXchgInst>(Inst))  in get()
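
The overload declared at line 68 (defined in MemoryLocation.cpp:47 further down) describes the memory a cmpxchg touches: the pointer operand, sized by the compare operand's type, carrying the instruction's AA metadata. A reconstructed sketch under this older tree's uint64_t-size API; newer copies wrap the size in LocationSize::precise and getAAMetadata returns the node set instead of filling an out-parameter:

```cpp
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Sketch of MemoryLocation::get(const AtomicCmpXchgInst *): the location
// starts at the pointer operand and spans one compare-operand-sized store.
static MemoryLocation cmpXchgLocation(const AtomicCmpXchgInst *CXI) {
  AAMDNodes AATags;
  CXI->getAAMetadata(AATags);
  const DataLayout &DL = CXI->getModule()->getDataLayout();
  uint64_t Size = DL.getTypeStoreSize(CXI->getCompareOperand()->getType());
  return MemoryLocation(CXI->getPointerOperand(), Size, AATags);
}
```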
|
D | AliasAnalysis.h |
      407  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
      411  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,  in getModRefInfo()
      490  return getModRefInfo((const AtomicCmpXchgInst*)I, Loc);  in getModRefInfo()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/IR/ |
D | Instruction.cpp |
      423  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))  in haveSameSpecialState()
      424  return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&  in haveSameSpecialState()
      425  CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&  in haveSameSpecialState()
      427  cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&  in haveSameSpecialState()
      429  cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&  in haveSameSpecialState()
      431  cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();  in haveSameSpecialState()
|
/external/llvm-project/llvm/lib/IR/ |
D | Instruction.cpp |
      447  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))  in haveSameSpecialState()
      448  return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&  in haveSameSpecialState()
      449  CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&  in haveSameSpecialState()
      451  cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&  in haveSameSpecialState()
      453  cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&  in haveSameSpecialState()
      455  cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();  in haveSameSpecialState()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Utils/ |
D | FunctionComparator.cpp |
      615  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {  in cmpOperations()
      617  cast<AtomicCmpXchgInst>(R)->isVolatile()))  in cmpOperations()
      620  cast<AtomicCmpXchgInst>(R)->isWeak()))  in cmpOperations()
      624  cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))  in cmpOperations()
      628  cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))  in cmpOperations()
      631  cast<AtomicCmpXchgInst>(R)->getSyncScopeID());  in cmpOperations()
|
/external/llvm-project/llvm/lib/Transforms/Utils/ |
D | FunctionComparator.cpp |
      635  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {  in cmpOperations()
      637  cast<AtomicCmpXchgInst>(R)->isVolatile()))  in cmpOperations()
      640  cmpNumbers(CXI->isWeak(), cast<AtomicCmpXchgInst>(R)->isWeak()))  in cmpOperations()
      644  cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))  in cmpOperations()
      648  cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))  in cmpOperations()
      651  cast<AtomicCmpXchgInst>(R)->getSyncScopeID());  in cmpOperations()
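
Unlike haveSameSpecialState(), FunctionComparator (and its older in-tree predecessor in MergeFunctions.cpp further down) ranks rather than equates: each cmpxchg field is compared in turn and the first non-zero result decides the ordering. A sketch of that early-out chain; L and R are placeholders for the two instructions, and the three-way helpers below stand in for the comparator's own cmpNumbers()/cmpOrderings():

```cpp
#include "llvm/IR/Instructions.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cstdint>
using namespace llvm;

// Three-way helpers in the style of FunctionComparator::cmpNumbers/cmpOrderings.
static int cmpNumbers(uint64_t A, uint64_t B) { return A < B ? -1 : A > B ? 1 : 0; }
static int cmpOrderings(AtomicOrdering A, AtomicOrdering B) {
  return cmpNumbers((uint64_t)A, (uint64_t)B);
}

// Sketch of the early-out chain above: the first field that differs decides
// how the two cmpxchg instructions are ordered.
static int cmpCmpXchg(const AtomicCmpXchgInst *L, const AtomicCmpXchgInst *R) {
  if (int Res = cmpNumbers(L->isVolatile(), R->isVolatile()))
    return Res;
  if (int Res = cmpNumbers(L->isWeak(), R->isWeak()))
    return Res;
  if (int Res = cmpOrderings(L->getSuccessOrdering(), R->getSuccessOrdering()))
    return Res;
  if (int Res = cmpOrderings(L->getFailureOrdering(), R->getFailureOrdering()))
    return Res;
  return cmpNumbers(L->getSyncScopeID(), R->getSyncScopeID());
}
```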
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/ |
D | MemoryLocation.h |
      202  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
      216  return get(cast<AtomicCmpXchgInst>(Inst));  in getOrNone()
|
D | AliasAnalysis.h |
      564  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
      568  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,  in getModRefInfo()
      711  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
      740  return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);  in getModRefInfo()
|
/external/llvm-project/llvm/lib/Transforms/Instrumentation/ |
D | BoundsChecking.cpp | 164 } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { in addBoundsChecking()
|
/external/llvm-project/llvm/include/llvm/Analysis/ |
D | MemoryLocation.h |
       31  class AtomicCmpXchgInst;  variable
      239  static MemoryLocation get(const AtomicCmpXchgInst *CXI);
|
D | AliasAnalysis.h |
      616  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
      620  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX, const Value *P,  in getModRefInfo()
      760  ModRefInfo getModRefInfo(const AtomicCmpXchgInst *CX,
      789  return getModRefInfo((const AtomicCmpXchgInst *)I, Loc, AAQIP);  in getModRefInfo()
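
The overloads above let clients ask how a cmpxchg interacts with some other memory location. A short usage sketch against the llvm-project API; AA, CX and SomeLoad are placeholder names, and older trees spell the result flags MRI_Mod/MRI_Ref instead of providing isModSet()/isRefSet():

```cpp
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Usage sketch: may the cmpxchg read or write the memory a given load touches?
static void classifyCmpXchg(AAResults &AA, const AtomicCmpXchgInst *CX,
                            const LoadInst *SomeLoad) {
  ModRefInfo MRI = AA.getModRefInfo(CX, MemoryLocation::get(SomeLoad));
  if (isModSet(MRI)) {
    // CX may overwrite the loaded memory.
  }
  if (isRefSet(MRI)) {
    // CX may read it (the compare always reads the addressed memory).
  }
}
```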
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Instrumentation/ |
D | BoundsChecking.cpp | 162 } else if (AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) { in addBoundsChecking()
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/RISCV/ |
D | RISCVISelLowering.h |
      214  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
      216  emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder, AtomicCmpXchgInst *CI,
|
/external/llvm-project/llvm/lib/Analysis/ |
D | MemoryLocation.cpp |
       66  MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) {  in get()
       97  return get(cast<AtomicCmpXchgInst>(Inst));  in getOrNone()
|
/external/llvm-project/llvm/lib/Target/RISCV/ |
D | RISCVISelLowering.h |
      228  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
      230  AtomicCmpXchgInst *CI,
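
These two declarations are the RISC-V hooks AtomicExpandPass queries: the first decides whether a cmpxchg must be rewritten in IR, the second emits the masked intrinsic for sub-word cases. A hedged sketch of what such an override typically looks like; the 8/16-bit cutoff mirrors sub-word handling on RISC-V, but treat the body as an illustration rather than the exact in-tree code:

```cpp
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative target hook: byte- and halfword-wide cmpxchg cannot use LR/SC
// directly, so ask AtomicExpandPass to go through the masked intrinsic path.
static TargetLowering::AtomicExpansionKind
shouldExpandCmpXchgSketch(const AtomicCmpXchgInst *CI) {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return TargetLowering::AtomicExpansionKind::MaskedIntrinsic;
  return TargetLowering::AtomicExpansionKind::None;
}
```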
|
/external/llvm/lib/Transforms/IPO/ |
D | MergeFunctions.cpp |
     1022  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {  in cmpOperations()
     1024  cast<AtomicCmpXchgInst>(R)->isVolatile()))  in cmpOperations()
     1027  cast<AtomicCmpXchgInst>(R)->isWeak()))  in cmpOperations()
     1031  cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))  in cmpOperations()
     1035  cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))  in cmpOperations()
     1038  cast<AtomicCmpXchgInst>(R)->getSynchScope());  in cmpOperations()
|
/external/llvm/lib/Analysis/ |
D | MemoryLocation.cpp | 47 MemoryLocation MemoryLocation::get(const AtomicCmpXchgInst *CXI) { in get()
|