/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/
D | AliasAnalysis.cpp |
    604  ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,  in getModRefInfo() argument
    607  return getModRefInfo(RMW, Loc, AAQIP);  in getModRefInfo()
    610  ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,  in getModRefInfo() argument
    614  if (isStrongerThanMonotonic(RMW->getOrdering()))  in getModRefInfo()
    618  AliasResult AR = alias(MemoryLocation::get(RMW), Loc, AAQI);  in getModRefInfo()
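The hits above show how LLVM's AAResults answers mod/ref queries for an atomicrmw: an ordering stronger than monotonic is answered conservatively, otherwise the result comes from an alias check between the instruction's memory location and the queried location. A minimal sketch of a client-side query (the helper name and the surrounding pass are assumptions, not from this tree):

    // Sketch only: querying mod/ref info for an atomicrmw against another
    // location. "AA", "Ptr", and "Size" come from the enclosing pass.
    #include "llvm/Analysis/AliasAnalysis.h"
    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    static bool rmwMayWriteTo(AAResults &AA, const AtomicRMWInst *RMW,
                              const Value *Ptr, LocationSize Size) {
      // Per the snippet above: stronger-than-monotonic orderings are
      // treated conservatively; otherwise the answer is alias-based.
      ModRefInfo MRI = AA.getModRefInfo(RMW, MemoryLocation(Ptr, Size));
      return isModSet(MRI);
    }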
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | InferAddressSpaces.cpp |
    364  else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))  in collectFlatAddressExpressions() local
    365  PushPtrOperand(RMW->getPointerOperand());  in collectFlatAddressExpressions()
    772  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))  in isSimplePointerUseValidToReplace() local
    774  (VolatileIsAllowed || !RMW->isVolatile());  in isSimplePointerUseValidToReplace()
|
D | LoopStrengthReduce.cpp |
    827  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {  in isAddressUse() local
    828  if (RMW->getPointerOperand() == OperandVal)  in isAddressUse()
    846  } else if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(Inst)) {  in getAccessType() local
    847  AccessTy.AddrSpace = RMW->getPointerAddressSpace();  in getAccessType()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/ |
D | AliasAnalysis.h |
    575  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc);
    578  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const Value *P,  in getModRefInfo() argument
    580  return getModRefInfo(RMW, MemoryLocation(P, Size));  in getModRefInfo()
    713  ModRefInfo getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc,
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/WebAssembly/ |
D | WebAssemblyInstrAtomics.td |
    519  // Truncating & zero-extending binary RMW patterns.
    540  // Truncating & sign-extending binary RMW patterns.
    543  // 64-bit) and select a zext RMW; the next instruction will be sext_inreg which
    552  // 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
    721  // Truncating & zero-extending ternary RMW patterns.
    742  // Truncating & sign-extending ternary RMW patterns.
    744  // zext RMW; the next instruction will be sext_inreg which is selected by
    757  // 32->64 sext RMW gets selected as i32.atomic.rmw.***, i64.extend_i32_s
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Instrumentation/ |
D | HWAddressSanitizer.cpp |
    529  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in isInterestingMemoryAccess() local
    532  *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());  in isInterestingMemoryAccess()
    534  PtrOperand = RMW->getPointerOperand();  in isInterestingMemoryAccess()
    566  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))  in getPointerOperandIndex() local
    567  return RMW->getPointerOperandIndex();  in getPointerOperandIndex()
|
D | AddressSanitizer.cpp |
    1370  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in isInterestingMemoryAccess() local
    1373  *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());  in isInterestingMemoryAccess()
    1375  PtrOperand = RMW->getPointerOperand();  in isInterestingMemoryAccess()
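Both sanitizers classify an atomicrmw the same way: the pointer operand is the accessed address, the access size is the store size of the value operand's type, and the operation counts as both a read and a write. A condensed sketch of that pattern (the helper name is illustrative, not from either file):

    // Simplified sketch of the sanitizer pattern above: an atomicrmw is
    // both a read and a write of its pointer operand, sized by the value
    // operand's type.
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    static bool describeAtomicRMWAccess(const Instruction *I,
                                        const DataLayout &DL,
                                        const Value *&PtrOperand,
                                        uint64_t &TypeSizeInBits) {
      if (const auto *RMW = dyn_cast<AtomicRMWInst>(I)) {
        PtrOperand = RMW->getPointerOperand();
        TypeSizeInBits =
            DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
        return true; // read-modify-write: instrument as load and store
      }
      return false;
    }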
|
/third_party/mesa3d/docs/relnotes/ |
D | 20.1.6.rst | 93 - spirv: Don't emit RMW for vector indexing in shared or global
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPUPromoteAlloca.cpp |
    558  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {  in collectUsesWithPtrTypes() local
    559  if (RMW->isVolatile())  in collectUsesWithPtrTypes()
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/docs/ |
D | DESIGN.rst |
    63   | Read-modify-write (RMW) transform | |
    522  - Read-modify-write (RMW) transformation
    921  Subzero's ``O2`` recipe includes an early pass to find read-modify-write (RMW)
    924  range ends after the RMW. Since liveness analysis is one of the most expensive
    925  passes, it's not attractive to run it an extra time just for RMW analysis.
    926  Instead, we essentially generate both the RMW and the non-RMW versions, and then
    927  during lowering, the RMW version deletes itself if it finds x still live.
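In other words, the transform looks for a load/modify/store sequence whose loaded value dies at the store and folds it into a single x86 instruction with a memory destination. An illustrative source-level pattern (not taken from the document) that such a transform targets:

    // Illustrative only: if x is not used after the store, the three steps
    // below can lower to one x86 RMW instruction such as
    // "add dword ptr [p], v" instead of a separate load, add, and store.
    void bump(int *p, int v) {
      int x = *p;  // load
      x = x + v;   // modify
      *p = x;      // store; x's live range ends here, so folding is legal
    }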
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/ |
D | DESIGN.rst |
    63   | Read-modify-write (RMW) transform | |
    522  - Read-modify-write (RMW) transformation
    921  Subzero's ``O2`` recipe includes an early pass to find read-modify-write (RMW)
    924  range ends after the RMW. Since liveness analysis is one of the most expensive
    925  passes, it's not attractive to run it an extra time just for RMW analysis.
    926  Instead, we essentially generate both the RMW and the non-RMW versions, and then
    927  during lowering, the RMW version deletes itself if it finds x still live.
|
/third_party/node/deps/v8/src/execution/loong64/ |
D | simulator-loong64.h | 553 RMW, enumerator
|
D | simulator-loong64.cc |
    5396  if (access_state_ == MonitorAccess::RMW) {  in NotifyLoad()
    5405  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked()
    5411  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore()
    5420  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional()
    5447  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked_Locked()
    5452  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore_Locked()
    5461  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional_Locked()
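These simulator hits (and the analogous mips, mips64, and riscv64 ones below) implement a local monitor for linked-load / store-conditional emulation: a load-linked puts the monitor into the RMW state and tags an address, other accesses conservatively drop the reservation, and a store-conditional succeeds only while the RMW state is still held for that address. A condensed sketch of the state machine being modeled (not the V8 code itself; names are illustrative):

    #include <cstdint>

    // Sketch of an LL/SC local monitor with a single tagged address.
    class LocalMonitor {
     public:
      enum class Access { Open, RMW };

      void NotifyLoadLinked(uintptr_t addr) {
        access_ = Access::RMW;  // enter the exclusive (RMW) state
        tagged_addr_ = addr;
      }
      void NotifyLoad() {
        access_ = Access::Open;  // conservative: a plain load drops it
      }
      void NotifyStore() {
        access_ = Access::Open;  // any plain store clears the reservation
      }
      bool NotifyStoreConditional(uintptr_t addr) {
        // Succeed only if the reservation is still held for this address.
        bool ok = access_ == Access::RMW && addr == tagged_addr_;
        access_ = Access::Open;
        return ok;
      }

     private:
      Access access_ = Access::Open;
      uintptr_t tagged_addr_ = 0;
    };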
|
/third_party/node/deps/v8/src/execution/mips/ |
D | simulator-mips.h | 626 RMW, enumerator
|
D | simulator-mips.cc |
    7129  if (access_state_ == MonitorAccess::RMW) {  in NotifyLoad()
    7138  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked()
    7144  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore()
    7153  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional()
    7180  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked_Locked()
    7185  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore_Locked()
    7194  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional_Locked()
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/ |
D | IceTargetLoweringX8632.cpp |
    628   auto *RMW =  in findRMW() local
    631   Node->getInsts().insert(I3, RMW);  in findRMW()
    6758  void TargetX8632::lowerRMW(const InstX86FakeRMW *RMW) {  in lowerRMW() argument
    6765  if (!RMW->isLastUse(RMW->getBeacon()))  in lowerRMW()
    6767  Operand *Src = RMW->getData();  in lowerRMW()
    6769  X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);  in lowerRMW()
    6777  switch (RMW->getOp()) {  in lowerRMW()
    6805  switch (RMW->getOp()) {  in lowerRMW()
    6835  if (const auto *RMW = llvm::dyn_cast<InstX86FakeRMW>(Instr)) {  in lowerOther() local
    6836  lowerRMW(RMW);  in lowerOther()
|
D | IceTargetLoweringX8664.cpp |
    637   auto *RMW =  in findRMW() local
    640   Node->getInsts().insert(I3, RMW);  in findRMW()
    6099  void TargetX8664::lowerRMW(const InstX86FakeRMW *RMW) {  in lowerRMW() argument
    6106  if (!RMW->isLastUse(RMW->getBeacon()))  in lowerRMW()
    6108  Operand *Src = RMW->getData();  in lowerRMW()
    6110  X86OperandMem *Addr = formMemoryOperand(RMW->getAddr(), Ty);  in lowerRMW()
    6112  switch (RMW->getOp()) {  in lowerRMW()
    6141  if (const auto *RMW = llvm::dyn_cast<InstX86FakeRMW>(Instr)) {  in lowerOther() local
    6142  lowerRMW(RMW);  in lowerOther()
|
D | IceTargetLoweringX8632.h | 285 void lowerRMW(const InstX86FakeRMW *RMW);
|
/third_party/node/deps/v8/src/execution/mips64/ |
D | simulator-mips64.h | 649 RMW, enumerator
|
D | simulator-mips64.cc |
    7563  if (access_state_ == MonitorAccess::RMW) {  in NotifyLoad()
    7572  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked()
    7578  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore()
    7587  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional()
    7614  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked_Locked()
    7619  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore_Locked()
    7628  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional_Locked()
|
/third_party/node/deps/v8/src/execution/riscv64/ |
D | simulator-riscv64.cc |
    7148  if (access_state_ == MonitorAccess::RMW) {  in NotifyLoad()
    7157  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked()
    7163  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore()
    7172  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional()
    7199  access_state_ = MonitorAccess::RMW;  in NotifyLoadLinked_Locked()
    7204  if (access_state_ == MonitorAccess::RMW) {  in NotifyStore_Locked()
    7213  if (access_state_ == MonitorAccess::RMW) {  in NotifyStoreConditional_Locked()
|
D | simulator-riscv64.h | 984 RMW, enumerator
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | CodeGenPrepare.cpp |
    4567  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {  in FindAllMemoryUses() local
    4571  MemoryUses.push_back(std::make_pair(RMW, opNo));  in FindAllMemoryUses()
    7125  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {  in optimizeInst() local
    7126  unsigned AS = RMW->getPointerAddressSpace();  in optimizeInst()
    7127  return optimizeMemoryInst(I, RMW->getPointerOperand(),  in optimizeInst()
    7128  RMW->getType(), AS);  in optimizeInst()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86ScheduleBtVer2.td |
    183  // A folded store needs a cycle on the SAGU for the store data, most RMW
    184  // instructions don't need an extra uop. ALU RMW operations don't seem to
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | TargetLowering.h |
    1837  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {  in shouldExpandAtomicRMWInIR() argument
    1838  return RMW->isFloatingPointOperation() ?  in shouldExpandAtomicRMWInIR()
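shouldExpandAtomicRMWInIR is the hook AtomicExpandPass uses to ask a target how to legalize each atomicrmw; the default shown above routes floating-point operations through a compare-exchange expansion. A hedged sketch of what a backend override might look like (MyTargetLowering and its policy are assumptions, not from the source):

    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Hypothetical target: only 32-bit integer RMW is supported natively,
    // so everything else is expanded to a cmpxchg loop in IR.
    class MyTargetLowering final : public TargetLowering {
    public:
      using TargetLowering::TargetLowering;

      AtomicExpansionKind
      shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override {
        if (RMW->isFloatingPointOperation() ||
            !RMW->getType()->isIntegerTy(32))
          return AtomicExpansionKind::CmpXChg;
        return AtomicExpansionKind::None;  // select natively
      }
    };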
|