/external/llvm/lib/Analysis/ |
D | TypeBasedAliasAnalysis.cpp |
    545  uint64_t OffsetA = TagA.getOffset(), OffsetB = TagB.getOffset();  in PathAliases() local
    549  return OffsetA == OffsetB;  in PathAliases()
    554  T = T.getParent(OffsetA);  in PathAliases()
    561  OffsetA = TagA.getOffset();  in PathAliases()
    565  return OffsetA == OffsetB;  in PathAliases()
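These hits are the offset bookkeeping in struct-path TBAA: one access tag is walked up its type path toward the other tag's type, folding member offsets into OffsetA along the way, and the accesses alias only if the final offsets coincide. A minimal self-contained sketch of that walk, with PathNode as an illustrative stand-in for the real TBAA node types (the actual getParent() semantics are more involved):

    #include <cstdint>

    // Illustrative stand-in, not the real LLVM TBAA node type.
    struct PathNode {
      const PathNode *Parent;   // enclosing struct type, or nullptr at the root
      uint64_t OffsetInParent;  // where this type sits inside its parent
    };

    // Walk tag A up its type path; if it reaches B's type, the two accesses
    // alias exactly when their accumulated offsets coincide.
    bool pathAliases(const PathNode *TA, uint64_t OffsetA,
                     const PathNode *TB, uint64_t OffsetB) {
      for (const PathNode *T = TA; T; T = T->Parent) {
        if (T == TB)
          return OffsetA == OffsetB;
        OffsetA += T->OffsetInParent;  // hop out one struct level
      }
      return true;  // no common ancestor found: conservatively may-alias
    }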
|
D | LoopAccessAnalysis.cpp |
    1020  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);  in isConsecutiveAccess() local
    1021  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);  in isConsecutiveAccess()
    1025  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);  in isConsecutiveAccess()
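The pattern here strips in-bounds constant GEP offsets from both pointers and then asks whether the byte distance between them equals the size of the first access, falling back to SCEV (as the OffsetSCEVA line shows) when the stripped bases differ syntactically. A minimal sketch of the same-base fast path, assuming the decomposition has already happened; DecomposedPtr is an illustrative stand-in:

    #include <cstdint>

    struct DecomposedPtr {
      const void *Base;  // underlying object after stripping constant offsets
      int64_t Offset;    // accumulated constant byte offset
    };

    // B directly follows A when both share a base and the byte distance
    // between them equals the size of A's access.
    bool isConsecutiveAccess(const DecomposedPtr &A, const DecomposedPtr &B,
                             int64_t AccessSizeA) {
      if (A.Base != B.Base)
        return false;  // the real code tries SCEV here; this sketch stays simple
      return B.Offset - A.Offset == AccessSizeA;
    }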
|
/external/llvm/lib/Target/Lanai/ |
D | LanaiInstrInfo.cpp |
    106  int64_t OffsetA = 0, OffsetB = 0;  in areMemAccessesTriviallyDisjoint() local
    108  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&  in areMemAccessesTriviallyDisjoint()
    111  int LowOffset = std::min(OffsetA, OffsetB);  in areMemAccessesTriviallyDisjoint()
    112  int HighOffset = std::max(OffsetA, OffsetB);  in areMemAccessesTriviallyDisjoint()
    113  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in areMemAccessesTriviallyDisjoint()
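This is the interval test that most of the backend hits below share: two fixed-width accesses off the same base register cannot overlap when the lower one ends at or before the higher one begins. A self-contained sketch of just that predicate:

    #include <algorithm>
    #include <cstdint>

    // True when [OffsetA, OffsetA+WidthA) and [OffsetB, OffsetB+WidthB)
    // are disjoint intervals.
    bool offsetsAreDisjoint(int64_t OffsetA, int64_t WidthA,
                            int64_t OffsetB, int64_t WidthB) {
      int64_t LowOffset  = std::min(OffsetA, OffsetB);
      int64_t HighOffset = std::max(OffsetA, OffsetB);
      int64_t LowWidth   = (LowOffset == OffsetA) ? WidthA : WidthB;
      return LowOffset + LowWidth <= HighOffset;
    }

Note the check is only sound when both accesses are known to use the same base register; that is what the surrounding getMemOpBaseRegImmOfsWidth() calls establish before the comparison runs.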
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Lanai/ |
D | LanaiInstrInfo.cpp |
    105  int64_t OffsetA = 0, OffsetB = 0;  in areMemAccessesTriviallyDisjoint() local
    107  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&  in areMemAccessesTriviallyDisjoint()
    110  int LowOffset = std::min(OffsetA, OffsetB);  in areMemAccessesTriviallyDisjoint()
    111  int HighOffset = std::max(OffsetA, OffsetB);  in areMemAccessesTriviallyDisjoint()
    112  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in areMemAccessesTriviallyDisjoint()
|
/external/swiftshader/third_party/subzero/src/ |
D | IceOperand.cpp |
    48  RelocOffsetT OffsetA = A.Offset;  in operator ==() local
    53  OffsetA += A.OffsetExpr[i]->getOffset();  in operator ==()
    65  return OffsetA == OffsetB;  in operator ==()
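Subzero's comparison here folds every symbolic sub-offset term into the base offset on each side before comparing. A simplified model of that accumulation, with Reloc standing in for the real Subzero type:

    #include <cstdint>
    #include <vector>

    struct Reloc {
      int32_t Offset;                   // base offset
      std::vector<int32_t> OffsetExpr;  // stands in for OffsetExpr[i]->getOffset()
    };

    // Two relocatables compare equal (for this check) when their fully
    // accumulated offsets match.
    bool sameAccumulatedOffset(const Reloc &A, const Reloc &B) {
      int32_t OffsetA = A.Offset, OffsetB = B.Offset;
      for (int32_t Term : A.OffsetExpr) OffsetA += Term;
      for (int32_t Term : B.OffsetExpr) OffsetB += Term;
      return OffsetA == OffsetB;
    }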
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/ |
D | MachineInstr.cpp |
    1051  int64_t OffsetA = MMOa->getOffset();  in mayAlias() local
    1054  int64_t MinOffset = std::min(OffsetA, OffsetB);  in mayAlias()
    1072  int64_t MaxOffset = std::max(OffsetA, OffsetB);  in mayAlias()
    1073  int64_t LowWidth = (MinOffset == OffsetA) ? WidthA : WidthB;  in mayAlias()
    1083  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");  in mayAlias()
    1086  int64_t Overlapa = WidthA + OffsetA - MinOffset;  in mayAlias()
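MachineInstr::mayAlias() re-expresses both accesses relative to the lower offset and uses the resulting spans (Overlapa, Overlapb in the snippet) as the sizes of its alias-analysis query. A sketch of that size computation; the struct and function names are illustrative:

    #include <algorithm>
    #include <cstdint>

    struct AAQuerySizes { int64_t SizeA, SizeB; };

    // Each query size covers the access plus its displacement from the
    // common starting point (the lower of the two offsets).
    AAQuerySizes overlapSizes(int64_t OffsetA, int64_t WidthA,
                              int64_t OffsetB, int64_t WidthB) {
      int64_t MinOffset = std::min(OffsetA, OffsetB);
      // e.g. A at offset 8 with width 4, B at offset 0 with width 4:
      // A is queried with size 4 + 8 - 0 = 12 bytes from the common start.
      return { WidthA + OffsetA - MinOffset, WidthB + OffsetB - MinOffset };
    }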
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/SystemZ/ |
D | SystemZInstrInfo.cpp |
    1882  int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();  in areMemAccessesTriviallyDisjoint() local
    1884  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;  in areMemAccessesTriviallyDisjoint()
    1885  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;  in areMemAccessesTriviallyDisjoint()
    1886  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in areMemAccessesTriviallyDisjoint()
|
/external/llvm/lib/Transforms/Vectorize/ |
D | LoadStoreVectorizer.cpp |
    248  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);  in isConsecutiveAccess() local
    249  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);  in isConsecutiveAccess()
    252  APInt OffsetDelta = OffsetB - OffsetA;  in isConsecutiveAccess()
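The vectorizer variant reduces the consecutiveness test to a single APInt subtraction once the constant offsets are accumulated: the accesses are adjacent when OffsetDelta equals the access size. Extended across a whole run of same-base accesses (a hypothetical helper, not part of the pass), the same delta test looks like:

    #include <cstdint>
    #include <vector>

    // Offsets share one base and were accumulated beforehand; the run is
    // vectorizable in this sense when each neighbour is exactly one
    // element apart.
    bool isConsecutiveChain(const std::vector<int64_t> &Offsets,
                            int64_t ElementSize) {
      for (size_t I = 1; I < Offsets.size(); ++I)
        if (Offsets[I] - Offsets[I - 1] != ElementSize)
          return false;
      return true;
    }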
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Transforms/Vectorize/ |
D | LoadStoreVectorizer.cpp |
    325  APInt OffsetA(PtrBitWidth, 0);  in areConsecutivePointers() local
    327  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);  in areConsecutivePointers()
    330  APInt OffsetDelta = OffsetB - OffsetA;  in areConsecutivePointers()
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
    661  int64_t OffsetA = 0, OffsetB = 0;  in areMemAccessesTriviallyDisjoint() local
    676  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&  in areMemAccessesTriviallyDisjoint()
    679  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;  in areMemAccessesTriviallyDisjoint()
    680  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;  in areMemAccessesTriviallyDisjoint()
    681  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in areMemAccessesTriviallyDisjoint()
|
/external/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    1333  static bool offsetsDoNotOverlap(int WidthA, int OffsetA,  in offsetsDoNotOverlap() argument
    1335  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;  in offsetsDoNotOverlap()
    1336  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;  in offsetsDoNotOverlap()
    1337  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in offsetsDoNotOverlap()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Analysis/ |
D | LoopAccessAnalysis.cpp |
    1196  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);  in isConsecutiveAccess() local
    1197  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);  in isConsecutiveAccess()
    1201  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);  in isConsecutiveAccess()
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfo.cpp |
    1628  int OffsetA = 0, OffsetB = 0;  in areMemAccessesTriviallyDisjoint() local
    1641  unsigned BaseRegA = getBaseAndOffset(&MIa, OffsetA, SizeA);  in areMemAccessesTriviallyDisjoint()
    1655  if (OffsetA > OffsetB) {  in areMemAccessesTriviallyDisjoint()
    1656  uint64_t offDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);  in areMemAccessesTriviallyDisjoint()
    1658  } else if (OffsetA < OffsetB) {  in areMemAccessesTriviallyDisjoint()
    1659  uint64_t offDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);  in areMemAccessesTriviallyDisjoint()
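Hexagon phrases disjointness without sorting the offsets: whichever access starts lower, the gap between the two offsets must be at least that access's size. A sketch of that formulation (the function name is illustrative; the real code checks the negated condition and bails out):

    #include <cstdint>

    bool accessesAreDisjoint(int64_t OffsetA, uint64_t SizeA,
                             int64_t OffsetB, uint64_t SizeB) {
      if (OffsetA > OffsetB) {
        uint64_t OffDiff = (uint64_t)(OffsetA - OffsetB);
        return OffDiff >= SizeB;  // B ends at or before A begins
      }
      if (OffsetA < OffsetB) {
        uint64_t OffDiff = (uint64_t)(OffsetB - OffsetA);
        return OffDiff >= SizeA;  // A ends at or before B begins
      }
      return false;  // identical offsets: the accesses certainly overlap
    }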
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AArch64/ |
D | AArch64InstrInfo.cpp |
    1082  int64_t OffsetA = 0, OffsetB = 0;  in areMemAccessesTriviallyDisjoint() local
    1097  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&  in areMemAccessesTriviallyDisjoint()
    1100  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;  in areMemAccessesTriviallyDisjoint()
    1101  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;  in areMemAccessesTriviallyDisjoint()
    1102  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in areMemAccessesTriviallyDisjoint()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfo.cpp |
    1855  int OffsetA = isPostIncrement(MIa) ? 0 : OffA.getImm();  in areMemAccessesTriviallyDisjoint() local
    1860  if (OffsetA > OffsetB) {  in areMemAccessesTriviallyDisjoint()
    1861  uint64_t OffDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);  in areMemAccessesTriviallyDisjoint()
    1864  if (OffsetA < OffsetB) {  in areMemAccessesTriviallyDisjoint()
    1865  uint64_t OffDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);  in areMemAccessesTriviallyDisjoint()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    2110  static bool offsetsDoNotOverlap(int WidthA, int OffsetA,  in offsetsDoNotOverlap() argument
    2112  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;  in offsetsDoNotOverlap()
    2113  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;  in offsetsDoNotOverlap()
    2114  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;  in offsetsDoNotOverlap()
|