/external/llvm/lib/Analysis/
DemandedBits.cpp  [determineLiveOperandBits()]
    145  uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
    146  AB = AOut.lshr(ShiftAmt);
    152  AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
    154  AB |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
    161  uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
    162  AB = AOut.shl(ShiftAmt);
    167  AB |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
    174  uint64_t ShiftAmt = CI->getLimitedValue(BitWidth-1);
    175  AB = AOut.shl(ShiftAmt);
    179  if ((AOut & APInt::getHighBitsSet(BitWidth, ShiftAmt))
    [all …]
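
The hits above are the demanded-bits transfer functions for shifts by a constant: the bits demanded of the result are mapped back onto the operand by shifting the mask the opposite way. A minimal standalone sketch of that mapping on plain 32-bit masks (illustrative only, not the LLVM code; the function names are invented):

    #include <cassert>
    #include <cstdint>

    // Which operand bits are live, given the bits demanded of the shift result?
    // For x >> amt (logical), result bit i comes from operand bit i + amt,
    // so the demanded mask moves left by the same amount.
    uint32_t demandedOperandBitsForLShr(uint32_t demandedOut, unsigned amt) {
        return demandedOut << amt;
    }

    // For x << amt the mapping runs the other way: the mask moves right.
    uint32_t demandedOperandBitsForShl(uint32_t demandedOut, unsigned amt) {
        return demandedOut >> amt;
    }

    int main() {
        // Only bit 0 of (x >> 3) is wanted, so only bit 3 of x is live.
        assert(demandedOperandBitsForLShr(0x1u, 3) == 0x8u);
        // Only bit 7 of (x << 4) is wanted, so only bit 3 of x is live.
        assert(demandedOperandBitsForShl(0x80u, 4) == 0x8u);
        return 0;
    }

The extra getHighBitsSet/getLowBitsSet terms in the real code handle the nsw/nuw and exact flags, which make additional operand bits observable.
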
ValueTracking.cpp  [computeKnownBitsFromShiftOperator(), computeKnownBitsFromOperator()]
    992  unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);
    995  KnownZero = KZF(KnownZero, ShiftAmt);
    996  KnownOne = KOF(KnownOne, ShiftAmt);
    1028  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    1031  if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
    1033  if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
    1038  if (ShiftAmt == 0) {
    1046  KnownZero &= KZF(KnownZero2, ShiftAmt);
    1047  KnownOne &= KOF(KnownOne2, ShiftAmt);
    1214  auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
    [all …]
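
These hits compute known bits through a shift: with a constant amount the per-opcode transfer functions KZF/KOF are applied once, and with an unknown amount the loop at line 1028 tries every shift amount compatible with what is known about the amount and intersects the results. A small self-contained sketch of the same idea for shl on 32-bit masks (an illustration under simplified assumptions, not the LLVM routine):

    #include <cassert>
    #include <cstdint>

    // Known bits are tracked as a pair of masks: bits guaranteed zero and
    // bits guaranteed one.
    struct KnownBits32 {
        uint32_t Zero;
        uint32_t One;
    };

    // Known bits of (x << amt) for a constant amount (amt < 32): both masks
    // shift left and the vacated low bits become known zero.
    KnownBits32 knownShlConst(KnownBits32 x, unsigned amt) {
        return { (x.Zero << amt) | ((1u << amt) - 1u), x.One << amt };
    }

    // With an unknown amount, a bit stays known only if it is known the same
    // way for every feasible amount (here simply 0..maxAmt).
    KnownBits32 knownShlVariable(KnownBits32 x, unsigned maxAmt) {
        KnownBits32 acc = { 0xFFFFFFFFu, 0xFFFFFFFFu }; // identity for the intersection
        for (unsigned amt = 0; amt <= maxAmt; ++amt) {
            KnownBits32 k = knownShlConst(x, amt);
            acc.Zero &= k.Zero;
            acc.One  &= k.One;
        }
        return acc;
    }

    int main() {
        // Bits 4-7 known one, bits 8-31 known zero, bits 0-3 unknown.
        KnownBits32 x = { 0xFFFFFF00u, 0x000000F0u };
        KnownBits32 c = knownShlConst(x, 4);
        assert(c.Zero == 0xFFFFF00Fu && c.One == 0x00000F00u);
        KnownBits32 v = knownShlVariable(x, 4); // shift amount somewhere in [0, 4]
        assert(v.One == 0 && (v.Zero & 0xFFFFF000u) == 0xFFFFF000u); // much less is known
        return 0;
    }
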
ConstantFolding.cpp  [FoldBitCast()]
    176  unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
    187  ConstantInt::get(Src->getType(), ShiftAmt));
    188  ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
    208  unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    213  ConstantInt::get(Src->getType(), ShiftAmt));
    214  ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
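
The bitcast fold walks the elements of the wide value with a shift amount that starts at 0 and grows (little-endian) or starts at the top chunk and shrinks (big-endian). A standalone sketch of that walk, splitting a 64-bit value into 16-bit pieces (illustrative constants and names, not the folding code itself):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Split a wide integer into narrower elements the way a bitcast lays them
    // out: element 0 is the low chunk on little-endian targets and the high
    // chunk on big-endian targets.
    std::vector<uint16_t> splitToU16(uint64_t src, bool isLittleEndian) {
        const unsigned SrcBits = 16, Ratio = 4;
        int shiftAmt = isLittleEndian ? 0 : int(SrcBits * (Ratio - 1));
        std::vector<uint16_t> elts;
        for (unsigned i = 0; i != Ratio; ++i) {
            elts.push_back(static_cast<uint16_t>(src >> shiftAmt));
            shiftAmt += isLittleEndian ? int(SrcBits) : -int(SrcBits);
        }
        return elts;
    }

    int main() {
        uint64_t v = 0x1122334455667788ULL;
        std::vector<uint16_t> le = splitToU16(v, true);
        std::vector<uint16_t> be = splitToU16(v, false);
        assert(le[0] == 0x7788 && le[3] == 0x1122); // low chunk first
        assert(be[0] == 0x1122 && be[3] == 0x7788); // high chunk first
        return 0;
    }
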
/external/llvm/lib/Transforms/InstCombine/
InstCombineSimplifyDemanded.cpp  [SimplifyDemandedUseBits()]
    567  uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
    568  APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
    573  DemandedMaskIn |= APInt::getHighBitsSet(BitWidth, ShiftAmt+1);
    575  DemandedMaskIn |= APInt::getHighBitsSet(BitWidth, ShiftAmt);
    581  KnownZero <<= ShiftAmt;
    582  KnownOne <<= ShiftAmt;
    584  if (ShiftAmt)
    585  KnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
    591  uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
    594  APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
    [all …]
InstCombineCasts.cpp  [transformZExtICmp(), canEvaluateZExtd(), transformSExtICmp()]
    642  uint32_t ShiftAmt = KnownZeroMask.logBase2();
    644  if (ShiftAmt) {
    647  In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
    784  uint64_t ShiftAmt = Amt->getZExtValue();
    785  BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
    1018  unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
    1020  if (ShiftAmt)
    1022  ConstantInt::get(In->getType(), ShiftAmt));
    1032  unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
    1034  if (ShiftAmt)
    [all …]
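
The zext/sext transforms here turn a compare against a single known bit into a shift: logBase2 (or the leading/trailing zero count) of the power-of-two mask is exactly how far the tested bit has to travel to reach bit 0. A tiny standalone illustration of the fold (hypothetical helper, not the InstCombine code):

    #include <cassert>
    #include <cstdint>

    // zext(icmp ne (x & Pow2), 0) can be rewritten as (x >> log2(Pow2)) & 1,
    // replacing a compare with a shift.  pow2Mask must have exactly one bit set.
    uint32_t testBitViaShift(uint32_t x, uint32_t pow2Mask) {
        unsigned shiftAmt = 0;
        while ((pow2Mask >> shiftAmt) != 1u)
            ++shiftAmt;                     // logBase2 of the mask
        return (x >> shiftAmt) & 1u;        // == ((x & pow2Mask) != 0)
    }

    int main() {
        assert(testBitViaShift(0x28u, 0x08u) == 1u); // bit 3 set
        assert(testBitViaShift(0x20u, 0x08u) == 0u); // bit 3 clear
        return 0;
    }
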
InstCombineCalls.cpp  [SimplifyX86immshift()]
    298  auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
    299  auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
/external/llvm/lib/Target/AArch64/
AArch64ExpandPseudoInsts.cpp  [replicateChunk(), tryOrrMovk(), tryToreplicateChunks()]
    87  const unsigned ShiftAmt = ToIdx * 16;
    90  const uint64_t Chunk = getChunk(Imm, FromIdx) << ShiftAmt;
    92  Imm &= ~(0xFFFFLL << ShiftAmt);
    104  const unsigned ShiftAmt = ChunkIdx * 16;
    124  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
    185  unsigned ShiftAmt = 0;
    188  for (; ShiftAmt < 64; ShiftAmt += 16) {
    189  Imm16 = (UImm >> ShiftAmt) & 0xFFFF;
    202  .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt));
    213  for (ShiftAmt += 16; ShiftAmt < 64; ShiftAmt += 16) {
    [all …]
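
This pseudo-expansion builds a 64-bit immediate out of 16-bit chunks, each placed with LSL #0/#16/#32/#48, which is why every shift amount here is a chunk index times 16. A self-contained sketch of the decomposition (illustrative only; it merely counts the chunks a MOVZ/MOVK sequence would need and checks the value reassembles, it does not emit instructions):

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint64_t Imm = 0x12340000A678ABCDULL;
        uint64_t rebuilt = 0;
        unsigned insns = 0;
        for (unsigned shiftAmt = 0; shiftAmt < 64; shiftAmt += 16) {
            uint64_t chunk = (Imm >> shiftAmt) & 0xFFFFULL;
            if (chunk == 0)
                continue;                  // an all-zero chunk needs no instruction
            rebuilt |= chunk << shiftAmt;  // what MOVZ/MOVK ... LSL #shiftAmt builds
            ++insns;
        }
        assert(rebuilt == Imm);
        assert(insns == 3);                // the zero chunk was skipped
        return 0;
    }
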
AArch64ConditionOptimizer.cpp  [findSuitableCompare()]
    158  unsigned ShiftAmt = AArch64_AM::getShiftValue(I->getOperand(3).getImm());
    162  } else if (I->getOperand(2).getImm() << ShiftAmt >= 0xfff) {
AArch64ISelDAGToDAG.cpp  [SelectArithImmed(), getUsefulBitsFromOrWithShiftedReg()]
    260  unsigned ShiftAmt;
    263  ShiftAmt = 0;
    265  ShiftAmt = 12;
    270  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
    1779  uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    1780  Mask = Mask.shl(ShiftAmt);
    1782  Mask = Mask.lshr(ShiftAmt);
    1787  uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
    1788  Mask = Mask.lshr(ShiftAmt);
    1790  Mask = Mask.shl(ShiftAmt);
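
SelectArithImmed picks ShiftAmt = 0 or 12 because AArch64 add/sub immediates are 12-bit values optionally shifted left by 12. A minimal predicate sketch of that encoding check (an illustration under those assumptions, not the actual selector):

    #include <cassert>
    #include <cstdint>

    // Returns true if imm is encodable as an add/sub immediate, and reports
    // the required shift (0 or 12).
    bool selectArithImmed(uint64_t imm, unsigned &shiftAmt) {
        if (imm < (1ULL << 12)) {
            shiftAmt = 0;                       // plain 12-bit immediate
            return true;
        }
        if ((imm & 0xFFFULL) == 0 && imm < (1ULL << 24)) {
            shiftAmt = 12;                      // 12-bit immediate, LSL #12
            return true;
        }
        return false;                           // needs another materialisation
    }

    int main() {
        unsigned s;
        assert(selectArithImmed(0xABC, s) && s == 0);
        assert(selectArithImmed(0xABC000, s) && s == 12);
        assert(!selectArithImmed(0xABC001, s));
        return 0;
    }
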
/external/llvm/lib/Target/X86/
X86ISelDAGToDAG.cpp  [foldMaskedShiftToScaledMask(), foldMaskAndShiftToScale()]
    975  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
    976  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    981  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
    995  AM.Scale = 1 << ShiftAmt;
    1035  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
    1052  MaskLZ -= (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
    1087  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
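
The fold only fires for shifts of 1, 2 or 3 because those are the left shifts an x86 address can absorb as scale factors 2, 4 and 8, and the AND mask is moved across the shift by shifting it right the same amount. A small check of the underlying identity (illustration only, not the selection code):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t X = 0x12345, Mask = 0xFF0;
        for (unsigned shiftAmt = 1; shiftAmt <= 3; ++shiftAmt) {
            unsigned scale = 1u << shiftAmt;               // address scale 2, 4 or 8
            uint64_t before = (X << shiftAmt) & Mask;      // (and (shl X, c), M)
            uint64_t after  = (X & (Mask >> shiftAmt)) * scale; // scaled-index form
            assert(before == after);
        }
        return 0;
    }
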
X86ISelLowering.cpp  [lowerVectorShuffleAsShift(), getTargetVShiftByConstNode()]
    7454  int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);
    7468  DAG.getConstant(ShiftAmt, DL, MVT::i8));
    15973  SDValue SrcOp, uint64_t ShiftAmt,
    15978  if (ShiftAmt == 0)
    15982  if (ShiftAmt >= ElementType.getSizeInBits()) {
    15984  ShiftAmt = ElementType.getSizeInBits() - 1;
    16011  Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
    16023  Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
    16035  Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
    16044  DAG.getConstant(ShiftAmt, dl, MVT::i8));
    [all …]
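
Line 15984 clamps an out-of-range arithmetic shift to ElementBits - 1: unlike an IR shift, an SSE/AVX arithmetic shift by at least the element width simply fills the lane with sign bits, so clamping preserves the value. A scalar sketch of that behaviour (an illustration of the assumed target semantics, not the lowering code; it also relies on >> of a negative int being arithmetic, which mainstream compilers guarantee):

    #include <cassert>
    #include <cstdint>

    // x86-style arithmetic right shift: counts >= the element width saturate,
    // producing all sign bits rather than an undefined value.
    int32_t x86StyleAshr(int32_t x, unsigned shiftAmt) {
        if (shiftAmt >= 32)
            shiftAmt = 31;        // clamp: the lane is already all sign bits
        return x >> shiftAmt;
    }

    int main() {
        assert(x86StyleAshr(-5, 40) == -1);
        assert(x86StyleAshr(5, 40) == 0);
        return 0;
    }
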
/external/llvm/lib/Target/PowerPC/
PPCISelDAGToDAG.cpp  [getValueBits(), Select()]
    940  unsigned ShiftAmt = V.getConstantOperandVal(1);
    945  for (unsigned i = ShiftAmt; i < Bits.size(); ++i)
    946  Bits[i] = LHSBits[i - ShiftAmt];
    948  for (unsigned i = 0; i < ShiftAmt; ++i)
    956  unsigned ShiftAmt = V.getConstantOperandVal(1);
    961  for (unsigned i = 0; i < Bits.size() - ShiftAmt; ++i)
    962  Bits[i] = LHSBits[i + ShiftAmt];
    964  for (unsigned i = Bits.size() - ShiftAmt; i < Bits.size(); ++i)
    2468  SDValue ShiftAmt =
    2475  N0, ShiftAmt);
    [all …]
PPCISelLowering.cpp  [isVSLDOIShuffleMask(), isQVALIGNIShuffleMask()]
    1423  unsigned ShiftAmt = SVOp->getMaskElt(i);
    1424  if (ShiftAmt < i) return -1;
    1426  ShiftAmt -= i;
    1432  if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
    1437  if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
    1443  ShiftAmt = 16 - ShiftAmt;
    1445  return ShiftAmt;
    1618  unsigned ShiftAmt = SVOp->getMaskElt(i);
    1619  if (ShiftAmt < i) return -1;
    1620  ShiftAmt -= i;
    [all …]
/external/llvm/lib/Target/Mips/
MipsISelLowering.cpp  [emitAtomicBinaryPartword(), emitAtomicCmpSwapPartword()]
    1190  unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
    1244  BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
    1249  BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
    1254  .addReg(MaskUpper).addReg(ShiftAmt);
    1256  BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
    1317  .addReg(MaskedOldVal1).addReg(ShiftAmt);
    1428  unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
    1489  BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
    1494  BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
    1499  .addReg(MaskUpper).addReg(ShiftAmt);
    [all …]
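
The SLL ... #3 here converts the low bits of the address into a bit offset (bytes * 8), because a byte or halfword atomic is implemented as a read-modify-write of the aligned 32-bit word that contains it. A rough scalar sketch of that positioning arithmetic (an illustration of the idea only; the real expansion also emits an LL/SC retry loop and a big-endian adjustment):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t addr = 0x1002;                 // address of a 16-bit field
        unsigned ptrLSB2 = addr & 0x3;          // low two address bits
        unsigned shiftAmt = ptrLSB2 * 8;        // bit offset inside the aligned word (SLL #3)
        uint32_t maskUpper = 0xFFFFu;           // mask for a halfword
        uint32_t mask = maskUpper << shiftAmt;  // halfword slot within the word
        uint32_t word = 0xAABBCCDDu;            // current contents of the aligned word
        uint32_t incr = 0x0001u << shiftAmt;    // operand shifted into the same slot
        // Apply the binop only inside the masked slot, leave the rest untouched.
        uint32_t newWord = (word & ~mask) | ((word + incr) & mask);
        assert(shiftAmt == 16);
        assert(((newWord >> shiftAmt) & 0xFFFFu) == 0xAABCu); // halfword incremented
        assert((newWord & 0xFFFFu) == 0xCCDDu);               // neighbouring bytes preserved
        return 0;
    }
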
MipsFastISel.cpp  [emitIntSExt32r1()]
    1564  unsigned ShiftAmt;
    1569  ShiftAmt = 24;
    1572  ShiftAmt = 16;
    1576  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
    1577  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);
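
The SLL/SRA pair with the same amount (24 for i8, 16 for i16) is the classic shift-based sign extension: push the narrow value to the top of the register, then arithmetic-shift it back down. A standalone sketch (illustrative; it assumes >> of a negative int is arithmetic, which is what mainstream compilers do):

    #include <cassert>
    #include <cstdint>

    // Sign-extend the low srcBits bits of src to a full 32-bit value.
    int32_t signExtendViaShifts(uint32_t src, unsigned srcBits) {
        unsigned shiftAmt = 32 - srcBits;                          // 24 for i8, 16 for i16
        return static_cast<int32_t>(src << shiftAmt) >> shiftAmt;  // SLL then SRA
    }

    int main() {
        assert(signExtendViaShifts(0x80u, 8) == -128);
        assert(signExtendViaShifts(0x7Fu, 8) == 127);
        assert(signExtendViaShifts(0xFFFFu, 16) == -1);
        return 0;
    }
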
/external/llvm/lib/ExecutionEngine/Interpreter/
Execution.cpp  [executeBitCastInst()]
    1563  unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
    1569  Tmp = Tmp.shl(ShiftAmt);
    1570  ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
    1579  unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
    1584  Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
    1588  ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
/external/llvm/lib/Transforms/Scalar/
GVN.cpp  [GetStoreValueForLoad()]
    1150  unsigned ShiftAmt;
    1152  ShiftAmt = Offset*8;
    1154  ShiftAmt = (StoreSize-LoadSize-Offset)*8;
    1156  if (ShiftAmt)
    1157  SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);
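
When a load is fully covered by an earlier store, the loaded value can be carved out of the stored value with a right shift and a truncation; the shift is the byte offset times 8 on little-endian targets and the distance from the top on big-endian targets. A self-contained sketch of that computation (illustration only, hypothetical helper name):

    #include <cassert>
    #include <cstdint>

    // Forward a narrow load from a wider stored value.  Sizes and offset are
    // in bytes; the load must lie entirely inside the store.
    uint64_t loadFromStoredValue(uint64_t storedVal, unsigned storeSize,
                                 unsigned loadSize, unsigned offset,
                                 bool littleEndian) {
        unsigned shiftAmt = littleEndian ? offset * 8
                                         : (storeSize - loadSize - offset) * 8;
        uint64_t mask = loadSize >= 8 ? ~0ULL : ((1ULL << (loadSize * 8)) - 1);
        return (storedVal >> shiftAmt) & mask;   // shift down, then truncate
    }

    int main() {
        uint64_t stored = 0x1122334455667788ULL;              // an 8-byte store
        // A 2-byte load at byte offset 2 within that store:
        assert(loadFromStoredValue(stored, 8, 2, 2, true)  == 0x5566u); // little-endian
        assert(loadFromStoredValue(stored, 8, 2, 2, false) == 0x3344u); // big-endian
        return 0;
    }
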
/external/llvm/lib/Target/AArch64/AsmParser/
AArch64AsmParser.cpp  [addAddSubImmNegOperands(), parseOperand()]
    1285  unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
    1288  Inst.addOperand(MCOperand::createImm(ShiftAmt));
    3287  uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
    3289  ShiftAmt += 16;
    3292  if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
    3296  if (ShiftAmt)
    3298  ShiftAmt, true, S, E, Ctx));
    3301  APInt Simm = APInt(64, Imm << ShiftAmt);
/external/clang/lib/CodeGen/
CGBuiltin.cpp  [EmitNeonRShiftImm(), EmitCommonNeonBuiltinExpr(), EmitAArch64BuiltinExpr()]
    2189  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
    2196  if (ShiftAmt == EltSize) {
    2203  --ShiftAmt;
    2204  Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    2970  Constant *ShiftAmt =
    2972  Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
    3266  Constant *ShiftAmt =
    3268  Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
    4965  uint64_t ShiftAmt = Amt->getZExtValue();
    4967  if (ShiftAmt == 64)
    [all …]
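
The vaddhn/vsubhn lowering shifts the full-width sum or difference right by half the element width before truncating, since those intrinsics return only the high half of each lane. A scalar sketch of that narrowing step (an inference about the intrinsic semantics based on the snippet, not the clang lowering itself):

    #include <cassert>
    #include <cstdint>

    // "Add and take the high half": compute the wide sum, shift right by half
    // the element width, then truncate to the narrow type.
    uint16_t addhn32(uint32_t a, uint32_t b) {
        const unsigned eltBits = 32;
        unsigned shiftAmt = eltBits / 2;                    // 16
        return static_cast<uint16_t>((a + b) >> shiftAmt);  // lshr + trunc
    }

    int main() {
        assert(addhn32(0x12340000u, 0x00010000u) == 0x1235u);
        return 0;
    }
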
/external/llvm/lib/Target/AMDGPU/
AMDGPUISelLowering.cpp  [LowerLOAD(), LowerSTORE()]
    1453  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
    1457  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);
    1517  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
    1526  MaskedValue, ShiftAmt);
    1530  ShiftAmt);
/external/llvm/lib/CodeGen/SelectionDAG/
TargetLowering.cpp  [SimplifyDemandedBits(), expandMUL()]
    842  SDValue ShiftAmt = TLO.DAG.getConstant(BitWidth - ShAmt, dl,
    846  ShiftAmt));
    2960  unsigned ShiftAmt = VT.getSizeInBits() - HiLoVT.getSizeInBits();
    2961  SDValue Shift = DAG.getConstant(ShiftAmt, dl, getShiftAmountTy(VT, DL));
/external/llvm/lib/Target/NVPTX/
NVPTXISelDAGToDAG.cpp  [SelectBFE()]
    4863  uint64_t ShiftAmt = ShiftCnst->getZExtValue();
    4886  NumBits = countTrailingOnes(MaskVal) - ShiftAmt;
    4893  NumBits = NumZeros + NumOnes - ShiftAmt;
    4899  if (ShiftAmt < NumZeros) {
    4906  Start = CurDAG->getTargetConstant(ShiftAmt, DL, MVT::i32);
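
SelectBFE recognises (x >> ShiftAmt) & Mask, where the mask is a contiguous run of ones, as a hardware bit-field extract whose start is the shift amount and whose length comes from the mask. A small scalar sketch of the equivalence (an illustration, not the pattern-matching code):

    #include <cassert>
    #include <cstdint>

    // Extract `len` bits of x starting at bit `start`, the way a BFE
    // instruction would.
    uint32_t bfe(uint32_t x, unsigned start, unsigned len) {
        uint32_t lenMask = (len >= 32) ? ~0u : ((1u << len) - 1u);
        return (x >> start) & lenMask;
    }

    int main() {
        uint32_t x = 0xDEADBEEFu;
        unsigned shiftAmt = 8;
        uint32_t mask = 0xFFFu;        // 12 trailing ones
        unsigned numBits = 12;         // countTrailingOnes(mask)
        assert(((x >> shiftAmt) & mask) == bfe(x, shiftAmt, numBits));
        assert(bfe(x, 8, 12) == 0xDBEu);
        return 0;
    }
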
NVPTXISelLowering.cpp  [TryMULWIDECombine()]
    4187  APInt ShiftAmt = ShlRHS->getAPIntValue();
    4189  if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
    4190  APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
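
Line 4190 rewrites a left shift by k as a multiplication by 1 << k so the operand can feed a pattern that only matches multiplies (mul.wide). The equivalence it relies on, checked in one line (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t x = 123;
        unsigned shiftAmt = 5;
        uint32_t mulVal = 1u << shiftAmt;      // the APInt(BitWidth, 1) << ShiftAmt above
        assert((x << shiftAmt) == x * mulVal); // shl by k  ==  mul by 2^k
        return 0;
    }
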
/external/llvm/lib/Transforms/Utils/
SimplifyCFG.cpp  [BuildLookup()]
    4298  Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast");
    4301  ShiftAmt = Builder.CreateMul(ShiftAmt,
    4306  Value *DownShifted = Builder.CreateLShr(BitMap, ShiftAmt,
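
This is the bitmap form of a switch lookup table: all results are packed into one integer, and a lookup multiplies the index by the element width, shifts the bitmap right, and truncates. A standalone sketch with four 8-bit entries (illustrative constants, not the SimplifyCFG code):

    #include <cassert>
    #include <cstdint>

    int main() {
        // Four 8-bit table entries packed into one 32-bit bitmap (entry 0 lowest).
        const uint32_t bitMap = (0x40u << 24) | (0x30u << 16) | (0x20u << 8) | 0x10u;
        const unsigned eltBits = 8;
        for (uint32_t index = 0; index < 4; ++index) {
            uint32_t shiftAmt = index * eltBits;                       // index * element width
            uint8_t value = static_cast<uint8_t>(bitMap >> shiftAmt);  // lshr + trunc
            assert(value == 0x10u + 0x10u * index);
        }
        return 0;
    }
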
/external/llvm/lib/Target/Sparc/
SparcISelLowering.cpp  [LowerUMULO_SMULO()]
    2877  SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
    2880  SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
    2881  SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
    2892  SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);