/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMLegalizerInfo.cpp |
    400  auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();  in legalizeCustom() local
    405  auto Libcalls = getFCmpLibcalls(Predicate, OpSize);  in legalizeCustom()
    417  assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");  in legalizeCustom()
    418  auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);  in legalizeCustom()
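These hits are ARM's custom G_FCMP legalization: the operand width selects between the float and double soft-float comparison libcalls. A minimal standalone sketch of that width-to-type dispatch (plain C++, not the LLVM API; the function name is made up):

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for the size-driven type choice in legalizeCustom():
    // a 32-bit FP compare lowers to a libcall taking float, a 64-bit one double.
    const char *fcmpLibcallArgType(unsigned OpSize) {
      assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
      return OpSize == 32 ? "float" : "double";
    }

    int main() {
      std::printf("%s\n", fcmpLibcallArgType(32)); // float
      std::printf("%s\n", fcmpLibcallArgType(64)); // double
    }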
|
D | ARMInstructionSelector.cpp |
    462  unsigned OpRegBank, unsigned OpSize)  in CmpConstants()
    465  OperandSize(OpSize) {}  in CmpConstants()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/IR/ |
D | Metadata.cpp |
    481  size_t OpSize = NumOps * sizeof(MDOperand);  in operator new() local
    484  OpSize = alignTo(OpSize, alignof(uint64_t));  in operator new()
    485  void *Ptr = reinterpret_cast<char *>(::operator new(OpSize + Size)) + OpSize;  in operator new()
    496  size_t OpSize = N->NumOperands * sizeof(MDOperand);  in operator delete() local
    497  OpSize = alignTo(OpSize, alignof(uint64_t));  in operator delete()
    502  ::operator delete(reinterpret_cast<char *>(Mem) - OpSize);  in operator delete()
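These two operators implement MDNode's co-allocated operand storage: operator new reserves room for the operands in front of the node and returns a pointer past them, and operator delete undoes the same offset before freeing. A standalone sketch of the trick, with a hypothetical Operand type standing in for MDOperand:

    #include <cstddef>
    #include <cstdint>
    #include <new>

    // Standalone sketch of the co-allocation in MDNode's operator new/delete:
    // operand slots live in a prefix before the node, the returned pointer is
    // bumped past them, and delete subtracts the same (aligned) prefix size.
    struct Operand { void *Val; };  // hypothetical stand-in for MDOperand

    inline size_t alignTo(size_t Value, size_t Align) {
      return (Value + Align - 1) / Align * Align;
    }

    void *allocateWithPrefix(size_t Size, unsigned NumOps) {
      size_t OpSize = alignTo(NumOps * sizeof(Operand), alignof(uint64_t));
      char *Raw = static_cast<char *>(::operator new(OpSize + Size));
      return Raw + OpSize;  // node goes here; operands occupy [Raw, Raw+OpSize)
    }

    void deallocateWithPrefix(void *Mem, unsigned NumOps) {
      size_t OpSize = alignTo(NumOps * sizeof(Operand), alignof(uint64_t));
      ::operator delete(static_cast<char *>(Mem) - OpSize);
    }

    int main() {
      void *Node = allocateWithPrefix(/*Size=*/64, /*NumOps=*/3);
      deallocateWithPrefix(Node, 3);
    }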
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64RegisterBankInfo.cpp |
    616  SmallVector<unsigned, 4> OpSize(NumOperands);  in getInstrMapping() local
    624  OpSize[Idx] = Ty.getSizeInBits();  in getInstrMapping()
    667  OpSize[0]);  in getInstrMapping()
    842  auto Mapping = getValueMapping(OpRegBankIdx[Idx], OpSize[Idx]);  in getInstrMapping()
|
D | AArch64InstructionSelector.cpp |
    477  unsigned OpSize) {  in selectBinaryOp() argument
    480  if (OpSize == 32) {  in selectBinaryOp()
    491  } else if (OpSize == 64) {  in selectBinaryOp()
    507  switch (OpSize) {  in selectBinaryOp()
    548  unsigned OpSize) {  in selectLoadStoreUIOp() argument
    552  switch (OpSize) {  in selectLoadStoreUIOp()
    564  switch (OpSize) {  in selectLoadStoreUIOp()
    878  unsigned OpSize = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();  in selectFCMPOpc() local
    879  if (OpSize != 32 && OpSize != 64)  in selectFCMPOpc()
    883  return CmpOpcTbl[ShouldUseImm][OpSize == 64];  in selectFCMPOpc()
    [all …]
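Line 883 is a compact 2x2 opcode-table lookup: one axis is whether the compare is against the #0.0 immediate, the other is 32- vs. 64-bit operands. A sketch of the same indexing trick, with opcode names as illustrative strings rather than the backend's enums:

    #include <cstdio>

    // Sketch of the table lookup in selectFCMPOpc(): bool indices pick the row
    // (register vs. #0.0-immediate compare) and column (32- vs. 64-bit).
    const char *selectFCmpOpc(bool ShouldUseImm, unsigned OpSize) {
      if (OpSize != 32 && OpSize != 64)
        return nullptr; // unsupported width, as in the original early-out
      static const char *const CmpOpcTbl[2][2] = {
          {"FCMPSrr", "FCMPDrr"},  // register-register, 32-/64-bit
          {"FCMPSri", "FCMPDri"}}; // register-immediate(#0.0), 32-/64-bit
      return CmpOpcTbl[ShouldUseImm][OpSize == 64];
    }

    int main() {
      std::printf("%s\n", selectFCmpOpc(true, 64)); // FCMPDri
    }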
|
D | AArch64ISelLowering.cpp |
    4147  unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8  in LowerCall() local
    4149  OpSize = (OpSize + 7) / 8;  in LowerCall()
    4152  if (OpSize < 8)  in LowerCall()
    4153  BEAlign = 8 - OpSize;  in LowerCall()
    4162  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);  in LowerCall()
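The arithmetic here rounds a bit width up to whole bytes and, on big-endian targets, pads small arguments so their bytes sit at the high end of an 8-byte stack slot. A worked sketch of just that math (helper names invented for illustration):

    #include <cstdio>

    // Bits round up to bytes: 1..8 bits -> 1 byte, 33 bits -> 5 bytes.
    unsigned bytesForArg(unsigned SizeInBits) {
      return (SizeInBits + 7) / 8;
    }

    // On big-endian AArch64, an argument smaller than the 8-byte slot is
    // shifted toward the slot's high address by this many padding bytes.
    unsigned bigEndianPadding(unsigned OpSize) {
      return OpSize < 8 ? 8 - OpSize : 0;
    }

    int main() {
      unsigned OpSize = bytesForArg(33); // 5 bytes
      std::printf("%u bytes, BE pad %u\n", OpSize, bigEndianPadding(OpSize)); // 5, 3
    }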
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Analysis/ |
D | TargetTransformInfoImpl.h |
    76  unsigned OpSize = OpTy->getScalarSizeInBits();  in getOperationCost() local
    77  if (DL.isLegalInteger(OpSize) &&  in getOperationCost()
    78  OpSize <= DL.getPointerTypeSizeInBits(Ty))  in getOperationCost()
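The cost-model test treats an integer operation as cheap only when the target natively supports its width and the width fits in a pointer. A sketch with a hard-coded stand-in for DataLayout's legal-integer query:

    #include <cstdio>
    #include <initializer_list>

    // Sketch of the legality test in getOperationCost(). The legal widths are
    // a made-up stand-in for what DataLayout would report for a real target.
    bool isCheapIntOp(unsigned OpSize, unsigned PointerSize) {
      for (unsigned Legal : {8u, 16u, 32u, 64u})
        if (OpSize == Legal)            // DL.isLegalInteger(OpSize)
          return OpSize <= PointerSize; // fits in a pointer
      return false;
    }

    int main() {
      std::printf("%d %d\n", isCheapIntOp(32, 64), isCheapIntOp(128, 64)); // 1 0
    }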
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | TargetInstrInfo.cpp |
    556  int64_t OpSize = MFI.getObjectSize(FI);  in foldMemoryOperand() local
    561  OpSize = SubRegSize / 8;  in foldMemoryOperand()
    564  MemSize = std::max(MemSize, OpSize);  in foldMemoryOperand()
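When a subregister copy is folded into a spill slot, the access covers the subreg's bytes, but the memory operand must still span the larger of the slot and the subreg. A simplified sketch of that reconciliation (collapsed into one function; the real code accumulates MemSize across operands):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Simplified sketch of the size logic in foldMemoryOperand(): use the
    // subreg's size in bytes when a subregister is involved, otherwise the
    // whole frame object, and cover the larger of the two.
    int64_t foldedMemSize(int64_t FrameObjectSize, unsigned SubRegSizeInBits) {
      int64_t OpSize = FrameObjectSize;
      if (SubRegSizeInBits)
        OpSize = SubRegSizeInBits / 8;
      return std::max(FrameObjectSize, OpSize);
    }

    int main() {
      // A 32-bit subreg folded into an 8-byte spill slot still needs 8 bytes:
      std::printf("%lld\n", (long long)foldedMemSize(8, 32)); // 8
    }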
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | AMDGPURegisterBankInfo.cpp |
    844  unsigned OpSize = OpTy.getSizeInBits();  in executeInWaterfallLoop() local
    847  if (OpSize == 32) {  in executeInWaterfallLoop()
    888  bool Is64 = OpSize % 64 == 0;  in executeInWaterfallLoop()
    890  LLT UnmergeTy = OpSize % 64 == 0 ? LLT::scalar(64) : LLT::scalar(32);  in executeInWaterfallLoop()
    891  unsigned CmpOp = OpSize % 64 == 0 ? AMDGPU::V_CMP_EQ_U64_e64  in executeInWaterfallLoop()
    3011  unsigned OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();  in getInstrMapping() local
    3014  OpdsMapping[2] = AMDGPU::getValueMapping(Op1Bank, OpSize);  in getInstrMapping()
    3015  OpdsMapping[3] = AMDGPU::getValueMapping(Op2Bank, OpSize);  in getInstrMapping()
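The waterfall loop splits an operand into 32- or 64-bit pieces, preferring 64-bit unmerges and compares when the width divides evenly by 64. A sketch of that dispatch; the function name is invented and the opcode strings merely echo the names in the snippet:

    #include <cstdio>

    // Sketch of the width dispatch in executeInWaterfallLoop(): operands whose
    // size is a multiple of 64 bits are split into 64-bit pieces and compared
    // with the 64-bit VALU compare; everything else uses 32-bit pieces.
    struct WaterfallPlan { unsigned PieceBits; const char *CmpOp; };

    WaterfallPlan planWaterfallSplit(unsigned OpSize) {
      bool Is64 = OpSize % 64 == 0;
      return {Is64 ? 64u : 32u,
              Is64 ? "V_CMP_EQ_U64_e64" : "V_CMP_EQ_U32_e64"};
    }

    int main() {
      WaterfallPlan P = planWaterfallSplit(128);
      std::printf("%u-bit pieces via %s\n", P.PieceBits, P.CmpOp); // 64-bit, U64
    }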
|
D | SIISelLowering.cpp |
    2834  unsigned OpSize = Flags.isByVal() ?  in LowerCall() local
    2844  int FI = MFI.CreateFixedObject(OpSize, Offset, true);  in LowerCall()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/ |
D | MachineIRBuilder.cpp |
    542  unsigned OpSize = OpTy.getSizeInBits();  in buildSequence() local
    545  if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {  in buildSequence()
    551  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {  in buildSequence()
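buildSequence() checks whether the (value, index) pairs are really one contiguous concatenation, in which case a single merge instruction can build the result. A sketch of that test, assuming the per-piece type check has already passed:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Sketch of the concatenation test in buildSequence(): piece i must start
    // at bit i*OpSize, and the pieces must exactly cover the result width.
    bool isSimpleConcat(const std::vector<unsigned> &Indices, unsigned OpSize,
                        unsigned ResultBits) {
      for (size_t i = 0; i < Indices.size(); ++i)
        if (Indices[i] != i * OpSize)
          return false;
      return Indices.size() * OpSize == ResultBits;
    }

    int main() {
      std::printf("%d\n", isSimpleConcat({0, 32, 64, 96}, 32, 128)); // 1
    }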
|
D | LegalizerHelper.cpp |
    3517  uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();  in narrowScalarExtract() local
    3521  if (SrcStart + NarrowSize <= OpStart || SrcStart >= OpStart + OpSize) {  in narrowScalarExtract()
    3536  SegSize = std::min(NarrowSize, OpStart + OpSize - SrcStart);  in narrowScalarExtract()
    3539  SegSize = std::min(SrcStart + NarrowSize - OpStart, OpSize);  in narrowScalarExtract()
    3584  uint64_t OpSize = MRI.getType(OpReg).getSizeInBits();  in narrowScalarInsert() local
    3588  if (DstStart + NarrowSize <= OpStart || DstStart >= OpStart + OpSize) {  in narrowScalarInsert()
    3606  SegSize = std::min(NarrowSize, OpStart + OpSize - DstStart);  in narrowScalarInsert()
    3611  std::min(NarrowSize - InsertOffset, OpStart + OpSize - DstStart);  in narrowScalarInsert()
    3615  if (ExtractOffset != 0 || SegSize != OpSize) {  in narrowScalarInsert()
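Both routines intersect the extract/insert bit range with each narrow chunk of the wide value. A worked sketch of the interval math, with roles renamed for clarity (ChunkStart here plays the part of SrcStart/DstStart):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // For each narrow chunk [ChunkStart, ChunkStart+NarrowSize), compute how
    // many bits of the operation's range [OpStart, OpStart+OpSize) fall inside
    // it. Returns 0 when they don't overlap (the early-out in the snippet).
    uint64_t segmentSize(uint64_t ChunkStart, uint64_t NarrowSize,
                         uint64_t OpStart, uint64_t OpSize) {
      if (ChunkStart + NarrowSize <= OpStart || ChunkStart >= OpStart + OpSize)
        return 0; // no overlap
      if (ChunkStart < OpStart)
        return std::min(ChunkStart + NarrowSize - OpStart, OpSize);
      return std::min(NarrowSize, OpStart + OpSize - ChunkStart);
    }

    int main() {
      // Extracting bits [24, 56) from a value split into 32-bit chunks:
      std::printf("%llu %llu\n",
                  (unsigned long long)segmentSize(0, 32, 24, 32),   // 8 bits
                  (unsigned long long)segmentSize(32, 32, 24, 32)); // 24 bits
    }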
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86InstrFormats.td |
    185  class OpSize16 { OperandSize OpSize = OpSize16; }
    186  class OpSize32 { OperandSize OpSize = OpSize32; }
    283  OperandSize OpSize = OpSizeFixed; // Does this instruction's encoding change
    285  bits<2> OpSizeBits = OpSize.Value;
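The OperandSize field records whether an instruction's encoding needs the 0x66 operand-size prefix, which swaps 16-bit and 32-bit operand sizes depending on the current mode. A C++ model of what the field implies for encoding (the actual emitter logic lives in the MC layer, not in this .td file):

    #include <cstdio>

    // Model of the OperandSize field: in 32/64-bit modes a 16-bit-operand
    // instruction needs the 0x66 prefix, in 16-bit mode a 32-bit-operand
    // instruction needs it, and OpSizeFixed encodings never change with mode.
    enum OperandSize { OpSizeFixed, OpSize16, OpSize32 };

    bool needs66Prefix(OperandSize OpSize, bool In16BitMode) {
      if (OpSize == OpSize16) return !In16BitMode;
      if (OpSize == OpSize32) return In16BitMode;
      return false; // OpSizeFixed
    }

    int main() {
      std::printf("%d %d\n", needs66Prefix(OpSize16, false),  // 1
                             needs66Prefix(OpSize32, true));  // 1
    }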
|
D | X86InstrArithmetic.td |
    594  /// OpSize - Selects whether the instruction needs a 0x66 prefix based on
    597  OperandSize OpSize = opSize;
    636  let OpSize = typeinfo.OpSize;
|
D | X86ISelLowering.cpp |
    4125  uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;  in LowerCall() local
    4126  FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);  in LowerCall()
    43720  unsigned OpSize = OpVT.getSizeInBits();  in combineVectorSizedSetCCEquality() local
    43721  if (!OpVT.isScalarInteger() || OpSize < 128)  in combineVectorSizedSetCCEquality()
    43750  if ((OpSize == 128 && Subtarget.hasSSE2()) ||  in combineVectorSizedSetCCEquality()
    43751  (OpSize == 256 && HasAVX) ||  in combineVectorSizedSetCCEquality()
    43752  (OpSize == 512 && Subtarget.useAVX512Regs())) {  in combineVectorSizedSetCCEquality()
    43759  bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;  in combineVectorSizedSetCCEquality()
    43763  if (OpSize == 256) {  in combineVectorSizedSetCCEquality()
    43769  if (OpSize == 512 || NeedZExt) {  in combineVectorSizedSetCCEquality()
    [all …]
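Lines 43750-43752 gate a memcmp-style optimization: an i128/i256/i512 equality test becomes one vector compare only if the target's vectors are wide enough. A sketch of the gate, with booleans standing in for the Subtarget queries:

    #include <cstdio>

    // Sketch of the feature gate in combineVectorSizedSetCCEquality(): the
    // operand width must match a vector width the target actually supports.
    bool canUseVectorSizedCompare(unsigned OpSize, bool HasSSE2, bool HasAVX,
                                  bool HasAVX512) {
      return (OpSize == 128 && HasSSE2) ||
             (OpSize == 256 && HasAVX) ||
             (OpSize == 512 && HasAVX512);
    }

    int main() {
      std::printf("%d\n", canUseVectorSizedCompare(256, true, true, false)); // 1
    }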
|
D | X86InstrSSE.td |
    1617  // SSE2 instructions without OpSize prefix
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | ConstantFolding.cpp |
    751  unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());  in SymbolicallyEvaluateBinop() local
    756  return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -  in SymbolicallyEvaluateBinop()
    757  Offs2.zextOrTrunc(OpSize));  in SymbolicallyEvaluateBinop()
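The fold turns a subtraction of two ptrtoint values sharing a common base into the difference of their offsets, resized (zextOrTrunc) to the result type's width. A sketch that models the resize by masking to OpSize bits:

    #include <cstdint>
    #include <cstdio>

    // Sketch of the fold in SymbolicallyEvaluateBinop(): with both operands
    // offsets from the same base, the subtraction reduces to the offset delta,
    // carried out in OpSize-bit arithmetic (modeled here with a mask).
    uint64_t foldPtrDiff(uint64_t Offs1, uint64_t Offs2, unsigned OpSize) {
      uint64_t Mask = OpSize >= 64 ? ~0ull : ((1ull << OpSize) - 1);
      return ((Offs1 & Mask) - (Offs2 & Mask)) & Mask; // zextOrTrunc, then sub
    }

    int main() {
      std::printf("%llu\n", (unsigned long long)foldPtrDiff(24, 8, 32)); // 16
    }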
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/AsmParser/ |
D | AMDGPUAsmParser.cpp |
    2752  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);  in isInlineConstant() local
    2754  switch (OpSize) { // expected operand size  in isInlineConstant()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCISelLowering.cpp |
    4884  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;  in CalculateTailCallArgDest() local
    4885  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);  in CalculateTailCallArgDest()
    12310  auto OpSize = N->getOperand(0).getValueSizeInBits();  in ConvertSETCCToSubtract() local
    12314  if (OpSize < Size) {  in ConvertSETCCToSubtract()
|