/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64FastISel.cpp |
    368   unsigned ResultReg = createResultReg(&AArch64::GPR64spRegClass);  in fastMaterializeAlloca() local
    370   ResultReg)  in fastMaterializeAlloca()
    374   return ResultReg;  in fastMaterializeAlloca()
    391   unsigned ResultReg = createResultReg(RC);  in materializeInt() local
    393   ResultReg).addReg(ZeroReg, getKillRegState(true));  in materializeInt()
    394   return ResultReg;  in materializeInt()
    427   unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));  in materializeFP() local
    429   TII.get(TargetOpcode::COPY), ResultReg)  in materializeFP()
    432   return ResultReg;  in materializeFP()
    447   unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));  in materializeFP() local
    [all …]
|
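The AArch64FastISel.cpp hits above all follow the same FastISel materialization pattern: allocate a fresh virtual register with createResultReg(), define it with a BuildMI() at FuncInfo.InsertPt, and return it (returning 0 signals that fast-selection failed). A minimal sketch of that pattern follows; MyFastISel, MyTarget::GPR64RegClass and MyTarget::ADDri are placeholders, not the actual AArch64 code.

// Sketch only: assumes the usual FastISel context (FuncInfo, TII, DbgLoc) and
// a hypothetical MyTarget backend; mirrors the fastMaterializeAlloca() shape.
unsigned MyFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Only statically allocated stack objects already have a frame index.
  auto SI = FuncInfo.StaticAllocaMap.find(AI);
  if (SI == FuncInfo.StaticAllocaMap.end())
    return 0;                       // 0 == "could not fast-select"

  // Fresh virtual register in the target's pointer register class.
  unsigned ResultReg = createResultReg(&MyTarget::GPR64RegClass);

  // ResultReg = frame address of the alloca (frame index + 0).
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(MyTarget::ADDri), ResultReg)
      .addFrameIndex(SI->second)
      .addImm(0);
  return ResultReg;
}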
D | AArch64InstrInfo.cpp |
    4178   Register ResultReg = Root.getOperand(0).getReg();  in genFusedMultiply() local
    4195   if (Register::isVirtualRegister(ResultReg))  in genFusedMultiply()
    4196   MRI.constrainRegClass(ResultReg, RC);  in genFusedMultiply()
    4206   MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)  in genFusedMultiply()
    4211   MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)  in genFusedMultiply()
    4217   MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)  in genFusedMultiply()
    4329   Register ResultReg = Root.getOperand(0).getReg();  in genMaddR() local
    4335   if (Register::isVirtualRegister(ResultReg))  in genMaddR()
    4336   MRI.constrainRegClass(ResultReg, RC);  in genMaddR()
    4345   BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc), ResultReg)  in genMaddR()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86FastISel.cpp |
    89     unsigned &ResultReg, unsigned Alignment = 1);
    98     unsigned &ResultReg);
    319    MachineMemOperand *MMO, unsigned &ResultReg,  in X86FastEmitLoad() argument
    470    ResultReg = createResultReg(RC);  in X86FastEmitLoad()
    472    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);  in X86FastEmitLoad()
    707    unsigned &ResultReg) {  in X86FastEmitExtend() argument
    713    ResultReg = RR;  in X86FastEmitExtend()
    1330   unsigned ResultReg = 0;  in X86SelectLoad() local
    1331   if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,  in X86SelectLoad()
    1335   updateValueMap(I, ResultReg);  in X86SelectLoad()
    [all …]
|
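X86FastEmitLoad() above shows a second common shape: ResultReg is an out-parameter that the emit helper fills in, and the caller (X86SelectLoad) publishes it with updateValueMap() so later uses of the IR value read that vreg. A hedged sketch with placeholder MyFastISel/pickLoadOpcode names (not the real X86 helpers):

// Sketch of the out-parameter pattern; pickLoadOpcode() is a hypothetical
// opcode/register-class chooser, everything else is standard FastISel API.
bool MyFastISel::emitLoad(MVT VT, unsigned BaseReg, int64_t Offset,
                          MachineMemOperand *MMO, unsigned &ResultReg) {
  unsigned Opc;
  const TargetRegisterClass *RC;
  if (!pickLoadOpcode(VT, Opc, RC))
    return false;                       // caller falls back to SelectionDAG

  ResultReg = createResultReg(RC);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addReg(BaseReg)
      .addImm(Offset)
      .addMemOperand(MMO);
  return true;
}

bool MyFastISel::selectLoad(const LoadInst *LI, MVT VT, unsigned BaseReg) {
  unsigned ResultReg = 0;
  if (!emitLoad(VT, BaseReg, /*Offset=*/0,
                createMachineMemOperandFor(LI), ResultReg))
    return false;
  updateValueMap(LI, ResultReg);        // map the IR value to its new vreg
  return true;
}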
D | X86FlagsCopyLowering.cpp |
    1079   Register ResultReg = MRI->createVirtualRegister(&SetBRC);  in rewriteSetCarryExtended() local
    1080   BuildMI(MBB, SetPos, SetLoc, TII->get(Sub), ResultReg)  in rewriteSetCarryExtended()
    1083   return RewriteToReg(ResultReg);  in rewriteSetCarryExtended()
|
D | X86InstructionSelector.cpp |
    1028   Register ResultReg = I.getOperand(0).getReg();  in selectFCmp() local
    1030   ResultReg,  in selectFCmp()
    1031   *getRegClass(LLT::scalar(8), *RBI.getRegBank(ResultReg, MRI, TRI)), MRI);  in selectFCmp()
    1045   TII.get(SETFOpc[2]), ResultReg)  in selectFCmp()
    1072   *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);  in selectFCmp()
|
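X86InstructionSelector.cpp illustrates the GlobalISel variant: the destination vreg already exists on the generic instruction, so selectFCmp() constrains it to a concrete register class and then defines it with the target SETCCr. A simplified sketch of that shape; MySelector and the fixed GR8 class are assumptions (the real code derives the class from the register bank, as line 1031 above shows), and RBI/TII are the selector's usual RegisterBankInfo/InstrInfo members.

// Simplified sketch of the "constrain existing vreg, then define it" pattern.
bool MySelector::selectSetCC(MachineInstr &I, MachineRegisterInfo &MRI,
                             X86::CondCode CC) const {
  Register ResultReg = I.getOperand(0).getReg();

  // The generic vreg only carries a register bank; pin it to a real class so
  // the target instruction's operand constraints are satisfied.
  RBI.constrainGenericRegister(ResultReg, X86::GR8RegClass, MRI);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg)
      .addImm(CC);
  I.eraseFromParent();
  return true;
}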
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Mips/ |
D | MipsFastISel.cpp |
    181   bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
    329   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);  in emitLogicalOp() local
    330   if (!ResultReg)  in emitLogicalOp()
    333   emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);  in emitLogicalOp()
    334   return ResultReg;  in emitLogicalOp()
    345   unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);  in fastMaterializeAlloca() local
    347   ResultReg)  in fastMaterializeAlloca()
    350   return ResultReg;  in fastMaterializeAlloca()
    366   unsigned ResultReg = createResultReg(RC);  in materialize32BitInt() local
    370   emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);  in materialize32BitInt()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | FastISel.cpp |
    613   unsigned ResultReg =  in selectBinaryOp() local
    616   if (!ResultReg)  in selectBinaryOp()
    620   updateValueMap(I, ResultReg);  in selectBinaryOp()
    647   unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,  in selectBinaryOp() local
    649   if (!ResultReg)  in selectBinaryOp()
    653   updateValueMap(I, ResultReg);  in selectBinaryOp()
    663   unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),  in selectBinaryOp() local
    665   if (!ResultReg)  in selectBinaryOp()
    671   updateValueMap(I, ResultReg);  in selectBinaryOp()
    942   CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));  in selectPatchpoint()
    [all …]
|
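The target-independent FastISel::selectBinaryOp() hits show the contract behind all of the target entries above: fastEmit_rr() (or fastEmit_ri_) returns 0 when the target has no fast path, and a non-zero ResultReg is recorded with updateValueMap(). An abridged sketch of that shape, not the verbatim LLVM source (which also handles immediate operands, kill flags, and more):

// Abridged: one fastEmit_rr attempt, bail out to SelectionDAG on failure.
bool FastISelSketch::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = TLI.getValueType(DL, I->getType(), /*AllowUnknown=*/true);
  if (!VT.isSimple())
    return false;

  unsigned Op0 = getRegForValue(I->getOperand(0));
  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op0 || !Op1)
    return false;

  unsigned ResultReg =
      fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(), ISDOpcode,
                  Op0, /*Op0IsKill=*/false, Op1, /*Op1IsKill=*/false);
  if (!ResultReg)               // no target fast path: let SelectionDAG do it
    return false;

  updateValueMap(I, ResultReg); // subsequent uses of I read ResultReg
  return true;
}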
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/WebAssembly/ |
D | WebAssemblyFastISel.cpp |
    590   unsigned ResultReg = createResultReg(MRI.getRegClass(Reg));  in copyValue() local
    592   ResultReg)  in copyValue()
    594   return ResultReg;  in copyValue()
    602   unsigned ResultReg =  in fastMaterializeAlloca() local
    607   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)  in fastMaterializeAlloca()
    609   return ResultReg;  in fastMaterializeAlloca()
    621   unsigned ResultReg =  in fastMaterializeConstant() local
    626   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)  in fastMaterializeConstant()
    628   return ResultReg;  in fastMaterializeConstant()
    712   unsigned ResultReg = createResultReg(RC);  in fastLowerArguments() local
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | ARMFastISel.cpp |
    195   bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
    304   Register ResultReg = createResultReg(RC);  in fastEmitInst_r() local
    312   ResultReg).addReg(Op0, Op0IsKill * RegState::Kill));  in fastEmitInst_r()
    317   TII.get(TargetOpcode::COPY), ResultReg)  in fastEmitInst_r()
    320   return ResultReg;  in fastEmitInst_r()
    327   unsigned ResultReg = createResultReg(RC);  in fastEmitInst_rr() local
    337   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)  in fastEmitInst_rr()
    345   TII.get(TargetOpcode::COPY), ResultReg)  in fastEmitInst_rr()
    348   return ResultReg;  in fastEmitInst_rr()
    355   unsigned ResultReg = createResultReg(RC);  in fastEmitInst_ri() local
    [all …]
|
D | ARMInstructionSelector.cpp |
    690   auto ResultReg = MIB->getOperand(0).getReg();  in selectGlobal() local
    698   .addDef(ResultReg)  in selectGlobal()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCFastISel.cpp |
    165   bool PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
    435   unsigned ResultReg = createResultReg(&PPC::G8RC_and_G8RC_NOX0RegClass);  in PPCSimplifyAddress() local
    437   ResultReg).addFrameIndex(Addr.Base.FI).addImm(0);  in PPCSimplifyAddress()
    438   Addr.Base.Reg = ResultReg;  in PPCSimplifyAddress()
    454   bool PPCFastISel::PPCEmitLoad(MVT VT, Register &ResultReg, Address &Addr,  in PPCEmitLoad() argument
    469   (ResultReg ? MRI.getRegClass(ResultReg) :  in PPCEmitLoad()
    525   if (ResultReg == 0)  in PPCEmitLoad()
    526   ResultReg = createResultReg(UseRC);  in PPCEmitLoad()
    541   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)  in PPCEmitLoad()
    549   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)  in PPCEmitLoad()
    [all …]
|
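PPCEmitLoad() above adds one more wrinkle: the caller may pass in a pre-existing ResultReg by reference, in which case its register class drives the selection and no new register is created. A hedged sketch of that convention; MyFastISel and pickLoadOpcode are placeholders, and the real PPC code also handles indexed addressing, sign extension, and so on.

// Sketch of "reuse the caller's register if set, otherwise create one".
bool MyFastISel::emitLoad(MVT VT, Register &ResultReg, unsigned BaseReg,
                          int64_t Offset) {
  // A non-zero incoming ResultReg dictates the register class to load into.
  const TargetRegisterClass *UseRC =
      ResultReg ? MRI.getRegClass(ResultReg) : TLI.getRegClassFor(VT);

  unsigned Opc = pickLoadOpcode(VT, UseRC);   // hypothetical chooser
  if (!Opc)
    return false;

  if (ResultReg == 0)
    ResultReg = createResultReg(UseRC);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
      .addImm(Offset)
      .addReg(BaseReg);
  return true;
}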
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | SIInstrInfo.cpp |
    5121   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  in moveScalarAddSub() local
    5135   MRI.replaceRegWith(OldDstReg, ResultReg);  in moveScalarAddSub()
    5138   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);  in moveScalarAddSub()
    5155   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  in lowerScalarAbs() local
    5164   BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)  in lowerScalarAbs()
    5168   MRI.replaceRegWith(Dest.getReg(), ResultReg);  in lowerScalarAbs()
    5169   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);  in lowerScalarAbs()
    5544   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);  in splitScalar64BitBCNT() local
    5555   BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);  in splitScalar64BitBCNT()
    5557   MRI.replaceRegWith(Dest.getReg(), ResultReg);  in splitScalar64BitBCNT()
    [all …]
|
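The SIInstrInfo.cpp hits (moveScalarAddSub, lowerScalarAbs, splitScalar64BitBCNT) share the SALU-to-VALU rewrite pattern: create a fresh VGPR ResultReg, redirect all users of the old SGPR destination with replaceRegWith(), and queue those users via addUsersToMoveToVALUWorklist() so they get moved as well. A simplified sketch of just that step; SIInstrInfoSketch and its SetVectorType worklist typedef are placeholders, and it assumes the instruction's opcode has already been rewritten to its VALU form.

// Simplified sketch of the result-register rewrite step, not the full lowering.
void SIInstrInfoSketch::moveResultToVGPR(MachineInstr &Inst,
                                         SetVectorType &Worklist) const {
  MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();

  Register OldDstReg = Inst.getOperand(0).getReg();
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  // Rewrites the def in Inst and every use of the old scalar result.
  MRI.replaceRegWith(OldDstReg, ResultReg);

  // Users that were reading an SGPR now read a VGPR, so they may need to be
  // moved to the VALU too; queue them for the same treatment.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}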
D | SIRegisterInfo.cpp |
    1092   Register ResultReg = IsCopy ?  in eliminateFrameIndex() local
    1103   BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)  in eliminateFrameIndex()
    1107   if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {  in eliminateFrameIndex()
    1159   BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)  in eliminateFrameIndex()
    1185   FIOp.ChangeToRegister(ResultReg, false, false, true);  in eliminateFrameIndex()
|
D | SIISelLowering.cpp |
    3178   unsigned ResultReg,  in emitLoadM0FromVGPRLoop() argument
    3198   .addReg(ResultReg)  in emitLoadM0FromVGPRLoop()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | FastISel.h | 91 unsigned ResultReg = 0; member
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/ |
D | MachineIRBuilder.cpp |
    669   for (unsigned ResultReg : ResultRegs)  in buildIntrinsic() local
    670   MIB.addDef(ResultReg);  in buildIntrinsic()
|
D | LegalizerHelper.cpp |
    1199   Register ResultReg = MIRBuilder.buildZExt(WideTy, Src1).getReg(0);  in widenScalarMergeValues() local
    1214   MIRBuilder.buildOr(NextResult, ResultReg, Shl);  in widenScalarMergeValues()
    1215   ResultReg = NextResult;  in widenScalarMergeValues()
    1219   MIRBuilder.buildTrunc(DstReg, ResultReg);  in widenScalarMergeValues()
    1221   MIRBuilder.buildIntToPtr(DstReg, ResultReg);  in widenScalarMergeValues()
|
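Finally, LegalizerHelper's widenScalarMergeValues() hits use ResultReg as a running accumulator: each narrow source is zero-extended, shifted into position and OR-ed in, and the accumulated wide value is truncated (or converted with buildIntToPtr for pointer destinations) into the result. A minimal free-standing sketch of that shift-and-or loop against the LLVM 10 MachineIRBuilder API; the function name and the PartSize parameter are illustrative, and the pointer-destination case is omitted.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Assemble DstReg from equally sized narrow parts via zext/shl/or in WideTy,
// then truncate back to the destination's width. Sketch only.
static void buildMergeViaShifts(MachineIRBuilder &MIRBuilder, LLT WideTy,
                                Register DstReg, ArrayRef<Register> Parts,
                                unsigned PartSize) {
  Register ResultReg = MIRBuilder.buildZExt(WideTy, Parts[0]).getReg(0);

  for (unsigned I = 1, E = Parts.size(); I != E; ++I) {
    auto ZExt = MIRBuilder.buildZExt(WideTy, Parts[I]);
    auto ShiftAmt = MIRBuilder.buildConstant(WideTy, I * PartSize);
    auto Shl = MIRBuilder.buildShl(WideTy, ZExt, ShiftAmt);
    // OR the newly positioned part into the running result.
    ResultReg = MIRBuilder.buildOr(WideTy, ResultReg, Shl).getReg(0);
  }

  MIRBuilder.buildTrunc(DstReg, ResultReg);
}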