/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/ |
D | IceTargetLoweringMIPS32.h |
  166  void _add(Variable *Dest, Variable *Src0, Variable *Src1) {   in _add() argument
  167    Context.insert<InstMIPS32Add>(Dest, Src0, Src1);   in _add()
  170  void _addu(Variable *Dest, Variable *Src0, Variable *Src1) {   in _addu() argument
  171    Context.insert<InstMIPS32Addu>(Dest, Src0, Src1);   in _addu()
  174  void _and(Variable *Dest, Variable *Src0, Variable *Src1) {   in _and() argument
  175    Context.insert<InstMIPS32And>(Dest, Src0, Src1);   in _and()
  189           Operand *Src1, CondMIPS32::Cond Condition) {   in _br() argument
  190    Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1,   in _br()
  200           Operand *Src1, const InstMIPS32Label *Label,   in _br() argument
  202    Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1, Label,   in _br()
  [all …]
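These `_op()` helpers are thin wrappers that append one target instruction to the current lowering context via `Context.insert<>`. As a hedged illustration of how a lowering routine typically strings them together (a minimal sketch assuming the `makeReg()` and `_mov()` helpers that Subzero lowering classes usually provide; `lowerAdd32` is a hypothetical name, not a function in this file):

    // Hypothetical MIPS32 lowering of a 32-bit integer add: compute into a
    // scratch register, then move the result into the real destination.
    void lowerAdd32(Variable *Dest, Variable *Src0R, Variable *Src1R) {
      Variable *T = makeReg(IceType_i32); // fresh virtual register
      _addu(T, Src0R, Src1R);             // appends an InstMIPS32Addu to the context
      _mov(Dest, T);                      // copy the temporary into Dest
    }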
|
D | IceTargetLoweringARM32.h |
  212            Operand *Src0, Operand *Src1);
  252            Operand *Src0, Operand *Src1);
  254            Operand *Src1);
  256            Operand *Src1);
  258            Operand *Src1);
  318  void lowerIDivRem(Variable *Dest, Variable *T, Variable *Src0R, Operand *Src1,
  326  void _add(Variable *Dest, Variable *Src0, Operand *Src1,
  328    Context.insert<InstARM32Add>(Dest, Src0, Src1, Pred);
  330  void _adds(Variable *Dest, Variable *Src0, Operand *Src1,
  333    Context.insert<InstARM32Add>(Dest, Src0, Src1, Pred, SetFlags);
  [all …]
|
D | IceTargetLoweringX8632.h |
  399  Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
  447  void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _adc_rmw() argument
  448    Context.insert<Insts::AdcRMW>(DestSrc0, Src1);   in _adc_rmw()
  453  void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _add_rmw() argument
  454    Context.insert<Insts::AddRMW>(DestSrc0, Src1);   in _add_rmw()
  472  void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _and_rmw() argument
  473    Context.insert<Insts::AndRMW>(DestSrc0, Src1);   in _and_rmw()
  475  void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) {   in _blendvps() argument
  476    Context.insert<Insts::Blendvps>(Dest, Src0, Src1);   in _blendvps()
  505  void _cmp(Operand *Src0, Operand *Src1) {   in _cmp() argument
  [all …]
|
D | IceTargetLoweringX8664.h |
  395  Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
  443  void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _adc_rmw() argument
  444    Context.insert<Insts::AdcRMW>(DestSrc0, Src1);   in _adc_rmw()
  449  void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _add_rmw() argument
  450    Context.insert<Insts::AddRMW>(DestSrc0, Src1);   in _add_rmw()
  468  void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) {   in _and_rmw() argument
  469    Context.insert<Insts::AndRMW>(DestSrc0, Src1);   in _and_rmw()
  471  void _blendvps(Variable *Dest, Operand *Src0, Operand *Src1) {   in _blendvps() argument
  472    Context.insert<Insts::Blendvps>(Dest, Src0, Src1);   in _blendvps()
  501  void _cmp(Operand *Src0, Operand *Src1) {   in _cmp() argument
  [all …]
|
D | IceTargetLoweringX8664.cpp |
  666           Operand *&Src0, Operand *&Src1) {   in canFoldLoadIntoBinaryInst() argument
  667    if (Src0 == LoadDest && Src1 != LoadDest) {   in canFoldLoadIntoBinaryInst()
  671    if (Src0 != LoadDest && Src1 == LoadDest) {   in canFoldLoadIntoBinaryInst()
  672      Src1 = LoadSrc;   in canFoldLoadIntoBinaryInst()
  721    Operand *Src1 = Arith->getSrc(1);   in doLoadOpt() local
  722    if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {   in doLoadOpt()
  724        Arith->getDest(), Src0, Src1);   in doLoadOpt()
  728    Operand *Src1 = Icmp->getSrc(1);   in doLoadOpt() local
  729    if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {   in doLoadOpt()
  731        Icmp->getDest(), Src0, Src1);   in doLoadOpt()
  [all …]
|
D | IceTargetLoweringX8632.cpp |
  657           Operand *&Src0, Operand *&Src1) {   in canFoldLoadIntoBinaryInst() argument
  658    if (Src0 == LoadDest && Src1 != LoadDest) {   in canFoldLoadIntoBinaryInst()
  662    if (Src0 != LoadDest && Src1 == LoadDest) {   in canFoldLoadIntoBinaryInst()
  663      Src1 = LoadSrc;   in canFoldLoadIntoBinaryInst()
  711    Operand *Src1 = Arith->getSrc(1);   in doLoadOpt() local
  712    if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {   in doLoadOpt()
  714        Arith->getDest(), Src0, Src1);   in doLoadOpt()
  718    Operand *Src1 = Icmp->getSrc(1);   in doLoadOpt() local
  719    if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {   in doLoadOpt()
  721        Icmp->getDest(), Src0, Src1);   in doLoadOpt()
  [all …]
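The two `canFoldLoadIntoBinaryInst` hits above (the x86-64 and x86-32 lowerings share the logic) encode a simple rule: a preceding load may be folded into the next two-operand instruction only when exactly one of that instruction's operands is the loaded variable. A sketch of the predicate, reconstructed from the visible lines with the elided lines filled in by analogy, so treat it as illustrative rather than verbatim source:

    static bool canFoldLoadIntoBinaryInst(Operand *LoadSrc, Variable *LoadDest,
                                          Operand *&Src0, Operand *&Src1) {
      // Replace whichever operand consumes the loaded value with the memory
      // operand itself, so the load can be deleted and the arithmetic or
      // compare reads straight from memory.
      if (Src0 == LoadDest && Src1 != LoadDest) {
        Src0 = LoadSrc;
        return true;
      }
      if (Src0 != LoadDest && Src1 == LoadDest) {
        Src1 = LoadSrc;
        return true;
      }
      return false; // neither operand, or both operands, use the load: do not fold
    }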
|
D | IceTargetLoweringARM32.cpp |
   532    Operand *Src1 = Instr->getSrc(1);   in genTargetHelperCallFor() local
   544    if (auto *C = llvm::dyn_cast<ConstantInteger32>(Src1)) {   in genTargetHelperCallFor()
   552      Src1 = Ctx->getConstantInt32(NewC);   in genTargetHelperCallFor()
   555      Context.insert<InstCast>(CastKind, Src1_32, Src1);   in genTargetHelperCallFor()
   556      Src1 = Src1_32;   in genTargetHelperCallFor()
   566    assert(Src1->getType() == IceType_i32);   in genTargetHelperCallFor()
   567    Call->addArg(Src1);   in genTargetHelperCallFor()
  2133             Operand *Src1, ExtInstr ExtFunc,   in lowerIDivRem() argument
  2135    div0Check(Dest->getType(), Src1, nullptr);   in lowerIDivRem()
  2136    Variable *Src1R = legalizeToReg(Src1);   in lowerIDivRem()
  [all …]
|
D | IceInstARM32.h |
  745             Variable *Src0, Operand *Src1,
  749        InstARM32ThreeAddrGPR(Func, Dest, Src0, Src1, Predicate, SetFlags);
  771             Operand *Src1, CondARM32::Cond Predicate, bool SetFlags)   in InstARM32ThreeAddrGPR() argument
  775    addSource(Src1);   in InstARM32ThreeAddrGPR()
  796             Variable *Src1) {   in create() argument
  798        InstARM32ThreeAddrFP(Func, Dest, Src0, Src1);   in create()
  821  InstARM32ThreeAddrFP(Cfg *Func, Variable *Dest, Variable *Src0, Operand *Src1)   in InstARM32ThreeAddrFP() argument
  824    addSource(Src1);   in InstARM32ThreeAddrFP()
  846             Variable *Src0, Variable *Src1) {   in create() argument
  848        InstARM32ThreeAddrSignAwareFP(Func, Dest, Src0, Src1);   in create()
  [all …]
|
D | IceInstMIPS32.h |
  500             Variable *Src0, Variable *Src1) {   in create() argument
  502        InstMIPS32ThreeAddrFPR(Func, Dest, Src0, Src1);   in create()
  528             Variable *Src1)   in InstMIPS32ThreeAddrFPR() argument
  531    addSource(Src1);   in InstMIPS32ThreeAddrFPR()
  549             Variable *Src0, Variable *Src1) {   in create() argument
  551        InstMIPS32ThreeAddrGPR(Func, Dest, Src0, Src1);   in create()
  577             Variable *Src1)   in InstMIPS32ThreeAddrGPR() argument
  580    addSource(Src1);   in InstMIPS32ThreeAddrGPR()
  821             Operand *Src1, CondMIPS32::Cond Cond) {   in create() argument
  824        InstMIPS32Br(Func, TargetTrue, TargetFalse, Src0, Src1, NoLabel, Cond);   in create()
  [all …]
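Both instruction headers above follow the same Subzero factory idiom: a static `create()` allocates the node out of the function's arena, and the constructor registers each operand through `addSource()` so liveness analysis and register allocation can see it. A minimal sketch of the idiom under those assumptions (simplified; the real classes are templates over the opcode kind, omitted here, and the placement-new allocation is inferred rather than quoted):

    static InstMIPS32ThreeAddrGPR *create(Cfg *Func, Variable *Dest,
                                          Variable *Src0, Variable *Src1) {
      // Instructions are arena-allocated through the Cfg, never on the heap.
      return new (Func->allocate<InstMIPS32ThreeAddrGPR>())
          InstMIPS32ThreeAddrGPR(Func, Dest, Src0, Src1);
    }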
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/ExecutionEngine/Interpreter/ |
D | Execution.cpp |
  105      Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
  108  static void executeFAddInst(GenericValue &Dest, GenericValue Src1,   in executeFAddInst() argument
  119  static void executeFSubInst(GenericValue &Dest, GenericValue Src1,   in executeFSubInst() argument
  130  static void executeFMulInst(GenericValue &Dest, GenericValue Src1,   in executeFMulInst() argument
  141  static void executeFDivInst(GenericValue &Dest, GenericValue Src1,   in executeFDivInst() argument
  152  static void executeFRemInst(GenericValue &Dest, GenericValue Src1,   in executeFRemInst() argument
  156      Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);   in executeFRemInst()
  159      Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);   in executeFRemInst()
  169      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
  174    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
  [all …]
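The interpreter hits show the shape of its floating-point helpers: each takes the two source `GenericValue`s plus the IR type and switches on the type ID, with `frem` implemented via the C library `fmod` (which matches LLVM's `frem` semantics of taking the dividend's sign). A sketch of that helper, reconstructed from the visible lines; the error path is assumed and the vector case is omitted:

    static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                                GenericValue Src2, Type *Ty) {
      switch (Ty->getTypeID()) {
      case Type::FloatTyID:
        Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
        break;
      case Type::DoubleTyID:
        Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
        break;
      default:
        llvm_unreachable("Unhandled type for FRem instruction");
      }
    }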
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/unittest/AssemblerX8664/ |
D | ControlFlow.cpp |
  17  #define TestJ(C, Near, Dest, Src0, Value0, Src1, Value1) \   in TEST_F() argument
  20      "(" #C ", " #Near ", " #Dest ", " #Src0 ", " #Value0 ", " #Src1 \   in TEST_F()
  25    __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \   in TEST_F()
  27    __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \   in TEST_F()
  34    ASSERT_EQ(Value1, test.Src1()) << TestString; \   in TEST_F()
  39  #define TestImpl(Dst, Src0, Src1) \   in TEST_F() argument
  41    TestJ(o, Near, Dst, Src0, 0x80000000ul, Src1, 0x1ul); \   in TEST_F()
  42    TestJ(o, Far, Dst, Src0, 0x80000000ul, Src1, 0x1ul); \   in TEST_F()
  43    TestJ(no, Near, Dst, Src0, 0x1ul, Src1, 0x1ul); \   in TEST_F()
  44    TestJ(no, Far, Dst, Src0, 0x1ul, Src1, 0x1ul); \   in TEST_F()
  [all …]
|
D | DataMov.cpp |
  422  #define TestRegReg(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \   in TEST_F() argument
  425      "(" #C ", " #Dest ", " #IsTrue ", " #Src0 ", " #Value0 ", " #Src1 \   in TEST_F()
  428    __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \   in TEST_F()
  430    __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \   in TEST_F()
  432               Encoded_GPR_##Src1()); \   in TEST_F()
  462  #define TestValue(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \   in TEST_F() argument
  464    TestRegReg(C, Dest, IsTrue, Src0, Value0, Src1, Value1); \   in TEST_F()
  468  #define TestImpl(Dest, Src0, Src1) \   in TEST_F() argument
  470    TestValue(o, Dest, 1u, Src0, 0x80000000u, Src1, 0x1u); \   in TEST_F()
  471    TestValue(o, Dest, 0u, Src0, 0x1u, Src1, 0x10000000u); \   in TEST_F()
  [all …]
|
D | GPRArith.cpp |
  33  #define TestSetCC(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \   in TEST_F() argument
  36      "(" #C ", " #Dest ", " #IsTrue ", " #Src0 ", " #Value0 ", " #Src1 \   in TEST_F()
  41    __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \   in TEST_F()
  42    __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \   in TEST_F()
  57  #define TestImpl(Dest, Src0, Src1) \   in TEST_F() argument
  59    TestSetCC(o, Dest, 1u, Src0, 0x80000000u, Src1, 0x1u); \   in TEST_F()
  60    TestSetCC(o, Dest, 0u, Src0, 0x1u, Src1, 0x10000000u); \   in TEST_F()
  61    TestSetCC(no, Dest, 1u, Src0, 0x1u, Src1, 0x10000000u); \   in TEST_F()
  62    TestSetCC(no, Dest, 0u, Src0, 0x80000000u, Src1, 0x1u); \   in TEST_F()
  63    TestSetCC(b, Dest, 1u, Src0, 0x1, Src1, 0x80000000u); \   in TEST_F()
  [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/GlobalISel/ |
D | MachineIRBuilder.h |
  1227                               const SrcOp &Src1,
  1229      return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
  1244                               const SrcOp &Src1,
  1246      return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
  1260                               const SrcOp &Src1,
  1262      return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
  1266                               const SrcOp &Src1,
  1268      return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
  1272                               const SrcOp &Src1,
  1274      return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
  [all …]
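Each `buildXxx` wrapper above simply forwards to `buildInstr` with the matching generic opcode, so client code reads declaratively. A hedged usage sketch from inside a pass that already has a positioned `MachineIRBuilder` (the function and register names are illustrative, not from this header):

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
    using namespace llvm;

    // Emit %sum:_(s32) = G_ADD %a, %b followed by G_MUL %sum, %b at the
    // builder's current insertion point.
    static void emitAddMul(MachineIRBuilder &MIRBuilder, Register A, Register B) {
      LLT S32 = LLT::scalar(32);
      auto Sum = MIRBuilder.buildAdd(S32, A, B); // forwards to buildInstr(G_ADD, ...)
      MIRBuilder.buildMul(S32, Sum, B);          // forwards to buildInstr(G_MUL, ...)
    }

The ConstantFoldingMIRBuilder.h entry that follows shows the same `buildInstr` entry point specialized to fold the operation away when both inputs are already constants.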
|
D | ConstantFoldingMIRBuilder.h |
  51      const SrcOp &Src1 = SrcOps[1];   variable
  53          ConstantFoldBinOp(Opc, Src0.getReg(), Src1.getReg(), *getMRI()))
  62      const SrcOp &Src1 = SrcOps[1];   variable
  64          ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | R600ExpandSpecialInstrs.cpp |
  161      Register Src1 =   in runOnMachineFunction() local
  165      (void) Src1;   in runOnMachineFunction()
  167          (TRI.getEncodingValue(Src1) & 0xff) < 127)   in runOnMachineFunction()
  168        assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));   in runOnMachineFunction()
  212      unsigned Src1 = 0;   in runOnMachineFunction() local
  218        Src1 = MI.getOperand(Src1Idx).getReg();   in runOnMachineFunction()
  224          Src1 = TRI.getSubReg(Src1, SubRegIndex);   in runOnMachineFunction()
  229          Src1 = TRI.getSubReg(Src0, SubRegIndex1);   in runOnMachineFunction()
  264      TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);   in runOnMachineFunction()
|
D | SIOptimizeExecMasking.cpp |
  110    const MachineOperand &Src1 = MI.getOperand(1);   in isLogicalOpOnExec() local
  111    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)   in isLogicalOpOnExec()
  126    const MachineOperand &Src1 = MI.getOperand(1);   in isLogicalOpOnExec() local
  127    if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)   in isLogicalOpOnExec()
  390        MachineOperand &Src1 = SaveExecInst->getOperand(2);   in runOnMachineFunction() local
  395          OtherOp = &Src1;   in runOnMachineFunction()
  396        } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {   in runOnMachineFunction()
|
D | SIInstrInfo.cpp |
  1633                                         MachineOperand &Src1,   in swapSourceModifiers() argument
  1691    MachineOperand &Src1 = MI.getOperand(Src1Idx);   in commuteInstructionImpl() local
  1694    if (Src0.isReg() && Src1.isReg()) {   in commuteInstructionImpl()
  1701    } else if (Src0.isReg() && !Src1.isReg()) {   in commuteInstructionImpl()
  1704      CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);   in commuteInstructionImpl()
  1705    } else if (!Src0.isReg() && Src1.isReg()) {   in commuteInstructionImpl()
  1707      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);   in commuteInstructionImpl()
  1715                        Src1, AMDGPU::OpName::src1_modifiers);   in commuteInstructionImpl()
  2377    MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);   in FoldImmediate() local
  2383    if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))   in FoldImmediate()
  [all …]
|
D | SIShrinkInstructions.cpp |
  188    const MachineOperand &Src1 = MI.getOperand(1);   in shrinkScalarCompare() local
  189    if (!Src1.isImm())   in shrinkScalarCompare()
  200      if (isKImmOrKUImmOperand(TII, Src1, HasUImm)) {   in shrinkScalarCompare()
  214    if ((TII->sopkIsZext(SOPKOpc) && isKUImmOperand(TII, Src1)) ||   in shrinkScalarCompare()
  215        (!TII->sopkIsZext(SOPKOpc) && isKImmOperand(TII, Src1))) {   in shrinkScalarCompare()
  322    MachineOperand *Src1 = &MI.getOperand(2);   in shrinkScalarLogicOp() local
  324    MachineOperand *SrcImm = Src1;   in shrinkScalarLogicOp()
  636        MachineOperand *Src1 = &MI.getOperand(2);   in runOnMachineFunction() local
  638        if (!Src0->isReg() && Src1->isReg()) {   in runOnMachineFunction()
  640          std::swap(Src0, Src1);   in runOnMachineFunction()
  [all …]
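The `isKImmOperand` / `isKUImmOperand` checks above gate the shrink of a 32-bit scalar compare into its 16-bit-immediate SOPK form on whether the constant fits the 16-bit field. A minimal sketch of that fit test using the usual MathExtras helpers (illustrative only; the real helpers also consider whether the constant could instead be encoded as an inline operand):

    #include "llvm/Support/MathExtras.h"

    // Can Imm live in the 16-bit literal field of an SOPK-format instruction?
    static bool fitsSopkImm(int64_t Imm, bool IsUnsignedCompare) {
      return IsUnsignedCompare ? llvm::isUInt<16>(Imm) : llvm::isInt<16>(Imm);
    }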
|
D | SIPeepholeSDWA.cpp |
  571    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);   in matchSDWAOperand() local
  573    if (Register::isPhysicalRegister(Src1->getReg()) ||   in matchSDWAOperand()
  580          Dst, Src1, *Imm == 16 ? WORD_1 : BYTE_3, UNUSED_PAD);   in matchSDWAOperand()
  583          Src1, Dst, *Imm == 16 ? WORD_1 : BYTE_3, false, false,   in matchSDWAOperand()
  609    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);   in matchSDWAOperand() local
  612    if (Register::isPhysicalRegister(Src1->getReg()) ||   in matchSDWAOperand()
  618      return std::make_unique<SDWADstOperand>(Dst, Src1, BYTE_1, UNUSED_PAD);   in matchSDWAOperand()
  621          Src1, Dst, BYTE_1, false, false,   in matchSDWAOperand()
  644    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);   in matchSDWAOperand() local
  645    auto Offset = foldToImm(*Src1);   in matchSDWAOperand()
  [all …]
|
D | SIFoldOperands.cpp |
  1005    MachineOperand *Src1 = getImmOrMaterializedImm(MRI, MI->getOperand(Src1Idx));   in tryConstantFoldOp() local
  1007    if (!Src0->isImm() && !Src1->isImm())   in tryConstantFoldOp()
  1026    if (Src0->isImm() && Src1->isImm()) {   in tryConstantFoldOp()
  1028      if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))   in tryConstantFoldOp()
  1045    if (Src0->isImm() && !Src1->isImm()) {   in tryConstantFoldOp()
  1046      std::swap(Src0, Src1);   in tryConstantFoldOp()
  1050    int32_t Src1Val = static_cast<int32_t>(Src1->getImm());   in tryConstantFoldOp()
  1109    const MachineOperand *Src1 = TII->getNamedOperand(*MI, AMDGPU::OpName::src1);   in tryFoldInst() local
  1112    if (Src1->isIdenticalTo(*Src0) &&   in tryFoldInst()
  1275    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);   in isClamp() local
  [all …]
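`tryConstantFoldOp` above first materializes register operands that are really immediates, then either folds the whole instruction when both sources are constant or canonicalizes the lone constant into the second slot so the later identity folds only have one shape to handle. A compressed sketch of that control flow, using only the names visible above; everything elided is paraphrased, not quoted:

    // Inside tryConstantFoldOp(), after Src0/Src1 have been materialized:
    if (!Src0->isImm() && !Src1->isImm())
      return false;                        // nothing constant to fold
    if (Src0->isImm() && Src1->isImm()) {
      int32_t NewImm;
      if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
        return false;                      // evaluator does not handle this opcode
      // ...rewrite the instruction into a move of NewImm (elided)...
      return true;
    }
    if (Src0->isImm() && !Src1->isImm())
      std::swap(Src0, Src1);               // keep the lone immediate in Src1
    int32_t Src1Val = static_cast<int32_t>(Src1->getImm());
    // ...identity simplifications (e.g. OR with 0, AND with -1) follow (elided)...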
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonGenMux.cpp |
  134    unsigned getMuxOpcode(const MachineOperand &Src1,
  208  unsigned HexagonGenMux::getMuxOpcode(const MachineOperand &Src1,   in getMuxOpcode() argument
  210    bool IsReg1 = Src1.isReg(), IsReg2 = Src2.isReg();   in getMuxOpcode()
  305      MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);   in genMuxInBlock() local
  306      Register SR1 = Src1->isReg() ? Src1->getReg() : Register();   in genMuxInBlock()
  323      MachineOperand *SrcT = (MinX == CI.TrueX) ? Src1 : Src2;   in genMuxInBlock()
  324      MachineOperand *SrcF = (MinX == CI.FalseX) ? Src1 : Src2;   in genMuxInBlock()
|
D | HexagonPeephole.cpp |
  156        MachineOperand &Src1 = MI.getOperand(1);   in runOnMachineFunction() local
  158        if (Src1.getImm() != 0)   in runOnMachineFunction()
  173        MachineOperand &Src1 = MI.getOperand(1);   in runOnMachineFunction() local
  178        Register SrcReg = Src1.getReg();   in runOnMachineFunction()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/SystemZ/ |
D | SystemZSelectionDAGInfo.cpp |
  146                        SDValue Src1, SDValue Src2, uint64_t Size) {   in emitCLC() argument
  148    EVT PtrVT = Src1.getValueType();   in emitCLC()
  158      return DAG.getNode(SystemZISD::CLC_LOOP, DL, VTs, Chain, Src1, Src2,   in emitCLC()
  161    return DAG.getNode(SystemZISD::CLC, DL, VTs, Chain, Src1, Src2,   in emitCLC()
  180      SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,   in EmitTargetCodeForMemcmp() argument
  187    SDValue CCReg = emitCLC(DAG, DL, Chain, Src2, Src1, Bytes);   in EmitTargetCodeForMemcmp()
  231      SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, SDValue Src1,   in EmitTargetCodeForStrcmp() argument
  234    SDVTList VTs = DAG.getVTList(Src1.getValueType(), MVT::i32, MVT::Other);   in EmitTargetCodeForStrcmp()
  236    SDValue Unused = DAG.getNode(SystemZISD::STRCMP, DL, VTs, Chain, Src2, Src1,   in EmitTargetCodeForStrcmp()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/ |
D | AArch64AdvSIMDScalarPass.cpp |
  299    unsigned Src1 = 0, SubReg1;   in transformInstruction() local
  328      Src1 = MOSrc1->getReg();   in transformInstruction()
  347    if (!Src1) {   in transformInstruction()
  349      Src1 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);   in transformInstruction()
  350      insertCopy(TII, MI, Src1, OrigSrc1, KillSrc1);   in transformInstruction()
  364        .addReg(Src1, getKillRegState(KillSrc1), SubReg1);   in transformInstruction()
|