/external/swiftshader/third_party/subzero/src/
D | IceTargetLoweringMIPS32.h |
    166  void _add(Variable *Dest, Variable *Src0, Variable *Src1) {
    167  Context.insert<InstMIPS32Add>(Dest, Src0, Src1);
    170  void _addu(Variable *Dest, Variable *Src0, Variable *Src1) {
    171  Context.insert<InstMIPS32Addu>(Dest, Src0, Src1);
    174  void _and(Variable *Dest, Variable *Src0, Variable *Src1) {
    175  Context.insert<InstMIPS32And>(Dest, Src0, Src1);
    189  Operand *Src1, CondMIPS32::Cond Condition) {
    190  Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1,
    200  Operand *Src1, const InstMIPS32Label *Label,
    202  Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1, Label,
    [all …]
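The hits above all follow one lowering-helper pattern: each _add/_addu/_and wrapper does nothing but append a single MIPS32 instruction to the current lowering context. A minimal standalone sketch of that pattern follows; LoweringContext, Inst, and InstAdd are simplified stand-ins for illustration, not the real Subzero classes.

#include <memory>
#include <utility>
#include <vector>

struct Variable {};                          // stand-in for Ice::Variable
struct Inst { virtual ~Inst() = default; };
struct InstAdd : Inst {                      // stand-in for one target instruction
  InstAdd(Variable *Dest, Variable *Src0, Variable *Src1)
      : Dest(Dest), Src0(Src0), Src1(Src1) {}
  Variable *Dest, *Src0, *Src1;
};

// The context owns the growing instruction list; insert<T>() constructs a
// node in place and appends it, mirroring Context.insert<InstMIPS32Add>(...).
class LoweringContext {
public:
  template <typename InstT, typename... Args> InstT *insert(Args &&...A) {
    Insts.push_back(std::make_unique<InstT>(std::forward<Args>(A)...));
    return static_cast<InstT *>(Insts.back().get());
  }

private:
  std::vector<std::unique_ptr<Inst>> Insts;
};

// The wrapper itself is one line: name the instruction, forward the operands.
void emitAdd(LoweringContext &Context, Variable *Dest, Variable *Src0,
             Variable *Src1) {
  Context.insert<InstAdd>(Dest, Src0, Src1);
}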
|
D | IceTargetLoweringARM32.h |
    213  Operand *Src0, Operand *Src1);
    253  Operand *Src0, Operand *Src1);
    255  Operand *Src1);
    257  Operand *Src1);
    259  Operand *Src1);
    319  void lowerIDivRem(Variable *Dest, Variable *T, Variable *Src0R, Operand *Src1,
    327  void _add(Variable *Dest, Variable *Src0, Operand *Src1,
    329  Context.insert<InstARM32Add>(Dest, Src0, Src1, Pred);
    331  void _adds(Variable *Dest, Variable *Src0, Operand *Src1,
    334  Context.insert<InstARM32Add>(Dest, Src0, Src1, Pred, SetFlags);
    [all …]
|
D | IceTargetLoweringX86Base.h |
    424  Operand *legalizeSrc0ForCmp(Operand *Src0, Operand *Src1);
    520  void _adc_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    521  AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    522  Context.insert<typename Traits::Insts::AdcRMW>(DestSrc0, Src1);
    528  void _add_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    529  AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    530  Context.insert<typename Traits::Insts::AddRMW>(DestSrc0, Src1);
    555  void _and_rmw(X86OperandMem *DestSrc0, Operand *Src1) {
    556  AutoMemorySandboxer<> _(this, &DestSrc0, &Src1);
    557  Context.insert<typename Traits::Insts::AndRMW>(DestSrc0, Src1);
    [all …]
|
D | IceTargetLoweringX86BaseImpl.h |
    802  Operand *&Src0, Operand *&Src1) {
    803  if (Src0 == LoadDest && Src1 != LoadDest) {
    807  if (Src0 != LoadDest && Src1 == LoadDest) {
    808  Src1 = LoadSrc;
    858  Operand *Src1 = Arith->getSrc(1);
    859  if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {
    861  Arith->getDest(), Src0, Src1);
    865  Operand *Src1 = Icmp->getSrc(1);
    866  if (canFoldLoadIntoBinaryInst(LoadSrc, LoadDest, Src0, Src1)) {
    868  Icmp->getDest(), Src0, Src1);
    [all …]
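The fragments at lines 802-808 outline the load-folding check used just below for the Arith and Icmp cases: when exactly one source of the consuming instruction is the destination of the preceding load, that source is rewritten to the load's memory operand so the load itself can be folded away. A self-contained sketch of that check; the branches not visible in the excerpt (the Src0 assignment and the return values) are filled in by symmetry and should be read as an assumption.

struct Operand {}; // stand-in for Ice::Operand

static bool canFoldLoadIntoBinaryInst(Operand *LoadSrc, Operand *LoadDest,
                                      Operand *&Src0, Operand *&Src1) {
  // Fold only when exactly one source consumes the loaded value; rewriting
  // both would duplicate the memory access.
  if (Src0 == LoadDest && Src1 != LoadDest) {
    Src0 = LoadSrc;
    return true;
  }
  if (Src0 != LoadDest && Src1 == LoadDest) {
    Src1 = LoadSrc;
    return true;
  }
  return false;
}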
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/ExecutionEngine/Interpreter/ |
D | Execution.cpp |
    105  Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
    108  static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
    119  static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
    130  static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
    141  static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
    152  static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
    156  Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    159  Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    169  Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
    174  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    [all …]
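Each executeF*Inst hit above has the same shape: switch on the operand type and update the matching member of the GenericValue union, with FRem delegating to fmod. A simplified, self-contained sketch follows; the real interpreter switches on Type::getTypeID() and also handles vector aggregates.

#include <cmath>

struct GenericValue {        // reduced stand-in for llvm::GenericValue
  float FloatVal = 0.0f;
  double DoubleVal = 0.0;
};
enum class TypeID { Float, Double };

// Mirrors the executeFRemInst pattern at lines 152-159: pick the union
// member that matches the operand type and apply fmod to it.
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, TypeID Ty) {
  switch (Ty) {
  case TypeID::Float:
    Dest.FloatVal = std::fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case TypeID::Double:
    Dest.DoubleVal = std::fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  }
}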
|
/external/llvm/lib/ExecutionEngine/Interpreter/ |
D | Execution.cpp |
    52   Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
    55   static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
    66   static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
    77   static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
    88   static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
    99   static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
    103  Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    106  Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    116  Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
    121  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    [all …]
|
/external/llvm-project/llvm/lib/ExecutionEngine/Interpreter/ |
D | Execution.cpp |
    105  Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
    108  static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
    119  static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
    130  static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
    141  static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
    152  static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
    156  Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    159  Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    169  Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
    175  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
    [all …]
|
/external/swiftshader/third_party/subzero/unittest/AssemblerX8664/ |
D | ControlFlow.cpp |
    17   #define TestJ(C, Near, Dest, Src0, Value0, Src1, Value1) \
    20   "(" #C ", " #Near ", " #Dest ", " #Src0 ", " #Value0 ", " #Src1 \
    25   __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \
    27   __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \
    34   ASSERT_EQ(Value1, test.Src1()) << TestString; \
    39   #define TestImpl(Dst, Src0, Src1) \
    41   TestJ(o, Near, Dst, Src0, 0x80000000ul, Src1, 0x1ul); \
    42   TestJ(o, Far, Dst, Src0, 0x80000000ul, Src1, 0x1ul); \
    43   TestJ(no, Near, Dst, Src0, 0x1ul, Src1, 0x1ul); \
    44   TestJ(no, Far, Dst, Src0, 0x1ul, Src1, 0x1ul); \
    [all …]
|
D | DataMov.cpp |
    422  #define TestRegReg(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \
    425  "(" #C ", " #Dest ", " #IsTrue ", " #Src0 ", " #Value0 ", " #Src1 \
    428  __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \
    430  __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \
    432  Encoded_GPR_##Src1()); \
    462  #define TestValue(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \
    464  TestRegReg(C, Dest, IsTrue, Src0, Value0, Src1, Value1); \
    468  #define TestImpl(Dest, Src0, Src1) \
    470  TestValue(o, Dest, 1u, Src0, 0x80000000u, Src1, 0x1u); \
    471  TestValue(o, Dest, 0u, Src0, 0x1u, Src1, 0x10000000u); \
    [all …]
|
D | GPRArith.cpp |
    33   #define TestSetCC(C, Dest, IsTrue, Src0, Value0, Src1, Value1) \
    36   "(" #C ", " #Dest ", " #IsTrue ", " #Src0 ", " #Value0 ", " #Src1 \
    41   __ mov(IceType_i32, Encoded_GPR_##Src1(), Immediate(Value1)); \
    42   __ cmp(IceType_i32, Encoded_GPR_##Src0(), Encoded_GPR_##Src1()); \
    57   #define TestImpl(Dest, Src0, Src1) \
    59   TestSetCC(o, Dest, 1u, Src0, 0x80000000u, Src1, 0x1u); \
    60   TestSetCC(o, Dest, 0u, Src0, 0x1u, Src1, 0x10000000u); \
    61   TestSetCC(no, Dest, 1u, Src0, 0x1u, Src1, 0x10000000u); \
    62   TestSetCC(no, Dest, 0u, Src0, 0x80000000u, Src1, 0x1u); \
    63   TestSetCC(b, Dest, 1u, Src0, 0x1, Src1, 0x80000000u); \
    [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/GlobalISel/ |
D | MachineIRBuilder.h |
    1227  const SrcOp &Src1,
    1229  return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
    1244  const SrcOp &Src1,
    1246  return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
    1260  const SrcOp &Src1,
    1262  return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
    1266  const SrcOp &Src1,
    1268  return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
    1272  const SrcOp &Src1,
    1274  return buildInstr(TargetOpcode::G_SMULH, {Dst}, {Src0, Src1}, Flags);
    [all …]
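Every buildAdd/buildSub/buildMul overload above forwards to buildInstr with the matching generic opcode, so callers can compose them directly. A short usage sketch, assuming the usual GlobalISel setup (an initialized MachineIRBuilder at an insertion point, and virtual registers of type Ty); the helper below is illustrative, not taken from the file.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;

// Builds G_MUL %m, %src0, %src1 followed by G_ADD %m, %src2; passing an LLT
// as the destination lets the builder create a fresh virtual register.
static MachineInstrBuilder emitMulAdd(MachineIRBuilder &B, LLT Ty,
                                      Register Src0, Register Src1,
                                      Register Src2) {
  auto Mul = B.buildMul(Ty, Src0, Src1);
  return B.buildAdd(Ty, Mul, Src2);
}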
|
D | ConstantFoldingMIRBuilder.h |
    51   const SrcOp &Src1 = SrcOps[1];
    53   ConstantFoldBinOp(Opc, Src0.getReg(), Src1.getReg(), *getMRI()))
    62   const SrcOp &Src1 = SrcOps[1];
    64   ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
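The intent visible at lines 51-64 is build-time folding: before emitting a binary or extension instruction, try to evaluate it on constant operands and materialize the result instead. A stripped-down sketch of that decision, with plain integers standing in for ConstantFoldBinOp and the register lookup.

#include <cstdint>
#include <optional>

enum class Opc { Add, Sub, Mul };

// Returns the folded value when both operands are known constants, or
// std::nullopt to tell the caller to emit the instruction as usual.
static std::optional<int64_t> constantFoldBinOp(Opc Op,
                                                std::optional<int64_t> C0,
                                                std::optional<int64_t> C1) {
  if (!C0 || !C1)
    return std::nullopt;
  switch (Op) {
  case Opc::Add: return *C0 + *C1;
  case Opc::Sub: return *C0 - *C1;
  case Opc::Mul: return *C0 * *C1;
  }
  return std::nullopt;
}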
|
/external/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/ |
D | MachineIRBuilder.h |
    975   MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1,
    1374  const SrcOp &Src1,
    1376  return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
    1391  const SrcOp &Src1,
    1393  return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
    1407  const SrcOp &Src1,
    1409  return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
    1413  const SrcOp &Src1,
    1415  return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
    1419  const SrcOp &Src1,
    [all …]
|
D | ConstantFoldingMIRBuilder.h |
    51   const SrcOp &Src1 = SrcOps[1];
    53   ConstantFoldBinOp(Opc, Src0.getReg(), Src1.getReg(), *getMRI()))
    62   const SrcOp &Src1 = SrcOps[1];
    64   ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI()))
|
/external/llvm-project/llvm/lib/Target/AMDGPU/ |
D | AMDGPUInstCombineIntrinsic.cpp |
    40   static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
    42   APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
    47   return maxnum(Src1, Src2);
    49   APFloat::cmpResult Cmp1 = Max3.compare(Src1);
    54   return maxnum(Src0, Src1);
    275  Value *Src1 = II.getArgOperand(1);
    276  const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
    282  if (isa<UndefValue>(Src1)) {
    320  II, 1, ConstantInt::get(Src1->getType(), Mask & ~(S_NAN | Q_NAN)));
    333  {Src0, ConstantInt::get(Src1->getType(), Mask & FullMask)});
    [all …]
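The fmed3AMDGCN hits (lines 40-54) compute a median of three by taking the overall maximum and then returning the maximum of the remaining two inputs. A standalone sketch of the same idea with plain floats instead of APFloat; NaN handling, which the real code does separately, is omitted here.

#include <cmath>

static float fmed3(float Src0, float Src1, float Src2) {
  float Max3 = std::fmax(std::fmax(Src0, Src1), Src2);
  if (Max3 == Src0)
    return std::fmax(Src1, Src2); // Src0 is the largest: median of the rest.
  if (Max3 == Src1)
    return std::fmax(Src0, Src2); // Src1 is the largest.
  return std::fmax(Src0, Src1);   // Src2 is the largest.
}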
|
D | R600ExpandSpecialInstrs.cpp |
    161  Register Src1 =
    165  (void) Src1;
    167  (TRI.getEncodingValue(Src1) & 0xff) < 127)
    168  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
    212  unsigned Src1 = 0;
    218  Src1 = MI.getOperand(Src1Idx).getReg();
    224  Src1 = TRI.getSubReg(Src1, SubRegIndex);
    229  Src1 = TRI.getSubReg(Src0, SubRegIndex1);
    264  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
|
D | SIOptimizeExecMasking.cpp |
    110  const MachineOperand &Src1 = MI.getOperand(1);
    111  if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
    126  const MachineOperand &Src1 = MI.getOperand(1);
    127  if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
    422  MachineOperand &Src1 = SaveExecInst->getOperand(2);
    427  OtherOp = &Src1;
    428  } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
|
D | GCNDPPCombine.cpp |
    237  if (auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1)) {
    238  if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src1)) {
    243  DPPInst.add(*Src1);
    330  auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    331  if (!Src1 || !Src1->isReg()) {
    339  CombOldVGPR = getRegSubRegPair(*Src1);
    517  auto *Src1 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src1);
    518  if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
    524  if (Src1 && Src1->isIdenticalTo(*Src0)) {
    525  assert(Src1->isReg());
    [all …]
|
/external/llvm-project/llvm/unittests/CodeGen/GlobalISel/ |
D | PatternMatchTest.cpp |
    55   Register Src0, Src1, Src2;
    57   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
    60   EXPECT_EQ(Src1, Copies[1]);
    67   m_GMul(m_Reg(Src0), m_Reg(Src1)));
    70   EXPECT_EQ(Src1, Copies[2]);
    74   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
    77   EXPECT_EQ(Src1, Copies[1]);
    118  m_GAnd(m_Reg(Src0), m_Reg(Src1)));
    121  EXPECT_EQ(Src1, Copies[1]);
    127  m_GOr(m_Reg(Src0), m_Reg(Src1)));
    [all …]
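These tests exercise the MIPatternMatch helpers: mi_match inspects the instruction defining a register and, on success, binds the matched operands. A short usage sketch in the same spirit, assuming a MachineRegisterInfo and a register defined by a G_ADD.

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
using namespace llvm;
using namespace llvm::MIPatternMatch;

// True when DstReg is defined by G_ADD; Src0/Src1 receive its operands.
static bool matchAddOperands(Register DstReg, const MachineRegisterInfo &MRI,
                             Register &Src0, Register &Src1) {
  return mi_match(DstReg, MRI, m_GAdd(m_Reg(Src0), m_Reg(Src1)));
}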
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/ |
D | R600ExpandSpecialInstrs.cpp |
    161  Register Src1 =
    165  (void) Src1;
    167  (TRI.getEncodingValue(Src1) & 0xff) < 127)
    168  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
    212  unsigned Src1 = 0;
    218  Src1 = MI.getOperand(Src1Idx).getReg();
    224  Src1 = TRI.getSubReg(Src1, SubRegIndex);
    229  Src1 = TRI.getSubReg(Src0, SubRegIndex1);
    264  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
|
D | SIOptimizeExecMasking.cpp |
    110  const MachineOperand &Src1 = MI.getOperand(1);
    111  if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC)
    126  const MachineOperand &Src1 = MI.getOperand(1);
    127  if (Src1.isReg() && Src1.getReg() == AMDGPU::EXEC_LO)
    390  MachineOperand &Src1 = SaveExecInst->getOperand(2);
    395  OtherOp = &Src1;
    396  } else if (Src1.isReg() && Src1.getReg() == CopyFromExec) {
|
/external/llvm/lib/Target/AMDGPU/ |
D | R600ExpandSpecialInstrs.cpp |
    225  unsigned Src1 = BMI->getOperand(
    229  (void) Src1;
    231  (TRI.getEncodingValue(Src1) & 0xff) < 127)
    232  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
    276  unsigned Src1 = 0;
    282  Src1 = MI.getOperand(Src1Idx).getReg();
    288  Src1 = TRI.getSubReg(Src1, SubRegIndex);
    293  Src1 = TRI.getSubReg(Src0, SubRegIndex1);
    328  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
|
D | SIShrinkInstructions.cpp |
    106  const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    110  if (Src1 && (!isVGPR(Src1, TRI, MRI) || (Src1Mod && Src1Mod->getImm() != 0)))
    277  const MachineOperand &Src1 = MI.getOperand(2);
    289  if (Src1.isImm() && isKImmOperand(TII, Src1)) {
    381  const MachineOperand *Src1 =
    383  if (Src1)
    384  Inst32.addOperand(*Src1);
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonGenMux.cpp |
    96   unsigned getMuxOpcode(const MachineOperand &Src1,
    175  unsigned HexagonGenMux::getMuxOpcode(const MachineOperand &Src1,
    177  bool IsReg1 = Src1.isReg(), IsReg2 = Src2.isReg();
    266  MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
    267  unsigned SR1 = Src1->isReg() ? Src1->getReg() : 0;
    284  MachineOperand *SrcT = (MinX == CI.TrueX) ? Src1 : Src2;
    285  MachineOperand *SrcF = (MinX == CI.FalseX) ? Src1 : Src2;
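getMuxOpcode (lines 96 and 175-177) picks one of four mux forms depending on whether each source operand is a register or an immediate. A sketch of just that dispatch; the opcode names below are illustrative placeholders, not the real Hexagon enumerators.

enum MuxOpcode { MUX_RR, MUX_RI, MUX_IR, MUX_II };

static MuxOpcode selectMuxOpcode(bool IsReg1, bool IsReg2) {
  if (IsReg1)
    return IsReg2 ? MUX_RR : MUX_RI; // reg/reg vs. reg/imm
  return IsReg2 ? MUX_IR : MUX_II;   // imm/reg vs. imm/imm
}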
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonGenMux.cpp |
    134  unsigned getMuxOpcode(const MachineOperand &Src1,
    208  unsigned HexagonGenMux::getMuxOpcode(const MachineOperand &Src1,
    210  bool IsReg1 = Src1.isReg(), IsReg2 = Src2.isReg();
    305  MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
    306  Register SR1 = Src1->isReg() ? Src1->getReg() : Register();
    323  MachineOperand *SrcT = (MinX == CI.TrueX) ? Src1 : Src2;
    324  MachineOperand *SrcF = (MinX == CI.FalseX) ? Src1 : Src2;
|