/external/llvm/lib/Target/AMDGPU/ |
D | R600InstrInfo.cpp |
    71    NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0)) in copyPhysReg()
    148   return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1; in isLDSNoRetInstr()
    152   return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1; in isLDSRetInstr()
    253   AMDGPU::OpName::src0, in getSrcIdx()
    254   AMDGPU::OpName::src1, in getSrcIdx()
    255   AMDGPU::OpName::src2 in getSrcIdx()
    264   {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel}, in getSelIdx()
    265   {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel}, in getSelIdx()
    266   {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel}, in getSelIdx()
    267   {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X}, in getSelIdx()
    [all …]
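Note on the pattern these R600 hits share: getOperandIdx() maps an operand name to its position for a given opcode and returns -1 when the opcode has no operand of that name, which is how isLDSRetInstr()/isLDSNoRetInstr() tell LDS instructions with and without a dst apart. A minimal sketch of that idiom, assuming the backend-internal R600InstrInfo.h header is in scope (the helper name is hypothetical, not from the file):

    // Sketch only: the "index or -1" presence test used by the hits above.
    #include "R600InstrInfo.h"   // backend-internal header, lib/Target/AMDGPU

    static bool hasNamedDst(const R600InstrInfo &TII, unsigned Opcode) {
      // getOperandIdx returns the operand's position for this opcode,
      // or -1 if the instruction has no operand named "dst".
      return TII.getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1;
    }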
|
D | R600ExpandSpecialInstrs.cpp |
    83    int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst); in runOnMachineFunction()
    90    AMDGPU::OpName::pred_sel); in runOnMachineFunction()
    92    AMDGPU::OpName::pred_sel); in runOnMachineFunction()
    112   TII->setImmOperand(*PredSet, AMDGPU::OpName::update_exec_mask, 1); in runOnMachineFunction()
    114   TII->setImmOperand(*PredSet, AMDGPU::OpName::update_pred, 1); in runOnMachineFunction()
    223   TII->getOperandIdx(Opcode, AMDGPU::OpName::src0)) in runOnMachineFunction()
    226   TII->getOperandIdx(Opcode, AMDGPU::OpName::src1)) in runOnMachineFunction()
    273   TII->getOperandIdx(MI, AMDGPU::OpName::dst)).getReg(); in runOnMachineFunction()
    275   TII->getOperandIdx(MI, AMDGPU::OpName::src0)).getReg(); in runOnMachineFunction()
    280   int Src1Idx = TII->getOperandIdx(MI, AMDGPU::OpName::src1); in runOnMachineFunction()
    [all …]
|
D | SIShrinkInstructions.cpp |
    84    const MachineOperand *Src2 = TII->getNamedOperand(MI, AMDGPU::OpName::src2); in canShrink()
    97    TII->hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) in canShrink()
    106   const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in canShrink()
    108   TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); in canShrink()
    115   if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) in canShrink()
    119   if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in canShrink()
    122   return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp); in canShrink()
    138   int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0); in foldImmediates()
    349   TII->getNamedOperand(MI, AMDGPU::OpName::src2); in runOnMachineFunction()
    369   int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); in runOnMachineFunction()
    [all …]
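The canShrink() hits show how the VOP3-to-VOP2 shrinking pass queries named operands before rewriting an instruction. A simplified, hedged sketch of that check follows; it is not the full in-tree canShrink(), which also handles special cases such as carry operands, and the helper name is mine:

    // Simplified sketch of the checks above; assumes the backend-internal
    // SIInstrInfo.h header is in scope.
    #include "SIInstrInfo.h"

    static bool looksShrinkable(const SIInstrInfo *TII, const MachineInstr &MI) {
      // getNamedOperand returns nullptr when the opcode has no such operand.
      if (TII->getNamedOperand(MI, AMDGPU::OpName::src2))
        return false;   // the 32-bit encoding has no src2
      if (TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
          TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
          TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
        return false;   // source/output modifiers require the VOP3 encoding
      return !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp);
    }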
|
D | R600ClauseMergePass.cpp |
    77    .getOperand(TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::COUNT)) in getCFAluSize()
    84    .getOperand(TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::Enabled)) in isCFAluEnabled()
    90    int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT); in cleanPotentialDisabledCFAlu()
    109   int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT); in mergeIfPossible()
    121   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE0); in mergeIfPossible()
    123   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK0); in mergeIfPossible()
    125   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR0); in mergeIfPossible()
    137   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_MODE1); in mergeIfPossible()
    139   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_BANK1); in mergeIfPossible()
    141   TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::KCACHE_ADDR1); in mergeIfPossible()
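These hits read and rewrite immediates of CF_ALU clause instructions by named index rather than by hard-coded position. A minimal sketch of reading the clause size that way (the helper name is hypothetical, not from the pass):

    // Sketch: fetch the COUNT immediate of a CF_ALU clause by named index.
    #include "R600InstrInfo.h"   // backend-internal header

    static int64_t getCFAluCount(const R600InstrInfo *TII, const MachineInstr &MI) {
      int CntIdx = TII->getOperandIdx(AMDGPU::CF_ALU, AMDGPU::OpName::COUNT);
      return MI.getOperand(CntIdx).getImm();
    }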
|
D | SIInstrInfo.cpp |
    53    static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { in nodesHaveSameOperandValue() argument
    57    int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); in nodesHaveSameOperandValue()
    58    int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); in nodesHaveSameOperandValue()
    123   if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 || in areLoadsFromSameBasePtr()
    124   AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1) in areLoadsFromSameBasePtr()
    160   if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || in areLoadsFromSameBasePtr()
    162   !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || in areLoadsFromSameBasePtr()
    163   !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) in areLoadsFromSameBasePtr()
    166   int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
    167   int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); in areLoadsFromSameBasePtr()
    [all …]
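nodesHaveSameOperandValue() compares the same-named operand of two selection-DAG nodes. One subtlety worth spelling out: getNamedOperandIdx() yields an index into the MachineInstr operand list, which counts the definition first, while a MachineSDNode's operand list does not include the result, so the index needs an adjustment. A hedged sketch of such a helper (simplified; the in-tree version may differ in details, and the AMDGPU named-operand declarations are assumed to come from the backend's generated headers):

    // Sketch of a by-name SDNode operand comparison.
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static bool sameNamedOperand(SDNode *N0, SDNode *N1, unsigned OpName) {
      int Op0Idx = AMDGPU::getNamedOperandIdx(N0->getMachineOpcode(), OpName);
      int Op1Idx = AMDGPU::getNamedOperandIdx(N1->getMachineOpcode(), OpName);
      if (Op0Idx == -1 || Op1Idx == -1)
        return Op0Idx == Op1Idx;   // equal only if the operand is absent on both
      // MachineInstr indices count the def; SDNode operands do not, hence -1.
      return N0->getOperand(Op0Idx - 1) == N1->getOperand(Op1Idx - 1);
    }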
|
D | SILoadStoreOptimizer.cpp |
    171   int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr); in findMatchingDSInst()
    180   AMDGPU::OpName::offset); in findMatchingDSInst()
    200   const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr); in mergeRead2Pair()
    202   const MachineOperand *Dest0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst); in mergeRead2Pair()
    203   const MachineOperand *Dest1 = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst); in mergeRead2Pair()
    206   = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff; in mergeRead2Pair()
    208   = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff; in mergeRead2Pair()
    295   const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr); in mergeWrite2Pair()
    296   const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0); in mergeWrite2Pair()
    298   = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0); in mergeWrite2Pair()
    [all …]
|
D | R600Packetizer.cpp |
    89    int OperandIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::write); in getPreviousVector()
    92    int DstIdx = TII->getOperandIdx(BI->getOpcode(), AMDGPU::OpName::dst); in getPreviousVector()
    134   AMDGPU::OpName::src0, in substitutePV()
    135   AMDGPU::OpName::src1, in substitutePV()
    136   AMDGPU::OpName::src2 in substitutePV()
    190   int OpI = TII->getOperandIdx(MII->getOpcode(), AMDGPU::OpName::pred_sel), in isLegalToPacketizeTogether()
    191   OpJ = TII->getOperandIdx(MIJ->getOpcode(), AMDGPU::OpName::pred_sel); in isLegalToPacketizeTogether()
    225   unsigned LastOp = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::last); in setIsLastBit()
    306   AMDGPU::OpName::bank_swizzle); in addToPacket()
    310   TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::bank_swizzle); in addToPacket()
|
D | R600ISelLowering.cpp |
    223    int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst); in EmitInstrWithCustomInserter()
    288    int Idx = TII->getOperandIdx(*MIB, AMDGPU::OpName::literal); in EmitInstrWithCustomInserter()
    296    TII->setImmOperand(*NewMI, AMDGPU::OpName::src0_sel, in EmitInstrWithCustomInserter()
    2192   bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1; in FoldOperand()
    2203   TII->getOperandIdx(Opcode, AMDGPU::OpName::src0), in FoldOperand()
    2204   TII->getOperandIdx(Opcode, AMDGPU::OpName::src1), in FoldOperand()
    2205   TII->getOperandIdx(Opcode, AMDGPU::OpName::src2), in FoldOperand()
    2206   TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X), in FoldOperand()
    2207   TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y), in FoldOperand()
    2208   TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z), in FoldOperand()
    [all …]
|
D | SIRegisterInfo.cpp |
    259   AMDGPU::OpName::vaddr) && in getFrameIndexInstrOffset()
    263   AMDGPU::OpName::offset); in getFrameIndexInstrOffset()
    324   MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr); in resolveFrameIndex()
    329   MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset); in resolveFrameIndex()
    640   TII->getNamedOperand(*MI, AMDGPU::OpName::src), in eliminateFrameIndex()
    641   TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(), in eliminateFrameIndex()
    642   TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(), in eliminateFrameIndex()
    644   TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(), RS); in eliminateFrameIndex()
    655   TII->getNamedOperand(*MI, AMDGPU::OpName::dst), in eliminateFrameIndex()
    656   TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(), in eliminateFrameIndex()
    [all …]
|
D | SILowerControlFlow.cpp |
    423   if (const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val)) { in splitLoadM0BlockLiveIns()
    435   const MachineOperand *Src = TII->getNamedOperand(MI, AMDGPU::OpName::src); in splitLoadM0BlockLiveIns()
    531   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in loadM0()
    548   MachineOperand *SaveOp = TII->getNamedOperand(MI, AMDGPU::OpName::sdst); in loadM0()
    633   const MachineOperand *SrcVec = TII->getNamedOperand(MI, AMDGPU::OpName::src); in indirectSrc()
    634   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); in indirectSrc()
    639   const MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in indirectSrc()
    662   int Offset = TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm(); in indirectDst()
    665   const MachineOperand *Val = TII->getNamedOperand(MI, AMDGPU::OpName::val); in indirectDst()
    668   MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::idx); in indirectDst()
|
D | SIInstrInfo.h |
    396   unsigned OpName) const;
    508   unsigned OpName) const { in getNamedOperand() argument
    509   return getNamedOperand(const_cast<MachineInstr &>(MI), OpName); in getNamedOperand()
    513   int64_t getNamedImmOperand(const MachineInstr &MI, unsigned OpName) const { in getNamedImmOperand() argument
    514   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName); in getNamedImmOperand()
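This header declares the named-operand accessors used throughout the SI passes above: the const overload forwards through a const_cast, and getNamedImmOperand() looks the index up by name and reads the immediate. A typical call site, as a sketch (the wrapper function is hypothetical):

    // Sketch: reading a named immediate through the accessor declared above.
    #include "SIInstrInfo.h"   // backend-internal header

    static int64_t offsetImmOf(const SIInstrInfo *TII, const MachineInstr &MI) {
      // Only meaningful for opcodes that actually have an "offset" operand.
      return TII->getNamedImmOperand(MI, AMDGPU::OpName::offset);
    }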
|
D | SIInsertWaits.cpp |
    249   MachineOperand *Data = TII->getNamedOperand(MI, AMDGPU::OpName::data); in isOpRelevant()
    255   MachineOperand *Data0 = TII->getNamedOperand(MI, AMDGPU::OpName::data0); in isOpRelevant()
    259   MachineOperand *Data1 = TII->getNamedOperand(MI, AMDGPU::OpName::data1); in isOpRelevant()
|
D | R600Defines.h | 65 namespace OpName {
|
/external/tensorflow/tensorflow/core/kernels/ |
D | linalg_ops_common.h |
    185   #define REGISTER_LINALG_OP_CPU(OpName, OpClass, Scalar) \ argument
    187   Name(OpName).Device(DEVICE_CPU).TypeConstraint<Scalar>("T"), OpClass)
    189   #define REGISTER_LINALG_OP_GPU(OpName, OpClass, Scalar) \ argument
    191   Name(OpName).Device(DEVICE_GPU).TypeConstraint<Scalar>("T"), OpClass)
    194   #define REGISTER_LINALG_OP(OpName, OpClass, Scalar) \ argument
    195   REGISTER_LINALG_OP_CPU(OpName, OpClass, Scalar)
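These macros are thin wrappers over REGISTER_KERNEL_BUILDER that constrain the kernel on the "T" type attribute. A representative invocation and its rough CPU expansion (the op name and kernel class below are illustrative, not taken from this header):

    // Illustrative use; MatrixInverseOp stands in for any linear-algebra kernel.
    REGISTER_LINALG_OP_CPU("MatrixInverse", (MatrixInverseOp<float>), float);
    // expands (roughly) to:
    // REGISTER_KERNEL_BUILDER(
    //     Name("MatrixInverse").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    //     MatrixInverseOp<float>);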
|
/external/swiftshader/third_party/LLVM/utils/TableGen/ |
D | CodeGenInstruction.cpp |
    159   std::string OpName = Op.substr(1); in ParseOperandName() local
    163   std::string::size_type DotIdx = OpName.find_first_of("."); in ParseOperandName()
    165   SubOpName = OpName.substr(DotIdx+1); in ParseOperandName()
    168   OpName = OpName.substr(0, DotIdx); in ParseOperandName()
    171   unsigned OpIdx = getOperandNamed(OpName); in ParseOperandName()
    271   std::string OpName = P.first; in ProcessDisableEncoding() local
    273   if (OpName.empty()) break; in ProcessDisableEncoding()
    276   std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false); in ProcessDisableEncoding()
|
D | CodeGenDAGPatterns.cpp |
    1742   TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){ in ParseTreePattern() argument
    1753   OpName); in ParseTreePattern()
    1757   if (R->getName() == "node" && !OpName.empty()) { in ParseTreePattern()
    1758   if (OpName.empty()) in ParseTreePattern()
    1760   Args.push_back(OpName); in ParseTreePattern()
    1763   Res->setName(OpName); in ParseTreePattern()
    1768   if (!OpName.empty()) in ParseTreePattern()
    1778   return ParseTreePattern(II, OpName); in ParseTreePattern()
    1802   if (!OpName.empty()) in ParseTreePattern()
    1870   Result->setName(OpName); in ParseTreePattern()
    [all …]
|
D | FastISelEmitter.cpp |
    386   static std::string getLegalCName(std::string OpName) { in getLegalCName() argument
    387   std::string::size_type pos = OpName.find("::"); in getLegalCName()
    389   OpName.replace(pos, 2, "_"); in getLegalCName()
    390   return OpName; in getLegalCName()
|
/external/tensorflow/tensorflow/contrib/lite/tools/ |
D | visualize.py |
    205   def OpName(idx): function
    220   "target": OpName(op_index)
    225   "source": OpName(op_index)
    228   "id": OpName(op_index),
|
/external/llvm/utils/TableGen/ |
D | CodeGenInstruction.cpp |
    164   std::string OpName = Op.substr(1); in ParseOperandName() local
    168   std::string::size_type DotIdx = OpName.find_first_of("."); in ParseOperandName()
    170   SubOpName = OpName.substr(DotIdx+1); in ParseOperandName()
    173   OpName = OpName.substr(0, DotIdx); in ParseOperandName()
    176   unsigned OpIdx = getOperandNamed(OpName); in ParseOperandName()
    279   std::string OpName = P.first; in ProcessDisableEncoding() local
    281   if (OpName.empty()) break; in ProcessDisableEncoding()
    284   std::pair<unsigned,unsigned> Op = ParseOperandName(OpName, false); in ProcessDisableEncoding()
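ParseOperandName() strips the leading '$' and splits an operand reference such as "$src.hi" into the operand name and an optional sub-operand name. A standalone sketch of just that string handling (it mirrors the substr()/find_first_of() calls above, but is not the TableGen code itself):

    // "$src.hi" -> ("src", "hi"); "$dst" -> ("dst", "").
    #include <iostream>
    #include <string>
    #include <utility>

    static std::pair<std::string, std::string> splitOperandName(const std::string &Op) {
      std::string OpName = Op.substr(1);   // drop the leading '$'
      std::string SubOpName;
      std::string::size_type DotIdx = OpName.find_first_of(".");
      if (DotIdx != std::string::npos) {
        SubOpName = OpName.substr(DotIdx + 1);
        OpName = OpName.substr(0, DotIdx);
      }
      return {OpName, SubOpName};
    }

    int main() {
      auto P = splitOperandName("$src.hi");
      std::cout << P.first << " / " << P.second << "\n";   // prints "src / hi"
    }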
|
D | CodeGenDAGPatterns.cpp |
    2086   TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){ in ParseTreePattern() argument
    2097   OpName); in ParseTreePattern()
    2101   if (R->getName() == "node" && !OpName.empty()) { in ParseTreePattern()
    2102   if (OpName.empty()) in ParseTreePattern()
    2104   Args.push_back(OpName); in ParseTreePattern()
    2107   Res->setName(OpName); in ParseTreePattern()
    2113   if (OpName.empty()) in ParseTreePattern()
    2116   Args.push_back(OpName); in ParseTreePattern()
    2117   Res->setName(OpName); in ParseTreePattern()
    2122   if (!OpName.empty()) in ParseTreePattern()
    [all …]
|
D | FastISelEmitter.cpp |
    413   static std::string getLegalCName(std::string OpName) { in getLegalCName() argument
    414   std::string::size_type pos = OpName.find("::"); in getLegalCName()
    416   OpName.replace(pos, 2, "_"); in getLegalCName()
    417   return OpName; in getLegalCName()
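getLegalCName() rewrites an operator name such as "ISD::ADD" into an identifier-friendly form for the emitted C++. A standalone sketch follows; the guard between the hit lines 414 and 416 is elided in the listing and is presumably an npos check, and as in the hits only the first "::" is replaced:

    #include <iostream>
    #include <string>

    static std::string getLegalCName(std::string OpName) {
      std::string::size_type pos = OpName.find("::");
      if (pos != std::string::npos)
        OpName.replace(pos, 2, "_");
      return OpName;
    }

    int main() {
      std::cout << getLegalCName("ISD::ADD") << "\n";   // ISD_ADD
      std::cout << getLegalCName("MVT::i32") << "\n";   // MVT_i32
    }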
|
/external/clang/lib/AST/ |
D | DeclarationName.cpp |
    184   const char *OpName = OperatorNames[N.getCXXOverloadedOperator()]; in print() local
    185   assert(OpName && "not an overloaded operator"); in print()
    188   if (OpName[0] >= 'a' && OpName[0] <= 'z') in print()
    190   OS << OpName; in print()
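The lowercase check decides whether a space is needed after "operator": textual spellings ("new", "delete") print as "operator new", while symbolic ones ("+", "[]") print as "operator+". A standalone illustration of that rule (not Clang's actual print() code):

    #include <iostream>
    #include <string>

    static std::string printOperatorName(const char *OpName) {
      std::string Out = "operator";
      if (OpName[0] >= 'a' && OpName[0] <= 'z')
        Out += ' ';   // "operator new", not "operatornew"
      Out += OpName;
      return Out;
    }

    int main() {
      std::cout << printOperatorName("new") << "\n";   // operator new
      std::cout << printOperatorName("+") << "\n";     // operator+
    }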
|
/external/tensorflow/tensorflow/cc/framework/ |
D | scope_internal.h |
    55   enum class OpName; member
    68   Impl(const Scope& other, Tags::OpName, const string& name,
|
/external/spirv-llvm/lib/SPIRV/libSPIRV/ |
D | SPIRVFunction.cpp | 145 if (Decoder.OpCode == OpName || in decodeBB()
|
/external/tensorflow/tensorflow/cc/gradients/ |
D | README.md | 22 REGISTER_GRADIENT_OP("OpName", OpNameGrad);
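REGISTER_GRADIENT_OP associates a C++ gradient function with an op name. A hedged sketch of what the registered function looks like ("OpName" and the Identity body are placeholders from the README line, not a real op; see the real gradient files such as math_grad.cc for the pattern):

    #include "tensorflow/cc/framework/grad_op_registry.h"
    #include "tensorflow/cc/ops/standard_ops.h"

    namespace tensorflow {
    namespace ops {
    namespace {

    Status OpNameGrad(const Scope& scope, const Operation& op,
                      const std::vector<Output>& grad_inputs,
                      std::vector<Output>* grad_outputs) {
      // Push one gradient Output per op input; Identity is just a placeholder.
      grad_outputs->push_back(Identity(scope, grad_inputs[0]));
      return scope.status();
    }
    REGISTER_GRADIENT_OP("OpName", OpNameGrad);

    }  // anonymous namespace
    }  // namespace ops
    }  // namespace tensorflow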
|