//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the TargetInstrInfo class that is
// common to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "AMDILUtilityFunctions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

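// Pull in the TableGen-generated constructor for AMDGPUGenInstrInfo; the
// GET_INSTRINFO_CTOR guard selects that portion of the generated .inc file.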
#define GET_INSTRINFO_CTOR
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}
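
// Advance \p iter forward to the next branch instruction (BRANCH or one of
// the BRANCH_COND variants) in \p MBB.  Returns true and leaves \p iter on
// the branch if one is found, false otherwise.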
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    // ExpandCaseToAllScalarTypes expands to one case label per scalar type
    // variant of BRANCH_COND; each label falls through to the return below.
    ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

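// Walk backwards from the end of \p MBB over any trailing run of flow-control
// instructions (ENDLOOP/ENDIF/ELSE) and return an iterator to the first
// instruction of that run, i.e. the point where new instructions should be
// inserted ahead of the block's closing flow control.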
MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator tmp = MBB->end();
  if (!MBB->size()) {
    return MBB->end();
  }
  while (--tmp) {
    if (tmp->getOpcode() == AMDGPU::ENDLOOP
        || tmp->getOpcode() == AMDGPU::ENDIF
        || tmp->getOpcode() == AMDGPU::ELSE) {
      if (tmp == MBB->begin()) {
        return tmp;
      } else {
        continue;
      }
    } else {
      // Found a non-flow-control instruction; the run of flow control starts
      // just after it.
      return ++tmp;
    }
  }
  return MBB->end();
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

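// None of the memory-operand folding hooks are implemented yet; returning
// 0/false tells the generic folding code that this target cannot fold or
// unfold memory operands.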
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1
         && "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and their offsets are within 16
  // bytes of each other, then schedule them together.
  // TODO: Make the loads schedule near if it fits in a cacheline.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

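// TargetInstrInfo convention: returning true means the branch condition
// cannot be reversed.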
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

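// Until a target-specific answer is implemented, defer to the static
// predicability flag in the instruction's MCInstrDesc.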
bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

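// Rewrite every virtual register defined by \p MI whose register class is not
// directly supported by the hardware ISA into the equivalent ISA register
// class reported by AMDGPURegisterInfo::getISARegClass.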
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); i++) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}