//===- AArch64InstrInfo.h - AArch64 Instruction Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64INSTRINFO_H

#include "AArch64.h"
#include "AArch64RegisterInfo.h"
#include "AArch64StackOffset.h"
#include "llvm/ADT/Optional.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "AArch64GenInstrInfo.inc"

namespace llvm {

class AArch64Subtarget;
class AArch64TargetMachine;

static const MachineMemOperand::Flags MOSuppressPair =
    MachineMemOperand::MOTargetFlag1;
static const MachineMemOperand::Flags MOStridedAccess =
    MachineMemOperand::MOTargetFlag2;

#define FALKOR_STRIDED_ACCESS_MD "falkor.strided.access"
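
// These flags mark a MachineMemOperand rather than the instruction itself.
// A sketch of how a pass might test for the pairing-suppression hint
// (illustrative only; the in-tree helpers declared below wrap this logic):
//
//   for (const MachineMemOperand *MMO : MI.memoperands())
//     if (MMO->getFlags() & MOSuppressPair)
//       return true;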

class AArch64InstrInfo final : public AArch64GenInstrInfo {
  const AArch64RegisterInfo RI;
  const AArch64Subtarget &Subtarget;

public:
  explicit AArch64InstrInfo(const AArch64Subtarget &STI);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegisterInfo. As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  const AArch64RegisterInfo &getRegisterInfo() const { return RI; }

  unsigned getInstSizeInBytes(const MachineInstr &MI) const override;

  bool isAsCheapAsAMove(const MachineInstr &MI) const override;

  bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg,
                             unsigned &DstReg, unsigned &SubIdx) const override;

  bool
  areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                  const MachineInstr &MIb) const override;

  unsigned isLoadFromStackSlot(const MachineInstr &MI,
                               int &FrameIndex) const override;
  unsigned isStoreToStackSlot(const MachineInstr &MI,
                              int &FrameIndex) const override;

  /// Does this instruction set its full destination register to zero?
  static bool isGPRZero(const MachineInstr &MI);

  /// Does this instruction rename a GPR without modifying bits?
  static bool isGPRCopy(const MachineInstr &MI);

  /// Does this instruction rename an FPR without modifying bits?
  static bool isFPRCopy(const MachineInstr &MI);

  /// Return true if pairing the given load or store is hinted to be
  /// unprofitable.
  static bool isLdStPairSuppressed(const MachineInstr &MI);

  /// Return true if the given load or store is a strided memory access.
  static bool isStridedAccess(const MachineInstr &MI);

  /// Return true if this is an unscaled load/store.
  static bool isUnscaledLdSt(unsigned Opc);
  static bool isUnscaledLdSt(MachineInstr &MI) {
    return isUnscaledLdSt(MI.getOpcode());
  }

  /// Returns the unscaled load/store for the scaled load/store opcode,
  /// if there is a corresponding unscaled variant available.
  static Optional<unsigned> getUnscaledLdSt(unsigned Opc);
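  // For example (assuming the usual scaled/unscaled opcode pairing, shown
  // purely as an illustration): getUnscaledLdSt(AArch64::LDRXui) would
  // return AArch64::LDURXi, while an opcode with no unscaled twin yields
  // None.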

  /// Scaling factor for (scaled or unscaled) load or store.
  static int getMemScale(unsigned Opc);
  static int getMemScale(const MachineInstr &MI) {
    return getMemScale(MI.getOpcode());
  }

  /// Returns the operand index of the immediate for a given load/store
  /// instruction.
  static unsigned getLoadStoreImmIdx(unsigned Opc);

  /// Return true if the given load or store may be paired with another.
  static bool isPairableLdStInst(const MachineInstr &MI);

  /// Return the flag-setting variant of the given opcode, when one exists.
  /// The caller is responsible for ensuring the opcode has a flag-setting
  /// equivalent.
  static unsigned convertToFlagSettingOpc(unsigned Opc, bool &Is64Bit);
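  // Illustrative only (one case out of many the implementation handles):
  // convertToFlagSettingOpc(AArch64::ADDWri, Is64Bit) would return
  // AArch64::ADDSWri and set Is64Bit to false; the 64-bit X-register forms
  // set Is64Bit to true.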

  /// Return true if this is a load/store that can potentially be
  /// paired/merged.
  bool isCandidateToMergeOrPair(const MachineInstr &MI) const;

  /// Hint that pairing the given load or store is unprofitable.
  static void suppressLdStPair(MachineInstr &MI);

  bool getMemOperandWithOffset(const MachineInstr &MI,
                               const MachineOperand *&BaseOp,
                               int64_t &Offset,
                               const TargetRegisterInfo *TRI) const override;

  bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                    const MachineOperand *&BaseOp,
                                    int64_t &Offset, unsigned &Width,
                                    const TargetRegisterInfo *TRI) const;

  /// Return the immediate offset operand of the load/store \p LdSt.
  MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;

  /// Returns true if opcode \p Opc is a memory operation. If it is, set
  /// \p Scale, \p Width, \p MinOffset, and \p MaxOffset accordingly.
  ///
  /// For unscaled instructions, \p Scale is set to 1.
  static bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
                           int64_t &MinOffset, int64_t &MaxOffset);
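  // Usage sketch (illustrative, not in-tree code). Assuming MinOffset and
  // MaxOffset are reported in units of Scale, the legal byte offsets for an
  // opcode are the multiples of Scale in [MinOffset*Scale, MaxOffset*Scale]:
  //
  //   unsigned Scale, Width;
  //   int64_t MinOff, MaxOff;
  //   bool Fits = getMemOpInfo(Opc, Scale, Width, MinOff, MaxOff) &&
  //               Off % Scale == 0 && Off >= MinOff * (int64_t)Scale &&
  //               Off <= MaxOff * (int64_t)Scale;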

  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
                           const MachineOperand &BaseOp2,
                           unsigned NumLoads) const override;

  void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                        const DebugLoc &DL, MCRegister DestReg,
                        MCRegister SrcReg, bool KillSrc, unsigned Opcode,
                        llvm::ArrayRef<unsigned> Indices) const;
  void copyGPRRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                       DebugLoc DL, unsigned DestReg, unsigned SrcReg,
                       bool KillSrc, unsigned Opcode, unsigned ZeroReg,
                       llvm::ArrayRef<unsigned> Indices) const;
  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                   const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg,
                   bool KillSrc) const override;

  void storeRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, unsigned SrcReg,
                           bool isKill, int FrameIndex,
                           const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) const override;

  void loadRegFromStackSlot(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, unsigned DestReg,
                            int FrameIndex, const TargetRegisterClass *RC,
                            const TargetRegisterInfo *TRI) const override;

  // This tells target independent code that it is okay to pass instructions
  // with subreg operands to foldMemoryOperandImpl.
  bool isSubregFoldable() const override { return true; }

  using TargetInstrInfo::foldMemoryOperandImpl;
  MachineInstr *
  foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
                        ArrayRef<unsigned> Ops,
                        MachineBasicBlock::iterator InsertPt, int FrameIndex,
                        LiveIntervals *LIS = nullptr,
                        VirtRegMap *VRM = nullptr) const override;

  /// \returns true if a branch with opcode \p BranchOpc is capable of
  /// jumping to a position \p BrOffset bytes away.
  bool isBranchOffsetInRange(unsigned BranchOpc,
                             int64_t BrOffset) const override;

  MachineBasicBlock *getBranchDestBlock(const MachineInstr &MI) const override;

  bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                     MachineBasicBlock *&FBB,
                     SmallVectorImpl<MachineOperand> &Cond,
                     bool AllowModify = false) const override;
  unsigned removeBranch(MachineBasicBlock &MBB,
                        int *BytesRemoved = nullptr) const override;
  unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
                        const DebugLoc &DL,
                        int *BytesAdded = nullptr) const override;
  bool
  reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
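  // Sketch of the usual retargeting idiom built on the four hooks above
  // (illustrative only; assumes a conditional branch with both destinations
  // known and elides error handling):
  //
  //   MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  //   SmallVector<MachineOperand, 4> Cond;
  //   if (!TII->analyzeBranch(MBB, TBB, FBB, Cond) && FBB &&
  //       !TII->reverseBranchCondition(Cond)) {
  //     TII->removeBranch(MBB);
  //     TII->insertBranch(MBB, FBB, TBB, Cond, DebugLoc());
  //   }
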
  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
                       unsigned, unsigned, int &, int &, int &) const override;
  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                    const DebugLoc &DL, unsigned DstReg,
                    ArrayRef<MachineOperand> Cond, unsigned TrueReg,
                    unsigned FalseReg) const override;
  void getNoop(MCInst &NopInst) const override;

  bool isSchedulingBoundary(const MachineInstr &MI,
                            const MachineBasicBlock *MBB,
                            const MachineFunction &MF) const override;

  /// analyzeCompare - For a comparison instruction, return the source registers
  /// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
  /// Return true if the comparison instruction can be analyzed.
  bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                      unsigned &SrcReg2, int &CmpMask,
                      int &CmpValue) const override;
  /// optimizeCompareInstr - Convert the instruction supplying the argument to
  /// the comparison into one that sets the zero bit in the flags register.
  bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
                            unsigned SrcReg2, int CmpMask, int CmpValue,
                            const MachineRegisterInfo *MRI) const override;
  bool optimizeCondBranch(MachineInstr &MI) const override;

  /// Return true when a code sequence can improve throughput. It
  /// should be called only for instructions in loops.
  /// \param Pattern - combiner pattern
  bool isThroughputPattern(MachineCombinerPattern Pattern) const override;
  /// Return true when there is potentially a faster code sequence
  /// for an instruction chain ending in ``Root``. All potential patterns are
  /// listed in the ``Patterns`` array.
  bool getMachineCombinerPatterns(
      MachineInstr &Root,
      SmallVectorImpl<MachineCombinerPattern> &Patterns) const override;
  /// Return true when Inst is associative and commutative so that it can be
  /// reassociated.
  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
  /// When getMachineCombinerPatterns() finds patterns, this function generates
  /// the instructions that could replace the original code sequence.
  void genAlternativeCodeSequence(
      MachineInstr &Root, MachineCombinerPattern Pattern,
      SmallVectorImpl<MachineInstr *> &InsInstrs,
      SmallVectorImpl<MachineInstr *> &DelInstrs,
      DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const override;
  /// AArch64 supports MachineCombiner.
  bool useMachineCombiner() const override;
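  // As one concrete illustration: on AArch64 the combiner can rewrite a
  // multiply feeding an add (MUL + ADD) into a single multiply-accumulate
  // (MADD), removing an instruction at the cost of a longer-latency op.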

  bool expandPostRAPseudo(MachineInstr &MI) const override;

  std::pair<unsigned, unsigned>
  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableDirectMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<unsigned, const char *>>
  getSerializableBitmaskMachineOperandTargetFlags() const override;
  ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
  getSerializableMachineMemOperandTargetFlags() const override;

  bool isFunctionSafeToOutlineFrom(MachineFunction &MF,
                                   bool OutlineFromLinkOnceODRs) const override;
  outliner::OutlinedFunction getOutliningCandidateInfo(
      std::vector<outliner::Candidate> &RepeatedSequenceLocs) const override;
  outliner::InstrType
  getOutliningType(MachineBasicBlock::iterator &MIT,
                   unsigned Flags) const override;
  bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                              unsigned &Flags) const override;
  void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF,
                          const outliner::OutlinedFunction &OF) const override;
  MachineBasicBlock::iterator
  insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
                     MachineBasicBlock::iterator &It, MachineFunction &MF,
                     const outliner::Candidate &C) const override;
  bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override;
  /// Returns true if the instruction has a shift by immediate that can be
  /// executed in one fewer cycle.
  static bool isFalkorShiftExtFast(const MachineInstr &MI);
  /// Return true if the instruction is an SEH instruction used for unwinding
  /// on Windows.
  static bool isSEHInstruction(const MachineInstr &MI);

  Optional<RegImmPair> isAddImmediate(const MachineInstr &MI,
                                      Register Reg) const override;

  Optional<ParamLoadedValue> describeLoadedValue(const MachineInstr &MI,
                                                 Register Reg) const override;

#define GET_INSTRINFO_HELPER_DECLS
#include "AArch64GenInstrInfo.inc"

protected:
  /// If the specific machine instruction is an instruction that moves/copies
  /// a value from one register to another, return the destination and source
  /// registers as machine operands.
  Optional<DestSourcePair>
  isCopyInstrImpl(const MachineInstr &MI) const override;

private:
  /// Sets the offsets on outlined instructions in \p MBB which use SP
  /// so that they will be valid post-outlining.
  ///
  /// \param MBB A \p MachineBasicBlock in an outlined function.
  void fixupPostOutline(MachineBasicBlock &MBB) const;

  void instantiateCondBranch(MachineBasicBlock &MBB, const DebugLoc &DL,
                             MachineBasicBlock *TBB,
                             ArrayRef<MachineOperand> Cond) const;
  bool substituteCmpToZero(MachineInstr &CmpInstr, unsigned SrcReg,
                           const MachineRegisterInfo *MRI) const;

  /// Returns an unused general-purpose register which can be used for
  /// constructing an outlined call if one exists. Returns 0 otherwise.
  unsigned findRegisterToSaveLRTo(const outliner::Candidate &C) const;
};

/// emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg
/// plus Offset.  This is intended to be used from within the prolog/epilog
/// insertion (PEI) pass, where a virtual scratch register may be allocated
/// if necessary, to be replaced by the scavenger at the end of PEI.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
                     StackOffset Offset, const TargetInstrInfo *TII,
                     MachineInstr::MIFlag = MachineInstr::NoFlags,
                     bool SetNZCV = false, bool NeedsWinCFI = false,
                     bool *HasWinCFI = nullptr);
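
// A typical call from frame lowering might look like this sketch
// (illustrative only; NumBytes is a hypothetical local variable):
//
//   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
//                   {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup);
//
// i.e. decrement SP by NumBytes during prologue emission, with the emitted
// instructions tagged as FrameSetup.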

/// rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the
/// FP. Return false if the offset could not be handled directly in MI, and
/// return the left-over portion by reference.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                              unsigned FrameReg, StackOffset &Offset,
                              const AArch64InstrInfo *TII);

/// Used to report the frame offset status in isAArch64FrameOffsetLegal.
enum AArch64FrameOffsetStatus {
  AArch64FrameOffsetCannotUpdate = 0x0, ///< Offset cannot apply.
  AArch64FrameOffsetIsLegal = 0x1,      ///< Offset is legal.
  AArch64FrameOffsetCanUpdate = 0x2     ///< Offset can apply, at least partly.
};

/// Check if the @p Offset is a valid frame offset for @p MI.
/// The returned value reports the validity of the frame offset for @p MI.
/// It uses the values defined by AArch64FrameOffsetStatus for that.
/// If result == AArch64FrameOffsetCannotUpdate, @p MI cannot be updated to
/// use an offset.
/// If result & AArch64FrameOffsetIsLegal, @p Offset can completely be
/// rewritten in @p MI.
/// If result & AArch64FrameOffsetCanUpdate, @p Offset contains the
/// amount that exceeds the limit of the legal offset.
/// If set, @p OutUseUnscaledOp will contain whether @p MI should be
/// turned into an unscaled operation, whose opcode is in @p OutUnscaledOp.
/// If set, @p EmittableOffset contains the amount that can be set in @p MI
/// (possibly with @p OutUnscaledOp if OutUseUnscaledOp is true) and that
/// is a legal offset.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset,
                              bool *OutUseUnscaledOp = nullptr,
                              unsigned *OutUnscaledOp = nullptr,
                              int64_t *EmittableOffset = nullptr);
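
// The status values form a bitmask, so callers test the result with bitwise
// AND rather than equality. A sketch (illustrative only):
//
//   bool UseUnscaled;
//   unsigned UnscaledOp;
//   int64_t Emittable;
//   int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaled,
//                                          &UnscaledOp, &Emittable);
//   if (Status & AArch64FrameOffsetCanUpdate) {
//     // Fold the emittable part into MI; Offset now holds the remainder.
//   }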

static inline bool isUncondBranchOpcode(int Opc) { return Opc == AArch64::B; }

static inline bool isCondBranchOpcode(int Opc) {
  switch (Opc) {
  case AArch64::Bcc:
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    return true;
  default:
    return false;
  }
}

static inline bool isIndirectBranchOpcode(int Opc) {
  return Opc == AArch64::BR;
}

// struct TSFlags {
#define TSFLAG_ELEMENT_SIZE_TYPE(X)      (X)       // 3-bits
#define TSFLAG_DESTRUCTIVE_INST_TYPE(X) ((X) << 3) // 1-bit
// }

namespace AArch64 {

enum ElementSizeType {
  ElementSizeMask = TSFLAG_ELEMENT_SIZE_TYPE(0x7),
  ElementSizeNone = TSFLAG_ELEMENT_SIZE_TYPE(0x0),
  ElementSizeB    = TSFLAG_ELEMENT_SIZE_TYPE(0x1),
  ElementSizeH    = TSFLAG_ELEMENT_SIZE_TYPE(0x2),
  ElementSizeS    = TSFLAG_ELEMENT_SIZE_TYPE(0x3),
  ElementSizeD    = TSFLAG_ELEMENT_SIZE_TYPE(0x4),
};

enum DestructiveInstType {
  DestructiveInstTypeMask = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
  NotDestructive          = TSFLAG_DESTRUCTIVE_INST_TYPE(0x0),
  Destructive             = TSFLAG_DESTRUCTIVE_INST_TYPE(0x1),
};

#undef TSFLAG_ELEMENT_SIZE_TYPE
#undef TSFLAG_DESTRUCTIVE_INST_TYPE
} // end namespace AArch64
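
// These bits live in each instruction's TSFlags word, so decoding them is a
// matter of masking. A sketch (illustrative only):
//
//   uint64_t TSFlags = MI.getDesc().TSFlags;
//   auto ElemSize = static_cast<AArch64::ElementSizeType>(
//       TSFlags & AArch64::ElementSizeMask);
//   bool IsDestructive = TSFlags & AArch64::DestructiveInstTypeMask;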

} // end namespace llvm

#endif