1 //===-- ARMExpandPseudoInsts.cpp - Expand pseudo instructions -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that expands pseudo instructions into target
10 // instructions to allow proper scheduling, if-conversion, and other late
11 // optimizations. This pass should be run after register allocation but before
12 // the post-regalloc scheduling pass.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "ARM.h"
17 #include "ARMBaseInstrInfo.h"
18 #include "ARMBaseRegisterInfo.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMSubtarget.h"
22 #include "MCTargetDesc/ARMAddressingModes.h"
23 #include "llvm/CodeGen/LivePhysRegs.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunctionPass.h"
26 #include "llvm/Support/Debug.h"
27
28 using namespace llvm;
29
30 #define DEBUG_TYPE "arm-pseudo"
31
32 static cl::opt<bool>
33 VerifyARMPseudo("verify-arm-pseudo-expand", cl::Hidden,
34 cl::desc("Verify machine code after expanding ARM pseudos"));
35
36 #define ARM_EXPAND_PSEUDO_NAME "ARM pseudo instruction expansion pass"
37
38 namespace {
39 class ARMExpandPseudo : public MachineFunctionPass {
40 public:
41 static char ID;
42 ARMExpandPseudo() : MachineFunctionPass(ID) {}
43
44 const ARMBaseInstrInfo *TII;
45 const TargetRegisterInfo *TRI;
46 const ARMSubtarget *STI;
47 ARMFunctionInfo *AFI;
48
49 bool runOnMachineFunction(MachineFunction &Fn) override;
50
51 MachineFunctionProperties getRequiredProperties() const override {
52 return MachineFunctionProperties().set(
53 MachineFunctionProperties::Property::NoVRegs);
54 }
55
56 StringRef getPassName() const override {
57 return ARM_EXPAND_PSEUDO_NAME;
58 }
59
60 private:
61 void TransferImpOps(MachineInstr &OldMI,
62 MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
63 bool ExpandMI(MachineBasicBlock &MBB,
64 MachineBasicBlock::iterator MBBI,
65 MachineBasicBlock::iterator &NextMBBI);
66 bool ExpandMBB(MachineBasicBlock &MBB);
67 void ExpandVLD(MachineBasicBlock::iterator &MBBI);
68 void ExpandVST(MachineBasicBlock::iterator &MBBI);
69 void ExpandLaneOp(MachineBasicBlock::iterator &MBBI);
70 void ExpandVTBL(MachineBasicBlock::iterator &MBBI,
71 unsigned Opc, bool IsExt);
72 void ExpandMOV32BitImm(MachineBasicBlock &MBB,
73 MachineBasicBlock::iterator &MBBI);
74 void CMSEClearGPRegs(MachineBasicBlock &MBB,
75 MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
76 const SmallVectorImpl<unsigned> &ClearRegs,
77 unsigned ClobberReg);
78 MachineBasicBlock &CMSEClearFPRegs(MachineBasicBlock &MBB,
79 MachineBasicBlock::iterator MBBI);
80 MachineBasicBlock &CMSEClearFPRegsV8(MachineBasicBlock &MBB,
81 MachineBasicBlock::iterator MBBI,
82 const BitVector &ClearRegs);
83 MachineBasicBlock &CMSEClearFPRegsV81(MachineBasicBlock &MBB,
84 MachineBasicBlock::iterator MBBI,
85 const BitVector &ClearRegs);
86 void CMSESaveClearFPRegs(MachineBasicBlock &MBB,
87 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
88 const LivePhysRegs &LiveRegs,
89 SmallVectorImpl<unsigned> &AvailableRegs);
90 void CMSESaveClearFPRegsV8(MachineBasicBlock &MBB,
91 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
92 const LivePhysRegs &LiveRegs,
93 SmallVectorImpl<unsigned> &ScratchRegs);
94 void CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
95 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
96 const LivePhysRegs &LiveRegs);
97 void CMSERestoreFPRegs(MachineBasicBlock &MBB,
98 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
99 SmallVectorImpl<unsigned> &AvailableRegs);
100 void CMSERestoreFPRegsV8(MachineBasicBlock &MBB,
101 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
102 SmallVectorImpl<unsigned> &AvailableRegs);
103 void CMSERestoreFPRegsV81(MachineBasicBlock &MBB,
104 MachineBasicBlock::iterator MBBI, DebugLoc &DL,
105 SmallVectorImpl<unsigned> &AvailableRegs);
106 bool ExpandCMP_SWAP(MachineBasicBlock &MBB,
107 MachineBasicBlock::iterator MBBI, unsigned LdrexOp,
108 unsigned StrexOp, unsigned UxtOp,
109 MachineBasicBlock::iterator &NextMBBI);
110
111 bool ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
112 MachineBasicBlock::iterator MBBI,
113 MachineBasicBlock::iterator &NextMBBI);
114 };
115 char ARMExpandPseudo::ID = 0;
116 }
117
118 INITIALIZE_PASS(ARMExpandPseudo, DEBUG_TYPE, ARM_EXPAND_PSEUDO_NAME, false,
119 false)
120
121 /// TransferImpOps - Transfer implicit operands on the pseudo instruction to
122 /// the instructions created from the expansion.
123 void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
124 MachineInstrBuilder &UseMI,
125 MachineInstrBuilder &DefMI) {
126 const MCInstrDesc &Desc = OldMI.getDesc();
127 for (unsigned i = Desc.getNumOperands(), e = OldMI.getNumOperands();
128 i != e; ++i) {
129 const MachineOperand &MO = OldMI.getOperand(i);
130 assert(MO.isReg() && MO.getReg());
131 if (MO.isUse())
132 UseMI.add(MO);
133 else
134 DefMI.add(MO);
135 }
136 }
137
138 namespace {
139 // Constants for register spacing in NEON load/store instructions.
140 // For quad-register load-lane and store-lane pseudo instructions, the
141 // spacing is initially assumed to be EvenDblSpc, and that is changed to
142 // OddDblSpc depending on the lane number operand.
143 enum NEONRegSpacing {
144 SingleSpc,
145 SingleLowSpc , // Single spacing, low registers, three and four vectors.
146 SingleHighQSpc, // Single spacing, high registers, four vectors.
147 SingleHighTSpc, // Single spacing, high registers, three vectors.
148 EvenDblSpc,
149 OddDblSpc
150 };
151
152 // Entries for NEON load/store information table. The table is sorted by
153 // PseudoOpc for fast binary-search lookups.
154 struct NEONLdStTableEntry {
155 uint16_t PseudoOpc;
156 uint16_t RealOpc;
157 bool IsLoad;
158 bool isUpdating;
159 bool hasWritebackOperand;
160 uint8_t RegSpacing; // One of type NEONRegSpacing
161 uint8_t NumRegs; // D registers loaded or stored
162 uint8_t RegElts; // elements per D register; used for lane ops
163 // FIXME: Temporary flag to denote whether the real instruction takes
164 // a single register (like the encoding) or all of the registers in
165 // the list (like the asm syntax and the isel DAG). When all definitions
166 // are converted to take only the single encoded register, this will
167 // go away.
168 bool copyAllListRegs;
169
170 // Comparison methods for binary search of the table.
171 bool operator<(const NEONLdStTableEntry &TE) const {
172 return PseudoOpc < TE.PseudoOpc;
173 }
174 friend bool operator<(const NEONLdStTableEntry &TE, unsigned PseudoOpc) {
175 return TE.PseudoOpc < PseudoOpc;
176 }
177 friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned PseudoOpc,
178 const NEONLdStTableEntry &TE) {
179 return PseudoOpc < TE.PseudoOpc;
180 }
181 };
182 }
183
184 static const NEONLdStTableEntry NEONLdStTable[] = {
185 { ARM::VLD1LNq16Pseudo, ARM::VLD1LNd16, true, false, false, EvenDblSpc, 1, 4 ,true},
186 { ARM::VLD1LNq16Pseudo_UPD, ARM::VLD1LNd16_UPD, true, true, true, EvenDblSpc, 1, 4 ,true},
187 { ARM::VLD1LNq32Pseudo, ARM::VLD1LNd32, true, false, false, EvenDblSpc, 1, 2 ,true},
188 { ARM::VLD1LNq32Pseudo_UPD, ARM::VLD1LNd32_UPD, true, true, true, EvenDblSpc, 1, 2 ,true},
189 { ARM::VLD1LNq8Pseudo, ARM::VLD1LNd8, true, false, false, EvenDblSpc, 1, 8 ,true},
190 { ARM::VLD1LNq8Pseudo_UPD, ARM::VLD1LNd8_UPD, true, true, true, EvenDblSpc, 1, 8 ,true},
191
192 { ARM::VLD1d16QPseudo, ARM::VLD1d16Q, true, false, false, SingleSpc, 4, 4 ,false},
193 { ARM::VLD1d16TPseudo, ARM::VLD1d16T, true, false, false, SingleSpc, 3, 4 ,false},
194 { ARM::VLD1d32QPseudo, ARM::VLD1d32Q, true, false, false, SingleSpc, 4, 2 ,false},
195 { ARM::VLD1d32TPseudo, ARM::VLD1d32T, true, false, false, SingleSpc, 3, 2 ,false},
196 { ARM::VLD1d64QPseudo, ARM::VLD1d64Q, true, false, false, SingleSpc, 4, 1 ,false},
197 { ARM::VLD1d64QPseudoWB_fixed, ARM::VLD1d64Qwb_fixed, true, true, false, SingleSpc, 4, 1 ,false},
198 { ARM::VLD1d64QPseudoWB_register, ARM::VLD1d64Qwb_register, true, true, true, SingleSpc, 4, 1 ,false},
199 { ARM::VLD1d64TPseudo, ARM::VLD1d64T, true, false, false, SingleSpc, 3, 1 ,false},
200 { ARM::VLD1d64TPseudoWB_fixed, ARM::VLD1d64Twb_fixed, true, true, false, SingleSpc, 3, 1 ,false},
201 { ARM::VLD1d64TPseudoWB_register, ARM::VLD1d64Twb_register, true, true, true, SingleSpc, 3, 1 ,false},
202 { ARM::VLD1d8QPseudo, ARM::VLD1d8Q, true, false, false, SingleSpc, 4, 8 ,false},
203 { ARM::VLD1d8TPseudo, ARM::VLD1d8T, true, false, false, SingleSpc, 3, 8 ,false},
204 { ARM::VLD1q16HighQPseudo, ARM::VLD1d16Q, true, false, false, SingleHighQSpc, 4, 4 ,false},
205 { ARM::VLD1q16HighTPseudo, ARM::VLD1d16T, true, false, false, SingleHighTSpc, 3, 4 ,false},
206 { ARM::VLD1q16LowQPseudo_UPD, ARM::VLD1d16Qwb_fixed, true, true, true, SingleLowSpc, 4, 4 ,false},
207 { ARM::VLD1q16LowTPseudo_UPD, ARM::VLD1d16Twb_fixed, true, true, true, SingleLowSpc, 3, 4 ,false},
208 { ARM::VLD1q32HighQPseudo, ARM::VLD1d32Q, true, false, false, SingleHighQSpc, 4, 2 ,false},
209 { ARM::VLD1q32HighTPseudo, ARM::VLD1d32T, true, false, false, SingleHighTSpc, 3, 2 ,false},
210 { ARM::VLD1q32LowQPseudo_UPD, ARM::VLD1d32Qwb_fixed, true, true, true, SingleLowSpc, 4, 2 ,false},
211 { ARM::VLD1q32LowTPseudo_UPD, ARM::VLD1d32Twb_fixed, true, true, true, SingleLowSpc, 3, 2 ,false},
212 { ARM::VLD1q64HighQPseudo, ARM::VLD1d64Q, true, false, false, SingleHighQSpc, 4, 1 ,false},
213 { ARM::VLD1q64HighTPseudo, ARM::VLD1d64T, true, false, false, SingleHighTSpc, 3, 1 ,false},
214 { ARM::VLD1q64LowQPseudo_UPD, ARM::VLD1d64Qwb_fixed, true, true, true, SingleLowSpc, 4, 1 ,false},
215 { ARM::VLD1q64LowTPseudo_UPD, ARM::VLD1d64Twb_fixed, true, true, true, SingleLowSpc, 3, 1 ,false},
216 { ARM::VLD1q8HighQPseudo, ARM::VLD1d8Q, true, false, false, SingleHighQSpc, 4, 8 ,false},
217 { ARM::VLD1q8HighTPseudo, ARM::VLD1d8T, true, false, false, SingleHighTSpc, 3, 8 ,false},
218 { ARM::VLD1q8LowQPseudo_UPD, ARM::VLD1d8Qwb_fixed, true, true, true, SingleLowSpc, 4, 8 ,false},
219 { ARM::VLD1q8LowTPseudo_UPD, ARM::VLD1d8Twb_fixed, true, true, true, SingleLowSpc, 3, 8 ,false},
220
221 { ARM::VLD2DUPq16EvenPseudo, ARM::VLD2DUPd16x2, true, false, false, EvenDblSpc, 2, 4 ,false},
222 { ARM::VLD2DUPq16OddPseudo, ARM::VLD2DUPd16x2, true, false, false, OddDblSpc, 2, 4 ,false},
223 { ARM::VLD2DUPq32EvenPseudo, ARM::VLD2DUPd32x2, true, false, false, EvenDblSpc, 2, 2 ,false},
224 { ARM::VLD2DUPq32OddPseudo, ARM::VLD2DUPd32x2, true, false, false, OddDblSpc, 2, 2 ,false},
225 { ARM::VLD2DUPq8EvenPseudo, ARM::VLD2DUPd8x2, true, false, false, EvenDblSpc, 2, 8 ,false},
226 { ARM::VLD2DUPq8OddPseudo, ARM::VLD2DUPd8x2, true, false, false, OddDblSpc, 2, 8 ,false},
227
228 { ARM::VLD2LNd16Pseudo, ARM::VLD2LNd16, true, false, false, SingleSpc, 2, 4 ,true},
229 { ARM::VLD2LNd16Pseudo_UPD, ARM::VLD2LNd16_UPD, true, true, true, SingleSpc, 2, 4 ,true},
230 { ARM::VLD2LNd32Pseudo, ARM::VLD2LNd32, true, false, false, SingleSpc, 2, 2 ,true},
231 { ARM::VLD2LNd32Pseudo_UPD, ARM::VLD2LNd32_UPD, true, true, true, SingleSpc, 2, 2 ,true},
232 { ARM::VLD2LNd8Pseudo, ARM::VLD2LNd8, true, false, false, SingleSpc, 2, 8 ,true},
233 { ARM::VLD2LNd8Pseudo_UPD, ARM::VLD2LNd8_UPD, true, true, true, SingleSpc, 2, 8 ,true},
234 { ARM::VLD2LNq16Pseudo, ARM::VLD2LNq16, true, false, false, EvenDblSpc, 2, 4 ,true},
235 { ARM::VLD2LNq16Pseudo_UPD, ARM::VLD2LNq16_UPD, true, true, true, EvenDblSpc, 2, 4 ,true},
236 { ARM::VLD2LNq32Pseudo, ARM::VLD2LNq32, true, false, false, EvenDblSpc, 2, 2 ,true},
237 { ARM::VLD2LNq32Pseudo_UPD, ARM::VLD2LNq32_UPD, true, true, true, EvenDblSpc, 2, 2 ,true},
238
239 { ARM::VLD2q16Pseudo, ARM::VLD2q16, true, false, false, SingleSpc, 4, 4 ,false},
240 { ARM::VLD2q16PseudoWB_fixed, ARM::VLD2q16wb_fixed, true, true, false, SingleSpc, 4, 4 ,false},
241 { ARM::VLD2q16PseudoWB_register, ARM::VLD2q16wb_register, true, true, true, SingleSpc, 4, 4 ,false},
242 { ARM::VLD2q32Pseudo, ARM::VLD2q32, true, false, false, SingleSpc, 4, 2 ,false},
243 { ARM::VLD2q32PseudoWB_fixed, ARM::VLD2q32wb_fixed, true, true, false, SingleSpc, 4, 2 ,false},
244 { ARM::VLD2q32PseudoWB_register, ARM::VLD2q32wb_register, true, true, true, SingleSpc, 4, 2 ,false},
245 { ARM::VLD2q8Pseudo, ARM::VLD2q8, true, false, false, SingleSpc, 4, 8 ,false},
246 { ARM::VLD2q8PseudoWB_fixed, ARM::VLD2q8wb_fixed, true, true, false, SingleSpc, 4, 8 ,false},
247 { ARM::VLD2q8PseudoWB_register, ARM::VLD2q8wb_register, true, true, true, SingleSpc, 4, 8 ,false},
248
249 { ARM::VLD3DUPd16Pseudo, ARM::VLD3DUPd16, true, false, false, SingleSpc, 3, 4,true},
250 { ARM::VLD3DUPd16Pseudo_UPD, ARM::VLD3DUPd16_UPD, true, true, true, SingleSpc, 3, 4,true},
251 { ARM::VLD3DUPd32Pseudo, ARM::VLD3DUPd32, true, false, false, SingleSpc, 3, 2,true},
252 { ARM::VLD3DUPd32Pseudo_UPD, ARM::VLD3DUPd32_UPD, true, true, true, SingleSpc, 3, 2,true},
253 { ARM::VLD3DUPd8Pseudo, ARM::VLD3DUPd8, true, false, false, SingleSpc, 3, 8,true},
254 { ARM::VLD3DUPd8Pseudo_UPD, ARM::VLD3DUPd8_UPD, true, true, true, SingleSpc, 3, 8,true},
255 { ARM::VLD3DUPq16EvenPseudo, ARM::VLD3DUPq16, true, false, false, EvenDblSpc, 3, 4 ,true},
256 { ARM::VLD3DUPq16OddPseudo, ARM::VLD3DUPq16, true, false, false, OddDblSpc, 3, 4 ,true},
257 { ARM::VLD3DUPq32EvenPseudo, ARM::VLD3DUPq32, true, false, false, EvenDblSpc, 3, 2 ,true},
258 { ARM::VLD3DUPq32OddPseudo, ARM::VLD3DUPq32, true, false, false, OddDblSpc, 3, 2 ,true},
259 { ARM::VLD3DUPq8EvenPseudo, ARM::VLD3DUPq8, true, false, false, EvenDblSpc, 3, 8 ,true},
260 { ARM::VLD3DUPq8OddPseudo, ARM::VLD3DUPq8, true, false, false, OddDblSpc, 3, 8 ,true},
261
262 { ARM::VLD3LNd16Pseudo, ARM::VLD3LNd16, true, false, false, SingleSpc, 3, 4 ,true},
263 { ARM::VLD3LNd16Pseudo_UPD, ARM::VLD3LNd16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
264 { ARM::VLD3LNd32Pseudo, ARM::VLD3LNd32, true, false, false, SingleSpc, 3, 2 ,true},
265 { ARM::VLD3LNd32Pseudo_UPD, ARM::VLD3LNd32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
266 { ARM::VLD3LNd8Pseudo, ARM::VLD3LNd8, true, false, false, SingleSpc, 3, 8 ,true},
267 { ARM::VLD3LNd8Pseudo_UPD, ARM::VLD3LNd8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
268 { ARM::VLD3LNq16Pseudo, ARM::VLD3LNq16, true, false, false, EvenDblSpc, 3, 4 ,true},
269 { ARM::VLD3LNq16Pseudo_UPD, ARM::VLD3LNq16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
270 { ARM::VLD3LNq32Pseudo, ARM::VLD3LNq32, true, false, false, EvenDblSpc, 3, 2 ,true},
271 { ARM::VLD3LNq32Pseudo_UPD, ARM::VLD3LNq32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
272
273 { ARM::VLD3d16Pseudo, ARM::VLD3d16, true, false, false, SingleSpc, 3, 4 ,true},
274 { ARM::VLD3d16Pseudo_UPD, ARM::VLD3d16_UPD, true, true, true, SingleSpc, 3, 4 ,true},
275 { ARM::VLD3d32Pseudo, ARM::VLD3d32, true, false, false, SingleSpc, 3, 2 ,true},
276 { ARM::VLD3d32Pseudo_UPD, ARM::VLD3d32_UPD, true, true, true, SingleSpc, 3, 2 ,true},
277 { ARM::VLD3d8Pseudo, ARM::VLD3d8, true, false, false, SingleSpc, 3, 8 ,true},
278 { ARM::VLD3d8Pseudo_UPD, ARM::VLD3d8_UPD, true, true, true, SingleSpc, 3, 8 ,true},
279
280 { ARM::VLD3q16Pseudo_UPD, ARM::VLD3q16_UPD, true, true, true, EvenDblSpc, 3, 4 ,true},
281 { ARM::VLD3q16oddPseudo, ARM::VLD3q16, true, false, false, OddDblSpc, 3, 4 ,true},
282 { ARM::VLD3q16oddPseudo_UPD, ARM::VLD3q16_UPD, true, true, true, OddDblSpc, 3, 4 ,true},
283 { ARM::VLD3q32Pseudo_UPD, ARM::VLD3q32_UPD, true, true, true, EvenDblSpc, 3, 2 ,true},
284 { ARM::VLD3q32oddPseudo, ARM::VLD3q32, true, false, false, OddDblSpc, 3, 2 ,true},
285 { ARM::VLD3q32oddPseudo_UPD, ARM::VLD3q32_UPD, true, true, true, OddDblSpc, 3, 2 ,true},
286 { ARM::VLD3q8Pseudo_UPD, ARM::VLD3q8_UPD, true, true, true, EvenDblSpc, 3, 8 ,true},
287 { ARM::VLD3q8oddPseudo, ARM::VLD3q8, true, false, false, OddDblSpc, 3, 8 ,true},
288 { ARM::VLD3q8oddPseudo_UPD, ARM::VLD3q8_UPD, true, true, true, OddDblSpc, 3, 8 ,true},
289
290 { ARM::VLD4DUPd16Pseudo, ARM::VLD4DUPd16, true, false, false, SingleSpc, 4, 4,true},
291 { ARM::VLD4DUPd16Pseudo_UPD, ARM::VLD4DUPd16_UPD, true, true, true, SingleSpc, 4, 4,true},
292 { ARM::VLD4DUPd32Pseudo, ARM::VLD4DUPd32, true, false, false, SingleSpc, 4, 2,true},
293 { ARM::VLD4DUPd32Pseudo_UPD, ARM::VLD4DUPd32_UPD, true, true, true, SingleSpc, 4, 2,true},
294 { ARM::VLD4DUPd8Pseudo, ARM::VLD4DUPd8, true, false, false, SingleSpc, 4, 8,true},
295 { ARM::VLD4DUPd8Pseudo_UPD, ARM::VLD4DUPd8_UPD, true, true, true, SingleSpc, 4, 8,true},
296 { ARM::VLD4DUPq16EvenPseudo, ARM::VLD4DUPq16, true, false, false, EvenDblSpc, 4, 4 ,true},
297 { ARM::VLD4DUPq16OddPseudo, ARM::VLD4DUPq16, true, false, false, OddDblSpc, 4, 4 ,true},
298 { ARM::VLD4DUPq32EvenPseudo, ARM::VLD4DUPq32, true, false, false, EvenDblSpc, 4, 2 ,true},
299 { ARM::VLD4DUPq32OddPseudo, ARM::VLD4DUPq32, true, false, false, OddDblSpc, 4, 2 ,true},
300 { ARM::VLD4DUPq8EvenPseudo, ARM::VLD4DUPq8, true, false, false, EvenDblSpc, 4, 8 ,true},
301 { ARM::VLD4DUPq8OddPseudo, ARM::VLD4DUPq8, true, false, false, OddDblSpc, 4, 8 ,true},
302
303 { ARM::VLD4LNd16Pseudo, ARM::VLD4LNd16, true, false, false, SingleSpc, 4, 4 ,true},
304 { ARM::VLD4LNd16Pseudo_UPD, ARM::VLD4LNd16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
305 { ARM::VLD4LNd32Pseudo, ARM::VLD4LNd32, true, false, false, SingleSpc, 4, 2 ,true},
306 { ARM::VLD4LNd32Pseudo_UPD, ARM::VLD4LNd32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
307 { ARM::VLD4LNd8Pseudo, ARM::VLD4LNd8, true, false, false, SingleSpc, 4, 8 ,true},
308 { ARM::VLD4LNd8Pseudo_UPD, ARM::VLD4LNd8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
309 { ARM::VLD4LNq16Pseudo, ARM::VLD4LNq16, true, false, false, EvenDblSpc, 4, 4 ,true},
310 { ARM::VLD4LNq16Pseudo_UPD, ARM::VLD4LNq16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
311 { ARM::VLD4LNq32Pseudo, ARM::VLD4LNq32, true, false, false, EvenDblSpc, 4, 2 ,true},
312 { ARM::VLD4LNq32Pseudo_UPD, ARM::VLD4LNq32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
313
314 { ARM::VLD4d16Pseudo, ARM::VLD4d16, true, false, false, SingleSpc, 4, 4 ,true},
315 { ARM::VLD4d16Pseudo_UPD, ARM::VLD4d16_UPD, true, true, true, SingleSpc, 4, 4 ,true},
316 { ARM::VLD4d32Pseudo, ARM::VLD4d32, true, false, false, SingleSpc, 4, 2 ,true},
317 { ARM::VLD4d32Pseudo_UPD, ARM::VLD4d32_UPD, true, true, true, SingleSpc, 4, 2 ,true},
318 { ARM::VLD4d8Pseudo, ARM::VLD4d8, true, false, false, SingleSpc, 4, 8 ,true},
319 { ARM::VLD4d8Pseudo_UPD, ARM::VLD4d8_UPD, true, true, true, SingleSpc, 4, 8 ,true},
320
321 { ARM::VLD4q16Pseudo_UPD, ARM::VLD4q16_UPD, true, true, true, EvenDblSpc, 4, 4 ,true},
322 { ARM::VLD4q16oddPseudo, ARM::VLD4q16, true, false, false, OddDblSpc, 4, 4 ,true},
323 { ARM::VLD4q16oddPseudo_UPD, ARM::VLD4q16_UPD, true, true, true, OddDblSpc, 4, 4 ,true},
324 { ARM::VLD4q32Pseudo_UPD, ARM::VLD4q32_UPD, true, true, true, EvenDblSpc, 4, 2 ,true},
325 { ARM::VLD4q32oddPseudo, ARM::VLD4q32, true, false, false, OddDblSpc, 4, 2 ,true},
326 { ARM::VLD4q32oddPseudo_UPD, ARM::VLD4q32_UPD, true, true, true, OddDblSpc, 4, 2 ,true},
327 { ARM::VLD4q8Pseudo_UPD, ARM::VLD4q8_UPD, true, true, true, EvenDblSpc, 4, 8 ,true},
328 { ARM::VLD4q8oddPseudo, ARM::VLD4q8, true, false, false, OddDblSpc, 4, 8 ,true},
329 { ARM::VLD4q8oddPseudo_UPD, ARM::VLD4q8_UPD, true, true, true, OddDblSpc, 4, 8 ,true},
330
331 { ARM::VST1LNq16Pseudo, ARM::VST1LNd16, false, false, false, EvenDblSpc, 1, 4 ,true},
332 { ARM::VST1LNq16Pseudo_UPD, ARM::VST1LNd16_UPD, false, true, true, EvenDblSpc, 1, 4 ,true},
333 { ARM::VST1LNq32Pseudo, ARM::VST1LNd32, false, false, false, EvenDblSpc, 1, 2 ,true},
334 { ARM::VST1LNq32Pseudo_UPD, ARM::VST1LNd32_UPD, false, true, true, EvenDblSpc, 1, 2 ,true},
335 { ARM::VST1LNq8Pseudo, ARM::VST1LNd8, false, false, false, EvenDblSpc, 1, 8 ,true},
336 { ARM::VST1LNq8Pseudo_UPD, ARM::VST1LNd8_UPD, false, true, true, EvenDblSpc, 1, 8 ,true},
337
338 { ARM::VST1d16QPseudo, ARM::VST1d16Q, false, false, false, SingleSpc, 4, 4 ,false},
339 { ARM::VST1d16TPseudo, ARM::VST1d16T, false, false, false, SingleSpc, 3, 4 ,false},
340 { ARM::VST1d32QPseudo, ARM::VST1d32Q, false, false, false, SingleSpc, 4, 2 ,false},
341 { ARM::VST1d32TPseudo, ARM::VST1d32T, false, false, false, SingleSpc, 3, 2 ,false},
342 { ARM::VST1d64QPseudo, ARM::VST1d64Q, false, false, false, SingleSpc, 4, 1 ,false},
343 { ARM::VST1d64QPseudoWB_fixed, ARM::VST1d64Qwb_fixed, false, true, false, SingleSpc, 4, 1 ,false},
344 { ARM::VST1d64QPseudoWB_register, ARM::VST1d64Qwb_register, false, true, true, SingleSpc, 4, 1 ,false},
345 { ARM::VST1d64TPseudo, ARM::VST1d64T, false, false, false, SingleSpc, 3, 1 ,false},
346 { ARM::VST1d64TPseudoWB_fixed, ARM::VST1d64Twb_fixed, false, true, false, SingleSpc, 3, 1 ,false},
347 { ARM::VST1d64TPseudoWB_register, ARM::VST1d64Twb_register, false, true, true, SingleSpc, 3, 1 ,false},
348 { ARM::VST1d8QPseudo, ARM::VST1d8Q, false, false, false, SingleSpc, 4, 8 ,false},
349 { ARM::VST1d8TPseudo, ARM::VST1d8T, false, false, false, SingleSpc, 3, 8 ,false},
350 { ARM::VST1q16HighQPseudo, ARM::VST1d16Q, false, false, false, SingleHighQSpc, 4, 4 ,false},
351 { ARM::VST1q16HighTPseudo, ARM::VST1d16T, false, false, false, SingleHighTSpc, 3, 4 ,false},
352 { ARM::VST1q16LowQPseudo_UPD, ARM::VST1d16Qwb_fixed, false, true, true, SingleLowSpc, 4, 4 ,false},
353 { ARM::VST1q16LowTPseudo_UPD, ARM::VST1d16Twb_fixed, false, true, true, SingleLowSpc, 3, 4 ,false},
354 { ARM::VST1q32HighQPseudo, ARM::VST1d32Q, false, false, false, SingleHighQSpc, 4, 2 ,false},
355 { ARM::VST1q32HighTPseudo, ARM::VST1d32T, false, false, false, SingleHighTSpc, 3, 2 ,false},
356 { ARM::VST1q32LowQPseudo_UPD, ARM::VST1d32Qwb_fixed, false, true, true, SingleLowSpc, 4, 2 ,false},
357 { ARM::VST1q32LowTPseudo_UPD, ARM::VST1d32Twb_fixed, false, true, true, SingleLowSpc, 3, 2 ,false},
358 { ARM::VST1q64HighQPseudo, ARM::VST1d64Q, false, false, false, SingleHighQSpc, 4, 1 ,false},
359 { ARM::VST1q64HighTPseudo, ARM::VST1d64T, false, false, false, SingleHighTSpc, 3, 1 ,false},
360 { ARM::VST1q64LowQPseudo_UPD, ARM::VST1d64Qwb_fixed, false, true, true, SingleLowSpc, 4, 1 ,false},
361 { ARM::VST1q64LowTPseudo_UPD, ARM::VST1d64Twb_fixed, false, true, true, SingleLowSpc, 3, 1 ,false},
362 { ARM::VST1q8HighQPseudo, ARM::VST1d8Q, false, false, false, SingleHighQSpc, 4, 8 ,false},
363 { ARM::VST1q8HighTPseudo, ARM::VST1d8T, false, false, false, SingleHighTSpc, 3, 8 ,false},
364 { ARM::VST1q8LowQPseudo_UPD, ARM::VST1d8Qwb_fixed, false, true, true, SingleLowSpc, 4, 8 ,false},
365 { ARM::VST1q8LowTPseudo_UPD, ARM::VST1d8Twb_fixed, false, true, true, SingleLowSpc, 3, 8 ,false},
366
367 { ARM::VST2LNd16Pseudo, ARM::VST2LNd16, false, false, false, SingleSpc, 2, 4 ,true},
368 { ARM::VST2LNd16Pseudo_UPD, ARM::VST2LNd16_UPD, false, true, true, SingleSpc, 2, 4 ,true},
369 { ARM::VST2LNd32Pseudo, ARM::VST2LNd32, false, false, false, SingleSpc, 2, 2 ,true},
370 { ARM::VST2LNd32Pseudo_UPD, ARM::VST2LNd32_UPD, false, true, true, SingleSpc, 2, 2 ,true},
371 { ARM::VST2LNd8Pseudo, ARM::VST2LNd8, false, false, false, SingleSpc, 2, 8 ,true},
372 { ARM::VST2LNd8Pseudo_UPD, ARM::VST2LNd8_UPD, false, true, true, SingleSpc, 2, 8 ,true},
373 { ARM::VST2LNq16Pseudo, ARM::VST2LNq16, false, false, false, EvenDblSpc, 2, 4,true},
374 { ARM::VST2LNq16Pseudo_UPD, ARM::VST2LNq16_UPD, false, true, true, EvenDblSpc, 2, 4,true},
375 { ARM::VST2LNq32Pseudo, ARM::VST2LNq32, false, false, false, EvenDblSpc, 2, 2,true},
376 { ARM::VST2LNq32Pseudo_UPD, ARM::VST2LNq32_UPD, false, true, true, EvenDblSpc, 2, 2,true},
377
378 { ARM::VST2q16Pseudo, ARM::VST2q16, false, false, false, SingleSpc, 4, 4 ,false},
379 { ARM::VST2q16PseudoWB_fixed, ARM::VST2q16wb_fixed, false, true, false, SingleSpc, 4, 4 ,false},
380 { ARM::VST2q16PseudoWB_register, ARM::VST2q16wb_register, false, true, true, SingleSpc, 4, 4 ,false},
381 { ARM::VST2q32Pseudo, ARM::VST2q32, false, false, false, SingleSpc, 4, 2 ,false},
382 { ARM::VST2q32PseudoWB_fixed, ARM::VST2q32wb_fixed, false, true, false, SingleSpc, 4, 2 ,false},
383 { ARM::VST2q32PseudoWB_register, ARM::VST2q32wb_register, false, true, true, SingleSpc, 4, 2 ,false},
384 { ARM::VST2q8Pseudo, ARM::VST2q8, false, false, false, SingleSpc, 4, 8 ,false},
385 { ARM::VST2q8PseudoWB_fixed, ARM::VST2q8wb_fixed, false, true, false, SingleSpc, 4, 8 ,false},
386 { ARM::VST2q8PseudoWB_register, ARM::VST2q8wb_register, false, true, true, SingleSpc, 4, 8 ,false},
387
388 { ARM::VST3LNd16Pseudo, ARM::VST3LNd16, false, false, false, SingleSpc, 3, 4 ,true},
389 { ARM::VST3LNd16Pseudo_UPD, ARM::VST3LNd16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
390 { ARM::VST3LNd32Pseudo, ARM::VST3LNd32, false, false, false, SingleSpc, 3, 2 ,true},
391 { ARM::VST3LNd32Pseudo_UPD, ARM::VST3LNd32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
392 { ARM::VST3LNd8Pseudo, ARM::VST3LNd8, false, false, false, SingleSpc, 3, 8 ,true},
393 { ARM::VST3LNd8Pseudo_UPD, ARM::VST3LNd8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
394 { ARM::VST3LNq16Pseudo, ARM::VST3LNq16, false, false, false, EvenDblSpc, 3, 4,true},
395 { ARM::VST3LNq16Pseudo_UPD, ARM::VST3LNq16_UPD, false, true, true, EvenDblSpc, 3, 4,true},
396 { ARM::VST3LNq32Pseudo, ARM::VST3LNq32, false, false, false, EvenDblSpc, 3, 2,true},
397 { ARM::VST3LNq32Pseudo_UPD, ARM::VST3LNq32_UPD, false, true, true, EvenDblSpc, 3, 2,true},
398
399 { ARM::VST3d16Pseudo, ARM::VST3d16, false, false, false, SingleSpc, 3, 4 ,true},
400 { ARM::VST3d16Pseudo_UPD, ARM::VST3d16_UPD, false, true, true, SingleSpc, 3, 4 ,true},
401 { ARM::VST3d32Pseudo, ARM::VST3d32, false, false, false, SingleSpc, 3, 2 ,true},
402 { ARM::VST3d32Pseudo_UPD, ARM::VST3d32_UPD, false, true, true, SingleSpc, 3, 2 ,true},
403 { ARM::VST3d8Pseudo, ARM::VST3d8, false, false, false, SingleSpc, 3, 8 ,true},
404 { ARM::VST3d8Pseudo_UPD, ARM::VST3d8_UPD, false, true, true, SingleSpc, 3, 8 ,true},
405
406 { ARM::VST3q16Pseudo_UPD, ARM::VST3q16_UPD, false, true, true, EvenDblSpc, 3, 4 ,true},
407 { ARM::VST3q16oddPseudo, ARM::VST3q16, false, false, false, OddDblSpc, 3, 4 ,true},
408 { ARM::VST3q16oddPseudo_UPD, ARM::VST3q16_UPD, false, true, true, OddDblSpc, 3, 4 ,true},
409 { ARM::VST3q32Pseudo_UPD, ARM::VST3q32_UPD, false, true, true, EvenDblSpc, 3, 2 ,true},
410 { ARM::VST3q32oddPseudo, ARM::VST3q32, false, false, false, OddDblSpc, 3, 2 ,true},
411 { ARM::VST3q32oddPseudo_UPD, ARM::VST3q32_UPD, false, true, true, OddDblSpc, 3, 2 ,true},
412 { ARM::VST3q8Pseudo_UPD, ARM::VST3q8_UPD, false, true, true, EvenDblSpc, 3, 8 ,true},
413 { ARM::VST3q8oddPseudo, ARM::VST3q8, false, false, false, OddDblSpc, 3, 8 ,true},
414 { ARM::VST3q8oddPseudo_UPD, ARM::VST3q8_UPD, false, true, true, OddDblSpc, 3, 8 ,true},
415
416 { ARM::VST4LNd16Pseudo, ARM::VST4LNd16, false, false, false, SingleSpc, 4, 4 ,true},
417 { ARM::VST4LNd16Pseudo_UPD, ARM::VST4LNd16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
418 { ARM::VST4LNd32Pseudo, ARM::VST4LNd32, false, false, false, SingleSpc, 4, 2 ,true},
419 { ARM::VST4LNd32Pseudo_UPD, ARM::VST4LNd32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
420 { ARM::VST4LNd8Pseudo, ARM::VST4LNd8, false, false, false, SingleSpc, 4, 8 ,true},
421 { ARM::VST4LNd8Pseudo_UPD, ARM::VST4LNd8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
422 { ARM::VST4LNq16Pseudo, ARM::VST4LNq16, false, false, false, EvenDblSpc, 4, 4,true},
423 { ARM::VST4LNq16Pseudo_UPD, ARM::VST4LNq16_UPD, false, true, true, EvenDblSpc, 4, 4,true},
424 { ARM::VST4LNq32Pseudo, ARM::VST4LNq32, false, false, false, EvenDblSpc, 4, 2,true},
425 { ARM::VST4LNq32Pseudo_UPD, ARM::VST4LNq32_UPD, false, true, true, EvenDblSpc, 4, 2,true},
426
427 { ARM::VST4d16Pseudo, ARM::VST4d16, false, false, false, SingleSpc, 4, 4 ,true},
428 { ARM::VST4d16Pseudo_UPD, ARM::VST4d16_UPD, false, true, true, SingleSpc, 4, 4 ,true},
429 { ARM::VST4d32Pseudo, ARM::VST4d32, false, false, false, SingleSpc, 4, 2 ,true},
430 { ARM::VST4d32Pseudo_UPD, ARM::VST4d32_UPD, false, true, true, SingleSpc, 4, 2 ,true},
431 { ARM::VST4d8Pseudo, ARM::VST4d8, false, false, false, SingleSpc, 4, 8 ,true},
432 { ARM::VST4d8Pseudo_UPD, ARM::VST4d8_UPD, false, true, true, SingleSpc, 4, 8 ,true},
433
434 { ARM::VST4q16Pseudo_UPD, ARM::VST4q16_UPD, false, true, true, EvenDblSpc, 4, 4 ,true},
435 { ARM::VST4q16oddPseudo, ARM::VST4q16, false, false, false, OddDblSpc, 4, 4 ,true},
436 { ARM::VST4q16oddPseudo_UPD, ARM::VST4q16_UPD, false, true, true, OddDblSpc, 4, 4 ,true},
437 { ARM::VST4q32Pseudo_UPD, ARM::VST4q32_UPD, false, true, true, EvenDblSpc, 4, 2 ,true},
438 { ARM::VST4q32oddPseudo, ARM::VST4q32, false, false, false, OddDblSpc, 4, 2 ,true},
439 { ARM::VST4q32oddPseudo_UPD, ARM::VST4q32_UPD, false, true, true, OddDblSpc, 4, 2 ,true},
440 { ARM::VST4q8Pseudo_UPD, ARM::VST4q8_UPD, false, true, true, EvenDblSpc, 4, 8 ,true},
441 { ARM::VST4q8oddPseudo, ARM::VST4q8, false, false, false, OddDblSpc, 4, 8 ,true},
442 { ARM::VST4q8oddPseudo_UPD, ARM::VST4q8_UPD, false, true, true, OddDblSpc, 4, 8 ,true}
443 };
444
445 /// LookupNEONLdSt - Search the NEONLdStTable for information about a NEON
446 /// load or store pseudo instruction.
447 static const NEONLdStTableEntry *LookupNEONLdSt(unsigned Opcode) {
448 #ifndef NDEBUG
449 // Make sure the table is sorted.
450 static std::atomic<bool> TableChecked(false);
451 if (!TableChecked.load(std::memory_order_relaxed)) {
452 assert(llvm::is_sorted(NEONLdStTable) && "NEONLdStTable is not sorted!");
453 TableChecked.store(true, std::memory_order_relaxed);
454 }
455 #endif
456
457 auto I = llvm::lower_bound(NEONLdStTable, Opcode);
458 if (I != std::end(NEONLdStTable) && I->PseudoOpc == Opcode)
459 return I;
460 return nullptr;
461 }
462
463 /// GetDSubRegs - Get 4 D subregisters of a Q, QQ, or QQQQ register,
464 /// corresponding to the specified register spacing. Not all of the results
465 /// are necessarily valid, e.g., a Q register only has 2 D subregisters.
466 static void GetDSubRegs(unsigned Reg, NEONRegSpacing RegSpc,
467 const TargetRegisterInfo *TRI, unsigned &D0,
468 unsigned &D1, unsigned &D2, unsigned &D3) {
469 if (RegSpc == SingleSpc || RegSpc == SingleLowSpc) {
470 D0 = TRI->getSubReg(Reg, ARM::dsub_0);
471 D1 = TRI->getSubReg(Reg, ARM::dsub_1);
472 D2 = TRI->getSubReg(Reg, ARM::dsub_2);
473 D3 = TRI->getSubReg(Reg, ARM::dsub_3);
474 } else if (RegSpc == SingleHighQSpc) {
475 D0 = TRI->getSubReg(Reg, ARM::dsub_4);
476 D1 = TRI->getSubReg(Reg, ARM::dsub_5);
477 D2 = TRI->getSubReg(Reg, ARM::dsub_6);
478 D3 = TRI->getSubReg(Reg, ARM::dsub_7);
479 } else if (RegSpc == SingleHighTSpc) {
480 D0 = TRI->getSubReg(Reg, ARM::dsub_3);
481 D1 = TRI->getSubReg(Reg, ARM::dsub_4);
482 D2 = TRI->getSubReg(Reg, ARM::dsub_5);
483 D3 = TRI->getSubReg(Reg, ARM::dsub_6);
484 } else if (RegSpc == EvenDblSpc) {
485 D0 = TRI->getSubReg(Reg, ARM::dsub_0);
486 D1 = TRI->getSubReg(Reg, ARM::dsub_2);
487 D2 = TRI->getSubReg(Reg, ARM::dsub_4);
488 D3 = TRI->getSubReg(Reg, ARM::dsub_6);
489 } else {
490 assert(RegSpc == OddDblSpc && "unknown register spacing");
491 D0 = TRI->getSubReg(Reg, ARM::dsub_1);
492 D1 = TRI->getSubReg(Reg, ARM::dsub_3);
493 D2 = TRI->getSubReg(Reg, ARM::dsub_5);
494 D3 = TRI->getSubReg(Reg, ARM::dsub_7);
495 }
496 }
497
498 /// ExpandVLD - Translate VLD pseudo instructions with Q, QQ or QQQQ register
499 /// operands to real VLD instructions with D register operands.
500 void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
501 MachineInstr &MI = *MBBI;
502 MachineBasicBlock &MBB = *MI.getParent();
503 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
504
505 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
506 assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed");
507 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
508 unsigned NumRegs = TableEntry->NumRegs;
509
510 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
511 TII->get(TableEntry->RealOpc));
512 unsigned OpIdx = 0;
513
514 bool DstIsDead = MI.getOperand(OpIdx).isDead();
515 Register DstReg = MI.getOperand(OpIdx++).getReg();
516 if (TableEntry->RealOpc == ARM::VLD2DUPd8x2 ||
517 TableEntry->RealOpc == ARM::VLD2DUPd16x2 ||
518 TableEntry->RealOpc == ARM::VLD2DUPd32x2) {
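// The VLD2DUPd*x2 instructions write a double-spaced pair of D registers.
// Pick the spaced pair whose first element is the even (dsub_0) or odd
// (dsub_1) D subregister of the pseudo's Q destination.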
519 unsigned SubRegIndex;
520 if (RegSpc == EvenDblSpc) {
521 SubRegIndex = ARM::dsub_0;
522 } else {
523 assert(RegSpc == OddDblSpc && "Unexpected spacing!");
524 SubRegIndex = ARM::dsub_1;
525 }
526 Register SubReg = TRI->getSubReg(DstReg, SubRegIndex);
527 unsigned DstRegPair = TRI->getMatchingSuperReg(SubReg, ARM::dsub_0,
528 &ARM::DPairSpcRegClass);
529 MIB.addReg(DstRegPair, RegState::Define | getDeadRegState(DstIsDead));
530 } else {
531 unsigned D0, D1, D2, D3;
532 GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
533 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
534 if (NumRegs > 1 && TableEntry->copyAllListRegs)
535 MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
536 if (NumRegs > 2 && TableEntry->copyAllListRegs)
537 MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
538 if (NumRegs > 3 && TableEntry->copyAllListRegs)
539 MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
540 }
541
542 if (TableEntry->isUpdating)
543 MIB.add(MI.getOperand(OpIdx++));
544
545 // Copy the addrmode6 operands.
546 MIB.add(MI.getOperand(OpIdx++));
547 MIB.add(MI.getOperand(OpIdx++));
548
549 // Copy the am6offset operand.
550 if (TableEntry->hasWritebackOperand) {
551 // TODO: The writing-back pseudo instructions we translate here are all
552 // defined to take am6offset nodes that are capable of representing both fixed
553 // and register forms. Some real instructions, however, do not rely on
554 // am6offset and have separate definitions for such forms. When this is the
555 // case, fixed forms do not take any offset nodes, so here we skip them for
556 // such instructions. Once all real and pseudo writing-back instructions are
557 // rewritten without use of am6offset nodes, this code will go away.
558 const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
559 if (TableEntry->RealOpc == ARM::VLD1d8Qwb_fixed ||
560 TableEntry->RealOpc == ARM::VLD1d16Qwb_fixed ||
561 TableEntry->RealOpc == ARM::VLD1d32Qwb_fixed ||
562 TableEntry->RealOpc == ARM::VLD1d64Qwb_fixed ||
563 TableEntry->RealOpc == ARM::VLD1d8Twb_fixed ||
564 TableEntry->RealOpc == ARM::VLD1d16Twb_fixed ||
565 TableEntry->RealOpc == ARM::VLD1d32Twb_fixed ||
566 TableEntry->RealOpc == ARM::VLD1d64Twb_fixed) {
567 assert(AM6Offset.getReg() == 0 &&
568 "A fixed writing-back pseudo instruction provides an offset "
569 "register!");
570 } else {
571 MIB.add(AM6Offset);
572 }
573 }
574
575 // For an instruction writing double-spaced subregs, the pseudo instruction
576 // has an extra operand that is a use of the super-register. Record the
577 // operand index and skip over it.
578 unsigned SrcOpIdx = 0;
579 if (TableEntry->RealOpc != ARM::VLD2DUPd8x2 &&
580 TableEntry->RealOpc != ARM::VLD2DUPd16x2 &&
581 TableEntry->RealOpc != ARM::VLD2DUPd32x2) {
582 if (RegSpc == EvenDblSpc || RegSpc == OddDblSpc ||
583 RegSpc == SingleLowSpc || RegSpc == SingleHighQSpc ||
584 RegSpc == SingleHighTSpc)
585 SrcOpIdx = OpIdx++;
586 }
587
588 // Copy the predicate operands.
589 MIB.add(MI.getOperand(OpIdx++));
590 MIB.add(MI.getOperand(OpIdx++));
591
592 // Copy the super-register source operand used for double-spaced subregs over
593 // to the new instruction as an implicit operand.
594 if (SrcOpIdx != 0) {
595 MachineOperand MO = MI.getOperand(SrcOpIdx);
596 MO.setImplicit(true);
597 MIB.add(MO);
598 }
599 // Add an implicit def for the super-register.
600 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
601 TransferImpOps(MI, MIB, MIB);
602
603 // Transfer memoperands.
604 MIB.cloneMemRefs(MI);
605 MI.eraseFromParent();
606 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
607 }
608
609 /// ExpandVST - Translate VST pseudo instructions with Q, QQ or QQQQ register
610 /// operands to real VST instructions with D register operands.
611 void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
612 MachineInstr &MI = *MBBI;
613 MachineBasicBlock &MBB = *MI.getParent();
614 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
615
616 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
617 assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed");
618 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
619 unsigned NumRegs = TableEntry->NumRegs;
620
621 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
622 TII->get(TableEntry->RealOpc));
623 unsigned OpIdx = 0;
624 if (TableEntry->isUpdating)
625 MIB.add(MI.getOperand(OpIdx++));
626
627 // Copy the addrmode6 operands.
628 MIB.add(MI.getOperand(OpIdx++));
629 MIB.add(MI.getOperand(OpIdx++));
630
631 if (TableEntry->hasWritebackOperand) {
632 // TODO: The writing-back pseudo instructions we translate here are all
633 // defined to take am6offset nodes that are capable of representing both fixed
634 // and register forms. Some real instructions, however, do not rely on
635 // am6offset and have separate definitions for such forms. When this is the
636 // case, fixed forms do not take any offset nodes, so here we skip them for
637 // such instructions. Once all real and pseudo writing-back instructions are
638 // rewritten without use of am6offset nodes, this code will go away.
639 const MachineOperand &AM6Offset = MI.getOperand(OpIdx++);
640 if (TableEntry->RealOpc == ARM::VST1d8Qwb_fixed ||
641 TableEntry->RealOpc == ARM::VST1d16Qwb_fixed ||
642 TableEntry->RealOpc == ARM::VST1d32Qwb_fixed ||
643 TableEntry->RealOpc == ARM::VST1d64Qwb_fixed ||
644 TableEntry->RealOpc == ARM::VST1d8Twb_fixed ||
645 TableEntry->RealOpc == ARM::VST1d16Twb_fixed ||
646 TableEntry->RealOpc == ARM::VST1d32Twb_fixed ||
647 TableEntry->RealOpc == ARM::VST1d64Twb_fixed) {
648 assert(AM6Offset.getReg() == 0 &&
649 "A fixed writing-back pseudo instruction provides an offset "
650 "register!");
651 } else {
652 MIB.add(AM6Offset);
653 }
654 }
655
656 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
657 bool SrcIsUndef = MI.getOperand(OpIdx).isUndef();
658 Register SrcReg = MI.getOperand(OpIdx++).getReg();
659 unsigned D0, D1, D2, D3;
660 GetDSubRegs(SrcReg, RegSpc, TRI, D0, D1, D2, D3);
661 MIB.addReg(D0, getUndefRegState(SrcIsUndef));
662 if (NumRegs > 1 && TableEntry->copyAllListRegs)
663 MIB.addReg(D1, getUndefRegState(SrcIsUndef));
664 if (NumRegs > 2 && TableEntry->copyAllListRegs)
665 MIB.addReg(D2, getUndefRegState(SrcIsUndef));
666 if (NumRegs > 3 && TableEntry->copyAllListRegs)
667 MIB.addReg(D3, getUndefRegState(SrcIsUndef));
668
669 // Copy the predicate operands.
670 MIB.add(MI.getOperand(OpIdx++));
671 MIB.add(MI.getOperand(OpIdx++));
672
673 if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
674 MIB->addRegisterKilled(SrcReg, TRI, true);
675 else if (!SrcIsUndef)
676 MIB.addReg(SrcReg, RegState::Implicit); // Add implicit uses for src reg.
677 TransferImpOps(MI, MIB, MIB);
678
679 // Transfer memoperands.
680 MIB.cloneMemRefs(MI);
681 MI.eraseFromParent();
682 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
683 }
684
685 /// ExpandLaneOp - Translate VLD*LN and VST*LN instructions with Q, QQ or QQQQ
686 /// register operands to real instructions with D register operands.
687 void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
688 MachineInstr &MI = *MBBI;
689 MachineBasicBlock &MBB = *MI.getParent();
690 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
691
692 const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode());
693 assert(TableEntry && "NEONLdStTable lookup failed");
694 NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing;
695 unsigned NumRegs = TableEntry->NumRegs;
696 unsigned RegElts = TableEntry->RegElts;
697
698 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
699 TII->get(TableEntry->RealOpc));
700 unsigned OpIdx = 0;
701 // The lane operand is always the 3rd from last operand, before the 2
702 // predicate operands.
703 unsigned Lane = MI.getOperand(MI.getDesc().getNumOperands() - 3).getImm();
704
705 // Adjust the lane and spacing as needed for Q registers.
706 assert(RegSpc != OddDblSpc && "unexpected register spacing for VLD/VST-lane");
707 if (RegSpc == EvenDblSpc && Lane >= RegElts) {
708 RegSpc = OddDblSpc;
709 Lane -= RegElts;
710 }
711 assert(Lane < RegElts && "out of range lane for VLD/VST-lane");
712
713 unsigned D0 = 0, D1 = 0, D2 = 0, D3 = 0;
714 unsigned DstReg = 0;
715 bool DstIsDead = false;
716 if (TableEntry->IsLoad) {
717 DstIsDead = MI.getOperand(OpIdx).isDead();
718 DstReg = MI.getOperand(OpIdx++).getReg();
719 GetDSubRegs(DstReg, RegSpc, TRI, D0, D1, D2, D3);
720 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead));
721 if (NumRegs > 1)
722 MIB.addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
723 if (NumRegs > 2)
724 MIB.addReg(D2, RegState::Define | getDeadRegState(DstIsDead));
725 if (NumRegs > 3)
726 MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
727 }
728
729 if (TableEntry->isUpdating)
730 MIB.add(MI.getOperand(OpIdx++));
731
732 // Copy the addrmode6 operands.
733 MIB.add(MI.getOperand(OpIdx++));
734 MIB.add(MI.getOperand(OpIdx++));
735 // Copy the am6offset operand.
736 if (TableEntry->hasWritebackOperand)
737 MIB.add(MI.getOperand(OpIdx++));
738
739 // Grab the super-register source.
740 MachineOperand MO = MI.getOperand(OpIdx++);
741 if (!TableEntry->IsLoad)
742 GetDSubRegs(MO.getReg(), RegSpc, TRI, D0, D1, D2, D3);
743
744 // Add the subregs as sources of the new instruction.
745 unsigned SrcFlags = (getUndefRegState(MO.isUndef()) |
746 getKillRegState(MO.isKill()));
747 MIB.addReg(D0, SrcFlags);
748 if (NumRegs > 1)
749 MIB.addReg(D1, SrcFlags);
750 if (NumRegs > 2)
751 MIB.addReg(D2, SrcFlags);
752 if (NumRegs > 3)
753 MIB.addReg(D3, SrcFlags);
754
755 // Add the lane number operand.
756 MIB.addImm(Lane);
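// Skip the pseudo's original lane operand; the (possibly adjusted) lane
// value has already been added above.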
757 OpIdx += 1;
758
759 // Copy the predicate operands.
760 MIB.add(MI.getOperand(OpIdx++));
761 MIB.add(MI.getOperand(OpIdx++));
762
763 // Copy the super-register source to be an implicit source.
764 MO.setImplicit(true);
765 MIB.add(MO);
766 if (TableEntry->IsLoad)
767 // Add an implicit def for the super-register.
768 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
769 TransferImpOps(MI, MIB, MIB);
770 // Transfer memoperands.
771 MIB.cloneMemRefs(MI);
772 MI.eraseFromParent();
773 }
774
775 /// ExpandVTBL - Translate VTBL and VTBX pseudo instructions with Q or QQ
776 /// register operands to real instructions with D register operands.
777 void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
778 unsigned Opc, bool IsExt) {
779 MachineInstr &MI = *MBBI;
780 MachineBasicBlock &MBB = *MI.getParent();
781 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
782
783 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc));
784 unsigned OpIdx = 0;
785
786 // Transfer the destination register operand.
787 MIB.add(MI.getOperand(OpIdx++));
788 if (IsExt) {
789 MachineOperand VdSrc(MI.getOperand(OpIdx++));
790 MIB.add(VdSrc);
791 }
792
793 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
794 Register SrcReg = MI.getOperand(OpIdx++).getReg();
795 unsigned D0, D1, D2, D3;
796 GetDSubRegs(SrcReg, SingleSpc, TRI, D0, D1, D2, D3);
797 MIB.addReg(D0);
798
799 // Copy the other source register operand.
800 MachineOperand VmSrc(MI.getOperand(OpIdx++));
801 MIB.add(VmSrc);
802
803 // Copy the predicate operands.
804 MIB.add(MI.getOperand(OpIdx++));
805 MIB.add(MI.getOperand(OpIdx++));
806
807 // Add an implicit kill and use for the super-reg.
808 MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill));
809 TransferImpOps(MI, MIB, MIB);
810 MI.eraseFromParent();
811 LLVM_DEBUG(dbgs() << "To: "; MIB.getInstr()->dump(););
812 }
813
814 static bool IsAnAddressOperand(const MachineOperand &MO) {
815 // This check is overly conservative. Unless we are certain that the machine
816 // operand is not a symbol reference, we return that it is a symbol reference.
817 // This is important as the load pair may not be split up on Windows.
818 switch (MO.getType()) {
819 case MachineOperand::MO_Register:
820 case MachineOperand::MO_Immediate:
821 case MachineOperand::MO_CImmediate:
822 case MachineOperand::MO_FPImmediate:
823 case MachineOperand::MO_ShuffleMask:
824 return false;
825 case MachineOperand::MO_MachineBasicBlock:
826 return true;
827 case MachineOperand::MO_FrameIndex:
828 return false;
829 case MachineOperand::MO_ConstantPoolIndex:
830 case MachineOperand::MO_TargetIndex:
831 case MachineOperand::MO_JumpTableIndex:
832 case MachineOperand::MO_ExternalSymbol:
833 case MachineOperand::MO_GlobalAddress:
834 case MachineOperand::MO_BlockAddress:
835 return true;
836 case MachineOperand::MO_RegisterMask:
837 case MachineOperand::MO_RegisterLiveOut:
838 return false;
839 case MachineOperand::MO_Metadata:
840 case MachineOperand::MO_MCSymbol:
841 return true;
842 case MachineOperand::MO_CFIIndex:
843 return false;
844 case MachineOperand::MO_IntrinsicID:
845 case MachineOperand::MO_Predicate:
846 llvm_unreachable("should not exist post-isel");
847 }
848 llvm_unreachable("unhandled machine operand type");
849 }
850
851 static MachineOperand makeImplicit(const MachineOperand &MO) {
852 MachineOperand NewMO = MO;
853 NewMO.setImplicit();
854 return NewMO;
855 }
856
857 void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
858 MachineBasicBlock::iterator &MBBI) {
859 MachineInstr &MI = *MBBI;
860 unsigned Opcode = MI.getOpcode();
861 Register PredReg;
862 ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
863 Register DstReg = MI.getOperand(0).getReg();
864 bool DstIsDead = MI.getOperand(0).isDead();
865 bool isCC = Opcode == ARM::MOVCCi32imm || Opcode == ARM::t2MOVCCi32imm;
866 const MachineOperand &MO = MI.getOperand(isCC ? 2 : 1);
867 bool RequiresBundling = STI->isTargetWindows() && IsAnAddressOperand(MO);
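// On Windows, the MOVW/MOVT pair that materializes a symbolic address is kept
// together by bundling it (see the finalizeBundle call below), so later passes
// cannot separate the two halves.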
868 MachineInstrBuilder LO16, HI16;
869 LLVM_DEBUG(dbgs() << "Expanding: "; MI.dump());
870
871 if (!STI->hasV6T2Ops() &&
872 (Opcode == ARM::MOVi32imm || Opcode == ARM::MOVCCi32imm)) {
873 // FIXME Windows CE supports older ARM CPUs
874 assert(!STI->isTargetWindows() && "Windows on ARM requires ARMv7+");
875
876 assert (MO.isImm() && "MOVi32imm w/ non-immediate source operand!");
877 unsigned ImmVal = (unsigned)MO.getImm();
878 unsigned SOImmValV1 = 0, SOImmValV2 = 0;
879
880 if (ARM_AM::isSOImmTwoPartVal(ImmVal)) { // Expand into a movi + orr.
881 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVi), DstReg);
882 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::ORRri))
883 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
884 .addReg(DstReg);
885 SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(ImmVal);
886 SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
887 } else { // Expand into a mvn + sub.
888 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MVNi), DstReg);
889 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri))
890 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
891 .addReg(DstReg);
892 SOImmValV1 = ARM_AM::getSOImmTwoPartFirst(-ImmVal);
893 SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(-ImmVal);
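// MVN materializes the bitwise complement of its immediate, so hand it the
// complement of the first part of -ImmVal; the following SUB of the second
// part then leaves the original ImmVal in DstReg.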
894 SOImmValV1 = ~(-SOImmValV1);
895 }
896
897 unsigned MIFlags = MI.getFlags();
898 LO16 = LO16.addImm(SOImmValV1);
899 HI16 = HI16.addImm(SOImmValV2);
900 LO16.cloneMemRefs(MI);
901 HI16.cloneMemRefs(MI);
902 LO16.setMIFlags(MIFlags);
903 HI16.setMIFlags(MIFlags);
904 LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
905 HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
906 if (isCC)
907 LO16.add(makeImplicit(MI.getOperand(1)));
908 TransferImpOps(MI, LO16, HI16);
909 MI.eraseFromParent();
910 return;
911 }
912
913 unsigned LO16Opc = 0;
914 unsigned HI16Opc = 0;
915 unsigned MIFlags = MI.getFlags();
916 if (Opcode == ARM::t2MOVi32imm || Opcode == ARM::t2MOVCCi32imm) {
917 LO16Opc = ARM::t2MOVi16;
918 HI16Opc = ARM::t2MOVTi16;
919 } else {
920 LO16Opc = ARM::MOVi16;
921 HI16Opc = ARM::MOVTi16;
922 }
923
924 LO16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LO16Opc), DstReg);
925 HI16 = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc))
926 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
927 .addReg(DstReg);
928
929 LO16.setMIFlags(MIFlags);
930 HI16.setMIFlags(MIFlags);
931
932 switch (MO.getType()) {
933 case MachineOperand::MO_Immediate: {
934 unsigned Imm = MO.getImm();
935 unsigned Lo16 = Imm & 0xffff;
936 unsigned Hi16 = (Imm >> 16) & 0xffff;
937 LO16 = LO16.addImm(Lo16);
938 HI16 = HI16.addImm(Hi16);
939 break;
940 }
941 case MachineOperand::MO_ExternalSymbol: {
942 const char *ES = MO.getSymbolName();
943 unsigned TF = MO.getTargetFlags();
944 LO16 = LO16.addExternalSymbol(ES, TF | ARMII::MO_LO16);
945 HI16 = HI16.addExternalSymbol(ES, TF | ARMII::MO_HI16);
946 break;
947 }
948 default: {
949 const GlobalValue *GV = MO.getGlobal();
950 unsigned TF = MO.getTargetFlags();
951 LO16 = LO16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_LO16);
952 HI16 = HI16.addGlobalAddress(GV, MO.getOffset(), TF | ARMII::MO_HI16);
953 break;
954 }
955 }
956
957 LO16.cloneMemRefs(MI);
958 HI16.cloneMemRefs(MI);
959 LO16.addImm(Pred).addReg(PredReg);
960 HI16.addImm(Pred).addReg(PredReg);
961
962 if (RequiresBundling)
963 finalizeBundle(MBB, LO16->getIterator(), MBBI->getIterator());
964
965 if (isCC)
966 LO16.add(makeImplicit(MI.getOperand(1)));
967 TransferImpOps(MI, LO16, HI16);
968 MI.eraseFromParent();
969 LLVM_DEBUG(dbgs() << "To: "; LO16.getInstr()->dump(););
970 LLVM_DEBUG(dbgs() << "And: "; HI16.getInstr()->dump(););
971 }
972
973 // The size of the area accessed by the VLSTM/VLLDM instructions:
974 // S0-S31 + FPSCR + 8 more bytes (VPR + pad, or just pad)
975 static const int CMSE_FP_SAVE_SIZE = 136;
976
977 static void determineGPRegsToClear(const MachineInstr &MI,
978 const std::initializer_list<unsigned> &Regs,
979 SmallVectorImpl<unsigned> &ClearRegs) {
980 SmallVector<unsigned, 4> OpRegs;
981 for (const MachineOperand &Op : MI.operands()) {
982 if (!Op.isReg() || !Op.isUse())
983 continue;
984 OpRegs.push_back(Op.getReg());
985 }
986 llvm::sort(OpRegs);
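// Registers in Regs that MI does not use carry no needed value and are the
// ones that must be cleared.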
987
988 std::set_difference(Regs.begin(), Regs.end(), OpRegs.begin(), OpRegs.end(),
989 std::back_inserter(ClearRegs));
990 }
991
992 void ARMExpandPseudo::CMSEClearGPRegs(
993 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
994 const DebugLoc &DL, const SmallVectorImpl<unsigned> &ClearRegs,
995 unsigned ClobberReg) {
996
997 if (STI->hasV8_1MMainlineOps()) {
998 // Clear the registers using the CLRM instruction.
999 MachineInstrBuilder CLRM =
1000 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2CLRM)).add(predOps(ARMCC::AL));
1001 for (unsigned R : ClearRegs)
1002 CLRM.addReg(R, RegState::Define);
1003 CLRM.addReg(ARM::APSR, RegState::Define);
1004 CLRM.addReg(ARM::CPSR, RegState::Define | RegState::Implicit);
1005 } else {
1006 // Clear the registers and flags by copying ClobberReg into them.
1007 // (Baseline can't do a high register clear in one instruction).
1008 for (unsigned Reg : ClearRegs) {
1009 if (Reg == ClobberReg)
1010 continue;
1011 BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVr), Reg)
1012 .addReg(ClobberReg)
1013 .add(predOps(ARMCC::AL));
1014 }
1015
1016 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2MSR_M))
1017 .addImm(STI->hasDSP() ? 0xc00 : 0x800)
1018 .addReg(ClobberReg)
1019 .add(predOps(ARMCC::AL));
1020 }
1021 }
1022
1023 // Find which FP registers need to be cleared. The parameter `ClearRegs` is
1024 // initialised with all elements set to true, and this function resets the
1025 // bits that correspond to register uses. Returns true if any floating point
1026 // register is defined, false otherwise.
1027 static bool determineFPRegsToClear(const MachineInstr &MI,
1028 BitVector &ClearRegs) {
1029 bool DefFP = false;
1030 for (const MachineOperand &Op : MI.operands()) {
1031 if (!Op.isReg())
1032 continue;
1033
1034 unsigned Reg = Op.getReg();
1035 if (Op.isDef()) {
1036 if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1037 (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1038 (Reg >= ARM::S0 && Reg <= ARM::S31))
1039 DefFP = true;
1040 continue;
1041 }
1042
1043 if (Reg >= ARM::Q0 && Reg <= ARM::Q7) {
1044 int R = Reg - ARM::Q0;
1045 ClearRegs.reset(R * 4, (R + 1) * 4);
1046 } else if (Reg >= ARM::D0 && Reg <= ARM::D15) {
1047 int R = Reg - ARM::D0;
1048 ClearRegs.reset(R * 2, (R + 1) * 2);
1049 } else if (Reg >= ARM::S0 && Reg <= ARM::S31) {
1050 ClearRegs[Reg - ARM::S0] = false;
1051 }
1052 }
1053 return DefFP;
1054 }
1055
1056 MachineBasicBlock &
1057 ARMExpandPseudo::CMSEClearFPRegs(MachineBasicBlock &MBB,
1058 MachineBasicBlock::iterator MBBI) {
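// The 16 bits track S0-S15. determineFPRegsToClear resets the bits for
// registers that the return instruction uses, so those registers are left
// untouched by the clearing sequence.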
1059 BitVector ClearRegs(16, true);
1060 (void)determineFPRegsToClear(*MBBI, ClearRegs);
1061
1062 if (STI->hasV8_1MMainlineOps())
1063 return CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1064 else
1065 return CMSEClearFPRegsV8(MBB, MBBI, ClearRegs);
1066 }
1067
1068 // Clear the FP registers for v8.0-M, by copying over the content
1069 // of LR. Uses R12 as a scratch register.
1070 MachineBasicBlock &
1071 ARMExpandPseudo::CMSEClearFPRegsV8(MachineBasicBlock &MBB,
1072 MachineBasicBlock::iterator MBBI,
1073 const BitVector &ClearRegs) {
1074 if (!STI->hasFPRegs())
1075 return MBB;
1076
1077 auto &RetI = *MBBI;
1078 const DebugLoc &DL = RetI.getDebugLoc();
1079
1080 // If optimising for minimum size, clear FP registers unconditionally.
1081 // Otherwise, check the CONTROL.SFPA (Secure Floating-Point Active) bit and
1082 // don't clear them if they belong to the non-secure state.
1083 MachineBasicBlock *ClearBB, *DoneBB;
1084 if (STI->hasMinSize()) {
1085 ClearBB = DoneBB = &MBB;
1086 } else {
1087 MachineFunction *MF = MBB.getParent();
1088 ClearBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1089 DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1090
1091 MF->insert(++MBB.getIterator(), ClearBB);
1092 MF->insert(++ClearBB->getIterator(), DoneBB);
1093
1094 DoneBB->splice(DoneBB->end(), &MBB, MBBI, MBB.end());
1095 DoneBB->transferSuccessors(&MBB);
1096 MBB.addSuccessor(ClearBB);
1097 MBB.addSuccessor(DoneBB);
1098 ClearBB->addSuccessor(DoneBB);
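// The resulting CFG is MBB -> ClearBB -> DoneBB, with a conditional branch
// from MBB straight to DoneBB when the FP registers belong to the non-secure
// state and must not be cleared.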
1099
1100 // The new basic blocks need, as live-ins, the registers used for the
1101 // return value, as well as LR, which is used to clear registers.
1102 for (const MachineOperand &Op : RetI.operands()) {
1103 if (!Op.isReg())
1104 continue;
1105 Register Reg = Op.getReg();
1106 if (Reg == ARM::NoRegister || Reg == ARM::LR)
1107 continue;
1108 assert(Register::isPhysicalRegister(Reg) && "Unallocated register");
1109 ClearBB->addLiveIn(Reg);
1110 DoneBB->addLiveIn(Reg);
1111 }
1112 ClearBB->addLiveIn(ARM::LR);
1113 DoneBB->addLiveIn(ARM::LR);
1114
1115 // Read the CONTROL register.
1116 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2MRS_M), ARM::R12)
1117 .addImm(20)
1118 .add(predOps(ARMCC::AL));
1119 // Check bit 3 (SFPA).
1120 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::t2TSTri))
1121 .addReg(ARM::R12)
1122 .addImm(8)
1123 .add(predOps(ARMCC::AL));
1124 // If SFPA is clear, jump over ClearBB to DoneBB.
1125 BuildMI(MBB, MBB.end(), DL, TII->get(ARM::tBcc))
1126 .addMBB(DoneBB)
1127 .addImm(ARMCC::EQ)
1128 .addReg(ARM::CPSR, RegState::Kill);
1129 }
1130
1131 // Emit the clearing sequence
1132 for (unsigned D = 0; D < 8; D++) {
1133 // Attempt to clear as double
1134 if (ClearRegs[D * 2 + 0] && ClearRegs[D * 2 + 1]) {
1135 unsigned Reg = ARM::D0 + D;
1136 BuildMI(ClearBB, DL, TII->get(ARM::VMOVDRR), Reg)
1137 .addReg(ARM::LR)
1138 .addReg(ARM::LR)
1139 .add(predOps(ARMCC::AL));
1140 } else {
1141 // Clear first part as single
1142 if (ClearRegs[D * 2 + 0]) {
1143 unsigned Reg = ARM::S0 + D * 2;
1144 BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1145 .addReg(ARM::LR)
1146 .add(predOps(ARMCC::AL));
1147 }
1148 // Clear second part as single
1149 if (ClearRegs[D * 2 + 1]) {
1150 unsigned Reg = ARM::S0 + D * 2 + 1;
1151 BuildMI(ClearBB, DL, TII->get(ARM::VMOVSR), Reg)
1152 .addReg(ARM::LR)
1153 .add(predOps(ARMCC::AL));
1154 }
1155 }
1156 }
1157
1158 // Clear FPSCR bits 0-4, 7, 28-31
1159 // The other bits are program global according to the AAPCS
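// (0x0000009F masks the cumulative exception flags IOC, DZC, OFC, UFC, IXC in
// bits 0-4 and IDC in bit 7; 0xF0000000 masks the N, Z, C, V flags in bits
// 28-31.)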
1160 BuildMI(ClearBB, DL, TII->get(ARM::VMRS), ARM::R12)
1161 .add(predOps(ARMCC::AL));
1162 BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1163 .addReg(ARM::R12)
1164 .addImm(0x0000009F)
1165 .add(predOps(ARMCC::AL))
1166 .add(condCodeOp());
1167 BuildMI(ClearBB, DL, TII->get(ARM::t2BICri), ARM::R12)
1168 .addReg(ARM::R12)
1169 .addImm(0xF0000000)
1170 .add(predOps(ARMCC::AL))
1171 .add(condCodeOp());
1172 BuildMI(ClearBB, DL, TII->get(ARM::VMSR))
1173 .addReg(ARM::R12)
1174 .add(predOps(ARMCC::AL));
1175
1176 return *DoneBB;
1177 }
1178
1179 MachineBasicBlock &
1180 ARMExpandPseudo::CMSEClearFPRegsV81(MachineBasicBlock &MBB,
1181 MachineBasicBlock::iterator MBBI,
1182 const BitVector &ClearRegs) {
1183 auto &RetI = *MBBI;
1184
1185 // Emit a sequence of VSCCLRM <sreglist> instructions, one instruction for
1186 // each contiguous sequence of S-registers.
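// For example, with ClearRegs covering s0-s3 and s6 this would emit:
//   vscclrm {s0-s3, vpr}
//   vscclrm {s6, vpr}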
1187 int Start = -1, End = -1;
1188 for (int S = 0, E = ClearRegs.size(); S != E; ++S) {
1189 if (ClearRegs[S] && S == End + 1) {
1190 End = S; // extend range
1191 continue;
1192 }
1193 // Emit current range.
1194 if (Start < End) {
1195 MachineInstrBuilder VSCCLRM =
1196 BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1197 .add(predOps(ARMCC::AL));
1198 while (++Start <= End)
1199 VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1200 VSCCLRM.addReg(ARM::VPR, RegState::Define);
1201 }
1202 Start = End = S;
1203 }
1204 // Emit last range.
1205 if (Start < End) {
1206 MachineInstrBuilder VSCCLRM =
1207 BuildMI(MBB, MBBI, RetI.getDebugLoc(), TII->get(ARM::VSCCLRMS))
1208 .add(predOps(ARMCC::AL));
1209 while (++Start <= End)
1210 VSCCLRM.addReg(ARM::S0 + Start, RegState::Define);
1211 VSCCLRM.addReg(ARM::VPR, RegState::Define);
1212 }
1213
1214 return MBB;
1215 }
1216
1217 void ARMExpandPseudo::CMSESaveClearFPRegs(
1218 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1219 const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1220 if (STI->hasV8_1MMainlineOps())
1221 CMSESaveClearFPRegsV81(MBB, MBBI, DL, LiveRegs);
1222 else
1223 CMSESaveClearFPRegsV8(MBB, MBBI, DL, LiveRegs, ScratchRegs);
1224 }
1225
1226 // Save and clear FP registers if present
1227 void ARMExpandPseudo::CMSESaveClearFPRegsV8(
1228 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1229 const LivePhysRegs &LiveRegs, SmallVectorImpl<unsigned> &ScratchRegs) {
1230 if (!STI->hasFPRegs())
1231 return;
1232
1233 // Keep one scratch register available for clearing FPSCR.
1234 assert(!ScratchRegs.empty());
1235 unsigned SpareReg = ScratchRegs.front();
1236
1237 // save space on stack for VLSTM
1238 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1239 .addReg(ARM::SP)
1240 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1241 .add(predOps(ARMCC::AL));
1242
1243 // Use ScratchRegs to store the fp regs
1244 std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1245 std::vector<unsigned> NonclearedFPRegs;
1246 for (const MachineOperand &Op : MBBI->operands()) {
1247 if (Op.isReg() && Op.isUse()) {
1248 unsigned Reg = Op.getReg();
1249 assert(!ARM::DPRRegClass.contains(Reg) ||
1250 ARM::DPR_VFP2RegClass.contains(Reg));
1251 assert(!ARM::QPRRegClass.contains(Reg));
1252 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1253 if (ScratchRegs.size() >= 2) {
1254 unsigned SaveReg2 = ScratchRegs.pop_back_val();
1255 unsigned SaveReg1 = ScratchRegs.pop_back_val();
1256 ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1257
1258 // Save the fp register to the normal registers
1259 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1260 .addReg(SaveReg1, RegState::Define)
1261 .addReg(SaveReg2, RegState::Define)
1262 .addReg(Reg)
1263 .add(predOps(ARMCC::AL));
1264 } else {
1265 NonclearedFPRegs.push_back(Reg);
1266 }
1267 } else if (ARM::SPRRegClass.contains(Reg)) {
1268 if (ScratchRegs.size() >= 1) {
1269 unsigned SaveReg = ScratchRegs.pop_back_val();
1270 ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1271
1272 // Save the fp register to the normal registers
1273 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1274 .addReg(Reg)
1275 .add(predOps(ARMCC::AL));
1276 } else {
1277 NonclearedFPRegs.push_back(Reg);
1278 }
1279 }
1280 }
1281 }
1282
1283 bool passesFPReg = (!NonclearedFPRegs.empty() || !ClearedFPRegs.empty());
1284
1285 // Lazy store all fp registers to the stack
1286 MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1287 .addReg(ARM::SP)
1288 .add(predOps(ARMCC::AL));
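// List the registers covered by the lazy save as implicit operands of the
// VLSTM so that live values are not considered dead here; registers that are
// not live are added as undef.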
1289 for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1290 ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1291 VLSTM.addReg(R, RegState::Implicit |
1292 (LiveRegs.contains(R) ? 0 : RegState::Undef));
1293
1294 // Restore all arguments
1295 for (const auto &Regs : ClearedFPRegs) {
1296 unsigned Reg, SaveReg1, SaveReg2;
1297 std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1298 if (ARM::DPR_VFP2RegClass.contains(Reg))
1299 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1300 .addReg(SaveReg1)
1301 .addReg(SaveReg2)
1302 .add(predOps(ARMCC::AL));
1303 else if (ARM::SPRRegClass.contains(Reg))
1304 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1305 .addReg(SaveReg1)
1306 .add(predOps(ARMCC::AL));
1307 }
1308
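// Reload the FP arguments that could not be stashed in core registers. The
// first FP instruction executed after the VLSTM triggers the lazy state
// preservation and writes the FP registers to the save area, so these loads
// (and the FPSCR reload below) read the original values back from there.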
1309 for (unsigned Reg : NonclearedFPRegs) {
1310 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1311 if (STI->isLittle()) {
1312 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRD), Reg)
1313 .addReg(ARM::SP)
1314 .addImm((Reg - ARM::D0) * 2)
1315 .add(predOps(ARMCC::AL));
1316 } else {
1317 // For big-endian targets we need to load the two subregisters of Reg
1318 // manually because VLDRD would load them in the wrong order.
1319 unsigned SReg0 = TRI->getSubReg(Reg, ARM::ssub_0);
1320 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0)
1321 .addReg(ARM::SP)
1322 .addImm((Reg - ARM::D0) * 2)
1323 .add(predOps(ARMCC::AL));
1324 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), SReg0 + 1)
1325 .addReg(ARM::SP)
1326 .addImm((Reg - ARM::D0) * 2 + 1)
1327 .add(predOps(ARMCC::AL));
1328 }
1329 } else if (ARM::SPRRegClass.contains(Reg)) {
1330 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDRS), Reg)
1331 .addReg(ARM::SP)
1332 .addImm(Reg - ARM::S0)
1333 .add(predOps(ARMCC::AL));
1334 }
1335 }
1336 // restore FPSCR from stack and clear bits 0-4, 7, 28-31
1337 // The other bits are program global according to the AAPCS
1338 if (passesFPReg) {
1339 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2LDRi8), SpareReg)
1340 .addReg(ARM::SP)
1341 .addImm(0x40)
1342 .add(predOps(ARMCC::AL));
1343 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1344 .addReg(SpareReg)
1345 .addImm(0x0000009F)
1346 .add(predOps(ARMCC::AL))
1347 .add(condCodeOp());
1348 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), SpareReg)
1349 .addReg(SpareReg)
1350 .addImm(0xF0000000)
1351 .add(predOps(ARMCC::AL))
1352 .add(condCodeOp());
1353 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMSR))
1354 .addReg(SpareReg)
1355 .add(predOps(ARMCC::AL));
1356 // The ldr must happen after a floating-point instruction. To prevent the
1357 // post-RA scheduler from reordering the instructions, we create a bundle.
1358 finalizeBundle(MBB, VLSTM->getIterator(), MBBI->getIterator());
1359 }
1360 }
1361
1362 void ARMExpandPseudo::CMSESaveClearFPRegsV81(MachineBasicBlock &MBB,
1363 MachineBasicBlock::iterator MBBI,
1364 DebugLoc &DL,
1365 const LivePhysRegs &LiveRegs) {
1366 BitVector ClearRegs(32, true);
1367 bool DefFP = determineFPRegsToClear(*MBBI, ClearRegs);
1368
1369 // If the instruction does not write to an FP register and no elements were
1370 // removed from the set, then no FP registers were used to pass
1371 // arguments/returns.
1372 if (!DefFP && ClearRegs.count() == ClearRegs.size()) {
1373 // save space on stack for VLSTM
1374 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBspi), ARM::SP)
1375 .addReg(ARM::SP)
1376 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1377 .add(predOps(ARMCC::AL));
1378
1379 // Lazy store all FP registers to the stack
1380 MachineInstrBuilder VLSTM = BuildMI(MBB, MBBI, DL, TII->get(ARM::VLSTM))
1381 .addReg(ARM::SP)
1382 .add(predOps(ARMCC::AL));
1383 for (auto R : {ARM::VPR, ARM::FPSCR, ARM::FPSCR_NZCV, ARM::Q0, ARM::Q1,
1384 ARM::Q2, ARM::Q3, ARM::Q4, ARM::Q5, ARM::Q6, ARM::Q7})
1385 VLSTM.addReg(R, RegState::Implicit |
1386 (LiveRegs.contains(R) ? 0 : RegState::Undef));
1387 } else {
1388 // Push all the callee-saved registers (s16-s31).
1389 MachineInstrBuilder VPUSH =
1390 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTMSDB_UPD), ARM::SP)
1391 .addReg(ARM::SP)
1392 .add(predOps(ARMCC::AL));
1393 for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1394 VPUSH.addReg(Reg);
1395
1396 // Clear FP registers with a VSCCLRM.
1397 (void)CMSEClearFPRegsV81(MBB, MBBI, ClearRegs);
1398
1399 // Save floating-point context.
1400 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTR_FPCXTS_pre), ARM::SP)
1401 .addReg(ARM::SP)
1402 .addImm(-8)
1403 .add(predOps(ARMCC::AL));
1404 }
1405 }
1406
1407 // Restore FP registers if present
1408 void ARMExpandPseudo::CMSERestoreFPRegs(
1409 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1410 SmallVectorImpl<unsigned> &AvailableRegs) {
1411 if (STI->hasV8_1MMainlineOps())
1412 CMSERestoreFPRegsV81(MBB, MBBI, DL, AvailableRegs);
1413 else
1414 CMSERestoreFPRegsV8(MBB, MBBI, DL, AvailableRegs);
1415 }
1416
1417 void ARMExpandPseudo::CMSERestoreFPRegsV8(
1418 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1419 SmallVectorImpl<unsigned> &AvailableRegs) {
1420 if (!STI->hasFPRegs())
1421 return;
1422
1423 // Use AvailableRegs to store the fp regs
1424 std::vector<std::tuple<unsigned, unsigned, unsigned>> ClearedFPRegs;
1425 std::vector<unsigned> NonclearedFPRegs;
1426 for (const MachineOperand &Op : MBBI->operands()) {
1427 if (Op.isReg() && Op.isDef()) {
1428 unsigned Reg = Op.getReg();
1429 assert(!ARM::DPRRegClass.contains(Reg) ||
1430 ARM::DPR_VFP2RegClass.contains(Reg));
1431 assert(!ARM::QPRRegClass.contains(Reg));
1432 if (ARM::DPR_VFP2RegClass.contains(Reg)) {
1433 if (AvailableRegs.size() >= 2) {
1434 unsigned SaveReg2 = AvailableRegs.pop_back_val();
1435 unsigned SaveReg1 = AvailableRegs.pop_back_val();
1436 ClearedFPRegs.emplace_back(Reg, SaveReg1, SaveReg2);
1437
1438 // Save the fp register to the normal registers
1439 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRRD))
1440 .addReg(SaveReg1, RegState::Define)
1441 .addReg(SaveReg2, RegState::Define)
1442 .addReg(Reg)
1443 .add(predOps(ARMCC::AL));
1444 } else {
1445 NonclearedFPRegs.push_back(Reg);
1446 }
1447 } else if (ARM::SPRRegClass.contains(Reg)) {
1448 if (AvailableRegs.size() >= 1) {
1449 unsigned SaveReg = AvailableRegs.pop_back_val();
1450 ClearedFPRegs.emplace_back(Reg, SaveReg, 0);
1451
1452 // Save the fp register to the normal registers
1453 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVRS), SaveReg)
1454 .addReg(Reg)
1455 .add(predOps(ARMCC::AL));
1456 } else {
1457 NonclearedFPRegs.push_back(Reg);
1458 }
1459 }
1460 }
1461 }
1462
1463 // Write FP results that could not be stashed in normal registers into the stack save area.
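// The VLLDM below then reloads these values rather than the FP state saved
// before the call, so the call's FP results survive the lazy restore.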
1464 for (unsigned Reg : NonclearedFPRegs) {
1465 if (ARM::DPR_VFP2RegClass.contains(Reg))
1466 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRD), Reg)
1467 .addReg(ARM::SP)
1468 .addImm((Reg - ARM::D0) * 2)
1469 .add(predOps(ARMCC::AL));
1470 else if (ARM::SPRRegClass.contains(Reg))
1471 BuildMI(MBB, MBBI, DL, TII->get(ARM::VSTRS), Reg)
1472 .addReg(ARM::SP)
1473 .addImm(Reg - ARM::S0)
1474 .add(predOps(ARMCC::AL));
1475 }
1476
1477 // Lazy load fp regs from stack
1478 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1479 .addReg(ARM::SP)
1480 .add(predOps(ARMCC::AL));
1481
1482 // Restore all FP registers via normal registers
1483 for (const auto &Regs : ClearedFPRegs) {
1484 unsigned Reg, SaveReg1, SaveReg2;
1485 std::tie(Reg, SaveReg1, SaveReg2) = Regs;
1486 if (ARM::DPR_VFP2RegClass.contains(Reg))
1487 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVDRR), Reg)
1488 .addReg(SaveReg1)
1489 .addReg(SaveReg2)
1490 .add(predOps(ARMCC::AL));
1491 else if (ARM::SPRRegClass.contains(Reg))
1492 BuildMI(MBB, MBBI, DL, TII->get(ARM::VMOVSR), Reg)
1493 .addReg(SaveReg1)
1494 .add(predOps(ARMCC::AL));
1495 }
1496
1497 // Pop the stack space
1498 BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1499 .addReg(ARM::SP)
1500 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1501 .add(predOps(ARMCC::AL));
1502 }
1503
1504 static bool definesOrUsesFPReg(const MachineInstr &MI) {
1505 for (const MachineOperand &Op : MI.operands()) {
1506 if (!Op.isReg())
1507 continue;
1508 unsigned Reg = Op.getReg();
1509 if ((Reg >= ARM::Q0 && Reg <= ARM::Q7) ||
1510 (Reg >= ARM::D0 && Reg <= ARM::D15) ||
1511 (Reg >= ARM::S0 && Reg <= ARM::S31))
1512 return true;
1513 }
1514 return false;
1515 }
1516
1517 void ARMExpandPseudo::CMSERestoreFPRegsV81(
1518 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc &DL,
1519 SmallVectorImpl<unsigned> &AvailableRegs) {
1520 if (!definesOrUsesFPReg(*MBBI)) {
1521 // Load FP registers from stack.
1522 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLLDM))
1523 .addReg(ARM::SP)
1524 .add(predOps(ARMCC::AL));
1525
1526 // Pop the stack space
1527 BuildMI(MBB, MBBI, DL, TII->get(ARM::tADDspi), ARM::SP)
1528 .addReg(ARM::SP)
1529 .addImm(CMSE_FP_SAVE_SIZE >> 2)
1530 .add(predOps(ARMCC::AL));
1531 } else {
1532 // Restore the floating point context.
1533 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::VLDR_FPCXTS_post),
1534 ARM::SP)
1535 .addReg(ARM::SP)
1536 .addImm(8)
1537 .add(predOps(ARMCC::AL));
1538
1539 // Pop all the callee-saved registers (s16-s31).
1540 MachineInstrBuilder VPOP =
1541 BuildMI(MBB, MBBI, DL, TII->get(ARM::VLDMSIA_UPD), ARM::SP)
1542 .addReg(ARM::SP)
1543 .add(predOps(ARMCC::AL));
1544 for (int Reg = ARM::S16; Reg <= ARM::S31; ++Reg)
1545 VPOP.addReg(Reg, RegState::Define);
1546 }
1547 }
1548
1549 /// Expand a CMP_SWAP pseudo-inst to an ldrex/strex loop as simply as
1550 /// possible. This only gets used at -O0 so we don't care about efficiency of
1551 /// the generated code.
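/// Roughly, the expansion splits the block as follows:
///   MBB:        optional uxtb/uxth of rDesired, then fall through
///   LoadCmpBB:  ldrex rDest, [rAddr]; cmp rDest, rDesired; bne DoneBB
///   StoreBB:    strex rTempReg, rNew, [rAddr]; cmp rTempReg, #0; bne LoadCmpBB
///   DoneBB:     the remainder of the original block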
1552 bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
1553 MachineBasicBlock::iterator MBBI,
1554 unsigned LdrexOp, unsigned StrexOp,
1555 unsigned UxtOp,
1556 MachineBasicBlock::iterator &NextMBBI) {
1557 bool IsThumb = STI->isThumb();
1558 MachineInstr &MI = *MBBI;
1559 DebugLoc DL = MI.getDebugLoc();
1560 const MachineOperand &Dest = MI.getOperand(0);
1561 Register TempReg = MI.getOperand(1).getReg();
1562 // Duplicating undef operands into 2 instructions does not guarantee the same
1563 // value on both, so reject undef here; ARM has no zero register to substitute.
1564 assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1565 Register AddrReg = MI.getOperand(2).getReg();
1566 Register DesiredReg = MI.getOperand(3).getReg();
1567 Register NewReg = MI.getOperand(4).getReg();
1568
1569 MachineFunction *MF = MBB.getParent();
1570 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1571 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1572 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1573
1574 MF->insert(++MBB.getIterator(), LoadCmpBB);
1575 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1576 MF->insert(++StoreBB->getIterator(), DoneBB);
1577
1578 if (UxtOp) {
1579 MachineInstrBuilder MIB =
1580 BuildMI(MBB, MBBI, DL, TII->get(UxtOp), DesiredReg)
1581 .addReg(DesiredReg, RegState::Kill);
1582 if (!IsThumb)
1583 MIB.addImm(0);
1584 MIB.add(predOps(ARMCC::AL));
1585 }
1586
1587 // .Lloadcmp:
1588 // ldrex rDest, [rAddr]
1589 // cmp rDest, rDesired
1590 // bne .Ldone
1591
1592 MachineInstrBuilder MIB;
1593 MIB = BuildMI(LoadCmpBB, DL, TII->get(LdrexOp), Dest.getReg());
1594 MIB.addReg(AddrReg);
1595 if (LdrexOp == ARM::t2LDREX)
1596 MIB.addImm(0); // a 32-bit Thumb ldrex (only) allows an offset.
1597 MIB.add(predOps(ARMCC::AL));
1598
1599 unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1600 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1601 .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
1602 .addReg(DesiredReg)
1603 .add(predOps(ARMCC::AL));
1604 unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1605 BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1606 .addMBB(DoneBB)
1607 .addImm(ARMCC::NE)
1608 .addReg(ARM::CPSR, RegState::Kill);
1609 LoadCmpBB->addSuccessor(DoneBB);
1610 LoadCmpBB->addSuccessor(StoreBB);
1611
1612 // .Lstore:
1613 // strex rTempReg, rNew, [rAddr]
1614 // cmp rTempReg, #0
1615 // bne .Lloadcmp
1616 MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), TempReg)
1617 .addReg(NewReg)
1618 .addReg(AddrReg);
1619 if (StrexOp == ARM::t2STREX)
1620 MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset.
1621 MIB.add(predOps(ARMCC::AL));
1622
1623 unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
1624 BuildMI(StoreBB, DL, TII->get(CMPri))
1625 .addReg(TempReg, RegState::Kill)
1626 .addImm(0)
1627 .add(predOps(ARMCC::AL));
1628 BuildMI(StoreBB, DL, TII->get(Bcc))
1629 .addMBB(LoadCmpBB)
1630 .addImm(ARMCC::NE)
1631 .addReg(ARM::CPSR, RegState::Kill);
1632 StoreBB->addSuccessor(LoadCmpBB);
1633 StoreBB->addSuccessor(DoneBB);
1634
1635 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1636 DoneBB->transferSuccessors(&MBB);
1637
1638 MBB.addSuccessor(LoadCmpBB);
1639
1640 NextMBBI = MBB.end();
1641 MI.eraseFromParent();
1642
1643 // Recompute livein lists.
1644 LivePhysRegs LiveRegs;
1645 computeAndAddLiveIns(LiveRegs, *DoneBB);
1646 computeAndAddLiveIns(LiveRegs, *StoreBB);
1647 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1648 // Do an extra pass around the loop to get loop carried registers right.
1649 StoreBB->clearLiveIns();
1650 computeAndAddLiveIns(LiveRegs, *StoreBB);
1651 LoadCmpBB->clearLiveIns();
1652 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1653
1654 return true;
1655 }
1656
1657 /// ARM's ldrexd/strexd take a consecutive register pair (represented as a
1658 /// single GPRPair register); Thumb's take two separate registers, so we need
1659 /// to extract the subregs from the pair.
1660 static void addExclusiveRegPair(MachineInstrBuilder &MIB, MachineOperand &Reg,
1661 unsigned Flags, bool IsThumb,
1662 const TargetRegisterInfo *TRI) {
1663 if (IsThumb) {
1664 Register RegLo = TRI->getSubReg(Reg.getReg(), ARM::gsub_0);
1665 Register RegHi = TRI->getSubReg(Reg.getReg(), ARM::gsub_1);
1666 MIB.addReg(RegLo, Flags);
1667 MIB.addReg(RegHi, Flags);
1668 } else
1669 MIB.addReg(Reg.getReg(), Flags);
1670 }
1671
1672 /// Expand a 64-bit CMP_SWAP to an ldrexd/strexd loop.
1673 bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
1674 MachineBasicBlock::iterator MBBI,
1675 MachineBasicBlock::iterator &NextMBBI) {
1676 bool IsThumb = STI->isThumb();
1677 MachineInstr &MI = *MBBI;
1678 DebugLoc DL = MI.getDebugLoc();
1679 MachineOperand &Dest = MI.getOperand(0);
1680 Register TempReg = MI.getOperand(1).getReg();
1681 // Duplicating undef operands into 2 instructions does not guarantee the same
1682 // value on both, so reject undef here; ARM has no zero register to substitute.
1683 assert(!MI.getOperand(2).isUndef() && "cannot handle undef");
1684 Register AddrReg = MI.getOperand(2).getReg();
1685 Register DesiredReg = MI.getOperand(3).getReg();
1686 MachineOperand New = MI.getOperand(4);
1687 New.setIsKill(false);
1688
1689 Register DestLo = TRI->getSubReg(Dest.getReg(), ARM::gsub_0);
1690 Register DestHi = TRI->getSubReg(Dest.getReg(), ARM::gsub_1);
1691 Register DesiredLo = TRI->getSubReg(DesiredReg, ARM::gsub_0);
1692 Register DesiredHi = TRI->getSubReg(DesiredReg, ARM::gsub_1);
1693
1694 MachineFunction *MF = MBB.getParent();
1695 auto LoadCmpBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1696 auto StoreBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1697 auto DoneBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
1698
1699 MF->insert(++MBB.getIterator(), LoadCmpBB);
1700 MF->insert(++LoadCmpBB->getIterator(), StoreBB);
1701 MF->insert(++StoreBB->getIterator(), DoneBB);
1702
1703 // .Lloadcmp:
1704 // ldrexd rDestLo, rDestHi, [rAddr]
1705 // cmp rDestLo, rDesiredLo
1706 // cmpeq rDestHi, rDesiredHi
1707 // bne .Ldone
1708 unsigned LDREXD = IsThumb ? ARM::t2LDREXD : ARM::LDREXD;
1709 MachineInstrBuilder MIB;
1710 MIB = BuildMI(LoadCmpBB, DL, TII->get(LDREXD));
1711 addExclusiveRegPair(MIB, Dest, RegState::Define, IsThumb, TRI);
1712 MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1713
1714 unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
1715 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1716 .addReg(DestLo, getKillRegState(Dest.isDead()))
1717 .addReg(DesiredLo)
1718 .add(predOps(ARMCC::AL));
1719
1720 BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
1721 .addReg(DestHi, getKillRegState(Dest.isDead()))
1722 .addReg(DesiredHi)
1723 .addImm(ARMCC::EQ).addReg(ARM::CPSR, RegState::Kill);
1724
1725 unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
1726 BuildMI(LoadCmpBB, DL, TII->get(Bcc))
1727 .addMBB(DoneBB)
1728 .addImm(ARMCC::NE)
1729 .addReg(ARM::CPSR, RegState::Kill);
1730 LoadCmpBB->addSuccessor(DoneBB);
1731 LoadCmpBB->addSuccessor(StoreBB);
1732
1733 // .Lstore:
1734 // strexd rTempReg, rNewLo, rNewHi, [rAddr]
1735 // cmp rTempReg, #0
1736 // bne .Lloadcmp
1737 unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD;
1738 MIB = BuildMI(StoreBB, DL, TII->get(STREXD), TempReg);
1739 unsigned Flags = getKillRegState(New.isDead());
1740 addExclusiveRegPair(MIB, New, Flags, IsThumb, TRI);
1741 MIB.addReg(AddrReg).add(predOps(ARMCC::AL));
1742
1743 unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
1744 BuildMI(StoreBB, DL, TII->get(CMPri))
1745 .addReg(TempReg, RegState::Kill)
1746 .addImm(0)
1747 .add(predOps(ARMCC::AL));
1748 BuildMI(StoreBB, DL, TII->get(Bcc))
1749 .addMBB(LoadCmpBB)
1750 .addImm(ARMCC::NE)
1751 .addReg(ARM::CPSR, RegState::Kill);
1752 StoreBB->addSuccessor(LoadCmpBB);
1753 StoreBB->addSuccessor(DoneBB);
1754
1755 DoneBB->splice(DoneBB->end(), &MBB, MI, MBB.end());
1756 DoneBB->transferSuccessors(&MBB);
1757
1758 MBB.addSuccessor(LoadCmpBB);
1759
1760 NextMBBI = MBB.end();
1761 MI.eraseFromParent();
1762
1763 // Recompute livein lists.
1764 LivePhysRegs LiveRegs;
1765 computeAndAddLiveIns(LiveRegs, *DoneBB);
1766 computeAndAddLiveIns(LiveRegs, *StoreBB);
1767 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1768 // Do an extra pass around the loop to get loop carried registers right.
1769 StoreBB->clearLiveIns();
1770 computeAndAddLiveIns(LiveRegs, *StoreBB);
1771 LoadCmpBB->clearLiveIns();
1772 computeAndAddLiveIns(LiveRegs, *LoadCmpBB);
1773
1774 return true;
1775 }
1776
1777 static void CMSEPushCalleeSaves(const TargetInstrInfo &TII,
1778 MachineBasicBlock &MBB,
1779 MachineBasicBlock::iterator MBBI, int JumpReg,
1780 const LivePhysRegs &LiveRegs, bool Thumb1Only) {
1781 const DebugLoc &DL = MBBI->getDebugLoc();
1782 if (Thumb1Only) { // push Lo and Hi regs separately
1783 MachineInstrBuilder PushMIB =
1784 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1785 for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1786 PushMIB.addReg(
1787 Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
1788 }
1789
1790 // Thumb1 can only tPUSH low regs, so we copy the high regs to the low
1791 // regs that we just saved and push the low regs again, taking care to
1792 // not clobber JumpReg. If JumpReg is one of the low registers, push the
1793 // values of r9-r11 first, and then r8. That leaves them ordered in memory,
1794 // and allows us to later pop them with a single instruction.
1795 // FIXME: Could also use any of r0-r3 that are free (including in the
1796 // first PUSH above).
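// For example, when JumpReg is not one of r4-r7, the overall sequence is
// roughly:
//   push {r4-r7}
//   mov  r7, r11
//   mov  r6, r10
//   mov  r5, r9
//   mov  r4, r8
//   push {r4-r7}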
1797 for (int LoReg = ARM::R7, HiReg = ARM::R11; LoReg >= ARM::R4; --LoReg) {
1798 if (JumpReg == LoReg)
1799 continue;
1800 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1801 .addReg(HiReg, LiveRegs.contains(HiReg) ? 0 : RegState::Undef)
1802 .add(predOps(ARMCC::AL));
1803 --HiReg;
1804 }
1805 MachineInstrBuilder PushMIB2 =
1806 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH)).add(predOps(ARMCC::AL));
1807 for (int Reg = ARM::R4; Reg < ARM::R8; ++Reg) {
1808 if (Reg == JumpReg)
1809 continue;
1810 PushMIB2.addReg(Reg, RegState::Kill);
1811 }
1812
1813 // If we couldn't use a low register for temporary storage (because it was
1814 // the JumpReg), use r4 or r5, whichever is not JumpReg. It has already been
1815 // saved.
1816 if (JumpReg >= ARM::R4 && JumpReg <= ARM::R7) {
1817 int LoReg = JumpReg == ARM::R4 ? ARM::R5 : ARM::R4;
1818 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), LoReg)
1819 .addReg(ARM::R8, LiveRegs.contains(ARM::R8) ? 0 : RegState::Undef)
1820 .add(predOps(ARMCC::AL));
1821 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPUSH))
1822 .add(predOps(ARMCC::AL))
1823 .addReg(LoReg, RegState::Kill);
1824 }
1825 } else { // push Lo and Hi registers with a single instruction
1826 MachineInstrBuilder PushMIB =
1827 BuildMI(MBB, MBBI, DL, TII.get(ARM::t2STMDB_UPD), ARM::SP)
1828 .addReg(ARM::SP)
1829 .add(predOps(ARMCC::AL));
1830 for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg) {
1831 PushMIB.addReg(
1832 Reg, Reg == JumpReg || LiveRegs.contains(Reg) ? 0 : RegState::Undef);
1833 }
1834 }
1835 }
1836
CMSEPopCalleeSaves(const TargetInstrInfo & TII,MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,int JumpReg,bool Thumb1Only)1837 static void CMSEPopCalleeSaves(const TargetInstrInfo &TII,
1838 MachineBasicBlock &MBB,
1839 MachineBasicBlock::iterator MBBI, int JumpReg,
1840 bool Thumb1Only) {
1841 const DebugLoc &DL = MBBI->getDebugLoc();
1842 if (Thumb1Only) {
1843 MachineInstrBuilder PopMIB =
1844 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
1845 for (int R = 0; R < 4; ++R) {
1846 PopMIB.addReg(ARM::R4 + R, RegState::Define);
1847 BuildMI(MBB, MBBI, DL, TII.get(ARM::tMOVr), ARM::R8 + R)
1848 .addReg(ARM::R4 + R, RegState::Kill)
1849 .add(predOps(ARMCC::AL));
1850 }
1851 MachineInstrBuilder PopMIB2 =
1852 BuildMI(MBB, MBBI, DL, TII.get(ARM::tPOP)).add(predOps(ARMCC::AL));
1853 for (int R = 0; R < 4; ++R)
1854 PopMIB2.addReg(ARM::R4 + R, RegState::Define);
1855 } else { // pop Lo and Hi registers with a single instruction
1856 MachineInstrBuilder PopMIB =
1857 BuildMI(MBB, MBBI, DL, TII.get(ARM::t2LDMIA_UPD), ARM::SP)
1858 .addReg(ARM::SP)
1859 .add(predOps(ARMCC::AL));
1860 for (int Reg = ARM::R4; Reg < ARM::R12; ++Reg)
1861 PopMIB.addReg(Reg, RegState::Define);
1862 }
1863 }
1864
1865 bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
1866 MachineBasicBlock::iterator MBBI,
1867 MachineBasicBlock::iterator &NextMBBI) {
1868 MachineInstr &MI = *MBBI;
1869 unsigned Opcode = MI.getOpcode();
1870 switch (Opcode) {
1871 default:
1872 return false;
1873
1874 case ARM::VBSPd:
1875 case ARM::VBSPq: {
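// VBSP has no source operand tied to the destination; pick the concrete NEON
// instruction (VBIT, VBIF or VBSL) whose tied operand already matches the
// destination register, or fall back to a VORR copy followed by VBSL.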
1876 Register DstReg = MI.getOperand(0).getReg();
1877 if (DstReg == MI.getOperand(3).getReg()) {
1878 // Expand to VBIT
1879 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBITd : ARM::VBITq;
1880 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
1881 .add(MI.getOperand(0))
1882 .add(MI.getOperand(3))
1883 .add(MI.getOperand(2))
1884 .add(MI.getOperand(1))
1885 .addImm(MI.getOperand(4).getImm())
1886 .add(MI.getOperand(5));
1887 } else if (DstReg == MI.getOperand(2).getReg()) {
1888 // Expand to VBIF
1889 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBIFd : ARM::VBIFq;
1890 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
1891 .add(MI.getOperand(0))
1892 .add(MI.getOperand(2))
1893 .add(MI.getOperand(3))
1894 .add(MI.getOperand(1))
1895 .addImm(MI.getOperand(4).getImm())
1896 .add(MI.getOperand(5));
1897 } else {
1898 // Expand to VBSL
1899 unsigned NewOpc = Opcode == ARM::VBSPd ? ARM::VBSLd : ARM::VBSLq;
1900 if (DstReg == MI.getOperand(1).getReg()) {
1901 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
1902 .add(MI.getOperand(0))
1903 .add(MI.getOperand(1))
1904 .add(MI.getOperand(2))
1905 .add(MI.getOperand(3))
1906 .addImm(MI.getOperand(4).getImm())
1907 .add(MI.getOperand(5));
1908 } else {
1909 // Use move to satisfy constraints
1910 unsigned MoveOpc = Opcode == ARM::VBSPd ? ARM::VORRd : ARM::VORRq;
1911 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(MoveOpc))
1912 .addReg(DstReg,
1913 RegState::Define |
1914 getRenamableRegState(MI.getOperand(0).isRenamable()))
1915 .add(MI.getOperand(1))
1916 .add(MI.getOperand(1))
1917 .addImm(MI.getOperand(4).getImm())
1918 .add(MI.getOperand(5));
1919 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc))
1920 .add(MI.getOperand(0))
1921 .addReg(DstReg,
1922 RegState::Kill |
1923 getRenamableRegState(MI.getOperand(0).isRenamable()))
1924 .add(MI.getOperand(2))
1925 .add(MI.getOperand(3))
1926 .addImm(MI.getOperand(4).getImm())
1927 .add(MI.getOperand(5));
1928 }
1929 }
1930 MI.eraseFromParent();
1931 return true;
1932 }
1933
1934 case ARM::TCRETURNdi:
1935 case ARM::TCRETURNri: {
1936 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
1937 assert(MBBI->isReturn() &&
1938 "Can only insert epilog into returning blocks");
1939 unsigned RetOpcode = MBBI->getOpcode();
1940 DebugLoc dl = MBBI->getDebugLoc();
1941 const ARMBaseInstrInfo &TII = *static_cast<const ARMBaseInstrInfo *>(
1942 MBB.getParent()->getSubtarget().getInstrInfo());
1943
1944 // Tail call return: adjust the stack pointer and jump to callee.
1945 MBBI = MBB.getLastNonDebugInstr();
1946 MachineOperand &JumpTarget = MBBI->getOperand(0);
1947
1948 // Jump to label or value in register.
1949 if (RetOpcode == ARM::TCRETURNdi) {
1950 unsigned TCOpcode =
1951 STI->isThumb()
1952 ? (STI->isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND)
1953 : ARM::TAILJMPd;
1954 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
1955 if (JumpTarget.isGlobal())
1956 MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1957 JumpTarget.getTargetFlags());
1958 else {
1959 assert(JumpTarget.isSymbol());
1960 MIB.addExternalSymbol(JumpTarget.getSymbolName(),
1961 JumpTarget.getTargetFlags());
1962 }
1963
1964 // Add the default predicate in Thumb mode.
1965 if (STI->isThumb())
1966 MIB.add(predOps(ARMCC::AL));
1967 } else if (RetOpcode == ARM::TCRETURNri) {
1968 unsigned Opcode =
1969 STI->isThumb() ? ARM::tTAILJMPr
1970 : (STI->hasV4TOps() ? ARM::TAILJMPr : ARM::TAILJMPr4);
1971 BuildMI(MBB, MBBI, dl,
1972 TII.get(Opcode))
1973 .addReg(JumpTarget.getReg(), RegState::Kill);
1974 }
1975
1976 auto NewMI = std::prev(MBBI);
1977 for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
1978 NewMI->addOperand(MBBI->getOperand(i));
1979
1980
1981 // Update call site info and delete the pseudo instruction TCRETURN.
1982 if (MI.isCandidateForCallSiteEntry())
1983 MI.getMF()->moveCallSiteInfo(&MI, &*NewMI);
1984 MBB.erase(MBBI);
1985
1986 MBBI = NewMI;
1987 return true;
1988 }
1989 case ARM::tBXNS_RET: {
1990 MachineBasicBlock &AfterBB = CMSEClearFPRegs(MBB, MBBI);
1991
1992 if (STI->hasV8_1MMainlineOps()) {
1993 // Restore the non-secure floating point context.
1994 BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
1995 TII->get(ARM::VLDR_FPCXTNS_post), ARM::SP)
1996 .addReg(ARM::SP)
1997 .addImm(4)
1998 .add(predOps(ARMCC::AL));
1999 }
2000
2001 // Clear all GPRs that are not used by the return instruction.
2002 assert(llvm::all_of(MBBI->operands(), [](const MachineOperand &Op) {
2003 return !Op.isReg() || Op.getReg() != ARM::R12;
2004 }));
2005 SmallVector<unsigned, 5> ClearRegs;
2006 determineGPRegsToClear(
2007 *MBBI, {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R12}, ClearRegs);
2008 CMSEClearGPRegs(AfterBB, AfterBB.end(), MBBI->getDebugLoc(), ClearRegs,
2009 ARM::LR);
2010
2011 MachineInstrBuilder NewMI =
2012 BuildMI(AfterBB, AfterBB.end(), MBBI->getDebugLoc(),
2013 TII->get(ARM::tBXNS))
2014 .addReg(ARM::LR)
2015 .add(predOps(ARMCC::AL));
2016 for (const MachineOperand &Op : MI.operands())
2017 NewMI->addOperand(Op);
2018 MI.eraseFromParent();
2019 return true;
2020 }
2021 case ARM::tBLXNS_CALL: {
2022 DebugLoc DL = MBBI->getDebugLoc();
2023 unsigned JumpReg = MBBI->getOperand(0).getReg();
2024
2025 // Figure out which registers are live at the point immediately before the
2026 // call. When we indiscriminately push a set of registers, the live
2027 // registers are added as ordinary use operands, whereas dead registers
2028 // are "undef".
2029 LivePhysRegs LiveRegs(*TRI);
2030 LiveRegs.addLiveOuts(MBB);
2031 for (const MachineInstr &MI : make_range(MBB.rbegin(), MBBI.getReverse()))
2032 LiveRegs.stepBackward(MI);
2033 LiveRegs.stepBackward(*MBBI);
2034
2035 CMSEPushCalleeSaves(*TII, MBB, MBBI, JumpReg, LiveRegs,
2036 AFI->isThumb1OnlyFunction());
2037
2038 SmallVector<unsigned, 16> ClearRegs;
2039 determineGPRegsToClear(*MBBI,
2040 {ARM::R0, ARM::R1, ARM::R2, ARM::R3, ARM::R4,
2041 ARM::R5, ARM::R6, ARM::R7, ARM::R8, ARM::R9,
2042 ARM::R10, ARM::R11, ARM::R12},
2043 ClearRegs);
2044 auto OriginalClearRegs = ClearRegs;
2045
2046 // Get the first cleared register as a scratch (to use later with tBIC).
2047 // We need to use the first so we can ensure it is a low register.
2048 unsigned ScratchReg = ClearRegs.front();
2049
2050 // Clear LSB of JumpReg
2051 if (AFI->isThumb2Function()) {
2052 BuildMI(MBB, MBBI, DL, TII->get(ARM::t2BICri), JumpReg)
2053 .addReg(JumpReg)
2054 .addImm(1)
2055 .add(predOps(ARMCC::AL))
2056 .add(condCodeOp());
2057 } else {
2058 // We need to use an extra register to cope with v8-M Baseline, which has
2059 // no BIC with an immediate; since we have saved all of the registers, we
2060 // are OK to trash a non-argument register here.
2061 BuildMI(MBB, MBBI, DL, TII->get(ARM::tMOVi8), ScratchReg)
2062 .add(condCodeOp())
2063 .addImm(1)
2064 .add(predOps(ARMCC::AL));
2065 BuildMI(MBB, MBBI, DL, TII->get(ARM::tBIC), JumpReg)
2066 .addReg(ARM::CPSR, RegState::Define)
2067 .addReg(JumpReg)
2068 .addReg(ScratchReg)
2069 .add(predOps(ARMCC::AL));
2070 }
2071
2072 CMSESaveClearFPRegs(MBB, MBBI, DL, LiveRegs,
2073 ClearRegs); // save+clear FP regs with ClearRegs
2074 CMSEClearGPRegs(MBB, MBBI, DL, ClearRegs, JumpReg);
2075
2076 const MachineInstrBuilder NewCall =
2077 BuildMI(MBB, MBBI, DL, TII->get(ARM::tBLXNSr))
2078 .add(predOps(ARMCC::AL))
2079 .addReg(JumpReg, RegState::Kill);
2080
2081 for (int I = 1, E = MI.getNumOperands(); I != E; ++I)
2082 NewCall->addOperand(MI.getOperand(I));
2083 if (MI.isCandidateForCallSiteEntry())
2084 MI.getMF()->moveCallSiteInfo(&MI, NewCall.getInstr());
2085
2086 CMSERestoreFPRegs(MBB, MBBI, DL, OriginalClearRegs); // restore FP registers
2087
2088 CMSEPopCalleeSaves(*TII, MBB, MBBI, JumpReg, AFI->isThumb1OnlyFunction());
2089
2090 MI.eraseFromParent();
2091 return true;
2092 }
2093 case ARM::VMOVHcc:
2094 case ARM::VMOVScc:
2095 case ARM::VMOVDcc: {
2096 unsigned newOpc = Opcode != ARM::VMOVDcc ? ARM::VMOVS : ARM::VMOVD;
2097 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc),
2098 MI.getOperand(1).getReg())
2099 .add(MI.getOperand(2))
2100 .addImm(MI.getOperand(3).getImm()) // 'pred'
2101 .add(MI.getOperand(4))
2102 .add(makeImplicit(MI.getOperand(1)));
2103
2104 MI.eraseFromParent();
2105 return true;
2106 }
2107 case ARM::t2MOVCCr:
2108 case ARM::MOVCCr: {
2109 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
2110 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2111 MI.getOperand(1).getReg())
2112 .add(MI.getOperand(2))
2113 .addImm(MI.getOperand(3).getImm()) // 'pred'
2114 .add(MI.getOperand(4))
2115 .add(condCodeOp()) // 's' bit
2116 .add(makeImplicit(MI.getOperand(1)));
2117
2118 MI.eraseFromParent();
2119 return true;
2120 }
2121 case ARM::MOVCCsi: {
2122 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2123 (MI.getOperand(1).getReg()))
2124 .add(MI.getOperand(2))
2125 .addImm(MI.getOperand(3).getImm())
2126 .addImm(MI.getOperand(4).getImm()) // 'pred'
2127 .add(MI.getOperand(5))
2128 .add(condCodeOp()) // 's' bit
2129 .add(makeImplicit(MI.getOperand(1)));
2130
2131 MI.eraseFromParent();
2132 return true;
2133 }
2134 case ARM::MOVCCsr: {
2135 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr),
2136 (MI.getOperand(1).getReg()))
2137 .add(MI.getOperand(2))
2138 .add(MI.getOperand(3))
2139 .addImm(MI.getOperand(4).getImm())
2140 .addImm(MI.getOperand(5).getImm()) // 'pred'
2141 .add(MI.getOperand(6))
2142 .add(condCodeOp()) // 's' bit
2143 .add(makeImplicit(MI.getOperand(1)));
2144
2145 MI.eraseFromParent();
2146 return true;
2147 }
2148 case ARM::t2MOVCCi16:
2149 case ARM::MOVCCi16: {
2150 unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16;
2151 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2152 MI.getOperand(1).getReg())
2153 .addImm(MI.getOperand(2).getImm())
2154 .addImm(MI.getOperand(3).getImm()) // 'pred'
2155 .add(MI.getOperand(4))
2156 .add(makeImplicit(MI.getOperand(1)));
2157 MI.eraseFromParent();
2158 return true;
2159 }
2160 case ARM::t2MOVCCi:
2161 case ARM::MOVCCi: {
2162 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVi : ARM::MOVi;
2163 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2164 MI.getOperand(1).getReg())
2165 .addImm(MI.getOperand(2).getImm())
2166 .addImm(MI.getOperand(3).getImm()) // 'pred'
2167 .add(MI.getOperand(4))
2168 .add(condCodeOp()) // 's' bit
2169 .add(makeImplicit(MI.getOperand(1)));
2170
2171 MI.eraseFromParent();
2172 return true;
2173 }
2174 case ARM::t2MVNCCi:
2175 case ARM::MVNCCi: {
2176 unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi;
2177 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
2178 MI.getOperand(1).getReg())
2179 .addImm(MI.getOperand(2).getImm())
2180 .addImm(MI.getOperand(3).getImm()) // 'pred'
2181 .add(MI.getOperand(4))
2182 .add(condCodeOp()) // 's' bit
2183 .add(makeImplicit(MI.getOperand(1)));
2184
2185 MI.eraseFromParent();
2186 return true;
2187 }
2188 case ARM::t2MOVCClsl:
2189 case ARM::t2MOVCClsr:
2190 case ARM::t2MOVCCasr:
2191 case ARM::t2MOVCCror: {
2192 unsigned NewOpc;
2193 switch (Opcode) {
2194 case ARM::t2MOVCClsl: NewOpc = ARM::t2LSLri; break;
2195 case ARM::t2MOVCClsr: NewOpc = ARM::t2LSRri; break;
2196 case ARM::t2MOVCCasr: NewOpc = ARM::t2ASRri; break;
2197 case ARM::t2MOVCCror: NewOpc = ARM::t2RORri; break;
2198 default: llvm_unreachable("unexpected conditional move");
2199 }
2200 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
2201 MI.getOperand(1).getReg())
2202 .add(MI.getOperand(2))
2203 .addImm(MI.getOperand(3).getImm())
2204 .addImm(MI.getOperand(4).getImm()) // 'pred'
2205 .add(MI.getOperand(5))
2206 .add(condCodeOp()) // 's' bit
2207 .add(makeImplicit(MI.getOperand(1)));
2208 MI.eraseFromParent();
2209 return true;
2210 }
2211 case ARM::Int_eh_sjlj_dispatchsetup: {
2212 MachineFunction &MF = *MI.getParent()->getParent();
2213 const ARMBaseInstrInfo *AII =
2214 static_cast<const ARMBaseInstrInfo*>(TII);
2215 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
2216 // For functions using a base pointer, we rematerialize it (via the frame
2217 // pointer) here since eh.sjlj.setjmp and eh.sjlj.longjmp don't do it
2218 // for us. Otherwise, expand to nothing.
2219 if (RI.hasBasePointer(MF)) {
2220 int32_t NumBytes = AFI->getFramePtrSpillOffset();
2221 Register FramePtr = RI.getFrameRegister(MF);
2222 assert(MF.getSubtarget().getFrameLowering()->hasFP(MF) &&
2223 "base pointer without frame pointer?");
2224
2225 if (AFI->isThumb2Function()) {
2226 emitT2RegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2227 FramePtr, -NumBytes, ARMCC::AL, 0, *TII);
2228 } else if (AFI->isThumbFunction()) {
2229 emitThumbRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2230 FramePtr, -NumBytes, *TII, RI);
2231 } else {
2232 emitARMRegPlusImmediate(MBB, MBBI, MI.getDebugLoc(), ARM::R6,
2233 FramePtr, -NumBytes, ARMCC::AL, 0,
2234 *TII);
2235 }
2236 // If there's dynamic realignment, adjust for it.
2237 if (RI.needsStackRealignment(MF)) {
2238 MachineFrameInfo &MFI = MF.getFrameInfo();
2239 Align MaxAlign = MFI.getMaxAlign();
2240 assert (!AFI->isThumb1OnlyFunction());
2241 // Emit bic r6, r6, MaxAlign
2242 assert(MaxAlign <= Align(256) &&
2243 "The BIC instruction cannot encode "
2244 "immediates larger than 256 with all lower "
2245 "bits set.");
2246 unsigned bicOpc = AFI->isThumbFunction() ?
2247 ARM::t2BICri : ARM::BICri;
2248 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6)
2249 .addReg(ARM::R6, RegState::Kill)
2250 .addImm(MaxAlign.value() - 1)
2251 .add(predOps(ARMCC::AL))
2252 .add(condCodeOp());
2253 }
2254
2255 }
2256 MI.eraseFromParent();
2257 return true;
2258 }
2259
2260 case ARM::MOVsrl_flag:
2261 case ARM::MOVsra_flag: {
2262 // These are just fancy MOVs instructions.
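// i.e. "lsrs Rd, Rm, #1" / "asrs Rd, Rm, #1": shift right by one and set the
// flags, with the shifted-out bit landing in the carry flag.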
2263 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2264 MI.getOperand(0).getReg())
2265 .add(MI.getOperand(1))
2266 .addImm(ARM_AM::getSORegOpc(
2267 (Opcode == ARM::MOVsrl_flag ? ARM_AM::lsr : ARM_AM::asr), 1))
2268 .add(predOps(ARMCC::AL))
2269 .addReg(ARM::CPSR, RegState::Define);
2270 MI.eraseFromParent();
2271 return true;
2272 }
2273 case ARM::RRX: {
2274 // This encodes as "MOVs Rd, Rm, rrx".
2275 MachineInstrBuilder MIB =
2276 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
2277 MI.getOperand(0).getReg())
2278 .add(MI.getOperand(1))
2279 .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0))
2280 .add(predOps(ARMCC::AL))
2281 .add(condCodeOp());
2282 TransferImpOps(MI, MIB, MIB);
2283 MI.eraseFromParent();
2284 return true;
2285 }
2286 case ARM::tTPsoft:
2287 case ARM::TPsoft: {
2288 const bool Thumb = Opcode == ARM::tTPsoft;
2289
2290 MachineInstrBuilder MIB;
2291 MachineFunction *MF = MBB.getParent();
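// When generating long calls, load the address of __aeabi_read_tp from a new
// constant-pool entry and call it indirectly with BLX; otherwise call it
// directly with BL.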
2292 if (STI->genLongCalls()) {
2293 MachineConstantPool *MCP = MF->getConstantPool();
2294 unsigned PCLabelID = AFI->createPICLabelUId();
2295 MachineConstantPoolValue *CPV =
2296 ARMConstantPoolSymbol::Create(MF->getFunction().getContext(),
2297 "__aeabi_read_tp", PCLabelID, 0);
2298 Register Reg = MI.getOperand(0).getReg();
2299 MIB =
2300 BuildMI(MBB, MBBI, MI.getDebugLoc(),
2301 TII->get(Thumb ? ARM::tLDRpci : ARM::LDRi12), Reg)
2302 .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2303 if (!Thumb)
2304 MIB.addImm(0);
2305 MIB.add(predOps(ARMCC::AL));
2306
2307 MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2308 TII->get(Thumb ? ARM::tBLXr : ARM::BLX));
2309 if (Thumb)
2310 MIB.add(predOps(ARMCC::AL));
2311 MIB.addReg(Reg, RegState::Kill);
2312 } else {
2313 MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2314 TII->get(Thumb ? ARM::tBL : ARM::BL));
2315 if (Thumb)
2316 MIB.add(predOps(ARMCC::AL));
2317 MIB.addExternalSymbol("__aeabi_read_tp", 0);
2318 }
2319
2320 MIB.cloneMemRefs(MI);
2321 TransferImpOps(MI, MIB, MIB);
2322 // Update the call site info.
2323 if (MI.isCandidateForCallSiteEntry())
2324 MF->moveCallSiteInfo(&MI, &*MIB);
2325 MI.eraseFromParent();
2326 return true;
2327 }
2328 case ARM::tLDRpci_pic:
2329 case ARM::t2LDRpci_pic: {
2330 unsigned NewLdOpc = (Opcode == ARM::tLDRpci_pic)
2331 ? ARM::tLDRpci : ARM::t2LDRpci;
2332 Register DstReg = MI.getOperand(0).getReg();
2333 bool DstIsDead = MI.getOperand(0).isDead();
2334 MachineInstrBuilder MIB1 =
2335 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
2336 .add(MI.getOperand(1))
2337 .add(predOps(ARMCC::AL));
2338 MIB1.cloneMemRefs(MI);
2339 MachineInstrBuilder MIB2 =
2340 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
2341 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2342 .addReg(DstReg)
2343 .add(MI.getOperand(2));
2344 TransferImpOps(MI, MIB1, MIB2);
2345 MI.eraseFromParent();
2346 return true;
2347 }
2348
2349 case ARM::LDRLIT_ga_abs:
2350 case ARM::LDRLIT_ga_pcrel:
2351 case ARM::LDRLIT_ga_pcrel_ldr:
2352 case ARM::tLDRLIT_ga_abs:
2353 case ARM::tLDRLIT_ga_pcrel: {
2354 Register DstReg = MI.getOperand(0).getReg();
2355 bool DstIsDead = MI.getOperand(0).isDead();
2356 const MachineOperand &MO1 = MI.getOperand(1);
2357 auto Flags = MO1.getTargetFlags();
2358 const GlobalValue *GV = MO1.getGlobal();
2359 bool IsARM =
2360 Opcode != ARM::tLDRLIT_ga_pcrel && Opcode != ARM::tLDRLIT_ga_abs;
2361 bool IsPIC =
2362 Opcode != ARM::LDRLIT_ga_abs && Opcode != ARM::tLDRLIT_ga_abs;
2363 unsigned LDRLITOpc = IsARM ? ARM::LDRi12 : ARM::tLDRpci;
2364 unsigned PICAddOpc =
2365 IsARM
2366 ? (Opcode == ARM::LDRLIT_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2367 : ARM::tPICADD;
2368
2369 // We need a new const-pool entry to load from.
2370 MachineConstantPool *MCP = MBB.getParent()->getConstantPool();
2371 unsigned ARMPCLabelIndex = 0;
2372 MachineConstantPoolValue *CPV;
2373
2374 if (IsPIC) {
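// PC reads as the address of the instruction plus 8 in ARM state and plus 4
// in Thumb state; the constant-pool entry compensates for that offset.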
2375 unsigned PCAdj = IsARM ? 8 : 4;
2376 auto Modifier = (Flags & ARMII::MO_GOT)
2377 ? ARMCP::GOT_PREL
2378 : ARMCP::no_modifier;
2379 ARMPCLabelIndex = AFI->createPICLabelUId();
2380 CPV = ARMConstantPoolConstant::Create(
2381 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj, Modifier,
2382 /*AddCurrentAddr*/ Modifier == ARMCP::GOT_PREL);
2383 } else
2384 CPV = ARMConstantPoolConstant::Create(GV, ARMCP::no_modifier);
2385
2386 MachineInstrBuilder MIB =
2387 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(LDRLITOpc), DstReg)
2388 .addConstantPoolIndex(MCP->getConstantPoolIndex(CPV, Align(4)));
2389 if (IsARM)
2390 MIB.addImm(0);
2391 MIB.add(predOps(ARMCC::AL));
2392
2393 if (IsPIC) {
2394 MachineInstrBuilder MIB =
2395 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(PICAddOpc))
2396 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2397 .addReg(DstReg)
2398 .addImm(ARMPCLabelIndex);
2399
2400 if (IsARM)
2401 MIB.add(predOps(ARMCC::AL));
2402 }
2403
2404 MI.eraseFromParent();
2405 return true;
2406 }
2407 case ARM::MOV_ga_pcrel:
2408 case ARM::MOV_ga_pcrel_ldr:
2409 case ARM::t2MOV_ga_pcrel: {
2410 // Expand into movw + movt. Also "add pc" / ldr [pc] in PIC mode.
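// For the ARM PIC variants the result is roughly:
//   movw rDst, :lower16:(GV - (.LPC + 8))
//   movt rDst, :upper16:(GV - (.LPC + 8))
// .LPC:
//   add  rDst, pc, rDst       @ or ldr rDst, [pc, rDst] for MOV_ga_pcrel_ldr
// (Thumb uses an offset of 4 and tPICADD.)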
2411 unsigned LabelId = AFI->createPICLabelUId();
2412 Register DstReg = MI.getOperand(0).getReg();
2413 bool DstIsDead = MI.getOperand(0).isDead();
2414 const MachineOperand &MO1 = MI.getOperand(1);
2415 const GlobalValue *GV = MO1.getGlobal();
2416 unsigned TF = MO1.getTargetFlags();
2417 bool isARM = Opcode != ARM::t2MOV_ga_pcrel;
2418 unsigned LO16Opc = isARM ? ARM::MOVi16_ga_pcrel : ARM::t2MOVi16_ga_pcrel;
2419 unsigned HI16Opc = isARM ? ARM::MOVTi16_ga_pcrel : ARM::t2MOVTi16_ga_pcrel;
2420 unsigned LO16TF = TF | ARMII::MO_LO16;
2421 unsigned HI16TF = TF | ARMII::MO_HI16;
2422 unsigned PICAddOpc = isARM
2423 ? (Opcode == ARM::MOV_ga_pcrel_ldr ? ARM::PICLDR : ARM::PICADD)
2424 : ARM::tPICADD;
2425 MachineInstrBuilder MIB1 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2426 TII->get(LO16Opc), DstReg)
2427 .addGlobalAddress(GV, MO1.getOffset(), TF | LO16TF)
2428 .addImm(LabelId);
2429
2430 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(HI16Opc), DstReg)
2431 .addReg(DstReg)
2432 .addGlobalAddress(GV, MO1.getOffset(), TF | HI16TF)
2433 .addImm(LabelId);
2434
2435 MachineInstrBuilder MIB3 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
2436 TII->get(PICAddOpc))
2437 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
2438 .addReg(DstReg).addImm(LabelId);
2439 if (isARM) {
2440 MIB3.add(predOps(ARMCC::AL));
2441 if (Opcode == ARM::MOV_ga_pcrel_ldr)
2442 MIB3.cloneMemRefs(MI);
2443 }
2444 TransferImpOps(MI, MIB1, MIB3);
2445 MI.eraseFromParent();
2446 return true;
2447 }
2448
2449 case ARM::MOVi32imm:
2450 case ARM::MOVCCi32imm:
2451 case ARM::t2MOVi32imm:
2452 case ARM::t2MOVCCi32imm:
2453 ExpandMOV32BitImm(MBB, MBBI);
2454 return true;
2455
2456 case ARM::SUBS_PC_LR: {
2457 MachineInstrBuilder MIB =
2458 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC)
2459 .addReg(ARM::LR)
2460 .add(MI.getOperand(0))
2461 .add(MI.getOperand(1))
2462 .add(MI.getOperand(2))
2463 .addReg(ARM::CPSR, RegState::Undef);
2464 TransferImpOps(MI, MIB, MIB);
2465 MI.eraseFromParent();
2466 return true;
2467 }
2468 case ARM::VLDMQIA: {
2469 unsigned NewOpc = ARM::VLDMDIA;
2470 MachineInstrBuilder MIB =
2471 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2472 unsigned OpIdx = 0;
2473
2474 // Grab the Q register destination.
2475 bool DstIsDead = MI.getOperand(OpIdx).isDead();
2476 Register DstReg = MI.getOperand(OpIdx++).getReg();
2477
2478 // Copy the source register.
2479 MIB.add(MI.getOperand(OpIdx++));
2480
2481 // Copy the predicate operands.
2482 MIB.add(MI.getOperand(OpIdx++));
2483 MIB.add(MI.getOperand(OpIdx++));
2484
2485 // Add the destination operands (D subregs).
2486 Register D0 = TRI->getSubReg(DstReg, ARM::dsub_0);
2487 Register D1 = TRI->getSubReg(DstReg, ARM::dsub_1);
2488 MIB.addReg(D0, RegState::Define | getDeadRegState(DstIsDead))
2489 .addReg(D1, RegState::Define | getDeadRegState(DstIsDead));
2490
2491 // Add an implicit def for the super-register.
2492 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
2493 TransferImpOps(MI, MIB, MIB);
2494 MIB.cloneMemRefs(MI);
2495 MI.eraseFromParent();
2496 return true;
2497 }
2498
2499 case ARM::VSTMQIA: {
2500 unsigned NewOpc = ARM::VSTMDIA;
2501 MachineInstrBuilder MIB =
2502 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc));
2503 unsigned OpIdx = 0;
2504
2505 // Grab the Q register source.
2506 bool SrcIsKill = MI.getOperand(OpIdx).isKill();
2507 Register SrcReg = MI.getOperand(OpIdx++).getReg();
2508
2509 // Copy the destination register.
2510 MachineOperand Dst(MI.getOperand(OpIdx++));
2511 MIB.add(Dst);
2512
2513 // Copy the predicate operands.
2514 MIB.add(MI.getOperand(OpIdx++));
2515 MIB.add(MI.getOperand(OpIdx++));
2516
2517 // Add the source operands (D subregs).
2518 Register D0 = TRI->getSubReg(SrcReg, ARM::dsub_0);
2519 Register D1 = TRI->getSubReg(SrcReg, ARM::dsub_1);
2520 MIB.addReg(D0, SrcIsKill ? RegState::Kill : 0)
2521 .addReg(D1, SrcIsKill ? RegState::Kill : 0);
2522
2523 if (SrcIsKill) // Add an implicit kill for the Q register.
2524 MIB->addRegisterKilled(SrcReg, TRI, true);
2525
2526 TransferImpOps(MI, MIB, MIB);
2527 MIB.cloneMemRefs(MI);
2528 MI.eraseFromParent();
2529 return true;
2530 }
2531
2532 case ARM::VLD2q8Pseudo:
2533 case ARM::VLD2q16Pseudo:
2534 case ARM::VLD2q32Pseudo:
2535 case ARM::VLD2q8PseudoWB_fixed:
2536 case ARM::VLD2q16PseudoWB_fixed:
2537 case ARM::VLD2q32PseudoWB_fixed:
2538 case ARM::VLD2q8PseudoWB_register:
2539 case ARM::VLD2q16PseudoWB_register:
2540 case ARM::VLD2q32PseudoWB_register:
2541 case ARM::VLD3d8Pseudo:
2542 case ARM::VLD3d16Pseudo:
2543 case ARM::VLD3d32Pseudo:
2544 case ARM::VLD1d8TPseudo:
2545 case ARM::VLD1d16TPseudo:
2546 case ARM::VLD1d32TPseudo:
2547 case ARM::VLD1d64TPseudo:
2548 case ARM::VLD1d64TPseudoWB_fixed:
2549 case ARM::VLD1d64TPseudoWB_register:
2550 case ARM::VLD3d8Pseudo_UPD:
2551 case ARM::VLD3d16Pseudo_UPD:
2552 case ARM::VLD3d32Pseudo_UPD:
2553 case ARM::VLD3q8Pseudo_UPD:
2554 case ARM::VLD3q16Pseudo_UPD:
2555 case ARM::VLD3q32Pseudo_UPD:
2556 case ARM::VLD3q8oddPseudo:
2557 case ARM::VLD3q16oddPseudo:
2558 case ARM::VLD3q32oddPseudo:
2559 case ARM::VLD3q8oddPseudo_UPD:
2560 case ARM::VLD3q16oddPseudo_UPD:
2561 case ARM::VLD3q32oddPseudo_UPD:
2562 case ARM::VLD4d8Pseudo:
2563 case ARM::VLD4d16Pseudo:
2564 case ARM::VLD4d32Pseudo:
2565 case ARM::VLD1d8QPseudo:
2566 case ARM::VLD1d16QPseudo:
2567 case ARM::VLD1d32QPseudo:
2568 case ARM::VLD1d64QPseudo:
2569 case ARM::VLD1d64QPseudoWB_fixed:
2570 case ARM::VLD1d64QPseudoWB_register:
2571 case ARM::VLD1q8HighQPseudo:
2572 case ARM::VLD1q8LowQPseudo_UPD:
2573 case ARM::VLD1q8HighTPseudo:
2574 case ARM::VLD1q8LowTPseudo_UPD:
2575 case ARM::VLD1q16HighQPseudo:
2576 case ARM::VLD1q16LowQPseudo_UPD:
2577 case ARM::VLD1q16HighTPseudo:
2578 case ARM::VLD1q16LowTPseudo_UPD:
2579 case ARM::VLD1q32HighQPseudo:
2580 case ARM::VLD1q32LowQPseudo_UPD:
2581 case ARM::VLD1q32HighTPseudo:
2582 case ARM::VLD1q32LowTPseudo_UPD:
2583 case ARM::VLD1q64HighQPseudo:
2584 case ARM::VLD1q64LowQPseudo_UPD:
2585 case ARM::VLD1q64HighTPseudo:
2586 case ARM::VLD1q64LowTPseudo_UPD:
2587 case ARM::VLD4d8Pseudo_UPD:
2588 case ARM::VLD4d16Pseudo_UPD:
2589 case ARM::VLD4d32Pseudo_UPD:
2590 case ARM::VLD4q8Pseudo_UPD:
2591 case ARM::VLD4q16Pseudo_UPD:
2592 case ARM::VLD4q32Pseudo_UPD:
2593 case ARM::VLD4q8oddPseudo:
2594 case ARM::VLD4q16oddPseudo:
2595 case ARM::VLD4q32oddPseudo:
2596 case ARM::VLD4q8oddPseudo_UPD:
2597 case ARM::VLD4q16oddPseudo_UPD:
2598 case ARM::VLD4q32oddPseudo_UPD:
2599 case ARM::VLD3DUPd8Pseudo:
2600 case ARM::VLD3DUPd16Pseudo:
2601 case ARM::VLD3DUPd32Pseudo:
2602 case ARM::VLD3DUPd8Pseudo_UPD:
2603 case ARM::VLD3DUPd16Pseudo_UPD:
2604 case ARM::VLD3DUPd32Pseudo_UPD:
2605 case ARM::VLD4DUPd8Pseudo:
2606 case ARM::VLD4DUPd16Pseudo:
2607 case ARM::VLD4DUPd32Pseudo:
2608 case ARM::VLD4DUPd8Pseudo_UPD:
2609 case ARM::VLD4DUPd16Pseudo_UPD:
2610 case ARM::VLD4DUPd32Pseudo_UPD:
2611 case ARM::VLD2DUPq8EvenPseudo:
2612 case ARM::VLD2DUPq8OddPseudo:
2613 case ARM::VLD2DUPq16EvenPseudo:
2614 case ARM::VLD2DUPq16OddPseudo:
2615 case ARM::VLD2DUPq32EvenPseudo:
2616 case ARM::VLD2DUPq32OddPseudo:
2617 case ARM::VLD3DUPq8EvenPseudo:
2618 case ARM::VLD3DUPq8OddPseudo:
2619 case ARM::VLD3DUPq16EvenPseudo:
2620 case ARM::VLD3DUPq16OddPseudo:
2621 case ARM::VLD3DUPq32EvenPseudo:
2622 case ARM::VLD3DUPq32OddPseudo:
2623 case ARM::VLD4DUPq8EvenPseudo:
2624 case ARM::VLD4DUPq8OddPseudo:
2625 case ARM::VLD4DUPq16EvenPseudo:
2626 case ARM::VLD4DUPq16OddPseudo:
2627 case ARM::VLD4DUPq32EvenPseudo:
2628 case ARM::VLD4DUPq32OddPseudo:
2629 ExpandVLD(MBBI);
2630 return true;
2631
    case ARM::VST2q8Pseudo:
    case ARM::VST2q16Pseudo:
    case ARM::VST2q32Pseudo:
    case ARM::VST2q8PseudoWB_fixed:
    case ARM::VST2q16PseudoWB_fixed:
    case ARM::VST2q32PseudoWB_fixed:
    case ARM::VST2q8PseudoWB_register:
    case ARM::VST2q16PseudoWB_register:
    case ARM::VST2q32PseudoWB_register:
    case ARM::VST3d8Pseudo:
    case ARM::VST3d16Pseudo:
    case ARM::VST3d32Pseudo:
    case ARM::VST1d8TPseudo:
    case ARM::VST1d16TPseudo:
    case ARM::VST1d32TPseudo:
    case ARM::VST1d64TPseudo:
    case ARM::VST3d8Pseudo_UPD:
    case ARM::VST3d16Pseudo_UPD:
    case ARM::VST3d32Pseudo_UPD:
    case ARM::VST1d64TPseudoWB_fixed:
    case ARM::VST1d64TPseudoWB_register:
    case ARM::VST3q8Pseudo_UPD:
    case ARM::VST3q16Pseudo_UPD:
    case ARM::VST3q32Pseudo_UPD:
    case ARM::VST3q8oddPseudo:
    case ARM::VST3q16oddPseudo:
    case ARM::VST3q32oddPseudo:
    case ARM::VST3q8oddPseudo_UPD:
    case ARM::VST3q16oddPseudo_UPD:
    case ARM::VST3q32oddPseudo_UPD:
    case ARM::VST4d8Pseudo:
    case ARM::VST4d16Pseudo:
    case ARM::VST4d32Pseudo:
    case ARM::VST1d8QPseudo:
    case ARM::VST1d16QPseudo:
    case ARM::VST1d32QPseudo:
    case ARM::VST1d64QPseudo:
    case ARM::VST4d8Pseudo_UPD:
    case ARM::VST4d16Pseudo_UPD:
    case ARM::VST4d32Pseudo_UPD:
    case ARM::VST1d64QPseudoWB_fixed:
    case ARM::VST1d64QPseudoWB_register:
    case ARM::VST1q8HighQPseudo:
    case ARM::VST1q8LowQPseudo_UPD:
    case ARM::VST1q8HighTPseudo:
    case ARM::VST1q8LowTPseudo_UPD:
    case ARM::VST1q16HighQPseudo:
    case ARM::VST1q16LowQPseudo_UPD:
    case ARM::VST1q16HighTPseudo:
    case ARM::VST1q16LowTPseudo_UPD:
    case ARM::VST1q32HighQPseudo:
    case ARM::VST1q32LowQPseudo_UPD:
    case ARM::VST1q32HighTPseudo:
    case ARM::VST1q32LowTPseudo_UPD:
    case ARM::VST1q64HighQPseudo:
    case ARM::VST1q64LowQPseudo_UPD:
    case ARM::VST1q64HighTPseudo:
    case ARM::VST1q64LowTPseudo_UPD:
    case ARM::VST4q8Pseudo_UPD:
    case ARM::VST4q16Pseudo_UPD:
    case ARM::VST4q32Pseudo_UPD:
    case ARM::VST4q8oddPseudo:
    case ARM::VST4q16oddPseudo:
    case ARM::VST4q32oddPseudo:
    case ARM::VST4q8oddPseudo_UPD:
    case ARM::VST4q16oddPseudo_UPD:
    case ARM::VST4q32oddPseudo_UPD:
      ExpandVST(MBBI);
      return true;

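    // Lane-indexed NEON load/store pseudos (VLDnLN/VSTnLN): ExpandLaneOp
    // rewrites them into the real per-lane instructions.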
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD3LNd8Pseudo:
    case ARM::VLD3LNd16Pseudo:
    case ARM::VLD3LNd32Pseudo:
    case ARM::VLD3LNq16Pseudo:
    case ARM::VLD3LNq32Pseudo:
    case ARM::VLD3LNd8Pseudo_UPD:
    case ARM::VLD3LNd16Pseudo_UPD:
    case ARM::VLD3LNd32Pseudo_UPD:
    case ARM::VLD3LNq16Pseudo_UPD:
    case ARM::VLD3LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
    case ARM::VST1LNq8Pseudo:
    case ARM::VST1LNq16Pseudo:
    case ARM::VST1LNq32Pseudo:
    case ARM::VST1LNq8Pseudo_UPD:
    case ARM::VST1LNq16Pseudo_UPD:
    case ARM::VST1LNq32Pseudo_UPD:
    case ARM::VST2LNd8Pseudo:
    case ARM::VST2LNd16Pseudo:
    case ARM::VST2LNd32Pseudo:
    case ARM::VST2LNq16Pseudo:
    case ARM::VST2LNq32Pseudo:
    case ARM::VST2LNd8Pseudo_UPD:
    case ARM::VST2LNd16Pseudo_UPD:
    case ARM::VST2LNd32Pseudo_UPD:
    case ARM::VST2LNq16Pseudo_UPD:
    case ARM::VST2LNq32Pseudo_UPD:
    case ARM::VST3LNd8Pseudo:
    case ARM::VST3LNd16Pseudo:
    case ARM::VST3LNd32Pseudo:
    case ARM::VST3LNq16Pseudo:
    case ARM::VST3LNq32Pseudo:
    case ARM::VST3LNd8Pseudo_UPD:
    case ARM::VST3LNd16Pseudo_UPD:
    case ARM::VST3LNd32Pseudo_UPD:
    case ARM::VST3LNq16Pseudo_UPD:
    case ARM::VST3LNq32Pseudo_UPD:
    case ARM::VST4LNd8Pseudo:
    case ARM::VST4LNd16Pseudo:
    case ARM::VST4LNd32Pseudo:
    case ARM::VST4LNq16Pseudo:
    case ARM::VST4LNq32Pseudo:
    case ARM::VST4LNd8Pseudo_UPD:
    case ARM::VST4LNd16Pseudo_UPD:
    case ARM::VST4LNd32Pseudo_UPD:
    case ARM::VST4LNq16Pseudo_UPD:
    case ARM::VST4LNq32Pseudo_UPD:
      ExpandLaneOp(MBBI);
      return true;

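    // Table-lookup pseudos over 3- and 4-register tables; the boolean passed
    // to ExpandVTBL selects the extending form (VTBX) rather than VTBL.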
    case ARM::VTBL3Pseudo: ExpandVTBL(MBBI, ARM::VTBL3, false); return true;
    case ARM::VTBL4Pseudo: ExpandVTBL(MBBI, ARM::VTBL4, false); return true;
    case ARM::VTBX3Pseudo: ExpandVTBL(MBBI, ARM::VTBX3, true); return true;
    case ARM::VTBX4Pseudo: ExpandVTBL(MBBI, ARM::VTBX4, true); return true;

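    // Atomic compare-and-swap pseudos: ExpandCMP_SWAP emits a
    // load-exclusive/store-exclusive sequence, choosing the Thumb-2 or ARM
    // opcodes as appropriate; the extra zero-extend opcode is used for the
    // sub-word variants (0 means no extension is needed).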
    case ARM::CMP_SWAP_8:
      if (STI->isThumb())
        return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXB, ARM::t2STREXB,
                              ARM::tUXTB, NextMBBI);
      else
        return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXB, ARM::STREXB,
                              ARM::UXTB, NextMBBI);
    case ARM::CMP_SWAP_16:
      if (STI->isThumb())
        return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREXH, ARM::t2STREXH,
                              ARM::tUXTH, NextMBBI);
      else
        return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREXH, ARM::STREXH,
                              ARM::UXTH, NextMBBI);
    case ARM::CMP_SWAP_32:
      if (STI->isThumb())
        return ExpandCMP_SWAP(MBB, MBBI, ARM::t2LDREX, ARM::t2STREX, 0,
                              NextMBBI);
      else
        return ExpandCMP_SWAP(MBB, MBBI, ARM::LDREX, ARM::STREX, 0, NextMBBI);

    case ARM::CMP_SWAP_64:
      return ExpandCMP_SWAP_64(MBB, MBBI, NextMBBI);

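    // Call pseudos used for mcount-style profiling (__gnu_mcount_nc): push LR
    // and then emit the real tBL/BL, forwarding the original call operands
    // and memory references.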
    case ARM::tBL_PUSHLR:
    case ARM::BL_PUSHLR: {
      const bool Thumb = Opcode == ARM::tBL_PUSHLR;
      Register Reg = MI.getOperand(0).getReg();
      assert(Reg == ARM::LR && "expect LR register!");
      MachineInstrBuilder MIB;
      if (Thumb) {
        // push {lr}
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPUSH))
            .add(predOps(ARMCC::AL))
            .addReg(Reg);

        // bl __gnu_mcount_nc
        MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tBL));
      } else {
        // stmdb sp!, {lr}
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::STMDB_UPD))
            .addReg(ARM::SP, RegState::Define)
            .addReg(ARM::SP)
            .add(predOps(ARMCC::AL))
            .addReg(Reg);

        // bl __gnu_mcount_nc
        MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::BL));
      }
      MIB.cloneMemRefs(MI);
      for (unsigned i = 1; i < MI.getNumOperands(); ++i)
        MIB.add(MI.getOperand(i));
      MI.eraseFromParent();
      return true;
    }
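    // LOADDUAL/STOREDUAL operate on a GPRPair register: split the pair into
    // its two sub-registers and emit a real LDRD or STRD with the remaining
    // operands.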
    case ARM::LOADDUAL:
    case ARM::STOREDUAL: {
      Register PairReg = MI.getOperand(0).getReg();

      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI, MI.getDebugLoc(),
                  TII->get(Opcode == ARM::LOADDUAL ? ARM::LDRD : ARM::STRD))
              .addReg(TRI->getSubReg(PairReg, ARM::gsub_0),
                      Opcode == ARM::LOADDUAL ? RegState::Define : 0)
              .addReg(TRI->getSubReg(PairReg, ARM::gsub_1),
                      Opcode == ARM::LOADDUAL ? RegState::Define : 0);
      for (unsigned i = 1; i < MI.getNumOperands(); i++)
        MIB.add(MI.getOperand(i));
      MIB.add(predOps(ARMCC::AL));
      MIB.cloneMemRefs(MI);
      MI.eraseFromParent();
      return true;
    }
  }
}

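/// Expand all pseudo instructions in \p MBB. The iterator for the next
/// instruction is captured before each expansion so the walk stays valid when
/// ExpandMI erases or replaces the current instruction.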
bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= ExpandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

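/// Per-function driver: cache the subtarget, instruction, and register info,
/// expand every basic block, and optionally run the machine verifier when
/// VerifyARMPseudo is set.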
bool ARMExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  AFI = MF.getInfo<ARMFunctionInfo>();

  LLVM_DEBUG(dbgs() << "********** ARM EXPAND PSEUDO INSTRUCTIONS **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  bool Modified = false;
  for (MachineBasicBlock &MBB : MF)
    Modified |= ExpandMBB(MBB);
  if (VerifyARMPseudo)
    MF.verify(this, "After expanding ARM pseudo instructions.");

  LLVM_DEBUG(dbgs() << "***************************************************\n");
  return Modified;
}

/// createARMExpandPseudoPass - returns an instance of the pseudo instruction
/// expansion pass.
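/// Intended to be scheduled by the ARM target itself, typically via
/// addPass(createARMExpandPseudoPass()) in its pass configuration; other
/// clients rarely need to construct it directly.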
FunctionPass *llvm::createARMExpandPseudoPass() {
  return new ARMExpandPseudo();
}