1 //=- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -*- C++ -*-=//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains a pass that performs load / store related peephole
11 // optimizations. This pass should be run after register allocation.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "AArch64InstrInfo.h"
16 #include "AArch64Subtarget.h"
17 #include "MCTargetDesc/AArch64AddressingModes.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/CodeGen/MachineBasicBlock.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/ErrorHandling.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 using namespace llvm;
33
34 #define DEBUG_TYPE "aarch64-ldst-opt"
35
36 STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
37 STATISTIC(NumPostFolded, "Number of post-index updates folded");
38 STATISTIC(NumPreFolded, "Number of pre-index updates folded");
39 STATISTIC(NumUnscaledPairCreated,
40 "Number of load/store from unscaled generated");
41 STATISTIC(NumNarrowLoadsPromoted, "Number of narrow loads promoted");
42 STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
43 STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");
44
45 // The LdStLimit limits how far we search for load/store pairs.
46 static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
47 cl::init(20), cl::Hidden);
48
49 // The UpdateLimit limits how far we search for update instructions when we form
50 // pre-/post-index instructions.
51 static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
52 cl::Hidden);
53
54 static cl::opt<bool> EnableNarrowLdMerge("enable-narrow-ld-merge", cl::Hidden,
55 cl::init(false),
56 cl::desc("Enable narrow load merge"));
57
58 namespace llvm {
59 void initializeAArch64LoadStoreOptPass(PassRegistry &);
60 }
61
62 #define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"
63
64 namespace {
65
66 typedef struct LdStPairFlags {
67 // If a matching instruction is found, MergeForward is set to true if the
68 // merge is to remove the first instruction and replace the second with
69 // a pair-wise insn, and false if the reverse is true.
70 bool MergeForward;
71
72 // SExtIdx gives the index of the result of the load pair that must be
73 // extended. The value of SExtIdx assumes that the paired load produces the
74 // value in this order: (I, returned iterator), i.e., -1 means no value has
75 // to be extended, 0 means I, and 1 means the returned iterator.
76 int SExtIdx;
77
78 LdStPairFlags() : MergeForward(false), SExtIdx(-1) {}
79
80 void setMergeForward(bool V = true) { MergeForward = V; }
81 bool getMergeForward() const { return MergeForward; }
82
83 void setSExtIdx(int V) { SExtIdx = V; }
84 int getSExtIdx() const { return SExtIdx; }
85
86 } LdStPairFlags;
87
88 struct AArch64LoadStoreOpt : public MachineFunctionPass {
89 static char ID;
90 AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
91 initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
92 }
93
94 const AArch64InstrInfo *TII;
95 const TargetRegisterInfo *TRI;
96 const AArch64Subtarget *Subtarget;
97
98 // Track which registers have been modified and used.
99 BitVector ModifiedRegs, UsedRegs;
100
101 // Scan the instructions looking for a load/store that can be combined
102 // with the current instruction into a load/store pair.
103 // Return the matching instruction if one is found, else MBB->end().
104 MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
105 LdStPairFlags &Flags,
106 unsigned Limit,
107 bool FindNarrowMerge);
108
109 // Scan the instructions looking for a store that writes to the address from
110 // which the current load instruction reads. Return true if one is found.
111 bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
112 MachineBasicBlock::iterator &StoreI);
113
114 // Merge the two instructions indicated into a wider instruction.
115 MachineBasicBlock::iterator
116 mergeNarrowInsns(MachineBasicBlock::iterator I,
117 MachineBasicBlock::iterator MergeMI,
118 const LdStPairFlags &Flags);
119
120 // Merge the two instructions indicated into a single pair-wise instruction.
121 MachineBasicBlock::iterator
122 mergePairedInsns(MachineBasicBlock::iterator I,
123 MachineBasicBlock::iterator Paired,
124 const LdStPairFlags &Flags);
125
126 // Promote the load that reads directly from the address stored to.
127 MachineBasicBlock::iterator
128 promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
129 MachineBasicBlock::iterator StoreI);
130
131 // Scan the instruction list to find a base register update that can
132 // be combined with the current instruction (a load or store) using
133 // pre or post indexed addressing with writeback. Scan forwards.
134 MachineBasicBlock::iterator
135 findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
136 int UnscaledOffset, unsigned Limit);
137
138 // Scan the instruction list to find a base register update that can
139 // be combined with the current instruction (a load or store) using
140 // pre or post indexed addressing with writeback. Scan backwards.
141 MachineBasicBlock::iterator
142 findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);
143
144 // Find an instruction that updates the base register of the ld/st
145 // instruction.
146 bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
147 unsigned BaseReg, int Offset);
148
149 // Merge a pre- or post-index base register update into a ld/st instruction.
150 MachineBasicBlock::iterator
151 mergeUpdateInsn(MachineBasicBlock::iterator I,
152 MachineBasicBlock::iterator Update, bool IsPreIdx);
153
154 // Find and merge foldable ldr/str instructions.
155 bool tryToMergeLdStInst(MachineBasicBlock::iterator &MBBI);
156
157 // Find and pair ldr/str instructions.
158 bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);
159
160 // Find and promote load instructions which read directly from a store.
161 bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);
162
163 bool optimizeBlock(MachineBasicBlock &MBB, bool enableNarrowLdOpt);
164
165 bool runOnMachineFunction(MachineFunction &Fn) override;
166
167 MachineFunctionProperties getRequiredProperties() const override {
168 return MachineFunctionProperties().set(
169 MachineFunctionProperties::Property::AllVRegsAllocated);
170 }
171
172 const char *getPassName() const override {
173 return AARCH64_LOAD_STORE_OPT_NAME;
174 }
175 };
176 char AArch64LoadStoreOpt::ID = 0;
177 } // namespace
178
179 INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
180 AARCH64_LOAD_STORE_OPT_NAME, false, false)
181
182 static unsigned getBitExtrOpcode(MachineInstr &MI) {
183 switch (MI.getOpcode()) {
184 default:
185 llvm_unreachable("Unexpected opcode.");
186 case AArch64::LDRBBui:
187 case AArch64::LDURBBi:
188 case AArch64::LDRHHui:
189 case AArch64::LDURHHi:
190 return AArch64::UBFMWri;
191 case AArch64::LDRSBWui:
192 case AArch64::LDURSBWi:
193 case AArch64::LDRSHWui:
194 case AArch64::LDURSHWi:
195 return AArch64::SBFMWri;
196 }
197 }
198
199 static bool isNarrowStore(unsigned Opc) {
200 switch (Opc) {
201 default:
202 return false;
203 case AArch64::STRBBui:
204 case AArch64::STURBBi:
205 case AArch64::STRHHui:
206 case AArch64::STURHHi:
207 return true;
208 }
209 }
210
211 static bool isNarrowLoad(unsigned Opc) {
212 switch (Opc) {
213 default:
214 return false;
215 case AArch64::LDRHHui:
216 case AArch64::LDURHHi:
217 case AArch64::LDRBBui:
218 case AArch64::LDURBBi:
219 case AArch64::LDRSHWui:
220 case AArch64::LDURSHWi:
221 case AArch64::LDRSBWui:
222 case AArch64::LDURSBWi:
223 return true;
224 }
225 }
226
227 static bool isNarrowLoad(MachineInstr &MI) {
228 return isNarrowLoad(MI.getOpcode());
229 }
230
231 static bool isNarrowLoadOrStore(unsigned Opc) {
232 return isNarrowLoad(Opc) || isNarrowStore(Opc);
233 }
234
235 // Scaling factor for unscaled load or store, i.e., the size in bytes of the memory access.
236 static int getMemScale(MachineInstr &MI) {
237 switch (MI.getOpcode()) {
238 default:
239 llvm_unreachable("Opcode has unknown scale!");
240 case AArch64::LDRBBui:
241 case AArch64::LDURBBi:
242 case AArch64::LDRSBWui:
243 case AArch64::LDURSBWi:
244 case AArch64::STRBBui:
245 case AArch64::STURBBi:
246 return 1;
247 case AArch64::LDRHHui:
248 case AArch64::LDURHHi:
249 case AArch64::LDRSHWui:
250 case AArch64::LDURSHWi:
251 case AArch64::STRHHui:
252 case AArch64::STURHHi:
253 return 2;
254 case AArch64::LDRSui:
255 case AArch64::LDURSi:
256 case AArch64::LDRSWui:
257 case AArch64::LDURSWi:
258 case AArch64::LDRWui:
259 case AArch64::LDURWi:
260 case AArch64::STRSui:
261 case AArch64::STURSi:
262 case AArch64::STRWui:
263 case AArch64::STURWi:
264 case AArch64::LDPSi:
265 case AArch64::LDPSWi:
266 case AArch64::LDPWi:
267 case AArch64::STPSi:
268 case AArch64::STPWi:
269 return 4;
270 case AArch64::LDRDui:
271 case AArch64::LDURDi:
272 case AArch64::LDRXui:
273 case AArch64::LDURXi:
274 case AArch64::STRDui:
275 case AArch64::STURDi:
276 case AArch64::STRXui:
277 case AArch64::STURXi:
278 case AArch64::LDPDi:
279 case AArch64::LDPXi:
280 case AArch64::STPDi:
281 case AArch64::STPXi:
282 return 8;
283 case AArch64::LDRQui:
284 case AArch64::LDURQi:
285 case AArch64::STRQui:
286 case AArch64::STURQi:
287 case AArch64::LDPQi:
288 case AArch64::STPQi:
289 return 16;
290 }
291 }
292
293 static unsigned getMatchingNonSExtOpcode(unsigned Opc,
294 bool *IsValidLdStrOpc = nullptr) {
295 if (IsValidLdStrOpc)
296 *IsValidLdStrOpc = true;
297 switch (Opc) {
298 default:
299 if (IsValidLdStrOpc)
300 *IsValidLdStrOpc = false;
301 return UINT_MAX;
302 case AArch64::STRDui:
303 case AArch64::STURDi:
304 case AArch64::STRQui:
305 case AArch64::STURQi:
306 case AArch64::STRBBui:
307 case AArch64::STURBBi:
308 case AArch64::STRHHui:
309 case AArch64::STURHHi:
310 case AArch64::STRWui:
311 case AArch64::STURWi:
312 case AArch64::STRXui:
313 case AArch64::STURXi:
314 case AArch64::LDRDui:
315 case AArch64::LDURDi:
316 case AArch64::LDRQui:
317 case AArch64::LDURQi:
318 case AArch64::LDRWui:
319 case AArch64::LDURWi:
320 case AArch64::LDRXui:
321 case AArch64::LDURXi:
322 case AArch64::STRSui:
323 case AArch64::STURSi:
324 case AArch64::LDRSui:
325 case AArch64::LDURSi:
326 case AArch64::LDRHHui:
327 case AArch64::LDURHHi:
328 case AArch64::LDRBBui:
329 case AArch64::LDURBBi:
330 return Opc;
331 case AArch64::LDRSWui:
332 return AArch64::LDRWui;
333 case AArch64::LDURSWi:
334 return AArch64::LDURWi;
335 case AArch64::LDRSBWui:
336 return AArch64::LDRBBui;
337 case AArch64::LDRSHWui:
338 return AArch64::LDRHHui;
339 case AArch64::LDURSBWi:
340 return AArch64::LDURBBi;
341 case AArch64::LDURSHWi:
342 return AArch64::LDURHHi;
343 }
344 }
345
346 static unsigned getMatchingWideOpcode(unsigned Opc) {
347 switch (Opc) {
348 default:
349 llvm_unreachable("Opcode has no wide equivalent!");
350 case AArch64::STRBBui:
351 return AArch64::STRHHui;
352 case AArch64::STRHHui:
353 return AArch64::STRWui;
354 case AArch64::STURBBi:
355 return AArch64::STURHHi;
356 case AArch64::STURHHi:
357 return AArch64::STURWi;
358 case AArch64::STURWi:
359 return AArch64::STURXi;
360 case AArch64::STRWui:
361 return AArch64::STRXui;
362 case AArch64::LDRHHui:
363 case AArch64::LDRSHWui:
364 return AArch64::LDRWui;
365 case AArch64::LDURHHi:
366 case AArch64::LDURSHWi:
367 return AArch64::LDURWi;
368 case AArch64::LDRBBui:
369 case AArch64::LDRSBWui:
370 return AArch64::LDRHHui;
371 case AArch64::LDURBBi:
372 case AArch64::LDURSBWi:
373 return AArch64::LDURHHi;
374 }
375 }
376
377 static unsigned getMatchingPairOpcode(unsigned Opc) {
378 switch (Opc) {
379 default:
380 llvm_unreachable("Opcode has no pairwise equivalent!");
381 case AArch64::STRSui:
382 case AArch64::STURSi:
383 return AArch64::STPSi;
384 case AArch64::STRDui:
385 case AArch64::STURDi:
386 return AArch64::STPDi;
387 case AArch64::STRQui:
388 case AArch64::STURQi:
389 return AArch64::STPQi;
390 case AArch64::STRWui:
391 case AArch64::STURWi:
392 return AArch64::STPWi;
393 case AArch64::STRXui:
394 case AArch64::STURXi:
395 return AArch64::STPXi;
396 case AArch64::LDRSui:
397 case AArch64::LDURSi:
398 return AArch64::LDPSi;
399 case AArch64::LDRDui:
400 case AArch64::LDURDi:
401 return AArch64::LDPDi;
402 case AArch64::LDRQui:
403 case AArch64::LDURQi:
404 return AArch64::LDPQi;
405 case AArch64::LDRWui:
406 case AArch64::LDURWi:
407 return AArch64::LDPWi;
408 case AArch64::LDRXui:
409 case AArch64::LDURXi:
410 return AArch64::LDPXi;
411 case AArch64::LDRSWui:
412 case AArch64::LDURSWi:
413 return AArch64::LDPSWi;
414 }
415 }
416
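// A load can take its value from an earlier store of equal or greater width,
// provided both use the same addressing form (scaled vs. unscaled immediate).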
417 static unsigned isMatchingStore(MachineInstr &LoadInst,
418 MachineInstr &StoreInst) {
419 unsigned LdOpc = LoadInst.getOpcode();
420 unsigned StOpc = StoreInst.getOpcode();
421 switch (LdOpc) {
422 default:
423 llvm_unreachable("Unsupported load instruction!");
424 case AArch64::LDRBBui:
425 return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
426 StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
427 case AArch64::LDURBBi:
428 return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
429 StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
430 case AArch64::LDRHHui:
431 return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
432 StOpc == AArch64::STRXui;
433 case AArch64::LDURHHi:
434 return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
435 StOpc == AArch64::STURXi;
436 case AArch64::LDRWui:
437 return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
438 case AArch64::LDURWi:
439 return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
440 case AArch64::LDRXui:
441 return StOpc == AArch64::STRXui;
442 case AArch64::LDURXi:
443 return StOpc == AArch64::STURXi;
444 }
445 }
446
447 static unsigned getPreIndexedOpcode(unsigned Opc) {
448 switch (Opc) {
449 default:
450 llvm_unreachable("Opcode has no pre-indexed equivalent!");
451 case AArch64::STRSui:
452 return AArch64::STRSpre;
453 case AArch64::STRDui:
454 return AArch64::STRDpre;
455 case AArch64::STRQui:
456 return AArch64::STRQpre;
457 case AArch64::STRBBui:
458 return AArch64::STRBBpre;
459 case AArch64::STRHHui:
460 return AArch64::STRHHpre;
461 case AArch64::STRWui:
462 return AArch64::STRWpre;
463 case AArch64::STRXui:
464 return AArch64::STRXpre;
465 case AArch64::LDRSui:
466 return AArch64::LDRSpre;
467 case AArch64::LDRDui:
468 return AArch64::LDRDpre;
469 case AArch64::LDRQui:
470 return AArch64::LDRQpre;
471 case AArch64::LDRBBui:
472 return AArch64::LDRBBpre;
473 case AArch64::LDRHHui:
474 return AArch64::LDRHHpre;
475 case AArch64::LDRWui:
476 return AArch64::LDRWpre;
477 case AArch64::LDRXui:
478 return AArch64::LDRXpre;
479 case AArch64::LDRSWui:
480 return AArch64::LDRSWpre;
481 case AArch64::LDPSi:
482 return AArch64::LDPSpre;
483 case AArch64::LDPSWi:
484 return AArch64::LDPSWpre;
485 case AArch64::LDPDi:
486 return AArch64::LDPDpre;
487 case AArch64::LDPQi:
488 return AArch64::LDPQpre;
489 case AArch64::LDPWi:
490 return AArch64::LDPWpre;
491 case AArch64::LDPXi:
492 return AArch64::LDPXpre;
493 case AArch64::STPSi:
494 return AArch64::STPSpre;
495 case AArch64::STPDi:
496 return AArch64::STPDpre;
497 case AArch64::STPQi:
498 return AArch64::STPQpre;
499 case AArch64::STPWi:
500 return AArch64::STPWpre;
501 case AArch64::STPXi:
502 return AArch64::STPXpre;
503 }
504 }
505
506 static unsigned getPostIndexedOpcode(unsigned Opc) {
507 switch (Opc) {
508 default:
509 llvm_unreachable("Opcode has no post-indexed wise equivalent!");
510 case AArch64::STRSui:
511 return AArch64::STRSpost;
512 case AArch64::STRDui:
513 return AArch64::STRDpost;
514 case AArch64::STRQui:
515 return AArch64::STRQpost;
516 case AArch64::STRBBui:
517 return AArch64::STRBBpost;
518 case AArch64::STRHHui:
519 return AArch64::STRHHpost;
520 case AArch64::STRWui:
521 return AArch64::STRWpost;
522 case AArch64::STRXui:
523 return AArch64::STRXpost;
524 case AArch64::LDRSui:
525 return AArch64::LDRSpost;
526 case AArch64::LDRDui:
527 return AArch64::LDRDpost;
528 case AArch64::LDRQui:
529 return AArch64::LDRQpost;
530 case AArch64::LDRBBui:
531 return AArch64::LDRBBpost;
532 case AArch64::LDRHHui:
533 return AArch64::LDRHHpost;
534 case AArch64::LDRWui:
535 return AArch64::LDRWpost;
536 case AArch64::LDRXui:
537 return AArch64::LDRXpost;
538 case AArch64::LDRSWui:
539 return AArch64::LDRSWpost;
540 case AArch64::LDPSi:
541 return AArch64::LDPSpost;
542 case AArch64::LDPSWi:
543 return AArch64::LDPSWpost;
544 case AArch64::LDPDi:
545 return AArch64::LDPDpost;
546 case AArch64::LDPQi:
547 return AArch64::LDPQpost;
548 case AArch64::LDPWi:
549 return AArch64::LDPWpost;
550 case AArch64::LDPXi:
551 return AArch64::LDPXpost;
552 case AArch64::STPSi:
553 return AArch64::STPSpost;
554 case AArch64::STPDi:
555 return AArch64::STPDpost;
556 case AArch64::STPQi:
557 return AArch64::STPQpost;
558 case AArch64::STPWi:
559 return AArch64::STPWpost;
560 case AArch64::STPXi:
561 return AArch64::STPXpost;
562 }
563 }
564
565 static bool isPairedLdSt(const MachineInstr &MI) {
566 switch (MI.getOpcode()) {
567 default:
568 return false;
569 case AArch64::LDPSi:
570 case AArch64::LDPSWi:
571 case AArch64::LDPDi:
572 case AArch64::LDPQi:
573 case AArch64::LDPWi:
574 case AArch64::LDPXi:
575 case AArch64::STPSi:
576 case AArch64::STPDi:
577 case AArch64::STPQi:
578 case AArch64::STPWi:
579 case AArch64::STPXi:
580 return true;
581 }
582 }
583
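// The load/store instructions handled by this pass have the operand layout
// (Rt[, Rt2], base register, immediate offset); the helpers below index into
// that layout.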
584 static const MachineOperand &getLdStRegOp(const MachineInstr &MI,
585 unsigned PairedRegOp = 0) {
586 assert(PairedRegOp < 2 && "Unexpected register operand idx.");
587 unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
588 return MI.getOperand(Idx);
589 }
590
591 static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
592 unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
593 return MI.getOperand(Idx);
594 }
595
596 static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
597 unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
598 return MI.getOperand(Idx);
599 }
600
601 static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
602 MachineInstr &StoreInst,
603 const AArch64InstrInfo *TII) {
604 assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
605 int LoadSize = getMemScale(LoadInst);
606 int StoreSize = getMemScale(StoreInst);
607 int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
608 ? getLdStOffsetOp(StoreInst).getImm()
609 : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
610 int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
611 ? getLdStOffsetOp(LoadInst).getImm()
612 : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
613 return (UnscaledStOffset <= UnscaledLdOffset) &&
614 (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
615 }
616
617 static bool isPromotableZeroStoreOpcode(unsigned Opc) {
618 return isNarrowStore(Opc) || Opc == AArch64::STRWui || Opc == AArch64::STURWi;
619 }
620
621 static bool isPromotableZeroStoreOpcode(MachineInstr &MI) {
622 return isPromotableZeroStoreOpcode(MI.getOpcode());
623 }
624
625 static bool isPromotableZeroStoreInst(MachineInstr &MI) {
626 return (isPromotableZeroStoreOpcode(MI)) &&
627 getLdStRegOp(MI).getReg() == AArch64::WZR;
628 }
629
630 MachineBasicBlock::iterator
631 AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
632 MachineBasicBlock::iterator MergeMI,
633 const LdStPairFlags &Flags) {
634 MachineBasicBlock::iterator NextI = I;
635 ++NextI;
636 // If NextI is the second of the two instructions to be merged, we need
637 // to skip one further. Either way, the merge will invalidate the iterator,
638 // and we don't need to scan the new instruction, as it's a pairwise
639 // instruction, which we're not considering for further action anyway.
640 if (NextI == MergeMI)
641 ++NextI;
642
643 unsigned Opc = I->getOpcode();
644 bool IsScaled = !TII->isUnscaledLdSt(Opc);
645 int OffsetStride = IsScaled ? 1 : getMemScale(*I);
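// OffsetStride is the expected difference between the offsets of two adjacent
// accesses: 1 in the scaled form, the access size in bytes in the unscaled form.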
646
647 bool MergeForward = Flags.getMergeForward();
648 // Insert our new paired instruction after whichever of the paired
649 // instructions MergeForward indicates.
650 MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
651 // MergeForward also determines from which instruction we copy the base
652 // register operand, so that its flags stay compatible with the input code.
653 const MachineOperand &BaseRegOp =
654 MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);
655
656 // Which register is Rt and which is Rt2 depends on the offset order.
657 MachineInstr *RtMI, *Rt2MI;
658 if (getLdStOffsetOp(*I).getImm() ==
659 getLdStOffsetOp(*MergeMI).getImm() + OffsetStride) {
660 RtMI = &*MergeMI;
661 Rt2MI = &*I;
662 } else {
663 RtMI = &*I;
664 Rt2MI = &*MergeMI;
665 }
666
667 int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
668 // Convert the scaled offset from the narrow type's units to the wide type's.
669 if (IsScaled) {
670 assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
671 OffsetImm /= 2;
672 }
673
674 DebugLoc DL = I->getDebugLoc();
675 MachineBasicBlock *MBB = I->getParent();
676 if (isNarrowLoad(Opc)) {
677 MachineInstr *RtNewDest = &*(MergeForward ? I : MergeMI);
678 // When merging small (< 32 bit) loads for big-endian targets, the order of
679 // the component parts gets swapped.
680 if (!Subtarget->isLittleEndian())
681 std::swap(RtMI, Rt2MI);
682 // Construct the new load instruction.
683 MachineInstr *NewMemMI, *BitExtMI1, *BitExtMI2;
684 NewMemMI =
685 BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
686 .addOperand(getLdStRegOp(*RtNewDest))
687 .addOperand(BaseRegOp)
688 .addImm(OffsetImm)
689 .setMemRefs(I->mergeMemRefsWith(*MergeMI));
690 (void)NewMemMI;
691
692 DEBUG(
693 dbgs()
694 << "Creating the new load and extract. Replacing instructions:\n ");
695 DEBUG(I->print(dbgs()));
696 DEBUG(dbgs() << " ");
697 DEBUG(MergeMI->print(dbgs()));
698 DEBUG(dbgs() << " with instructions:\n ");
699 DEBUG((NewMemMI)->print(dbgs()));
700
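// The wide load now holds both narrow values: one in bits [Width-1:0] and the
// other in bits [2*Width-1:Width]. The immr/imms values below select those
// ranges for the UBFM/SBFM (or AND) extracts that follow.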
701 int Width = getMemScale(*I) == 1 ? 8 : 16;
702 int LSBLow = 0;
703 int LSBHigh = Width;
704 int ImmsLow = LSBLow + Width - 1;
705 int ImmsHigh = LSBHigh + Width - 1;
706 MachineInstr *ExtDestMI = &*(MergeForward ? MergeMI : I);
707 if ((ExtDestMI == Rt2MI) == Subtarget->isLittleEndian()) {
708 // Create the bitfield extract for high bits.
709 BitExtMI1 =
710 BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
711 .addOperand(getLdStRegOp(*Rt2MI))
712 .addReg(getLdStRegOp(*RtNewDest).getReg())
713 .addImm(LSBHigh)
714 .addImm(ImmsHigh);
715 // Create the bitfield extract for low bits.
716 if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
717 // For unsigned, prefer to use AND for low bits.
718 BitExtMI2 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
719 .addOperand(getLdStRegOp(*RtMI))
720 .addReg(getLdStRegOp(*RtNewDest).getReg())
721 .addImm(ImmsLow);
722 } else {
723 BitExtMI2 =
724 BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
725 .addOperand(getLdStRegOp(*RtMI))
726 .addReg(getLdStRegOp(*RtNewDest).getReg())
727 .addImm(LSBLow)
728 .addImm(ImmsLow);
729 }
730 } else {
731 // Create the bitfield extract for low bits.
732 if (RtMI->getOpcode() == getMatchingNonSExtOpcode(RtMI->getOpcode())) {
733 // For unsigned, prefer to use AND for low bits.
734 BitExtMI1 = BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::ANDWri))
735 .addOperand(getLdStRegOp(*RtMI))
736 .addReg(getLdStRegOp(*RtNewDest).getReg())
737 .addImm(ImmsLow);
738 } else {
739 BitExtMI1 =
740 BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*RtMI)))
741 .addOperand(getLdStRegOp(*RtMI))
742 .addReg(getLdStRegOp(*RtNewDest).getReg())
743 .addImm(LSBLow)
744 .addImm(ImmsLow);
745 }
746
747 // Create the bitfield extract for high bits.
748 BitExtMI2 =
749 BuildMI(*MBB, InsertionPoint, DL, TII->get(getBitExtrOpcode(*Rt2MI)))
750 .addOperand(getLdStRegOp(*Rt2MI))
751 .addReg(getLdStRegOp(*RtNewDest).getReg())
752 .addImm(LSBHigh)
753 .addImm(ImmsHigh);
754 }
755 (void)BitExtMI1;
756 (void)BitExtMI2;
757
758 DEBUG(dbgs() << " ");
759 DEBUG((BitExtMI1)->print(dbgs()));
760 DEBUG(dbgs() << " ");
761 DEBUG((BitExtMI2)->print(dbgs()));
762 DEBUG(dbgs() << "\n");
763
764 // Erase the old instructions.
765 I->eraseFromParent();
766 MergeMI->eraseFromParent();
767 return NextI;
768 }
769 assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
770 "Expected promotable zero store");
771
772 // Construct the new instruction.
773 MachineInstrBuilder MIB;
774 MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
775 .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
776 .addOperand(BaseRegOp)
777 .addImm(OffsetImm)
778 .setMemRefs(I->mergeMemRefsWith(*MergeMI));
779 (void)MIB;
780
781 DEBUG(dbgs() << "Creating wider load/store. Replacing instructions:\n ");
782 DEBUG(I->print(dbgs()));
783 DEBUG(dbgs() << " ");
784 DEBUG(MergeMI->print(dbgs()));
785 DEBUG(dbgs() << " with instruction:\n ");
786 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
787 DEBUG(dbgs() << "\n");
788
789 // Erase the old instructions.
790 I->eraseFromParent();
791 MergeMI->eraseFromParent();
792 return NextI;
793 }
794
795 MachineBasicBlock::iterator
796 AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
797 MachineBasicBlock::iterator Paired,
798 const LdStPairFlags &Flags) {
799 MachineBasicBlock::iterator NextI = I;
800 ++NextI;
801 // If NextI is the second of the two instructions to be merged, we need
802 // to skip one further. Either way, the merge will invalidate the iterator,
803 // and we don't need to scan the new instruction, as it's a pairwise
804 // instruction, which we're not considering for further action anyway.
805 if (NextI == Paired)
806 ++NextI;
807
808 int SExtIdx = Flags.getSExtIdx();
809 unsigned Opc =
810 SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
811 bool IsUnscaled = TII->isUnscaledLdSt(Opc);
812 int OffsetStride = IsUnscaled ? getMemScale(*I) : 1;
813
814 bool MergeForward = Flags.getMergeForward();
815 // Insert our new paired instruction after whichever of the paired
816 // instructions MergeForward indicates.
817 MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
818 // MergeForward also determines from which instruction we copy the base
819 // register operand, so that its flags stay compatible with the input code.
820 const MachineOperand &BaseRegOp =
821 MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);
822
823 int Offset = getLdStOffsetOp(*I).getImm();
824 int PairedOffset = getLdStOffsetOp(*Paired).getImm();
825 bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
826 if (IsUnscaled != PairedIsUnscaled) {
827 // We're trying to pair instructions that differ in how they are scaled. If
828 // I is scaled then scale the offset of Paired accordingly. Otherwise, do
829 // the opposite (i.e., make Paired's offset unscaled).
830 int MemSize = getMemScale(*Paired);
831 if (PairedIsUnscaled) {
832 // If the unscaled offset isn't a multiple of the MemSize, we can't
833 // pair the operations together.
834 assert(!(PairedOffset % getMemScale(*Paired)) &&
835 "Offset should be a multiple of the stride!");
836 PairedOffset /= MemSize;
837 } else {
838 PairedOffset *= MemSize;
839 }
840 }
841
842 // Which register is Rt and which is Rt2 depends on the offset order.
843 MachineInstr *RtMI, *Rt2MI;
844 if (Offset == PairedOffset + OffsetStride) {
845 RtMI = &*Paired;
846 Rt2MI = &*I;
847 // Here we swapped the assumption made for SExtIdx.
848 // I.e., we turn ldp I, Paired into ldp Paired, I.
849 // Update the index accordingly.
850 if (SExtIdx != -1)
851 SExtIdx = (SExtIdx + 1) % 2;
852 } else {
853 RtMI = &*I;
854 Rt2MI = &*Paired;
855 }
856 int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
857 // Scale the immediate offset, if necessary.
858 if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
859 assert(!(OffsetImm % getMemScale(*RtMI)) &&
860 "Unscaled offset cannot be scaled.");
861 OffsetImm /= getMemScale(*RtMI);
862 }
863
864 // Construct the new instruction.
865 MachineInstrBuilder MIB;
866 DebugLoc DL = I->getDebugLoc();
867 MachineBasicBlock *MBB = I->getParent();
868 MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
869 .addOperand(getLdStRegOp(*RtMI))
870 .addOperand(getLdStRegOp(*Rt2MI))
871 .addOperand(BaseRegOp)
872 .addImm(OffsetImm)
873 .setMemRefs(I->mergeMemRefsWith(*Paired));
874
875 (void)MIB;
876
877 DEBUG(dbgs() << "Creating pair load/store. Replacing instructions:\n ");
878 DEBUG(I->print(dbgs()));
879 DEBUG(dbgs() << " ");
880 DEBUG(Paired->print(dbgs()));
881 DEBUG(dbgs() << " with instruction:\n ");
882 if (SExtIdx != -1) {
883 // Generate the sign extension for the proper result of the ldp.
884 // I.e., with X1, that would be:
885 // %W1<def> = KILL %W1, %X1<imp-def>
886 // %X1<def> = SBFMXri %X1<kill>, 0, 31
887 MachineOperand &DstMO = MIB->getOperand(SExtIdx);
888 // Right now, DstMO has the extended register, since it comes from an
889 // extended opcode.
890 unsigned DstRegX = DstMO.getReg();
891 // Get the W variant of that register.
892 unsigned DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
893 // Update the result of LDP to use the W instead of the X variant.
894 DstMO.setReg(DstRegW);
895 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
896 DEBUG(dbgs() << "\n");
897 // Make the machine verifier happy by providing a definition for
898 // the X register.
899 // Insert this definition right after the generated LDP, i.e., before
900 // InsertionPoint.
901 MachineInstrBuilder MIBKill =
902 BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
903 .addReg(DstRegW)
904 .addReg(DstRegX, RegState::Define);
905 MIBKill->getOperand(2).setImplicit();
906 // Create the sign extension.
907 MachineInstrBuilder MIBSXTW =
908 BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
909 .addReg(DstRegX)
910 .addImm(0)
911 .addImm(31);
912 (void)MIBSXTW;
913 DEBUG(dbgs() << " Extend operand:\n ");
914 DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
915 } else {
916 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
917 }
918 DEBUG(dbgs() << "\n");
919
920 // Erase the old instructions.
921 I->eraseFromParent();
922 Paired->eraseFromParent();
923
924 return NextI;
925 }
926
927 MachineBasicBlock::iterator
928 AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
929 MachineBasicBlock::iterator StoreI) {
930 MachineBasicBlock::iterator NextI = LoadI;
931 ++NextI;
932
933 int LoadSize = getMemScale(*LoadI);
934 int StoreSize = getMemScale(*StoreI);
935 unsigned LdRt = getLdStRegOp(*LoadI).getReg();
936 unsigned StRt = getLdStRegOp(*StoreI).getReg();
937 bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);
938
939 assert((IsStoreXReg ||
940 TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
941 "Unexpected RegClass");
942
943 MachineInstr *BitExtMI;
944 if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
945 // Remove the load if the destination register of the load is the same as
946 // the register holding the stored value.
947 if (StRt == LdRt && LoadSize == 8) {
948 DEBUG(dbgs() << "Remove load instruction:\n ");
949 DEBUG(LoadI->print(dbgs()));
950 DEBUG(dbgs() << "\n");
951 LoadI->eraseFromParent();
952 return NextI;
953 }
954 // Replace the load with a mov if the load and store have the same size.
955 BitExtMI =
956 BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
957 TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
958 .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
959 .addReg(StRt)
960 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
961 } else {
962 // FIXME: Currently we disable this transformation in big-endian targets as
963 // performance and correctness are verified only in little-endian.
964 if (!Subtarget->isLittleEndian())
965 return NextI;
966 bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
967 assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
968 "Unsupported ld/st match");
969 assert(LoadSize <= StoreSize && "Invalid load size");
970 int UnscaledLdOffset = IsUnscaled
971 ? getLdStOffsetOp(*LoadI).getImm()
972 : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
973 int UnscaledStOffset = IsUnscaled
974 ? getLdStOffsetOp(*StoreI).getImm()
975 : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
976 int Width = LoadSize * 8;
977 int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
978 int Imms = Immr + Width - 1;
979 unsigned DestReg = IsStoreXReg
980 ? TRI->getMatchingSuperReg(LdRt, AArch64::sub_32,
981 &AArch64::GPR64RegClass)
982 : LdRt;
983
984 assert((UnscaledLdOffset >= UnscaledStOffset &&
985 (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
986 "Invalid offset");
987
988 Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
989 Imms = Immr + Width - 1;
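// The field to extract starts Immr bits into the stored register and is Width
// bits wide. When the offsets coincide (Immr == 0), an AND with a logical
// immediate masking the low Width bits suffices; otherwise a UBFM shifts the
// field down to bit 0.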
990 if (UnscaledLdOffset == UnscaledStOffset) {
991 uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
992 | ((Immr) << 6) // immr
993 | ((Imms) << 0) // imms
994 ;
995
996 BitExtMI =
997 BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
998 TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
999 DestReg)
1000 .addReg(StRt)
1001 .addImm(AndMaskEncoded);
1002 } else {
1003 BitExtMI =
1004 BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
1005 TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
1006 DestReg)
1007 .addReg(StRt)
1008 .addImm(Immr)
1009 .addImm(Imms);
1010 }
1011 }
1012 (void)BitExtMI;
1013
1014 DEBUG(dbgs() << "Promoting load by replacing :\n ");
1015 DEBUG(StoreI->print(dbgs()));
1016 DEBUG(dbgs() << " ");
1017 DEBUG(LoadI->print(dbgs()));
1018 DEBUG(dbgs() << " with instructions:\n ");
1019 DEBUG(StoreI->print(dbgs()));
1020 DEBUG(dbgs() << " ");
1021 DEBUG((BitExtMI)->print(dbgs()));
1022 DEBUG(dbgs() << "\n");
1023
1024 // Erase the old instructions.
1025 LoadI->eraseFromParent();
1026 return NextI;
1027 }
1028
1029 /// trackRegDefsUses - Remember what registers the specified instruction uses
1030 /// and modifies.
1031 static void trackRegDefsUses(const MachineInstr &MI, BitVector &ModifiedRegs,
1032 BitVector &UsedRegs,
1033 const TargetRegisterInfo *TRI) {
1034 for (const MachineOperand &MO : MI.operands()) {
1035 if (MO.isRegMask())
1036 ModifiedRegs.setBitsNotInMask(MO.getRegMask());
1037
1038 if (!MO.isReg())
1039 continue;
1040 unsigned Reg = MO.getReg();
1041 if (!Reg)
1042 continue;
1043 if (MO.isDef()) {
1044 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1045 ModifiedRegs.set(*AI);
1046 } else {
1047 assert(MO.isUse() && "Reg operand not a def and not a use?!?");
1048 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
1049 UsedRegs.set(*AI);
1050 }
1051 }
1052 }
1053
1054 static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
1055 // Convert the byte-offset used by unscaled into an "element" offset used
1056 // by the scaled pair load/store instructions.
1057 if (IsUnscaled) {
1058 // If the byte-offset isn't a multiple of the stride, there's no point
1059 // trying to match it.
1060 if (Offset % OffsetStride)
1061 return false;
1062 Offset /= OffsetStride;
1063 }
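// Paired instructions encode the offset as a signed 7-bit immediate, scaled by
// the access size.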
1064 return Offset <= 63 && Offset >= -64;
1065 }
1066
1067 // Do alignment, specialized to power of 2 and for signed ints,
1068 // avoiding having to do a C-style cast from uint64_t to int when
1069 // using alignTo from include/llvm/Support/MathExtras.h.
1070 // FIXME: Move this function to include/MathExtras.h?
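// For example, alignTo(3, 2) == 4 and alignTo(4, 2) == 4.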
1071 static int alignTo(int Num, int PowOf2) {
1072 return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
1073 }
1074
1075 static bool mayAlias(MachineInstr &MIa, MachineInstr &MIb,
1076 const AArch64InstrInfo *TII) {
1077 // One of the instructions must modify memory.
1078 if (!MIa.mayStore() && !MIb.mayStore())
1079 return false;
1080
1081 // Both instructions must be memory operations.
1082 if (!MIa.mayLoadOrStore() && !MIb.mayLoadOrStore())
1083 return false;
1084
1085 return !TII->areMemAccessesTriviallyDisjoint(MIa, MIb);
1086 }
1087
1088 static bool mayAlias(MachineInstr &MIa,
1089 SmallVectorImpl<MachineInstr *> &MemInsns,
1090 const AArch64InstrInfo *TII) {
1091 for (MachineInstr *MIb : MemInsns)
1092 if (mayAlias(MIa, *MIb, TII))
1093 return true;
1094
1095 return false;
1096 }
1097
1098 bool AArch64LoadStoreOpt::findMatchingStore(
1099 MachineBasicBlock::iterator I, unsigned Limit,
1100 MachineBasicBlock::iterator &StoreI) {
1101 MachineBasicBlock::iterator B = I->getParent()->begin();
1102 MachineBasicBlock::iterator MBBI = I;
1103 MachineInstr &LoadMI = *I;
1104 unsigned BaseReg = getLdStBaseOp(LoadMI).getReg();
1105
1106 // If the load is the first instruction in the block, there's obviously
1107 // not any matching store.
1108 if (MBBI == B)
1109 return false;
1110
1111 // Track which registers have been modified and used between the first insn
1112 // and the second insn.
1113 ModifiedRegs.reset();
1114 UsedRegs.reset();
1115
1116 unsigned Count = 0;
1117 do {
1118 --MBBI;
1119 MachineInstr &MI = *MBBI;
1120
1121 // Don't count DBG_VALUE instructions towards the search limit.
1122 if (!MI.isDebugValue())
1123 ++Count;
1124
1125 // If the load instruction reads directly from the address to which the
1126 // store instruction writes and the stored value is not modified, we can
1127 // promote the load. Since we do not handle stores with pre-/post-index,
1128 // it's unnecessary to check if BaseReg is modified by the store itself.
1129 if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
1130 BaseReg == getLdStBaseOp(MI).getReg() &&
1131 isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
1132 !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
1133 StoreI = MBBI;
1134 return true;
1135 }
1136
1137 if (MI.isCall())
1138 return false;
1139
1140 // Update modified / uses register lists.
1141 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1142
1143 // Otherwise, if the base register is modified, we have no match, so
1144 // return early.
1145 if (ModifiedRegs[BaseReg])
1146 return false;
1147
1148 // If we encounter a store aliased with the load, return early.
1149 if (MI.mayStore() && mayAlias(LoadMI, MI, TII))
1150 return false;
1151 } while (MBBI != B && Count < Limit);
1152 return false;
1153 }
1154
1155 // Returns true if FirstMI and MI are candidates for merging or pairing.
1156 // Otherwise, returns false.
1157 static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
1158 LdStPairFlags &Flags,
1159 const AArch64InstrInfo *TII) {
1160 // If this is volatile or if pairing is suppressed, not a candidate.
1161 if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
1162 return false;
1163
1164 // We should have already checked FirstMI for pair suppression and volatility.
1165 assert(!FirstMI.hasOrderedMemoryRef() &&
1166 !TII->isLdStPairSuppressed(FirstMI) &&
1167 "FirstMI shouldn't get here if either of these checks are true.");
1168
1169 unsigned OpcA = FirstMI.getOpcode();
1170 unsigned OpcB = MI.getOpcode();
1171
1172 // Opcodes match: nothing more to check.
1173 if (OpcA == OpcB)
1174 return true;
1175
1176 // Try to match a sign-extended load/store with a zero-extended load/store.
1177 bool IsValidLdStrOpc, PairIsValidLdStrOpc;
1178 unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
1179 assert(IsValidLdStrOpc &&
1180 "Given Opc should be a Load or Store with an immediate");
1181 // OpcA will be the first instruction in the pair.
1182 if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
1183 Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
1184 return true;
1185 }
1186
1187 // If the second instruction isn't even a load/store, bail out.
1188 if (!PairIsValidLdStrOpc)
1189 return false;
1190
1191 // FIXME: We don't support merging narrow loads/stores with mixed
1192 // scaled/unscaled offsets.
1193 if (isNarrowLoadOrStore(OpcA) || isNarrowLoadOrStore(OpcB))
1194 return false;
1195
1196 // Try to match an unscaled load/store with a scaled load/store.
1197 return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
1198 getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
1199
1200 // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
1201 }
1202
1203 /// Scan the instructions looking for a load/store that can be combined with the
1204 /// current instruction into a wider equivalent or a load/store pair.
1205 MachineBasicBlock::iterator
1206 AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
1207 LdStPairFlags &Flags, unsigned Limit,
1208 bool FindNarrowMerge) {
1209 MachineBasicBlock::iterator E = I->getParent()->end();
1210 MachineBasicBlock::iterator MBBI = I;
1211 MachineInstr &FirstMI = *I;
1212 ++MBBI;
1213
1214 bool MayLoad = FirstMI.mayLoad();
1215 bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
1216 unsigned Reg = getLdStRegOp(FirstMI).getReg();
1217 unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
1218 int Offset = getLdStOffsetOp(FirstMI).getImm();
1219 int OffsetStride = IsUnscaled ? getMemScale(FirstMI) : 1;
1220 bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
1221
1222 // Track which registers have been modified and used between the first insn
1223 // (inclusive) and the second insn.
1224 ModifiedRegs.reset();
1225 UsedRegs.reset();
1226
1227 // Remember any instructions that read/write memory between FirstMI and MI.
1228 SmallVector<MachineInstr *, 4> MemInsns;
1229
1230 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1231 MachineInstr &MI = *MBBI;
1232 // Skip DBG_VALUE instructions. Otherwise debug info can affect the
1233 // optimization by changing how far we scan.
1234 if (MI.isDebugValue())
1235 continue;
1236
1237 // Now that we know this is a real instruction, count it.
1238 ++Count;
1239
1240 Flags.setSExtIdx(-1);
1241 if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
1242 getLdStOffsetOp(MI).isImm()) {
1243 assert(MI.mayLoadOrStore() && "Expected memory operation.");
1244 // If we've found another instruction with the same opcode, check to see
1245 // if the base and offset are compatible with our starting instruction.
1246 // These instructions all have scaled immediate operands, so we just
1247 // check for +1/-1. Make sure to check the new instruction offset is
1248 // actually an immediate and not a symbolic reference destined for
1249 // a relocation.
1250 unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
1251 int MIOffset = getLdStOffsetOp(MI).getImm();
1252 bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
1253 if (IsUnscaled != MIIsUnscaled) {
1254 // We're trying to pair instructions that differ in how they are scaled.
1255 // If FirstMI is scaled then scale the offset of MI accordingly.
1256 // Otherwise, do the opposite (i.e., make MI's offset unscaled).
1257 int MemSize = getMemScale(MI);
1258 if (MIIsUnscaled) {
1259 // If the unscaled offset isn't a multiple of the MemSize, we can't
1260 // pair the operations together: bail and keep looking.
1261 if (MIOffset % MemSize)
1262 continue;
1263 MIOffset /= MemSize;
1264 } else {
1265 MIOffset *= MemSize;
1266 }
1267 }
1268
1269 if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
1270 (Offset + OffsetStride == MIOffset))) {
1271 int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1272 if (FindNarrowMerge) {
1273 // If the alignment requirements of the scaled wide load/store
1274 // instruction can't express the offset of the scaled narrow input,
1275 // bail and keep looking. For promotable zero stores, allow only when
1276 // the stored value is the same (i.e., WZR).
1277 if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
1278 (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
1279 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1280 MemInsns.push_back(&MI);
1281 continue;
1282 }
1283 } else {
1284 // Pairwise instructions have a 7-bit signed offset field. Single
1285 // insns have a 12-bit unsigned offset field. If the resultant
1286 // immediate offset of merging these instructions is out of range for
1287 // a pairwise instruction, bail and keep looking.
1288 if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
1289 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1290 MemInsns.push_back(&MI);
1291 continue;
1292 }
1293 // If the alignment requirements of the paired (scaled) instruction
1294 // can't express the offset of the unscaled input, bail and keep
1295 // looking.
1296 if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1297 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1298 MemInsns.push_back(&MI);
1299 continue;
1300 }
1301 }
1302 // If the destination register of the loads is the same register, bail
1303 // and keep looking. A load-pair instruction with both destination
1304 // registers the same is UNPREDICTABLE and will result in an exception.
1305 if (MayLoad && Reg == getLdStRegOp(MI).getReg()) {
1306 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1307 MemInsns.push_back(&MI);
1308 continue;
1309 }
1310
1311 // If the Rt of the second instruction was not modified or used between
1312 // the two instructions and none of the instructions between the second
1313 // and first alias with the second, we can combine the second into the
1314 // first.
1315 if (!ModifiedRegs[getLdStRegOp(MI).getReg()] &&
1316 !(MI.mayLoad() && UsedRegs[getLdStRegOp(MI).getReg()]) &&
1317 !mayAlias(MI, MemInsns, TII)) {
1318 Flags.setMergeForward(false);
1319 return MBBI;
1320 }
1321
1322 // Likewise, if the Rt of the first instruction is not modified or used
1323 // between the two instructions and none of the instructions between the
1324 // first and the second alias with the first, we can combine the first
1325 // into the second.
1326 if (!ModifiedRegs[getLdStRegOp(FirstMI).getReg()] &&
1327 !(MayLoad && UsedRegs[getLdStRegOp(FirstMI).getReg()]) &&
1328 !mayAlias(FirstMI, MemInsns, TII)) {
1329 Flags.setMergeForward(true);
1330 return MBBI;
1331 }
1332 // Unable to combine these instructions due to interference in between.
1333 // Keep looking.
1334 }
1335 }
1336
1337 // The instruction wasn't a matching load or store. Stop searching if we
1338 // encounter a call instruction that might modify memory.
1339 if (MI.isCall())
1340 return E;
1341
1342 // Update modified / uses register lists.
1343 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1344
1345 // Otherwise, if the base register is modified, we have no match, so
1346 // return early.
1347 if (ModifiedRegs[BaseReg])
1348 return E;
1349
1350 // Update list of instructions that read/write memory.
1351 if (MI.mayLoadOrStore())
1352 MemInsns.push_back(&MI);
1353 }
1354 return E;
1355 }
1356
1357 MachineBasicBlock::iterator
1358 AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1359 MachineBasicBlock::iterator Update,
1360 bool IsPreIdx) {
1361 assert((Update->getOpcode() == AArch64::ADDXri ||
1362 Update->getOpcode() == AArch64::SUBXri) &&
1363 "Unexpected base register update instruction to merge!");
1364 MachineBasicBlock::iterator NextI = I;
1365 // Return the instruction following the merged instruction, which is
1366 // the instruction following our unmerged load. Unless that's the add/sub
1367 // instruction we're merging, in which case it's the one after that.
1368 if (++NextI == Update)
1369 ++NextI;
1370
1371 int Value = Update->getOperand(2).getImm();
1372 assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
1373 "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
1374 if (Update->getOpcode() == AArch64::SUBXri)
1375 Value = -Value;
1376
1377 unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1378 : getPostIndexedOpcode(I->getOpcode());
1379 MachineInstrBuilder MIB;
1380 if (!isPairedLdSt(*I)) {
1381 // Non-paired instruction.
1382 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1383 .addOperand(getLdStRegOp(*Update))
1384 .addOperand(getLdStRegOp(*I))
1385 .addOperand(getLdStBaseOp(*I))
1386 .addImm(Value)
1387 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
1388 } else {
1389 // Paired instruction.
1390 int Scale = getMemScale(*I);
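// LDP/STP pre- and post-indexed forms encode the writeback immediate in units
// of the access size, so divide the update amount by the scale.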
1391 MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1392 .addOperand(getLdStRegOp(*Update))
1393 .addOperand(getLdStRegOp(*I, 0))
1394 .addOperand(getLdStRegOp(*I, 1))
1395 .addOperand(getLdStBaseOp(*I))
1396 .addImm(Value / Scale)
1397 .setMemRefs(I->memoperands_begin(), I->memoperands_end());
1398 }
1399 (void)MIB;
1400
1401 if (IsPreIdx)
1402 DEBUG(dbgs() << "Creating pre-indexed load/store.");
1403 else
1404 DEBUG(dbgs() << "Creating post-indexed load/store.");
1405 DEBUG(dbgs() << " Replacing instructions:\n ");
1406 DEBUG(I->print(dbgs()));
1407 DEBUG(dbgs() << " ");
1408 DEBUG(Update->print(dbgs()));
1409 DEBUG(dbgs() << " with instruction:\n ");
1410 DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1411 DEBUG(dbgs() << "\n");
1412
1413 // Erase the old instructions for the block.
1414 I->eraseFromParent();
1415 Update->eraseFromParent();
1416
1417 return NextI;
1418 }
1419
1420 bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
1421 MachineInstr &MI,
1422 unsigned BaseReg, int Offset) {
1423 switch (MI.getOpcode()) {
1424 default:
1425 break;
1426 case AArch64::SUBXri:
1427 // Negate the offset for a SUB instruction.
1428 Offset *= -1;
1429 // FALLTHROUGH
1430 case AArch64::ADDXri:
1431 // Make sure it's a vanilla immediate operand, not a relocation or
1432 // anything else we can't handle.
1433 if (!MI.getOperand(2).isImm())
1434 break;
1435 // Watch out for 1 << 12 shifted value.
1436 if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
1437 break;
1438
1439 // The update instruction source and destination register must be the
1440 // same as the load/store base register.
1441 if (MI.getOperand(0).getReg() != BaseReg ||
1442 MI.getOperand(1).getReg() != BaseReg)
1443 break;
1444
1445 bool IsPairedInsn = isPairedLdSt(MemMI);
1446 int UpdateOffset = MI.getOperand(2).getImm();
1447 // For non-paired load/store instructions, the immediate must fit in a
1448 // signed 9-bit integer.
1449 if (!IsPairedInsn && (UpdateOffset > 255 || UpdateOffset < -256))
1450 break;
1451
1452 // For paired load/store instructions, the immediate must be a multiple of
1453 // the scaling factor. The scaled offset must also fit into a signed 7-bit
1454 // integer.
1455 if (IsPairedInsn) {
1456 int Scale = getMemScale(MemMI);
1457 if (UpdateOffset % Scale != 0)
1458 break;
1459
1460 int ScaledOffset = UpdateOffset / Scale;
1461 if (ScaledOffset > 64 || ScaledOffset < -64)
1462 break;
1463 }
1464
1465 // If we have a non-zero Offset, we check that it matches the amount
1466 // we're adding to the register.
1467 if (!Offset || Offset == MI.getOperand(2).getImm())
1468 return true;
1469 break;
1470 }
1471 return false;
1472 }
1473
1474 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
1475 MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
1476 MachineBasicBlock::iterator E = I->getParent()->end();
1477 MachineInstr &MemMI = *I;
1478 MachineBasicBlock::iterator MBBI = I;
1479
1480 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1481 int MIUnscaledOffset = getLdStOffsetOp(MemMI).getImm() * getMemScale(MemMI);
1482
1483 // Scan forward looking for post-index opportunities. Updating instructions
1484 // can't be formed if the memory instruction doesn't have the offset we're
1485 // looking for.
1486 if (MIUnscaledOffset != UnscaledOffset)
1487 return E;
1488
1489 // If the base register overlaps a destination register, we can't
1490 // merge the update.
1491 bool IsPairedInsn = isPairedLdSt(MemMI);
1492 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1493 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1494 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1495 return E;
1496 }
1497
1498 // Track which registers have been modified and used between the first insn
1499 // (inclusive) and the second insn.
1500 ModifiedRegs.reset();
1501 UsedRegs.reset();
1502 ++MBBI;
1503 for (unsigned Count = 0; MBBI != E && Count < Limit; ++MBBI) {
1504 MachineInstr &MI = *MBBI;
1505 // Skip DBG_VALUE instructions.
1506 if (MI.isDebugValue())
1507 continue;
1508
1509 // Now that we know this is a real instruction, count it.
1510 ++Count;
1511
1512 // If we found a match, return it.
1513 if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
1514 return MBBI;
1515
1516 // Update the status of what the instruction clobbered and used.
1517 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1518
1519 // Otherwise, if the base register is used or modified, we have no match, so
1520 // return early.
1521 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1522 return E;
1523 }
1524 return E;
1525 }
1526
1527 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
1528 MachineBasicBlock::iterator I, unsigned Limit) {
1529 MachineBasicBlock::iterator B = I->getParent()->begin();
1530 MachineBasicBlock::iterator E = I->getParent()->end();
1531 MachineInstr &MemMI = *I;
1532 MachineBasicBlock::iterator MBBI = I;
1533
1534 unsigned BaseReg = getLdStBaseOp(MemMI).getReg();
1535 int Offset = getLdStOffsetOp(MemMI).getImm();
1536
1537 // If the load/store is the first instruction in the block, there's obviously
1538 // not any matching update. Ditto if the memory offset isn't zero.
1539 if (MBBI == B || Offset != 0)
1540 return E;
1541 // If the base register overlaps a destination register, we can't
1542 // merge the update.
1543 bool IsPairedInsn = isPairedLdSt(MemMI);
1544 for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1545 unsigned DestReg = getLdStRegOp(MemMI, i).getReg();
1546 if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1547 return E;
1548 }
1549
1550 // Track which registers have been modified and used between the first insn
1551 // (inclusive) and the second insn.
1552 ModifiedRegs.reset();
1553 UsedRegs.reset();
1554 unsigned Count = 0;
1555 do {
1556 --MBBI;
1557 MachineInstr &MI = *MBBI;
1558
1559 // Don't count DBG_VALUE instructions towards the search limit.
1560 if (!MI.isDebugValue())
1561 ++Count;
1562
1563 // If we found a match, return it.
1564 if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
1565 return MBBI;
1566
1567 // Update the status of what the instruction clobbered and used.
1568 trackRegDefsUses(MI, ModifiedRegs, UsedRegs, TRI);
1569
1570 // Otherwise, if the base register is used or modified, we have no match, so
1571 // return early.
1572 if (ModifiedRegs[BaseReg] || UsedRegs[BaseReg])
1573 return E;
1574 } while (MBBI != B && Count < Limit);
1575 return E;
1576 }
1577
1578 bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
1579 MachineBasicBlock::iterator &MBBI) {
1580 MachineInstr &MI = *MBBI;
1581 // If this is a volatile load, don't mess with it.
1582 if (MI.hasOrderedMemoryRef())
1583 return false;
1584
1585 // Make sure this is a reg+imm.
1586 // FIXME: It is possible to extend it to handle reg+reg cases.
1587 if (!getLdStOffsetOp(MI).isImm())
1588 return false;
1589
1590 // Look backward up to LdStLimit instructions.
1591 MachineBasicBlock::iterator StoreI;
1592 if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
1593 ++NumLoadsFromStoresPromoted;
1594 // Promote the load. Keeping the iterator straight is a
1595 // pain, so we let the merge routine tell us what the next instruction
1596 // is after it's done mucking about.
1597 MBBI = promoteLoadFromStore(MBBI, StoreI);
1598 return true;
1599 }
1600 return false;
1601 }
1602
1603 // Find narrow loads that can be converted into a single wider load with
1604 // bitfield extract instructions. Also merge adjacent zero stores into a wider
1605 // store.
1606 bool AArch64LoadStoreOpt::tryToMergeLdStInst(
1607 MachineBasicBlock::iterator &MBBI) {
1608 assert((isNarrowLoad(*MBBI) || isPromotableZeroStoreOpcode(*MBBI)) &&
1609 "Expected narrow op.");
1610 MachineInstr &MI = *MBBI;
1611 MachineBasicBlock::iterator E = MI.getParent()->end();
1612
1613 if (!TII->isCandidateToMergeOrPair(MI))
1614 return false;
1615
1616 // For promotable zero stores, the stored value should be WZR.
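// (Only zero stores can be widened without materializing a combined value:
// two adjacent halfword stores of WZR are exactly one word store of WZR.)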
1617 if (isPromotableZeroStoreOpcode(MI) &&
1618 getLdStRegOp(MI).getReg() != AArch64::WZR)
1619 return false;
1620
1621 // Look ahead up to LdStLimit instructions for a mergable instruction.
1622 LdStPairFlags Flags;
1623 MachineBasicBlock::iterator MergeMI =
1624 findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
1625 if (MergeMI != E) {
1626 if (isNarrowLoad(MI)) {
1627 ++NumNarrowLoadsPromoted;
1628 } else if (isPromotableZeroStoreInst(MI)) {
1629 ++NumZeroStoresPromoted;
1630 }
1631 // Keeping the iterator straight is a pain, so we let the merge routine tell
1632 // us what the next instruction is after it's done mucking about.
1633 MBBI = mergeNarrowInsns(MBBI, MergeMI, Flags);
1634 return true;
1635 }
1636 return false;
1637 }
1638
1639 // Find loads and stores that can be merged into a single load or store pair
1640 // instruction.
1641 bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
1642 MachineInstr &MI = *MBBI;
1643 MachineBasicBlock::iterator E = MI.getParent()->end();
1644
1645 if (!TII->isCandidateToMergeOrPair(MI))
1646 return false;
1647
1648 // Early exit if the offset is not possible to match. (6 bits of positive
1649 // range, plus allow an extra one in case we find a later insn that matches
1650 // with Offset-1)
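// For example (scaled case, illustrative): an LDRXui at scaled offset 64 is
// kept as a candidate because a later LDRXui at offset 63 would let the pair
// be anchored at 63, which still fits the signed 7-bit LDP immediate.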
1651 bool IsUnscaled = TII->isUnscaledLdSt(MI);
1652 int Offset = getLdStOffsetOp(MI).getImm();
1653 int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
1654 if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
1655 return false;
1656
1657 // Look ahead up to LdStLimit instructions for a pairable instruction.
1658 LdStPairFlags Flags;
1659 MachineBasicBlock::iterator Paired =
1660 findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
1661 if (Paired != E) {
1662 ++NumPairCreated;
1663 if (TII->isUnscaledLdSt(MI))
1664 ++NumUnscaledPairCreated;
1665 // Keeping the iterator straight is a pain, so we let the merge routine tell
1666 // us what the next instruction is after it's done mucking about.
1667 MBBI = mergePairedInsns(MBBI, Paired, Flags);
1668 return true;
1669 }
1670 return false;
1671 }
1672
1673 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
1674 bool enableNarrowLdOpt) {
1675 bool Modified = false;
1676 // Four transformations to do here:
1677 // 1) Find loads that directly read from stores and promote them by
1678 // replacing with mov instructions. If the store is wider than the load,
1679 // the load will be replaced with a bitfield extract.
1680 // e.g.,
1681 // str w1, [x0, #4]
1682 // ldrh w2, [x0, #6]
1683 // ; becomes
1684 // str w1, [x0, #4]
1685 // lsr w2, w1, #16
1686 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1687 MBBI != E;) {
1688 MachineInstr &MI = *MBBI;
1689 switch (MI.getOpcode()) {
1690 default:
1691 // Just move on to the next instruction.
1692 ++MBBI;
1693 break;
1694 // Scaled instructions.
1695 case AArch64::LDRBBui:
1696 case AArch64::LDRHHui:
1697 case AArch64::LDRWui:
1698 case AArch64::LDRXui:
1699 // Unscaled instructions.
1700 case AArch64::LDURBBi:
1701 case AArch64::LDURHHi:
1702 case AArch64::LDURWi:
1703 case AArch64::LDURXi: {
1704 if (tryToPromoteLoadFromStore(MBBI)) {
1705 Modified = true;
1706 break;
1707 }
1708 ++MBBI;
1709 break;
1710 }
1711 }
1712 }
1713 // 2) Find narrow loads that can be converted into a single wider load
1714 // with bitfield extract instructions.
1715 // e.g.,
1716 // ldrh w0, [x2]
1717 // ldrh w1, [x2, #2]
1718 // ; becomes
1719 // ldr w0, [x2]
1720 // ubfx w1, w0, #16, #16
1721 // and w0, w0, #0xffff
1722 //
1723 // Also merge adjacent zero stores into a wider store.
1724 // e.g.,
1725 // strh wzr, [x0]
1726 // strh wzr, [x0, #2]
1727 // ; becomes
1728 // str wzr, [x0]
1729 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1730 enableNarrowLdOpt && MBBI != E;) {
1731 MachineInstr &MI = *MBBI;
1732 unsigned Opc = MI.getOpcode();
1733 if (isPromotableZeroStoreOpcode(Opc) ||
1734 (EnableNarrowLdMerge && isNarrowLoad(Opc))) {
1735 if (tryToMergeLdStInst(MBBI)) {
1736 Modified = true;
1737 } else
1738 ++MBBI;
1739 } else
1740 ++MBBI;
1741 }
1742
1743 // 3) Find loads and stores that can be merged into a single load or store
1744 // pair instruction.
1745 // e.g.,
1746 // ldr x0, [x2]
1747 // ldr x1, [x2, #8]
1748 // ; becomes
1749 // ldp x0, x1, [x2]
1750 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1751 MBBI != E;) {
1752 MachineInstr &MI = *MBBI;
1753 switch (MI.getOpcode()) {
1754 default:
1755 // Just move on to the next instruction.
1756 ++MBBI;
1757 break;
1758 // Scaled instructions.
1759 case AArch64::STRSui:
1760 case AArch64::STRDui:
1761 case AArch64::STRQui:
1762 case AArch64::STRXui:
1763 case AArch64::STRWui:
1764 case AArch64::LDRSui:
1765 case AArch64::LDRDui:
1766 case AArch64::LDRQui:
1767 case AArch64::LDRXui:
1768 case AArch64::LDRWui:
1769 case AArch64::LDRSWui:
1770 // Unscaled instructions.
1771 case AArch64::STURSi:
1772 case AArch64::STURDi:
1773 case AArch64::STURQi:
1774 case AArch64::STURWi:
1775 case AArch64::STURXi:
1776 case AArch64::LDURSi:
1777 case AArch64::LDURDi:
1778 case AArch64::LDURQi:
1779 case AArch64::LDURWi:
1780 case AArch64::LDURXi:
1781 case AArch64::LDURSWi: {
1782 if (tryToPairLdStInst(MBBI)) {
1783 Modified = true;
1784 break;
1785 }
1786 ++MBBI;
1787 break;
1788 }
1789 }
1790 }
1791 // 4) Find base register updates that can be merged into the load or store
1792 // as a base-reg writeback.
1793 // e.g.,
1794 // ldr x0, [x2]
1795 // add x2, x2, #4
1796 // ; becomes
1797 // ldr x0, [x2], #4
1798 for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
1799 MBBI != E;) {
1800 MachineInstr &MI = *MBBI;
1801 // Do update merging. It's simpler to keep this separate from the above
1802 // switches, though not strictly necessary.
1803 unsigned Opc = MI.getOpcode();
1804 switch (Opc) {
1805 default:
1806 // Just move on to the next instruction.
1807 ++MBBI;
1808 break;
1809 // Scaled instructions.
1810 case AArch64::STRSui:
1811 case AArch64::STRDui:
1812 case AArch64::STRQui:
1813 case AArch64::STRXui:
1814 case AArch64::STRWui:
1815 case AArch64::STRHHui:
1816 case AArch64::STRBBui:
1817 case AArch64::LDRSui:
1818 case AArch64::LDRDui:
1819 case AArch64::LDRQui:
1820 case AArch64::LDRXui:
1821 case AArch64::LDRWui:
1822 case AArch64::LDRHHui:
1823 case AArch64::LDRBBui:
1824 // Unscaled instructions.
1825 case AArch64::STURSi:
1826 case AArch64::STURDi:
1827 case AArch64::STURQi:
1828 case AArch64::STURWi:
1829 case AArch64::STURXi:
1830 case AArch64::LDURSi:
1831 case AArch64::LDURDi:
1832 case AArch64::LDURQi:
1833 case AArch64::LDURWi:
1834 case AArch64::LDURXi:
1835 // Paired instructions.
1836 case AArch64::LDPSi:
1837 case AArch64::LDPSWi:
1838 case AArch64::LDPDi:
1839 case AArch64::LDPQi:
1840 case AArch64::LDPWi:
1841 case AArch64::LDPXi:
1842 case AArch64::STPSi:
1843 case AArch64::STPDi:
1844 case AArch64::STPQi:
1845 case AArch64::STPWi:
1846 case AArch64::STPXi: {
1847 // Make sure this is a reg+imm (as opposed to an address reloc).
1848 if (!getLdStOffsetOp(MI).isImm()) {
1849 ++MBBI;
1850 break;
1851 }
1852 // Look forward to try to form a post-index instruction. For example,
1853 // ldr x0, [x20]
1854 // add x20, x20, #32
1855 // merged into:
1856 // ldr x0, [x20], #32
1857 MachineBasicBlock::iterator Update =
1858 findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
1859 if (Update != E) {
1860 // Merge the update into the ld/st.
1861 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
1862 Modified = true;
1863 ++NumPostFolded;
1864 break;
1865 }
1866 // The remaining searches form pre-indexed versions, which we don't know how
1867 // to do for unscaled opcodes, so move to the next instruction.
1868 if (TII->isUnscaledLdSt(Opc)) {
1869 ++MBBI;
1870 break;
1871 }
1872
1873 // Look back to try to find a pre-index instruction. For example,
1874 // add x0, x0, #8
1875 // ldr x1, [x0]
1876 // merged into:
1877 // ldr x1, [x0, #8]!
1878 Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
1879 if (Update != E) {
1880 // Merge the update into the ld/st.
1881 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
1882 Modified = true;
1883 ++NumPreFolded;
1884 break;
1885 }
1886 // The immediate in the load/store is scaled by the size of the memory
1887 // operation. The immediate in the add we're looking for,
1888 // however, is not, so adjust here.
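// For example, an LDRXui with immediate #2 addresses byte offset 16
// (2 * 8-byte scale), so the add/sub we want must be adding #16, not #2.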
1889 int UnscaledOffset = getLdStOffsetOp(MI).getImm() * getMemScale(MI);
1890
1891 // Look forward to try to find a pre-index instruction. For example,
1892 // ldr x1, [x0, #64]
1893 // add x0, x0, #64
1894 // merged into:
1895 // ldr x1, [x0, #64]!
1896 Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
1897 if (Update != E) {
1898 // Merge the update into the ld/st.
1899 MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
1900 Modified = true;
1901 ++NumPreFolded;
1902 break;
1903 }
1904
1905 // Nothing found. Just move to the next instruction.
1906 ++MBBI;
1907 break;
1908 }
1909 }
1910 }
1911
1912 return Modified;
1913 }
1914
1915 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
1916 if (skipFunction(*Fn.getFunction()))
1917 return false;
1918
1919 Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
1920 TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
1921 TRI = Subtarget->getRegisterInfo();
1922
1923 // Resize the modified and used register bitfield trackers. We do this once
1924 // per function and then clear the bitfield each time we optimize a load or
1925 // store.
1926 ModifiedRegs.resize(TRI->getNumRegs());
1927 UsedRegs.resize(TRI->getNumRegs());
1928
1929 bool Modified = false;
1930 bool enableNarrowLdOpt =
1931 Subtarget->mergeNarrowLoads() && !Subtarget->requiresStrictAlign();
1932 for (auto &MBB : Fn)
1933 Modified |= optimizeBlock(MBB, enableNarrowLdOpt);
1934
1935 return Modified;
1936 }
1937
1938 // FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep
1939 // loads and stores near one another?
1940
1941 // FIXME: When pairing store instructions it's very possible for this pass to
1942 // hoist a store with a KILL marker above another use (without a KILL marker).
1943 // The resulting IR is invalid, but nothing uses the KILL markers after this
1944 // pass, so it's never caused a problem in practice.
1945
1946 /// createAArch64LoadStoreOptimizationPass - returns an instance of the
1947 /// load / store optimization pass.
1948 FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
1949 return new AArch64LoadStoreOpt();
1950 }
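// A minimal usage sketch (assuming the usual AArch64PassConfig hookup in
// AArch64TargetMachine.cpp; the exact hook and gating may differ):
//
//   void AArch64PassConfig::addPreSched2() {
//     // Run after register allocation so physical registers are known.
//     addPass(createAArch64LoadStoreOptimizationPass());
//   }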
1951