//===----------- PPCVSXSwapRemoval.cpp - Remove VSX LE Swaps -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This pass analyzes vector computations and removes unnecessary
// doubleword swaps (xxswapd instructions). This pass is performed
// only for little-endian VSX code generation.
//
// For this specific case, loads and stores of v4i32, v4f32, v2i64,
// and v2f64 vectors are inefficient. These are implemented using
// the lxvd2x and stxvd2x instructions, which invert the order of
// doublewords in a vector register. Thus code generation inserts
// an xxswapd after each such load, and prior to each such store.
//
// The extra xxswapd instructions reduce performance. The purpose
// of this pass is to reduce the number of xxswapd instructions
// required for correctness.
//
// The primary insight is that much code that operates on vectors
// does not care about the relative order of elements in a register,
// so long as the correct memory order is preserved. If we have a
// computation where all input values are provided by lxvd2x/xxswapd,
// all outputs are stored using xxswapd/stxvd2x, and all intermediate
// computations are lane-insensitive (independent of element order),
// then all the xxswapd instructions associated with the loads and
// stores may be removed without changing observable semantics.
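//
// As an illustrative sketch (the register and base-address choices here
// are arbitrary, not taken from generated code), a load/compute/store
// sequence such as
//
//   lxvd2x  0, 0, 3      # load; doublewords arrive reversed
//   xxswapd 0, 0         # restore element order
//   xvadddp 0, 0, 0      # lane-insensitive computation
//   xxswapd 0, 0         # re-reverse prior to the store
//   stxvd2x 0, 0, 4      # store; doublewords reversed again
//
// can be reduced to
//
//   lxvd2x  0, 0, 3
//   xvadddp 0, 0, 0
//   stxvd2x 0, 0, 4
//
// because xvadddp produces the same element-wise results no matter
// which doubleword holds which element.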
//
// This pass uses standard equivalence class infrastructure to create
// maximal webs of computations fitting the above description. Each
// such web is then optimized by removing its unnecessary xxswapd
// instructions.
//
// There are some lane-sensitive operations for which we can still
// permit the optimization, provided we modify those operations
// accordingly. Such operations are identified as using "special
// handling" within this module.
//
//===---------------------------------------------------------------------===//

#include "PPCInstrInfo.h"
#include "PPC.h"
#include "PPCInstrBuilder.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "ppc-vsx-swaps"

namespace llvm {
  void initializePPCVSXSwapRemovalPass(PassRegistry&);
}

namespace {

// A PPCVSXSwapEntry is created for each machine instruction that
// is relevant to a vector computation.
struct PPCVSXSwapEntry {
  // Pointer to the instruction.
  MachineInstr *VSEMI;

  // Unique ID (position in the swap vector).
  int VSEId;

  // Attributes of this node.
  unsigned int IsLoad : 1;
  unsigned int IsStore : 1;
  unsigned int IsSwap : 1;
  unsigned int MentionsPhysVR : 1;
  unsigned int IsSwappable : 1;
  unsigned int MentionsPartialVR : 1;
  unsigned int SpecialHandling : 3;
  unsigned int WebRejected : 1;
  unsigned int WillRemove : 1;
};

enum SHValues {
  SH_NONE = 0,
  SH_EXTRACT,
  SH_INSERT,
  SH_NOSWAP_LD,
  SH_NOSWAP_ST,
  SH_SPLAT,
  SH_XXPERMDI,
  SH_COPYWIDEN
};

struct PPCVSXSwapRemoval : public MachineFunctionPass {

  static char ID;
  const PPCInstrInfo *TII;
  MachineFunction *MF;
  MachineRegisterInfo *MRI;

  // Swap entries are allocated in a vector for better performance.
  std::vector<PPCVSXSwapEntry> SwapVector;

  // A mapping is maintained between machine instructions and
  // their swap entries. The key is the address of the MI.
  DenseMap<MachineInstr*, int> SwapMap;

  // Equivalence classes are used to gather webs of related computation.
  // Swap entries are represented by their VSEId fields.
  EquivalenceClasses<int> *EC;

  PPCVSXSwapRemoval() : MachineFunctionPass(ID) {
    initializePPCVSXSwapRemovalPass(*PassRegistry::getPassRegistry());
  }

private:
  // Initialize data structures.
  void initialize(MachineFunction &MFParm);

  // Walk the machine instructions to gather vector usage information.
  // Return true iff vector mentions are present.
  bool gatherVectorInstructions();

  // Add an entry to the swap vector and swap map.
  int addSwapEntry(MachineInstr *MI, PPCVSXSwapEntry &SwapEntry);

  // Hunt backwards through COPY and SUBREG_TO_REG chains for a
  // source register. VecIdx indicates the swap vector entry to
  // mark as mentioning a physical register if the search leads
  // to one.
  unsigned lookThruCopyLike(unsigned SrcReg, unsigned VecIdx);

  // Generate equivalence classes for related computations (webs).
  void formWebs();

  // Analyze webs and determine those that cannot be optimized.
  void recordUnoptimizableWebs();

  // Record which swap instructions can be safely removed.
  void markSwapsForRemoval();

  // Remove swaps and update other instructions requiring special
  // handling. Return true iff any changes are made.
  bool removeSwaps();

  // Insert a swap instruction from SrcReg to DstReg at the given
  // InsertPoint.
  void insertSwap(MachineInstr *MI, MachineBasicBlock::iterator InsertPoint,
                  unsigned DstReg, unsigned SrcReg);

  // Update instructions requiring special handling.
  void handleSpecialSwappables(int EntryIdx);

  // Dump a description of the entries in the swap vector.
  void dumpSwapVector();

  // Return true iff the given register is in the given class.
  bool isRegInClass(unsigned Reg, const TargetRegisterClass *RC) {
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return RC->hasSubClassEq(MRI->getRegClass(Reg));
    return RC->contains(Reg);
  }

  // Return true iff the given register is a full vector register.
  bool isVecReg(unsigned Reg) {
    return (isRegInClass(Reg, &PPC::VSRCRegClass) ||
            isRegInClass(Reg, &PPC::VRRCRegClass));
  }

  // Return true iff the given register is a partial vector register.
  bool isScalarVecReg(unsigned Reg) {
    return (isRegInClass(Reg, &PPC::VSFRCRegClass) ||
            isRegInClass(Reg, &PPC::VSSRCRegClass));
  }

  // Return true iff the given register mentions all or part of a
  // vector register. Also sets Partial to true if the mention
  // is for just the floating-point register overlap of the register.
  bool isAnyVecReg(unsigned Reg, bool &Partial) {
    if (isScalarVecReg(Reg))
      Partial = true;
    return isScalarVecReg(Reg) || isVecReg(Reg);
  }

public:
  // Main entry point for this pass.
  bool runOnMachineFunction(MachineFunction &MF) override {
    if (skipFunction(*MF.getFunction()))
      return false;

    // If we don't have VSX on the subtarget, don't do anything.
    const PPCSubtarget &STI = MF.getSubtarget<PPCSubtarget>();
    if (!STI.hasVSX())
      return false;

    bool Changed = false;
    initialize(MF);

    if (gatherVectorInstructions()) {
      formWebs();
      recordUnoptimizableWebs();
      markSwapsForRemoval();
      Changed = removeSwaps();
    }

    // FIXME: See the allocation of EC in initialize().
    delete EC;
    return Changed;
  }
};

// Initialize data structures for this pass. In particular, clear the
// swap vector and allocate the equivalence class mapping before
// processing each function.
void PPCVSXSwapRemoval::initialize(MachineFunction &MFParm) {
  MF = &MFParm;
  MRI = &MF->getRegInfo();
  TII = MF->getSubtarget<PPCSubtarget>().getInstrInfo();

  // An initial vector size of 256 appears to work well in practice.
  // Small/medium functions with vector content tend not to incur a
  // reallocation at this size. Three of the vector tests in
  // projects/test-suite reallocate, which seems like a reasonable rate.
  const int InitialVectorSize(256);
  SwapVector.clear();
  SwapVector.reserve(InitialVectorSize);

  // FIXME: Currently we allocate EC each time because we don't have
  // access to the set representation on which to call clear(). Should
  // consider adding a clear() method to the EquivalenceClasses class.
  EC = new EquivalenceClasses<int>;
}

// Create an entry in the swap vector for each instruction that mentions
// a full vector register, recording various characteristics of the
// instructions there.
bool PPCVSXSwapRemoval::gatherVectorInstructions() {
  bool RelevantFunction = false;

  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &MI : MBB) {

      if (MI.isDebugValue())
        continue;

      bool RelevantInstr = false;
      bool Partial = false;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg())
          continue;
        unsigned Reg = MO.getReg();
        if (isAnyVecReg(Reg, Partial)) {
          RelevantInstr = true;
          break;
        }
      }

      if (!RelevantInstr)
        continue;

      RelevantFunction = true;

      // Create a SwapEntry initialized to zeros, then fill in the
      // instruction and ID fields before pushing it to the back
      // of the swap vector.
      PPCVSXSwapEntry SwapEntry{};
      int VecIdx = addSwapEntry(&MI, SwapEntry);

      switch(MI.getOpcode()) {
      default:
        // Unless noted otherwise, an instruction is considered
        // safe for the optimization. There are a large number of
        // such true-SIMD instructions (all vector math, logical,
        // select, compare, etc.). However, if the instruction
        // mentions a partial vector register and does not have
        // special handling defined, it is not swappable.
        if (Partial)
          SwapVector[VecIdx].MentionsPartialVR = 1;
        else
          SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::XXPERMDI: {
        // This is a swap if it is of the form XXPERMDI t, s, s, 2.
        // Unfortunately, MachineCSE ignores COPY and SUBREG_TO_REG, so we
        // can also see XXPERMDI t, SUBREG_TO_REG(s), SUBREG_TO_REG(s), 2,
        // for example. We have to look through chains of COPY and
        // SUBREG_TO_REG to find the real source value for comparison.
        // If the real source value is a physical register, then mark the
        // XXPERMDI as mentioning a physical register.
        int immed = MI.getOperand(3).getImm();
        if (immed == 2) {
          unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
                                               VecIdx);
          unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
                                               VecIdx);
          if (trueReg1 == trueReg2)
            SwapVector[VecIdx].IsSwap = 1;
          else {
            // We can still handle these if the two registers are not
            // identical, by adjusting the form of the XXPERMDI.
            SwapVector[VecIdx].IsSwappable = 1;
            SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
          }
        // This is a doubleword splat if it is of the form
        // XXPERMDI t, s, s, 0 or XXPERMDI t, s, s, 3. As above we
        // must look through chains of copy-likes to find the source
        // register. We turn off the marking for mention of a physical
        // register, because splatting it is safe; the optimization
        // will not swap the value in the physical register. Whether
        // or not the two input registers are identical, we can handle
        // these by adjusting the form of the XXPERMDI.
        } else if (immed == 0 || immed == 3) {

          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;

          unsigned trueReg1 = lookThruCopyLike(MI.getOperand(1).getReg(),
                                               VecIdx);
          unsigned trueReg2 = lookThruCopyLike(MI.getOperand(2).getReg(),
                                               VecIdx);
          if (trueReg1 == trueReg2)
            SwapVector[VecIdx].MentionsPhysVR = 0;

        } else {
          // We can still handle these by adjusting the form of the XXPERMDI.
          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_XXPERMDI;
        }
        break;
      }
      case PPC::LVX:
        // Non-permuting loads are currently unsafe. We can use special
        // handling for this in the future. By not marking these as
        // IsSwap, we ensure computations containing them will be rejected
        // for now.
        SwapVector[VecIdx].IsLoad = 1;
        break;
      case PPC::LXVD2X:
      case PPC::LXVW4X:
        // Permuting loads are marked as both load and swap, and are
        // safe for optimization.
        SwapVector[VecIdx].IsLoad = 1;
        SwapVector[VecIdx].IsSwap = 1;
        break;
      case PPC::LXSDX:
      case PPC::LXSSPX:
        // A load of a floating-point value into the high-order half of
        // a vector register is safe, provided that we introduce a swap
        // following the load, which will be done by the SUBREG_TO_REG
        // support. So just mark these as safe.
        SwapVector[VecIdx].IsLoad = 1;
        SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::STVX:
        // Non-permuting stores are currently unsafe. We can use special
        // handling for this in the future. By not marking these as
        // IsSwap, we ensure computations containing them will be rejected
        // for now.
        SwapVector[VecIdx].IsStore = 1;
        break;
      case PPC::STXVD2X:
      case PPC::STXVW4X:
        // Permuting stores are marked as both store and swap, and are
        // safe for optimization.
        SwapVector[VecIdx].IsStore = 1;
        SwapVector[VecIdx].IsSwap = 1;
        break;
      case PPC::COPY:
        // These are fine provided they are moving between full vector
        // register classes.
        if (isVecReg(MI.getOperand(0).getReg()) &&
            isVecReg(MI.getOperand(1).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        // If we have a copy from one scalar floating-point register
        // to another, we can accept this even if it is a physical
        // register. The only way this gets involved is if it feeds
        // a SUBREG_TO_REG, which is handled by introducing a swap.
        else if (isScalarVecReg(MI.getOperand(0).getReg()) &&
                 isScalarVecReg(MI.getOperand(1).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        break;
      case PPC::SUBREG_TO_REG: {
        // These are fine provided they are moving between full vector
        // register classes. If they are moving from a scalar
        // floating-point class to a vector class, we can handle those
        // as well, provided we introduce a swap. It is generally the
        // case that we will introduce fewer swaps than we remove, but
        // (FIXME) a cost model could be used. However, introduced
        // swaps could potentially be CSEd, so this is not trivial.
        if (isVecReg(MI.getOperand(0).getReg()) &&
            isVecReg(MI.getOperand(2).getReg()))
          SwapVector[VecIdx].IsSwappable = 1;
        else if (isVecReg(MI.getOperand(0).getReg()) &&
                 isScalarVecReg(MI.getOperand(2).getReg())) {
          SwapVector[VecIdx].IsSwappable = 1;
          SwapVector[VecIdx].SpecialHandling = SHValues::SH_COPYWIDEN;
        }
        break;
      }
      case PPC::VSPLTB:
      case PPC::VSPLTH:
      case PPC::VSPLTW:
      case PPC::XXSPLTW:
        // Splats are lane-sensitive, but we can use special handling
        // to adjust the source lane for the splat.
        SwapVector[VecIdx].IsSwappable = 1;
        SwapVector[VecIdx].SpecialHandling = SHValues::SH_SPLAT;
        break;
      // The presence of the following lane-sensitive operations in a
      // web will kill the optimization, at least for now. For these
      // we do nothing, causing the optimization to fail.
      // FIXME: Some of these could be permitted with special handling,
      // and will be phased in as time permits.
      // FIXME: There is no simple and maintainable way to express a set
      // of opcodes having a common attribute in TableGen. Should this
      // change, this is a prime candidate to use such a mechanism.
      case PPC::INLINEASM:
      case PPC::EXTRACT_SUBREG:
      case PPC::INSERT_SUBREG:
      case PPC::COPY_TO_REGCLASS:
      case PPC::LVEBX:
      case PPC::LVEHX:
      case PPC::LVEWX:
      case PPC::LVSL:
      case PPC::LVSR:
      case PPC::LVXL:
      case PPC::STVEBX:
      case PPC::STVEHX:
      case PPC::STVEWX:
      case PPC::STVXL:
      // We can handle STXSDX and STXSSPX similarly to LXSDX and LXSSPX,
      // by adding special handling for narrowing copies as well as
      // widening ones. However, I've experimented with this, and in
      // practice we currently do not appear to use STXSDX fed by
      // a narrowing copy from a full vector register. Since I can't
      // generate any useful test cases, I've left this alone for now.
      case PPC::STXSDX:
      case PPC::STXSSPX:
      case PPC::VCIPHER:
      case PPC::VCIPHERLAST:
      case PPC::VMRGHB:
      case PPC::VMRGHH:
      case PPC::VMRGHW:
      case PPC::VMRGLB:
      case PPC::VMRGLH:
      case PPC::VMRGLW:
      case PPC::VMULESB:
      case PPC::VMULESH:
      case PPC::VMULESW:
      case PPC::VMULEUB:
      case PPC::VMULEUH:
      case PPC::VMULEUW:
      case PPC::VMULOSB:
      case PPC::VMULOSH:
      case PPC::VMULOSW:
      case PPC::VMULOUB:
      case PPC::VMULOUH:
      case PPC::VMULOUW:
      case PPC::VNCIPHER:
      case PPC::VNCIPHERLAST:
      case PPC::VPERM:
      case PPC::VPERMXOR:
      case PPC::VPKPX:
      case PPC::VPKSHSS:
      case PPC::VPKSHUS:
      case PPC::VPKSDSS:
      case PPC::VPKSDUS:
      case PPC::VPKSWSS:
      case PPC::VPKSWUS:
      case PPC::VPKUDUM:
      case PPC::VPKUDUS:
      case PPC::VPKUHUM:
      case PPC::VPKUHUS:
      case PPC::VPKUWUM:
      case PPC::VPKUWUS:
      case PPC::VPMSUMB:
      case PPC::VPMSUMD:
      case PPC::VPMSUMH:
      case PPC::VPMSUMW:
      case PPC::VRLB:
      case PPC::VRLD:
      case PPC::VRLH:
      case PPC::VRLW:
      case PPC::VSBOX:
      case PPC::VSHASIGMAD:
      case PPC::VSHASIGMAW:
      case PPC::VSL:
      case PPC::VSLDOI:
      case PPC::VSLO:
      case PPC::VSR:
      case PPC::VSRO:
      case PPC::VSUM2SWS:
      case PPC::VSUM4SBS:
      case PPC::VSUM4SHS:
      case PPC::VSUM4UBS:
      case PPC::VSUMSWS:
      case PPC::VUPKHPX:
      case PPC::VUPKHSB:
      case PPC::VUPKHSH:
      case PPC::VUPKHSW:
      case PPC::VUPKLPX:
      case PPC::VUPKLSB:
      case PPC::VUPKLSH:
      case PPC::VUPKLSW:
      case PPC::XXMRGHW:
      case PPC::XXMRGLW:
      // XXSLDWI could be replaced by a general permute with one of three
      // permute control vectors (for shift values 1, 2, 3). However,
      // VPERM has a more restrictive register class.
      case PPC::XXSLDWI:
        break;
      }
    }
  }

  if (RelevantFunction) {
    DEBUG(dbgs() << "Swap vector when first built\n\n");
    dumpSwapVector();
  }

  return RelevantFunction;
}

// Add an entry to the swap vector and swap map, and make a
// singleton equivalence class for the entry.
int PPCVSXSwapRemoval::addSwapEntry(MachineInstr *MI,
                                    PPCVSXSwapEntry& SwapEntry) {
  SwapEntry.VSEMI = MI;
  SwapEntry.VSEId = SwapVector.size();
  SwapVector.push_back(SwapEntry);
  EC->insert(SwapEntry.VSEId);
  SwapMap[MI] = SwapEntry.VSEId;
  return SwapEntry.VSEId;
}

// This is used to find the "true" source register for an
// XXPERMDI instruction, since MachineCSE does not handle the
// "copy-like" operations (Copy and SubregToReg). Returns
// the original SrcReg unless it is the target of a copy-like
// operation, in which case we chain backwards through all
// such operations to the ultimate source register. If a
// physical register is encountered, we stop the search and
// flag the swap entry indicated by VecIdx (the original
// XXPERMDI) as mentioning a physical register.
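// For example (the virtual register numbers here are purely
// illustrative):
//   %vreg1 = COPY %vreg0
//   %vreg2 = SUBREG_TO_REG 1, %vreg1, <subreg>
// lookThruCopyLike(%vreg2, VecIdx) chains through the SUBREG_TO_REG
// and the COPY and returns %vreg0; had a physical full vector
// register been reached instead, the entry at VecIdx would have been
// flagged as MentionsPhysVR.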
unsigned PPCVSXSwapRemoval::lookThruCopyLike(unsigned SrcReg,
                                             unsigned VecIdx) {
  MachineInstr *MI = MRI->getVRegDef(SrcReg);
  if (!MI->isCopyLike())
    return SrcReg;

  unsigned CopySrcReg;
  if (MI->isCopy())
    CopySrcReg = MI->getOperand(1).getReg();
  else {
    assert(MI->isSubregToReg() && "bad opcode for lookThruCopyLike");
    CopySrcReg = MI->getOperand(2).getReg();
  }

  if (!TargetRegisterInfo::isVirtualRegister(CopySrcReg)) {
    if (!isScalarVecReg(CopySrcReg))
      SwapVector[VecIdx].MentionsPhysVR = 1;
    return CopySrcReg;
  }

  return lookThruCopyLike(CopySrcReg, VecIdx);
}

// Generate equivalence classes for related computations (webs) by
// def-use relationships of virtual registers. Mention of a physical
// register terminates the generation of equivalence classes as this
// indicates a use of a parameter, definition of a return value, use
// of a value returned from a call, or definition of a parameter to a
// call. Computations with physical register mentions are flagged
// as such so their containing webs will not be optimized.
void PPCVSXSwapRemoval::formWebs() {

  DEBUG(dbgs() << "\n*** Forming webs for swap removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {

    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "\n" << SwapVector[EntryIdx].VSEId << " ");
    DEBUG(MI->dump());

    // It's sufficient to walk vector uses and join them to their unique
    // definitions. In addition, check full vector register operands
    // for physical regs. We exclude partial-vector register operands
    // because we can handle them if copied to a full vector.
    for (const MachineOperand &MO : MI->operands()) {
      if (!MO.isReg())
        continue;

      unsigned Reg = MO.getReg();
      if (!isVecReg(Reg) && !isScalarVecReg(Reg))
        continue;

      if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
        if (!(MI->isCopy() && isScalarVecReg(Reg)))
          SwapVector[EntryIdx].MentionsPhysVR = 1;
        continue;
      }

      if (!MO.isUse())
        continue;

      MachineInstr* DefMI = MRI->getVRegDef(Reg);
      assert(SwapMap.find(DefMI) != SwapMap.end() &&
             "Inconsistency: def of vector reg not found in swap map!");
      int DefIdx = SwapMap[DefMI];
      (void)EC->unionSets(SwapVector[DefIdx].VSEId,
                          SwapVector[EntryIdx].VSEId);

      DEBUG(dbgs() << format("Unioning %d with %d\n", SwapVector[DefIdx].VSEId,
                             SwapVector[EntryIdx].VSEId));
      DEBUG(dbgs() << "  Def: ");
      DEBUG(DefMI->dump());
    }
  }
}

// Walk the swap vector entries looking for conditions that prevent their
// containing computations from being optimized. When such conditions are
// found, mark the representative of the computation's equivalence class
// as rejected.
void PPCVSXSwapRemoval::recordUnoptimizableWebs() {

  DEBUG(dbgs() << "\n*** Rejecting webs for swap removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
    int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

    // If representative is already rejected, don't waste further time.
    if (SwapVector[Repr].WebRejected)
      continue;

    // Reject webs containing mentions of physical or partial registers, or
    // containing operations that we don't know how to handle in a lane-
    // permuted region.
    if (SwapVector[EntryIdx].MentionsPhysVR ||
        SwapVector[EntryIdx].MentionsPartialVR ||
        !(SwapVector[EntryIdx].IsSwappable || SwapVector[EntryIdx].IsSwap)) {

      SwapVector[Repr].WebRejected = 1;

      DEBUG(dbgs() <<
            format("Web %d rejected for physreg, partial reg, or not "
                   "swap[pable]\n", Repr));
      DEBUG(dbgs() << "  in " << EntryIdx << ": ");
      DEBUG(SwapVector[EntryIdx].VSEMI->dump());
      DEBUG(dbgs() << "\n");
    }

    // Reject webs that contain swapping loads that feed something other
    // than a swap instruction.
    else if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
      MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
      unsigned DefReg = MI->getOperand(0).getReg();

      // We skip debug instructions in the analysis. (Note that debug
      // location information is still maintained by this optimization
      // because it remains on the LXVD2X and STXVD2X instructions after
      // the XXPERMDIs are removed.)
      for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
        int UseIdx = SwapMap[&UseMI];

        if (!SwapVector[UseIdx].IsSwap || SwapVector[UseIdx].IsLoad ||
            SwapVector[UseIdx].IsStore) {

          SwapVector[Repr].WebRejected = 1;

          DEBUG(dbgs() <<
                format("Web %d rejected for load not feeding swap\n", Repr));
          DEBUG(dbgs() << "  def " << EntryIdx << ": ");
          DEBUG(MI->dump());
          DEBUG(dbgs() << "  use " << UseIdx << ": ");
          DEBUG(UseMI.dump());
          DEBUG(dbgs() << "\n");
        }
      }

    // Reject webs that contain swapping stores that are fed by something
    // other than a swap instruction.
    } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
      MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
      unsigned UseReg = MI->getOperand(0).getReg();
      MachineInstr *DefMI = MRI->getVRegDef(UseReg);
      unsigned DefReg = DefMI->getOperand(0).getReg();
      int DefIdx = SwapMap[DefMI];

      if (!SwapVector[DefIdx].IsSwap || SwapVector[DefIdx].IsLoad ||
          SwapVector[DefIdx].IsStore) {

        SwapVector[Repr].WebRejected = 1;

        DEBUG(dbgs() <<
              format("Web %d rejected for store not fed by swap\n", Repr));
        DEBUG(dbgs() << "  def " << DefIdx << ": ");
        DEBUG(DefMI->dump());
        DEBUG(dbgs() << "  use " << EntryIdx << ": ");
        DEBUG(MI->dump());
        DEBUG(dbgs() << "\n");
      }

      // Ensure all uses of the register defined by DefMI feed store
      // instructions.
      for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
        int UseIdx = SwapMap[&UseMI];

        if (SwapVector[UseIdx].VSEMI->getOpcode() != MI->getOpcode()) {
          SwapVector[Repr].WebRejected = 1;

          DEBUG(dbgs() <<
                format("Web %d rejected for swap not feeding only stores\n",
                       Repr));
          DEBUG(dbgs() << "  def " << DefIdx << ": ");
          DEBUG(DefMI->dump());
          DEBUG(dbgs() << "  use " << UseIdx << ": ");
          DEBUG(SwapVector[UseIdx].VSEMI->dump());
          DEBUG(dbgs() << "\n");
        }
      }
    }
  }

  DEBUG(dbgs() << "Swap vector after web analysis:\n\n");
  dumpSwapVector();
}

// Walk the swap vector entries looking for swaps fed by permuting loads
// and swaps that feed permuting stores. If the containing computation
// has not been marked rejected, mark each such swap for removal.
// (Removal is delayed in case optimization has disturbed the pattern,
// such that multiple loads feed the same swap, etc.)
void PPCVSXSwapRemoval::markSwapsForRemoval() {

  DEBUG(dbgs() << "\n*** Marking swaps for removal ***\n\n");

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {

    if (SwapVector[EntryIdx].IsLoad && SwapVector[EntryIdx].IsSwap) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected) {
        MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
        unsigned DefReg = MI->getOperand(0).getReg();

        for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DefReg)) {
          int UseIdx = SwapMap[&UseMI];
          SwapVector[UseIdx].WillRemove = 1;

          DEBUG(dbgs() << "Marking swap fed by load for removal: ");
          DEBUG(UseMI.dump());
        }
      }

    } else if (SwapVector[EntryIdx].IsStore && SwapVector[EntryIdx].IsSwap) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected) {
        MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
        unsigned UseReg = MI->getOperand(0).getReg();
        MachineInstr *DefMI = MRI->getVRegDef(UseReg);
        int DefIdx = SwapMap[DefMI];
        SwapVector[DefIdx].WillRemove = 1;

        DEBUG(dbgs() << "Marking swap feeding store for removal: ");
        DEBUG(DefMI->dump());
      }

    } else if (SwapVector[EntryIdx].IsSwappable &&
               SwapVector[EntryIdx].SpecialHandling != 0) {
      int Repr = EC->getLeaderValue(SwapVector[EntryIdx].VSEId);

      if (!SwapVector[Repr].WebRejected)
        handleSpecialSwappables(EntryIdx);
    }
  }
}

// Create an xxswapd instruction and insert it prior to the given point.
// MI is used to determine basic block and debug loc information.
// FIXME: When inserting a swap, we should check whether SrcReg is
// defined by another swap: SrcReg = XXPERMDI Reg, Reg, 2; If so,
// then instead we should generate a copy from Reg to DstReg.
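// The emitted swap is simply the xxswapd idiom spelled as a generic
// permute; for example (with hypothetical registers), the call
// insertSwap(MI, IP, %vD, %vS) produces
//   %vD = XXPERMDI %vS, %vS, 2
// at the insertion point.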
void PPCVSXSwapRemoval::insertSwap(MachineInstr *MI,
                                   MachineBasicBlock::iterator InsertPoint,
                                   unsigned DstReg, unsigned SrcReg) {
  BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
          TII->get(PPC::XXPERMDI), DstReg)
    .addReg(SrcReg)
    .addReg(SrcReg)
    .addImm(2);
}

// The identified swap entry requires special handling to allow its
// containing computation to be optimized. Perform that handling
// here.
// FIXME: Additional opportunities will be phased in with subsequent
// patches.
void PPCVSXSwapRemoval::handleSpecialSwappables(int EntryIdx) {
  switch (SwapVector[EntryIdx].SpecialHandling) {

  default:
    llvm_unreachable("Unexpected special handling type");

  // For splats based on an index into a vector, add N/2 modulo N
  // to the index, where N is the number of vector elements.
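  // For example, with the four-element XXSPLTW a lane immediate of 1
  // is rewritten to (1 + 4/2) % 4 = 3, so the splat still selects the
  // same logical element once the surrounding swaps are removed.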
  case SHValues::SH_SPLAT: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
    unsigned NElts;

    DEBUG(dbgs() << "Changing splat: ");
    DEBUG(MI->dump());

    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unexpected splat opcode");
    case PPC::VSPLTB: NElts = 16; break;
    case PPC::VSPLTH: NElts = 8; break;
    case PPC::VSPLTW:
    case PPC::XXSPLTW: NElts = 4; break;
    }

    unsigned EltNo;
    if (MI->getOpcode() == PPC::XXSPLTW)
      EltNo = MI->getOperand(2).getImm();
    else
      EltNo = MI->getOperand(1).getImm();

    EltNo = (EltNo + NElts / 2) % NElts;
    if (MI->getOpcode() == PPC::XXSPLTW)
      MI->getOperand(2).setImm(EltNo);
    else
      MI->getOperand(1).setImm(EltNo);

    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());
    break;
  }

  // For an XXPERMDI that isn't handled otherwise, we need to
  // reverse the order of the operands. If the selector operand
  // has a value of 0 or 3, we need to change it to 3 or 0,
  // respectively. Otherwise we should leave it alone. (This
  // is equivalent to reversing the two bits of the selector
  // operand and complementing the result.)
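  // For example, XXPERMDI t, a, b, 0 becomes XXPERMDI t, b, a, 3,
  // while XXPERMDI t, a, b, 1 keeps its selector and becomes
  // XXPERMDI t, b, a, 1.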
  case SHValues::SH_XXPERMDI: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "Changing XXPERMDI: ");
    DEBUG(MI->dump());

    unsigned Selector = MI->getOperand(3).getImm();
    if (Selector == 0 || Selector == 3)
      Selector = 3 - Selector;
    MI->getOperand(3).setImm(Selector);

    unsigned Reg1 = MI->getOperand(1).getReg();
    unsigned Reg2 = MI->getOperand(2).getReg();
    MI->getOperand(1).setReg(Reg2);
    MI->getOperand(2).setReg(Reg1);

    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());
    break;
  }

  // For a copy from a scalar floating-point register to a vector
  // register, removing swaps will leave the copied value in the
  // wrong lane. Insert a swap following the copy to fix this.
  case SHValues::SH_COPYWIDEN: {
    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;

    DEBUG(dbgs() << "Changing SUBREG_TO_REG: ");
    DEBUG(MI->dump());

    unsigned DstReg = MI->getOperand(0).getReg();
    const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
    unsigned NewVReg = MRI->createVirtualRegister(DstRC);

    MI->getOperand(0).setReg(NewVReg);
    DEBUG(dbgs() << "  Into: ");
    DEBUG(MI->dump());

    auto InsertPoint = ++MachineBasicBlock::iterator(MI);

    // Note that an XXPERMDI requires a VSRC, so if the SUBREG_TO_REG
    // is copying to a VRRC, we need to be careful to avoid a register
    // assignment problem. In this case we must copy from VRRC to VSRC
    // prior to the swap, and from VSRC to VRRC following the swap.
    // Coalescing will usually remove all this mess.
    if (DstRC == &PPC::VRRCRegClass) {
      unsigned VSRCTmp1 = MRI->createVirtualRegister(&PPC::VSRCRegClass);
      unsigned VSRCTmp2 = MRI->createVirtualRegister(&PPC::VSRCRegClass);

      BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
              TII->get(PPC::COPY), VSRCTmp1)
        .addReg(NewVReg);
      DEBUG(std::prev(InsertPoint)->dump());

      insertSwap(MI, InsertPoint, VSRCTmp2, VSRCTmp1);
      DEBUG(std::prev(InsertPoint)->dump());

      BuildMI(*MI->getParent(), InsertPoint, MI->getDebugLoc(),
              TII->get(PPC::COPY), DstReg)
        .addReg(VSRCTmp2);
      DEBUG(std::prev(InsertPoint)->dump());

    } else {
      insertSwap(MI, InsertPoint, DstReg, NewVReg);
      DEBUG(std::prev(InsertPoint)->dump());
    }
    break;
  }
  }
}

// Walk the swap vector and replace each entry marked for removal with
// a copy operation.
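// For example (with hypothetical registers), a marked swap
//   %vD = XXPERMDI %vS, %vS, 2
// is replaced by
//   %vD = COPY %vS
// leaving a trivial copy for later passes to clean up.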
bool PPCVSXSwapRemoval::removeSwaps() {

  DEBUG(dbgs() << "\n*** Removing swaps ***\n\n");

  bool Changed = false;

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {
    if (SwapVector[EntryIdx].WillRemove) {
      Changed = true;
      MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
      MachineBasicBlock *MBB = MI->getParent();
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
        .addOperand(MI->getOperand(1));

      DEBUG(dbgs() << format("Replaced %d with copy: ",
                             SwapVector[EntryIdx].VSEId));
      DEBUG(MI->dump());

      MI->eraseFromParent();
    }
  }

  return Changed;
}

// For debug purposes, dump the contents of the swap vector.
void PPCVSXSwapRemoval::dumpSwapVector() {

  for (unsigned EntryIdx = 0; EntryIdx < SwapVector.size(); ++EntryIdx) {

    MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
    int ID = SwapVector[EntryIdx].VSEId;

    DEBUG(dbgs() << format("%6d", ID));
    DEBUG(dbgs() << format("%6d", EC->getLeaderValue(ID)));
    DEBUG(dbgs() << format(" BB#%3d", MI->getParent()->getNumber()));
    DEBUG(dbgs() << format(" %14s ", TII->getName(MI->getOpcode())));

    if (SwapVector[EntryIdx].IsLoad)
      DEBUG(dbgs() << "load ");
    if (SwapVector[EntryIdx].IsStore)
      DEBUG(dbgs() << "store ");
    if (SwapVector[EntryIdx].IsSwap)
      DEBUG(dbgs() << "swap ");
    if (SwapVector[EntryIdx].MentionsPhysVR)
      DEBUG(dbgs() << "physreg ");
    if (SwapVector[EntryIdx].MentionsPartialVR)
      DEBUG(dbgs() << "partialreg ");

    if (SwapVector[EntryIdx].IsSwappable) {
      DEBUG(dbgs() << "swappable ");
      switch(SwapVector[EntryIdx].SpecialHandling) {
      default:
        DEBUG(dbgs() << "special:**unknown**");
        break;
      case SH_NONE:
        break;
      case SH_EXTRACT:
        DEBUG(dbgs() << "special:extract ");
        break;
      case SH_INSERT:
        DEBUG(dbgs() << "special:insert ");
        break;
      case SH_NOSWAP_LD:
        DEBUG(dbgs() << "special:load ");
        break;
      case SH_NOSWAP_ST:
        DEBUG(dbgs() << "special:store ");
        break;
      case SH_SPLAT:
        DEBUG(dbgs() << "special:splat ");
        break;
      case SH_XXPERMDI:
        DEBUG(dbgs() << "special:xxpermdi ");
        break;
      case SH_COPYWIDEN:
        DEBUG(dbgs() << "special:copywiden ");
        break;
      }
    }

    if (SwapVector[EntryIdx].WebRejected)
      DEBUG(dbgs() << "rejected ");
    if (SwapVector[EntryIdx].WillRemove)
      DEBUG(dbgs() << "remove ");

    DEBUG(dbgs() << "\n");

    // For no-asserts builds.
    (void)MI;
    (void)ID;
  }

  DEBUG(dbgs() << "\n");
}

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(PPCVSXSwapRemoval, DEBUG_TYPE,
                      "PowerPC VSX Swap Removal", false, false)
INITIALIZE_PASS_END(PPCVSXSwapRemoval, DEBUG_TYPE,
                    "PowerPC VSX Swap Removal", false, false)

char PPCVSXSwapRemoval::ID = 0;
FunctionPass*
llvm::createPPCVSXSwapRemovalPass() { return new PPCVSXSwapRemoval(); }