//===- HexagonBitSimplify.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "BitTracker.h"
#include "HexagonBitTracker.h"
#include "HexagonInstrInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexbit"

using namespace llvm;

static cl::opt<bool> PreserveTiedOps("hexbit-keep-tied", cl::Hidden,
  cl::init(true), cl::desc("Preserve subregisters in tied operands"));
static cl::opt<bool> GenExtract("hexbit-extract", cl::Hidden,
  cl::init(true), cl::desc("Generate extract instructions"));
static cl::opt<bool> GenBitSplit("hexbit-bitsplit", cl::Hidden,
  cl::init(true), cl::desc("Generate bitsplit instructions"));

static cl::opt<unsigned> MaxExtract("hexbit-max-extract", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountExtract = 0;
static cl::opt<unsigned> MaxBitSplit("hexbit-max-bitsplit", cl::Hidden,
  cl::init(std::numeric_limits<unsigned>::max()));
static unsigned CountBitSplit = 0;

namespace llvm {

  void initializeHexagonBitSimplifyPass(PassRegistry& Registry);
  FunctionPass *createHexagonBitSimplify();

} // end namespace llvm

namespace {

  // Set of virtual registers, based on BitVector.
  struct RegisterSet : private BitVector {
    RegisterSet() = default;
    explicit RegisterSet(unsigned s, bool t = false) : BitVector(s, t) {}
    RegisterSet(const RegisterSet &RS) = default;

    using BitVector::clear;
    using BitVector::count;

    unsigned find_first() const {
      int First = BitVector::find_first();
      if (First < 0)
        return 0;
      return x2v(First);
    }

    unsigned find_next(unsigned Prev) const {
      int Next = BitVector::find_next(v2x(Prev));
      if (Next < 0)
        return 0;
      return x2v(Next);
    }

    RegisterSet &insert(unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return static_cast<RegisterSet&>(BitVector::set(Idx));
    }
    RegisterSet &remove(unsigned R) {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return *this;
      return static_cast<RegisterSet&>(BitVector::reset(Idx));
    }

    RegisterSet &insert(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::operator|=(Rs));
    }
    RegisterSet &remove(const RegisterSet &Rs) {
      return static_cast<RegisterSet&>(BitVector::reset(Rs));
    }

    reference operator[](unsigned R) {
      unsigned Idx = v2x(R);
      ensure(Idx);
      return BitVector::operator[](Idx);
    }
    bool operator[](unsigned R) const {
      unsigned Idx = v2x(R);
      assert(Idx < size());
      return BitVector::operator[](Idx);
    }
    bool has(unsigned R) const {
      unsigned Idx = v2x(R);
      if (Idx >= size())
        return false;
      return BitVector::test(Idx);
    }

    bool empty() const {
      return !BitVector::any();
    }
    bool includes(const RegisterSet &Rs) const {
      // A.BitVector::test(B) <=> A-B != {}
      return !Rs.BitVector::test(*this);
    }
    bool intersects(const RegisterSet &Rs) const {
      return BitVector::anyCommon(Rs);
    }

  private:
    void ensure(unsigned Idx) {
      if (size() <= Idx)
        resize(std::max(Idx+1, 32U));
    }

    static inline unsigned v2x(unsigned v) {
      return Register::virtReg2Index(v);
    }

    static inline unsigned x2v(unsigned x) {
      return Register::index2VirtReg(x);
    }
  };

  struct PrintRegSet {
    PrintRegSet(const RegisterSet &S, const TargetRegisterInfo *RI)
      : RS(S), TRI(RI) {}

    friend raw_ostream &operator<< (raw_ostream &OS,
          const PrintRegSet &P);

  private:
    const RegisterSet &RS;
    const TargetRegisterInfo *TRI;
  };

  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<< (raw_ostream &OS, const PrintRegSet &P) {
    OS << '{';
    for (unsigned R = P.RS.find_first(); R; R = P.RS.find_next(R))
      OS << ' ' << printReg(R, P.TRI);
    OS << " }";
    return OS;
  }

  class Transformation;

  class HexagonBitSimplify : public MachineFunctionPass {
  public:
    static char ID;

    HexagonBitSimplify() : MachineFunctionPass(ID) {}

    StringRef getPassName() const override {
      return "Hexagon bit simplification";
    }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    static void getInstrDefs(const MachineInstr &MI, RegisterSet &Defs);
    static void getInstrUses(const MachineInstr &MI, RegisterSet &Uses);
    static bool isEqual(const BitTracker::RegisterCell &RC1, uint16_t B1,
        const BitTracker::RegisterCell &RC2, uint16_t B2, uint16_t W);
    static bool isZero(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W);
    static bool getConst(const BitTracker::RegisterCell &RC, uint16_t B,
        uint16_t W, uint64_t &U);
    static bool replaceReg(unsigned OldR, unsigned NewR,
        MachineRegisterInfo &MRI);
    static bool getSubregMask(const BitTracker::RegisterRef &RR,
        unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI);
    static bool replaceRegWithSub(unsigned OldR, unsigned NewR,
        unsigned NewSR, MachineRegisterInfo &MRI);
    static bool replaceSubWithSub(unsigned OldR, unsigned OldSR,
        unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI);
    static bool parseRegSequence(const MachineInstr &I,
        BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
        const MachineRegisterInfo &MRI);

    static bool getUsedBitsInStore(unsigned Opc, BitVector &Bits,
        uint16_t Begin);
    static bool getUsedBits(unsigned Opc, unsigned OpN, BitVector &Bits,
        uint16_t Begin, const HexagonInstrInfo &HII);

    static const TargetRegisterClass *getFinalVRegClass(
        const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI);
    static bool isTransparentCopy(const BitTracker::RegisterRef &RD,
        const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI);

  private:
    MachineDominatorTree *MDT = nullptr;

    bool visitBlock(MachineBasicBlock &B, Transformation &T, RegisterSet &AVs);
    static bool hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
        unsigned NewSub = Hexagon::NoSubRegister);
  };

  using HBS = HexagonBitSimplify;

  // The purpose of this class is to provide a common facility to traverse
  // the function top-down or bottom-up via the dominator tree, and keep
  // track of the available registers.
  class Transformation {
  public:
    bool TopDown;

    Transformation(bool TD) : TopDown(TD) {}
    virtual ~Transformation() = default;

    virtual bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) = 0;
  };

} // end anonymous namespace

char HexagonBitSimplify::ID = 0;

INITIALIZE_PASS_BEGIN(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(HexagonBitSimplify, "hexagon-bit-simplify",
      "Hexagon bit simplification", false, false)

bool HexagonBitSimplify::visitBlock(MachineBasicBlock &B, Transformation &T,
      RegisterSet &AVs) {
  bool Changed = false;

  if (T.TopDown)
    Changed = T.processBlock(B, AVs);

  RegisterSet Defs;
  for (auto &I : B)
    getInstrDefs(I, Defs);
  RegisterSet NewAVs = AVs;
  NewAVs.insert(Defs);

  for (auto *DTN : children<MachineDomTreeNode*>(MDT->getNode(&B)))
    Changed |= visitBlock(*(DTN->getBlock()), T, NewAVs);

  if (!T.TopDown)
    Changed |= T.processBlock(B, AVs);

  return Changed;
}

//
// Utility functions:
//
void HexagonBitSimplify::getInstrDefs(const MachineInstr &MI,
      RegisterSet &Defs) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isDef())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Defs.insert(R);
  }
}

void HexagonBitSimplify::getInstrUses(const MachineInstr &MI,
      RegisterSet &Uses) {
  for (auto &Op : MI.operands()) {
    if (!Op.isReg() || !Op.isUse())
      continue;
    Register R = Op.getReg();
    if (!Register::isVirtualRegister(R))
      continue;
    Uses.insert(R);
  }
}

// Check if the bits in range [B1, B1+W) of cell RC1 are equal to the bits
// in range [B2, B2+W) of cell RC2.
bool HexagonBitSimplify::isEqual(const BitTracker::RegisterCell &RC1,
      uint16_t B1, const BitTracker::RegisterCell &RC2, uint16_t B2,
      uint16_t W) {
  for (uint16_t i = 0; i < W; ++i) {
    // If RC1[i] is "bottom", it cannot be proven equal to RC2[i].
    if (RC1[B1+i].Type == BitTracker::BitValue::Ref && RC1[B1+i].RefI.Reg == 0)
      return false;
    // Same for RC2[i].
    if (RC2[B2+i].Type == BitTracker::BitValue::Ref && RC2[B2+i].RefI.Reg == 0)
      return false;
    if (RC1[B1+i] != RC2[B2+i])
      return false;
  }
  return true;
}

bool HexagonBitSimplify::isZero(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W) {
  assert(B < RC.width() && B+W <= RC.width());
  for (uint16_t i = B; i < B+W; ++i)
    if (!RC[i].is(0))
      return false;
  return true;
}

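// Try to interpret the bits in range [B, B+W) of cell RC as a constant.
// If every bit in the range has a known 0/1 value, store the value in U
// and return true; otherwise return false.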
bool HexagonBitSimplify::getConst(const BitTracker::RegisterCell &RC,
      uint16_t B, uint16_t W, uint64_t &U) {
  assert(B < RC.width() && B+W <= RC.width());
  int64_t T = 0;
  for (uint16_t i = B+W; i > B; --i) {
    const BitTracker::BitValue &BV = RC[i-1];
    T <<= 1;
    if (BV.is(1))
      T |= 1;
    else if (!BV.is(0))
      return false;
  }
  U = T;
  return true;
}

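// Rewrite all uses of the virtual register OldR to use NewR instead.
// Return true if any use was actually rewritten.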
bool HexagonBitSimplify::replaceReg(unsigned OldR, unsigned NewR,
      MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceRegWithSub(unsigned OldR, unsigned NewR,
      unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

bool HexagonBitSimplify::replaceSubWithSub(unsigned OldR, unsigned OldSR,
      unsigned NewR, unsigned NewSR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(OldR) || !Register::isVirtualRegister(NewR))
    return false;
  if (OldSR != NewSR && hasTiedUse(OldR, MRI, NewSR))
    return false;
  auto Begin = MRI.use_begin(OldR), End = MRI.use_end();
  decltype(End) NextI;
  for (auto I = Begin; I != End; I = NextI) {
    NextI = std::next(I);
    if (I->getSubReg() != OldSR)
      continue;
    I->setReg(NewR);
    I->setSubReg(NewSR);
  }
  return Begin != End;
}

// For a register ref (pair Reg:Sub), set Begin to the position of the LSB
// of Sub in Reg, and set Width to the size of Sub in bits. Return true
// if this succeeded, otherwise return false.
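// For example, for %1:isub_hi where %1 is a 64-bit double register,
// Begin becomes 32 and Width becomes 32.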
bool HexagonBitSimplify::getSubregMask(const BitTracker::RegisterRef &RR,
      unsigned &Begin, unsigned &Width, MachineRegisterInfo &MRI) {
  const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0) {
    Begin = 0;
    Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
    return true;
  }

  Begin = 0;

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
    case Hexagon::HvxWRRegClassID:
      Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
      if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
        Begin = Width;
      break;
    default:
      return false;
  }
  return true;
}


// For a REG_SEQUENCE, set SL to the low subregister and SH to the high
// subregister.
bool HexagonBitSimplify::parseRegSequence(const MachineInstr &I,
      BitTracker::RegisterRef &SL, BitTracker::RegisterRef &SH,
      const MachineRegisterInfo &MRI) {
  assert(I.getOpcode() == TargetOpcode::REG_SEQUENCE);
  unsigned Sub1 = I.getOperand(2).getImm(), Sub2 = I.getOperand(4).getImm();
  auto &DstRC = *MRI.getRegClass(I.getOperand(0).getReg());
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());
  unsigned SubLo = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_lo);
  unsigned SubHi = HRI.getHexagonSubRegIndex(DstRC, Hexagon::ps_sub_hi);
  assert((Sub1 == SubLo && Sub2 == SubHi) || (Sub1 == SubHi && Sub2 == SubLo));
  if (Sub1 == SubLo && Sub2 == SubHi) {
    SL = I.getOperand(1);
    SH = I.getOperand(3);
    return true;
  }
  if (Sub1 == SubHi && Sub2 == SubLo) {
    SH = I.getOperand(1);
    SL = I.getOperand(3);
    return true;
  }
  return false;
}

// All stores (except 64-bit stores) take a 32-bit register as the source
// of the value to be stored. If the instruction stores into a location
// that is shorter than 32 bits, some bits of the source register are not
// used. For each store instruction, calculate the set of used bits in
// the source register, and set appropriate bits in Bits. Return true if
// the bits are calculated, false otherwise.
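// For example, a byte store reads only bits [Begin, Begin+8) of the source
// register, and a "store high half" reads bits [Begin+16, Begin+32).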
bool HexagonBitSimplify::getUsedBitsInStore(unsigned Opc, BitVector &Bits,
      uint16_t Begin) {
  using namespace Hexagon;

  switch (Opc) {
    // Store byte
    case S2_storerb_io: // memb(Rs32+#s11:0)=Rt32
    case S2_storerbnew_io: // memb(Rs32+#s11:0)=Nt8.new
    case S2_pstorerbt_io: // if (Pv4) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbf_io: // if (!Pv4) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S4_pstorerbfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Rt32
    case S2_pstorerbnewt_io: // if (Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S2_pstorerbnewf_io: // if (!Pv4) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewtnew_io: // if (Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S4_pstorerbnewfnew_io: // if (!Pv4.new) memb(Rs32+#u6:0)=Nt8.new
    case S2_storerb_pi: // memb(Rx32++#s4:0)=Rt32
    case S2_storerbnew_pi: // memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbt_pi: // if (Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Rt32
    case S2_pstorerbnewt_pi: // if (Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewf_pi: // if (!Pv4) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewtnew_pi: // if (Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S2_pstorerbnewfnew_pi: // if (!Pv4.new) memb(Rx32++#s4:0)=Nt8.new
    case S4_storerb_ap: // memb(Re32=#U6)=Rt32
    case S4_storerbnew_ap: // memb(Re32=#U6)=Nt8.new
    case S2_storerb_pr: // memb(Rx32++Mu2)=Rt32
    case S2_storerbnew_pr: // memb(Rx32++Mu2)=Nt8.new
    case S4_storerb_ur: // memb(Ru32<<#u2+#U6)=Rt32
    case S4_storerbnew_ur: // memb(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerb_pbr: // memb(Rx32++Mu2:brev)=Rt32
    case S2_storerbnew_pbr: // memb(Rx32++Mu2:brev)=Nt8.new
    case S2_storerb_pci: // memb(Rx32++#s4:0:circ(Mu2))=Rt32
    case S2_storerbnew_pci: // memb(Rx32++#s4:0:circ(Mu2))=Nt8.new
    case S2_storerb_pcr: // memb(Rx32++I:circ(Mu2))=Rt32
    case S2_storerbnew_pcr: // memb(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerb_rr: // memb(Rs32+Ru32<<#u2)=Rt32
    case S4_storerbnew_rr: // memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerbnewt_rr: // if (Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewf_rr: // if (!Pv4) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewtnew_rr: // if (Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerbnewfnew_rr: // if (!Pv4.new) memb(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerbgp: // memb(gp+#u16:0)=Rt32
    case S2_storerbnewgp: // memb(gp+#u16:0)=Nt8.new
    case S4_pstorerbt_abs: // if (Pv4) memb(#u6)=Rt32
    case S4_pstorerbf_abs: // if (!Pv4) memb(#u6)=Rt32
    case S4_pstorerbtnew_abs: // if (Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbfnew_abs: // if (!Pv4.new) memb(#u6)=Rt32
    case S4_pstorerbnewt_abs: // if (Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewf_abs: // if (!Pv4) memb(#u6)=Nt8.new
    case S4_pstorerbnewtnew_abs: // if (Pv4.new) memb(#u6)=Nt8.new
    case S4_pstorerbnewfnew_abs: // if (!Pv4.new) memb(#u6)=Nt8.new
      Bits.set(Begin, Begin+8);
      return true;

    // Store low half
    case S2_storerh_io: // memh(Rs32+#s11:1)=Rt32
    case S2_storerhnew_io: // memh(Rs32+#s11:1)=Nt8.new
    case S2_pstorerht_io: // if (Pv4) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhf_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S4_pstorerhfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt32
    case S2_pstorerhnewt_io: // if (Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S2_pstorerhnewf_io: // if (!Pv4) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewtnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S4_pstorerhnewfnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Nt8.new
    case S2_storerh_pi: // memh(Rx32++#s4:1)=Rt32
    case S2_storerhnew_pi: // memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerht_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt32
    case S2_pstorerhnewt_pi: // if (Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewf_pi: // if (!Pv4) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewtnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S2_pstorerhnewfnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Nt8.new
    case S4_storerh_ap: // memh(Re32=#U6)=Rt32
    case S4_storerhnew_ap: // memh(Re32=#U6)=Nt8.new
    case S2_storerh_pr: // memh(Rx32++Mu2)=Rt32
    case S2_storerhnew_pr: // memh(Rx32++Mu2)=Nt8.new
    case S4_storerh_ur: // memh(Ru32<<#u2+#U6)=Rt32
    case S4_storerhnew_ur: // memh(Ru32<<#u2+#U6)=Nt8.new
    case S2_storerh_pbr: // memh(Rx32++Mu2:brev)=Rt32
    case S2_storerhnew_pbr: // memh(Rx32++Mu2:brev)=Nt8.new
    case S2_storerh_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt32
    case S2_storerhnew_pci: // memh(Rx32++#s4:1:circ(Mu2))=Nt8.new
    case S2_storerh_pcr: // memh(Rx32++I:circ(Mu2))=Rt32
    case S2_storerhnew_pcr: // memh(Rx32++I:circ(Mu2))=Nt8.new
    case S4_storerh_rr: // memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerht_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_pstorerhfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt32
    case S4_storerhnew_rr: // memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewt_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewf_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewtnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S4_pstorerhnewfnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Nt8.new
    case S2_storerhgp: // memh(gp+#u16:1)=Rt32
    case S2_storerhnewgp: // memh(gp+#u16:1)=Nt8.new
    case S4_pstorerht_abs: // if (Pv4) memh(#u6)=Rt32
    case S4_pstorerhf_abs: // if (!Pv4) memh(#u6)=Rt32
    case S4_pstorerhtnew_abs: // if (Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhfnew_abs: // if (!Pv4.new) memh(#u6)=Rt32
    case S4_pstorerhnewt_abs: // if (Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewf_abs: // if (!Pv4) memh(#u6)=Nt8.new
    case S4_pstorerhnewtnew_abs: // if (Pv4.new) memh(#u6)=Nt8.new
    case S4_pstorerhnewfnew_abs: // if (!Pv4.new) memh(#u6)=Nt8.new
      Bits.set(Begin, Begin+16);
      return true;

    // Store high half
    case S2_storerf_io: // memh(Rs32+#s11:1)=Rt.H32
    case S2_pstorerft_io: // if (Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S2_pstorerff_io: // if (!Pv4) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerftnew_io: // if (Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S4_pstorerffnew_io: // if (!Pv4.new) memh(Rs32+#u6:1)=Rt.H32
    case S2_storerf_pi: // memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerft_pi: // if (Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerff_pi: // if (!Pv4) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerftnew_pi: // if (Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S2_pstorerffnew_pi: // if (!Pv4.new) memh(Rx32++#s4:1)=Rt.H32
    case S4_storerf_ap: // memh(Re32=#U6)=Rt.H32
    case S2_storerf_pr: // memh(Rx32++Mu2)=Rt.H32
    case S4_storerf_ur: // memh(Ru32<<#u2+#U6)=Rt.H32
    case S2_storerf_pbr: // memh(Rx32++Mu2:brev)=Rt.H32
    case S2_storerf_pci: // memh(Rx32++#s4:1:circ(Mu2))=Rt.H32
    case S2_storerf_pcr: // memh(Rx32++I:circ(Mu2))=Rt.H32
    case S4_storerf_rr: // memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerft_rr: // if (Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerff_rr: // if (!Pv4) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerftnew_rr: // if (Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S4_pstorerffnew_rr: // if (!Pv4.new) memh(Rs32+Ru32<<#u2)=Rt.H32
    case S2_storerfgp: // memh(gp+#u16:1)=Rt.H32
    case S4_pstorerft_abs: // if (Pv4) memh(#u6)=Rt.H32
    case S4_pstorerff_abs: // if (!Pv4) memh(#u6)=Rt.H32
    case S4_pstorerftnew_abs: // if (Pv4.new) memh(#u6)=Rt.H32
    case S4_pstorerffnew_abs: // if (!Pv4.new) memh(#u6)=Rt.H32
      Bits.set(Begin+16, Begin+32);
      return true;
  }

  return false;
}

// For an instruction with opcode Opc, calculate the set of bits that it
// uses in a register in operand OpN. This only calculates the set of used
// bits for cases where it does not depend on any operands (as is the case
// in shifts, for example). For concrete instructions from a program, the
// operand may be a subregister of a larger register, while Bits would
// correspond to the larger register in its entirety. Because of that,
// the parameter Begin can be used to indicate which bit of Bits should be
// considered the LSB of the operand.
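// For example, for A2_sxtb (sign-extend byte) only the low 8 bits of the
// source operand are read, so for OpN == 1 bits [Begin, Begin+8) are set.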
bool HexagonBitSimplify::getUsedBits(unsigned Opc, unsigned OpN,
      BitVector &Bits, uint16_t Begin, const HexagonInstrInfo &HII) {
  using namespace Hexagon;

  const MCInstrDesc &D = HII.get(Opc);
  if (D.mayStore()) {
    if (OpN == D.getNumOperands()-1)
      return getUsedBitsInStore(Opc, Bits, Begin);
    return false;
  }

  switch (Opc) {
    // One register source. Used bits: R1[0-7].
    case A2_sxtb:
    case A2_zxtb:
    case A4_cmpbeqi:
    case A4_cmpbgti:
    case A4_cmpbgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // One register source. Used bits: R1[0-15].
    case A2_aslh:
    case A2_sxth:
    case A2_zxth:
    case A4_cmpheqi:
    case A4_cmphgti:
    case A4_cmphgtui:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // One register source. Used bits: R1[16-31].
    case A2_asrh:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-7], R2[0-7].
    case A4_cmpbeq:
    case A4_cmpbgt:
    case A4_cmpbgtu:
      if (OpN == 1) {
        Bits.set(Begin, Begin+8);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[0-15].
    case A4_cmpheq:
    case A4_cmphgt:
    case A4_cmphgtu:
    case A2_addh_h16_ll:
    case A2_addh_h16_sat_ll:
    case A2_addh_l16_ll:
    case A2_addh_l16_sat_ll:
    case A2_combine_ll:
    case A2_subh_h16_ll:
    case A2_subh_h16_sat_ll:
    case A2_subh_l16_ll:
    case A2_subh_l16_sat_ll:
    case M2_mpy_acc_ll_s0:
    case M2_mpy_acc_ll_s1:
    case M2_mpy_acc_sat_ll_s0:
    case M2_mpy_acc_sat_ll_s1:
    case M2_mpy_ll_s0:
    case M2_mpy_ll_s1:
    case M2_mpy_nac_ll_s0:
    case M2_mpy_nac_ll_s1:
    case M2_mpy_nac_sat_ll_s0:
    case M2_mpy_nac_sat_ll_s1:
    case M2_mpy_rnd_ll_s0:
    case M2_mpy_rnd_ll_s1:
    case M2_mpy_sat_ll_s0:
    case M2_mpy_sat_ll_s1:
    case M2_mpy_sat_rnd_ll_s0:
    case M2_mpy_sat_rnd_ll_s1:
    case M2_mpyd_acc_ll_s0:
    case M2_mpyd_acc_ll_s1:
    case M2_mpyd_ll_s0:
    case M2_mpyd_ll_s1:
    case M2_mpyd_nac_ll_s0:
    case M2_mpyd_nac_ll_s1:
    case M2_mpyd_rnd_ll_s0:
    case M2_mpyd_rnd_ll_s1:
    case M2_mpyu_acc_ll_s0:
    case M2_mpyu_acc_ll_s1:
    case M2_mpyu_ll_s0:
    case M2_mpyu_ll_s1:
    case M2_mpyu_nac_ll_s0:
    case M2_mpyu_nac_ll_s1:
    case M2_mpyud_acc_ll_s0:
    case M2_mpyud_acc_ll_s1:
    case M2_mpyud_ll_s0:
    case M2_mpyud_ll_s1:
    case M2_mpyud_nac_ll_s0:
    case M2_mpyud_nac_ll_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources. Used bits: R1[0-15], R2[16-31].
    case A2_addh_h16_lh:
    case A2_addh_h16_sat_lh:
    case A2_combine_lh:
    case A2_subh_h16_lh:
    case A2_subh_h16_sat_lh:
    case M2_mpy_acc_lh_s0:
    case M2_mpy_acc_lh_s1:
    case M2_mpy_acc_sat_lh_s0:
    case M2_mpy_acc_sat_lh_s1:
    case M2_mpy_lh_s0:
    case M2_mpy_lh_s1:
    case M2_mpy_nac_lh_s0:
    case M2_mpy_nac_lh_s1:
    case M2_mpy_nac_sat_lh_s0:
    case M2_mpy_nac_sat_lh_s1:
    case M2_mpy_rnd_lh_s0:
    case M2_mpy_rnd_lh_s1:
    case M2_mpy_sat_lh_s0:
    case M2_mpy_sat_lh_s1:
    case M2_mpy_sat_rnd_lh_s0:
    case M2_mpy_sat_rnd_lh_s1:
    case M2_mpyd_acc_lh_s0:
    case M2_mpyd_acc_lh_s1:
    case M2_mpyd_lh_s0:
    case M2_mpyd_lh_s1:
    case M2_mpyd_nac_lh_s0:
    case M2_mpyd_nac_lh_s1:
    case M2_mpyd_rnd_lh_s0:
    case M2_mpyd_rnd_lh_s1:
    case M2_mpyu_acc_lh_s0:
    case M2_mpyu_acc_lh_s1:
    case M2_mpyu_lh_s0:
    case M2_mpyu_lh_s1:
    case M2_mpyu_nac_lh_s0:
    case M2_mpyu_nac_lh_s1:
    case M2_mpyud_acc_lh_s0:
    case M2_mpyud_acc_lh_s1:
    case M2_mpyud_lh_s0:
    case M2_mpyud_lh_s1:
    case M2_mpyud_nac_lh_s0:
    case M2_mpyud_nac_lh_s1:
    // These four are actually LH.
    case A2_addh_l16_hl:
    case A2_addh_l16_sat_hl:
    case A2_subh_l16_hl:
    case A2_subh_l16_sat_hl:
      if (OpN == 1) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[0-15].
    case A2_addh_h16_hl:
    case A2_addh_h16_sat_hl:
    case A2_combine_hl:
    case A2_subh_h16_hl:
    case A2_subh_h16_sat_hl:
    case M2_mpy_acc_hl_s0:
    case M2_mpy_acc_hl_s1:
    case M2_mpy_acc_sat_hl_s0:
    case M2_mpy_acc_sat_hl_s1:
    case M2_mpy_hl_s0:
    case M2_mpy_hl_s1:
    case M2_mpy_nac_hl_s0:
    case M2_mpy_nac_hl_s1:
    case M2_mpy_nac_sat_hl_s0:
    case M2_mpy_nac_sat_hl_s1:
    case M2_mpy_rnd_hl_s0:
    case M2_mpy_rnd_hl_s1:
    case M2_mpy_sat_hl_s0:
    case M2_mpy_sat_hl_s1:
    case M2_mpy_sat_rnd_hl_s0:
    case M2_mpy_sat_rnd_hl_s1:
    case M2_mpyd_acc_hl_s0:
    case M2_mpyd_acc_hl_s1:
    case M2_mpyd_hl_s0:
    case M2_mpyd_hl_s1:
    case M2_mpyd_nac_hl_s0:
    case M2_mpyd_nac_hl_s1:
    case M2_mpyd_rnd_hl_s0:
    case M2_mpyd_rnd_hl_s1:
    case M2_mpyu_acc_hl_s0:
    case M2_mpyu_acc_hl_s1:
    case M2_mpyu_hl_s0:
    case M2_mpyu_hl_s1:
    case M2_mpyu_nac_hl_s0:
    case M2_mpyu_nac_hl_s1:
    case M2_mpyud_acc_hl_s0:
    case M2_mpyud_acc_hl_s1:
    case M2_mpyud_hl_s0:
    case M2_mpyud_hl_s1:
    case M2_mpyud_nac_hl_s0:
    case M2_mpyud_nac_hl_s1:
      if (OpN == 1) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      if (OpN == 2) {
        Bits.set(Begin, Begin+16);
        return true;
      }
      break;

    // Two register sources, used bits: R1[16-31], R2[16-31].
    case A2_addh_h16_hh:
    case A2_addh_h16_sat_hh:
    case A2_combine_hh:
    case A2_subh_h16_hh:
    case A2_subh_h16_sat_hh:
    case M2_mpy_acc_hh_s0:
    case M2_mpy_acc_hh_s1:
    case M2_mpy_acc_sat_hh_s0:
    case M2_mpy_acc_sat_hh_s1:
    case M2_mpy_hh_s0:
    case M2_mpy_hh_s1:
    case M2_mpy_nac_hh_s0:
    case M2_mpy_nac_hh_s1:
    case M2_mpy_nac_sat_hh_s0:
    case M2_mpy_nac_sat_hh_s1:
    case M2_mpy_rnd_hh_s0:
    case M2_mpy_rnd_hh_s1:
    case M2_mpy_sat_hh_s0:
    case M2_mpy_sat_hh_s1:
    case M2_mpy_sat_rnd_hh_s0:
    case M2_mpy_sat_rnd_hh_s1:
    case M2_mpyd_acc_hh_s0:
    case M2_mpyd_acc_hh_s1:
    case M2_mpyd_hh_s0:
    case M2_mpyd_hh_s1:
    case M2_mpyd_nac_hh_s0:
    case M2_mpyd_nac_hh_s1:
    case M2_mpyd_rnd_hh_s0:
    case M2_mpyd_rnd_hh_s1:
    case M2_mpyu_acc_hh_s0:
    case M2_mpyu_acc_hh_s1:
    case M2_mpyu_hh_s0:
    case M2_mpyu_hh_s1:
    case M2_mpyu_nac_hh_s0:
    case M2_mpyu_nac_hh_s1:
    case M2_mpyud_acc_hh_s0:
    case M2_mpyud_acc_hh_s1:
    case M2_mpyud_hh_s0:
    case M2_mpyud_hh_s1:
    case M2_mpyud_nac_hh_s0:
    case M2_mpyud_nac_hh_s1:
      if (OpN == 1 || OpN == 2) {
        Bits.set(Begin+16, Begin+32);
        return true;
      }
      break;
  }

  return false;
}

// Calculate the register class that matches Reg:Sub. For example, if
// %1 is a double register, then %1:isub_hi would match the "int"
// register class.
const TargetRegisterClass *HexagonBitSimplify::getFinalVRegClass(
      const BitTracker::RegisterRef &RR, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RR.Reg))
    return nullptr;
  auto *RC = MRI.getRegClass(RR.Reg);
  if (RR.Sub == 0)
    return RC;
  auto &HRI = static_cast<const HexagonRegisterInfo&>(
                  *MRI.getTargetRegisterInfo());

  auto VerifySR = [&HRI] (const TargetRegisterClass *RC, unsigned Sub) -> void {
    (void)HRI;
    assert(Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_lo) ||
           Sub == HRI.getHexagonSubRegIndex(*RC, Hexagon::ps_sub_hi));
  };

  switch (RC->getID()) {
    case Hexagon::DoubleRegsRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::IntRegsRegClass;
    case Hexagon::HvxWRRegClassID:
      VerifySR(RC, RR.Sub);
      return &Hexagon::HvxVRRegClass;
  }
  return nullptr;
}

// Check if RD could be replaced with RS at any possible use of RD.
// For example a predicate register cannot be replaced with an integer
// register, but a 64-bit register with a subregister can be replaced
// with a 32-bit register.
bool HexagonBitSimplify::isTransparentCopy(const BitTracker::RegisterRef &RD,
      const BitTracker::RegisterRef &RS, MachineRegisterInfo &MRI) {
  if (!Register::isVirtualRegister(RD.Reg) ||
      !Register::isVirtualRegister(RS.Reg))
    return false;
  // Return false if one (or both) classes are nullptr.
  auto *DRC = getFinalVRegClass(RD, MRI);
  if (!DRC)
    return false;

  return DRC == getFinalVRegClass(RS, MRI);
}

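// Check if any use of Reg is a tied operand whose subregister differs from
// NewSub. Such uses prevent rewriting Reg with a different subregister when
// tied operands are being preserved (-hexbit-keep-tied).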
bool HexagonBitSimplify::hasTiedUse(unsigned Reg, MachineRegisterInfo &MRI,
      unsigned NewSub) {
  if (!PreserveTiedOps)
    return false;
  return llvm::any_of(MRI.use_operands(Reg),
                      [NewSub] (const MachineOperand &Op) -> bool {
                        return Op.getSubReg() != NewSub && Op.isTied();
                      });
}

namespace {

  class DeadCodeElimination {
  public:
    DeadCodeElimination(MachineFunction &mf, MachineDominatorTree &mdt)
      : MF(mf), HII(*MF.getSubtarget<HexagonSubtarget>().getInstrInfo()),
        MDT(mdt), MRI(mf.getRegInfo()) {}

    bool run() {
      return runOnNode(MDT.getRootNode());
    }

  private:
    bool isDead(unsigned R) const;
    bool runOnNode(MachineDomTreeNode *N);

    MachineFunction &MF;
    const HexagonInstrInfo &HII;
    MachineDominatorTree &MDT;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

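// A virtual register is considered dead if its only uses are debug values
// or PHIs that define the same register (trivial self-loops).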
bool DeadCodeElimination::isDead(unsigned R) const {
  for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
    MachineInstr *UseI = I->getParent();
    if (UseI->isDebugValue())
      continue;
    if (UseI->isPHI()) {
      assert(!UseI->getOperand(0).getSubReg());
      Register DR = UseI->getOperand(0).getReg();
      if (DR == R)
        continue;
    }
    return false;
  }
  return true;
}

bool DeadCodeElimination::runOnNode(MachineDomTreeNode *N) {
  bool Changed = false;

  for (auto *DTN : children<MachineDomTreeNode*>(N))
    Changed |= runOnNode(DTN);

  MachineBasicBlock *B = N->getBlock();
  std::vector<MachineInstr*> Instrs;
  for (auto I = B->rbegin(), E = B->rend(); I != E; ++I)
    Instrs.push_back(&*I);

  for (auto MI : Instrs) {
    unsigned Opc = MI->getOpcode();
    // Do not touch lifetime markers. This is why the target-independent DCE
    // cannot be used.
    if (Opc == TargetOpcode::LIFETIME_START ||
        Opc == TargetOpcode::LIFETIME_END)
      continue;
    bool Store = false;
    if (MI->isInlineAsm())
      continue;
    // Delete PHIs if possible.
    if (!MI->isPHI() && !MI->isSafeToMove(nullptr, Store))
      continue;

    bool AllDead = true;
    SmallVector<unsigned,2> Regs;
    for (auto &Op : MI->operands()) {
      if (!Op.isReg() || !Op.isDef())
        continue;
      Register R = Op.getReg();
      if (!Register::isVirtualRegister(R) || !isDead(R)) {
        AllDead = false;
        break;
      }
      Regs.push_back(R);
    }
    if (!AllDead)
      continue;

    B->erase(MI);
    for (unsigned i = 0, n = Regs.size(); i != n; ++i)
      MRI.markUsesInDebugValueAsUndef(Regs[i]);
    Changed = true;
  }

  return Changed;
}

namespace {

  // Eliminate redundant instructions
  //
  // This transformation will identify instructions where the output register
  // is the same as one of its input registers. This only works on instructions
  // that define a single register (unlike post-increment loads, for example).
  // The equality check is actually more detailed: the code calculates which
  // bits of the output are used, and only compares these bits with the input
  // registers.
  // If the output matches an input, the instruction is replaced with COPY.
  // The copies will be removed by another transformation.
  class RedundantInstrElimination : public Transformation {
  public:
    RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
          const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool isLossyShiftLeft(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
    bool isLossyShiftRight(const MachineInstr &MI, unsigned OpN,
        unsigned &LostB, unsigned &LostE);
    bool computeUsedBits(unsigned Reg, BitVector &Bits);
    bool computeUsedBits(const MachineInstr &MI, unsigned OpN, BitVector &Bits,
        uint16_t Begin);
    bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

// Check if the instruction is a lossy shift left, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
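// For example, S2_asl_i_r (32-bit shift left) by 24 loses bits [8, 32) of
// the shifted input.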
bool RedundantInstrElimination::isLossyShiftLeft(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN, Width;
  switch (Opc) {
    case S2_asl_i_p:
      ImN = 2;
      RegN = 1;
      Width = 64;
      break;
    case S2_asl_i_p_acc:
    case S2_asl_i_p_and:
    case S2_asl_i_p_nac:
    case S2_asl_i_p_or:
    case S2_asl_i_p_xacc:
      ImN = 3;
      RegN = 2;
      Width = 64;
      break;
    case S2_asl_i_r:
      ImN = 2;
      RegN = 1;
      Width = 32;
      break;
    case S2_addasl_rrri:
    case S4_andi_asl_ri:
    case S4_ori_asl_ri:
    case S4_addi_asl_ri:
    case S4_subi_asl_ri:
    case S2_asl_i_r_acc:
    case S2_asl_i_r_and:
    case S2_asl_i_r_nac:
    case S2_asl_i_r_or:
    case S2_asl_i_r_sat:
    case S2_asl_i_r_xacc:
      ImN = 3;
      RegN = 2;
      Width = 32;
      break;
    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  if (S == 0)
    return false;
  LostB = Width-S;
  LostE = Width;
  return true;
}

// Check if the instruction is a lossy shift right, where the input being
// shifted is the operand OpN of MI. If true, [LostB, LostE) is the range
// of bit indices that are lost.
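// For example, S2_lsr_i_r (32-bit logical shift right) by 10 loses bits
// [0, 10) of the shifted input.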
bool RedundantInstrElimination::isLossyShiftRight(const MachineInstr &MI,
      unsigned OpN, unsigned &LostB, unsigned &LostE) {
  using namespace Hexagon;

  unsigned Opc = MI.getOpcode();
  unsigned ImN, RegN;
  switch (Opc) {
    case S2_asr_i_p:
    case S2_lsr_i_p:
      ImN = 2;
      RegN = 1;
      break;
    case S2_asr_i_p_acc:
    case S2_asr_i_p_and:
    case S2_asr_i_p_nac:
    case S2_asr_i_p_or:
    case S2_lsr_i_p_acc:
    case S2_lsr_i_p_and:
    case S2_lsr_i_p_nac:
    case S2_lsr_i_p_or:
    case S2_lsr_i_p_xacc:
      ImN = 3;
      RegN = 2;
      break;
    case S2_asr_i_r:
    case S2_lsr_i_r:
      ImN = 2;
      RegN = 1;
      break;
    case S4_andi_lsr_ri:
    case S4_ori_lsr_ri:
    case S4_addi_lsr_ri:
    case S4_subi_lsr_ri:
    case S2_asr_i_r_acc:
    case S2_asr_i_r_and:
    case S2_asr_i_r_nac:
    case S2_asr_i_r_or:
    case S2_lsr_i_r_acc:
    case S2_lsr_i_r_and:
    case S2_lsr_i_r_nac:
    case S2_lsr_i_r_or:
    case S2_lsr_i_r_xacc:
      ImN = 3;
      RegN = 2;
      break;

    default:
      return false;
  }

  if (RegN != OpN)
    return false;

  assert(MI.getOperand(ImN).isImm());
  unsigned S = MI.getOperand(ImN).getImm();
  LostB = 0;
  LostE = S;
  return true;
}

// Calculate the bit vector that corresponds to the used bits of register Reg.
// The vector Bits has the same size as the size of Reg in bits. If the
// calculation fails (i.e. the used bits are unknown), it returns false.
// Otherwise, it returns true and sets the corresponding bits in Bits.
bool RedundantInstrElimination::computeUsedBits(unsigned Reg, BitVector &Bits) {
  BitVector Used(Bits.size());
  RegisterSet Visited;
  std::vector<unsigned> Pending;
  Pending.push_back(Reg);

  for (unsigned i = 0; i < Pending.size(); ++i) {
    unsigned R = Pending[i];
    if (Visited.has(R))
      continue;
    Visited.insert(R);
    for (auto I = MRI.use_begin(R), E = MRI.use_end(); I != E; ++I) {
      BitTracker::RegisterRef UR = *I;
      unsigned B, W;
      if (!HBS::getSubregMask(UR, B, W, MRI))
        return false;
      MachineInstr &UseI = *I->getParent();
      if (UseI.isPHI() || UseI.isCopy()) {
        Register DefR = UseI.getOperand(0).getReg();
        if (!Register::isVirtualRegister(DefR))
          return false;
        Pending.push_back(DefR);
      } else {
        if (!computeUsedBits(UseI, I.getOperandNo(), Used, B))
          return false;
      }
    }
  }
  Bits |= Used;
  return true;
}

// Calculate the bits used by instruction MI in a register in operand OpN.
// Return true/false if the calculation succeeds/fails. If it succeeds, set
// the used bits in Bits. This function does not reset any bits in Bits, so
// subsequent calls over different instructions will result in the union
// of the used bits in all these instructions.
// The register in question may be used with a sub-register, whereas Bits
// holds the bits for the entire register. To keep track of that, the
// argument Begin indicates where in Bits is the lowest-significant bit
// of the register used in operand OpN. For example, in instruction:
//   %1 = S2_lsr_i_r %2:isub_hi, 10
// the operand 1 is a 32-bit register, which happens to be a subregister
// of the 64-bit register %2, and that subregister starts at position 32.
// In this case Begin=32, since Bits[32] would be the lowest-significant bit
// of %2:isub_hi.
bool RedundantInstrElimination::computeUsedBits(const MachineInstr &MI,
      unsigned OpN, BitVector &Bits, uint16_t Begin) {
  unsigned Opc = MI.getOpcode();
  BitVector T(Bits.size());
  bool GotBits = HBS::getUsedBits(Opc, OpN, T, Begin, HII);
  // Even if we don't have bits yet, we could still provide some information
  // if the instruction is a lossy shift: the lost bits will be marked as
  // not used.
  unsigned LB, LE;
  if (isLossyShiftLeft(MI, OpN, LB, LE) || isLossyShiftRight(MI, OpN, LB, LE)) {
    assert(MI.getOperand(OpN).isReg());
    BitTracker::RegisterRef RR = MI.getOperand(OpN);
    const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
    uint16_t Width = HRI.getRegSizeInBits(*RC);

    if (!GotBits)
      T.set(Begin, Begin+Width);
    assert(LB <= LE && LB < Width && LE <= Width);
    T.reset(Begin+LB, Begin+LE);
    GotBits = true;
  }
  if (GotBits)
    Bits |= T;
  return GotBits;
}

// Calculates the used bits in RD ("defined register"), and checks if these
// bits in RS ("used register") and RD are identical.
bool RedundantInstrElimination::usedBitsEqual(BitTracker::RegisterRef RD,
      BitTracker::RegisterRef RS) {
  const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
  const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);

  unsigned DB, DW;
  if (!HBS::getSubregMask(RD, DB, DW, MRI))
    return false;
  unsigned SB, SW;
  if (!HBS::getSubregMask(RS, SB, SW, MRI))
    return false;
  if (SW != DW)
    return false;

  BitVector Used(DC.width());
  if (!computeUsedBits(RD.Reg, Used))
    return false;

  for (unsigned i = 0; i != DW; ++i)
    if (Used[i+DB] && DC[DB+i] != SC[SB+i])
      return false;
  return true;
}

bool RedundantInstrElimination::processBlock(MachineBasicBlock &B,
      const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E; ++I) {
    NextI = std::next(I);
    MachineInstr *MI = &*I;

    if (MI->getOpcode() == TargetOpcode::COPY)
      continue;
    if (MI->isPHI() || MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
      continue;
    unsigned NumD = MI->getDesc().getNumDefs();
    if (NumD != 1)
      continue;

    BitTracker::RegisterRef RD = MI->getOperand(0);
    if (!BT.has(RD.Reg))
      continue;
    const BitTracker::RegisterCell &DC = BT.lookup(RD.Reg);
    auto At = MachineBasicBlock::iterator(MI);

    // Find a source operand that is equal to the result.
    for (auto &Op : MI->uses()) {
      if (!Op.isReg())
        continue;
      BitTracker::RegisterRef RS = Op;
      if (!BT.has(RS.Reg))
        continue;
      if (!HBS::isTransparentCopy(RD, RS, MRI))
        continue;

      unsigned BN, BW;
      if (!HBS::getSubregMask(RS, BN, BW, MRI))
        continue;

      const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
      if (!usedBitsEqual(RD, RS) && !HBS::isEqual(DC, 0, SC, BN, BW))
        continue;

      // If found, replace the instruction with a COPY.
      const DebugLoc &DL = MI->getDebugLoc();
      const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
      Register NewR = MRI.createVirtualRegister(FRC);
      MachineInstr *CopyI =
          BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
            .addReg(RS.Reg, 0, RS.Sub);
      HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
      // This pass can create copies between registers that don't have the
      // exact same values. Updating the tracker has to involve updating
      // all dependent cells. Example:
      //   %1 = inst %2      ; %1 != %2, but used bits are equal
      //
      //   %3 = copy %2      ; <- inserted
      //   ... = %3          ; <- replaced from %2
      // Indirectly, we can create a "copy" between %1 and %2 even
      // though their exact values do not match.
      BT.visit(*CopyI);
      Changed = true;
      break;
    }
  }

  return Changed;
}

namespace {

  // Recognize instructions that produce constant values known at compile-time.
  // Replace them with register definitions that load these constants directly.
  class ConstGeneration : public Transformation {
  public:
    ConstGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
          MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
    static bool isTfrConst(const MachineInstr &MI);

  private:
    unsigned genTfrConst(const TargetRegisterClass *RC, int64_t C,
        MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL);

    const HexagonInstrInfo &HII;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
  };

} // end anonymous namespace

bool ConstGeneration::isTfrConst(const MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
    case Hexagon::A2_combineii:
    case Hexagon::A4_combineii:
    case Hexagon::A2_tfrsi:
    case Hexagon::A2_tfrpi:
    case Hexagon::PS_true:
    case Hexagon::PS_false:
    case Hexagon::CONST32:
    case Hexagon::CONST64:
      return true;
  }
  return false;
}

// Generate a transfer-immediate instruction that is appropriate for the
// register class and the actual value being transferred.
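// For 64-bit values the cheapest form is chosen: A2_tfrpi when the value
// fits in s8, A2_combineii/A4_combineii when one of the 32-bit halves fits
// in s8, and CONST64 otherwise.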
unsigned ConstGeneration::genTfrConst(const TargetRegisterClass *RC, int64_t C,
      MachineBasicBlock &B, MachineBasicBlock::iterator At, DebugLoc &DL) {
  Register Reg = MRI.createVirtualRegister(RC);
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), Reg)
      .addImm(int32_t(C));
    return Reg;
  }

  if (RC == &Hexagon::DoubleRegsRegClass) {
    if (isInt<8>(C)) {
      BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrpi), Reg)
        .addImm(C);
      return Reg;
    }

    unsigned Lo = Lo_32(C), Hi = Hi_32(C);
    if (isInt<8>(Lo) || isInt<8>(Hi)) {
      unsigned Opc = isInt<8>(Lo) ? Hexagon::A2_combineii
                                  : Hexagon::A4_combineii;
      BuildMI(B, At, DL, HII.get(Opc), Reg)
        .addImm(int32_t(Hi))
        .addImm(int32_t(Lo));
      return Reg;
    }

    BuildMI(B, At, DL, HII.get(Hexagon::CONST64), Reg)
      .addImm(C);
    return Reg;
  }

  if (RC == &Hexagon::PredRegsRegClass) {
    unsigned Opc;
    if (C == 0)
      Opc = Hexagon::PS_false;
    else if ((C & 0xFF) == 0xFF)
      Opc = Hexagon::PS_true;
    else
      return 0;
    BuildMI(B, At, DL, HII.get(Opc), Reg);
    return Reg;
  }

  return 0;
}

bool ConstGeneration::processBlock(MachineBasicBlock &B, const RegisterSet&) {
  if (!BT.reached(&B))
    return false;
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(); I != E; ++I) {
    if (isTfrConst(*I))
      continue;
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);
    if (Defs.count() != 1)
      continue;
    unsigned DR = Defs.find_first();
    if (!Register::isVirtualRegister(DR))
      continue;
    uint64_t U;
    const BitTracker::RegisterCell &DRC = BT.lookup(DR);
    if (HBS::getConst(DRC, 0, DRC.width(), U)) {
      int64_t C = U;
      DebugLoc DL = I->getDebugLoc();
      auto At = I->isPHI() ? B.getFirstNonPHI() : I;
      unsigned ImmReg = genTfrConst(MRI.getRegClass(DR), C, B, At, DL);
      if (ImmReg) {
        HBS::replaceReg(DR, ImmReg, MRI);
        BT.put(ImmReg, DRC);
        Changed = true;
      }
    }
  }
  return Changed;
}

namespace {

  // Identify pairs of available registers which hold identical values.
  // In such cases, only one of them needs to be calculated; the other one
  // will be defined as a copy of the first.
  class CopyGeneration : public Transformation {
  public:
    CopyGeneration(BitTracker &bt, const HexagonInstrInfo &hii,
          const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

  private:
    bool findMatch(const BitTracker::RegisterRef &Inp,
        BitTracker::RegisterRef &Out, const RegisterSet &AVs);

    const HexagonInstrInfo &HII;
    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
    BitTracker &BT;
    RegisterSet Forbidden;
  };

  // Eliminate register copies RD = RS, by replacing the uses of RD
  // with uses of RS.
  class CopyPropagation : public Transformation {
  public:
    CopyPropagation(const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
      : Transformation(false), HRI(hri), MRI(mri) {}

    bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;

    static bool isCopyReg(unsigned Opc, bool NoConv);

  private:
    bool propagateRegCopy(MachineInstr &MI);

    const HexagonRegisterInfo &HRI;
    MachineRegisterInfo &MRI;
  };

} // end anonymous namespace

/// Check if there is a register in AVs that is identical to Inp. If so,
/// set Out to the found register. The output may be a pair Reg:Sub.
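/// The match may also be half of a wider (double) register; in that case
/// Out.Sub is set to isub_lo or isub_hi accordingly.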
bool CopyGeneration::findMatch(const BitTracker::RegisterRef &Inp,
      BitTracker::RegisterRef &Out, const RegisterSet &AVs) {
  if (!BT.has(Inp.Reg))
    return false;
  const BitTracker::RegisterCell &InpRC = BT.lookup(Inp.Reg);
  auto *FRC = HBS::getFinalVRegClass(Inp, MRI);
  unsigned B, W;
  if (!HBS::getSubregMask(Inp, B, W, MRI))
    return false;

  for (unsigned R = AVs.find_first(); R; R = AVs.find_next(R)) {
    if (!BT.has(R) || Forbidden[R])
      continue;
    const BitTracker::RegisterCell &RC = BT.lookup(R);
    unsigned RW = RC.width();
    if (W == RW) {
      if (FRC != MRI.getRegClass(R))
        continue;
      if (!HBS::isTransparentCopy(R, Inp, MRI))
        continue;
      if (!HBS::isEqual(InpRC, B, RC, 0, W))
        continue;
      Out.Reg = R;
      Out.Sub = 0;
      return true;
    }
    // Check if there is a super-register, whose part (with a subregister)
    // is equal to the input.
    // Only do double registers for now.
    if (W*2 != RW)
      continue;
    if (MRI.getRegClass(R) != &Hexagon::DoubleRegsRegClass)
      continue;

    if (HBS::isEqual(InpRC, B, RC, 0, W))
      Out.Sub = Hexagon::isub_lo;
    else if (HBS::isEqual(InpRC, B, RC, W, W))
      Out.Sub = Hexagon::isub_hi;
    else
      continue;
    Out.Reg = R;
    if (HBS::isTransparentCopy(Out, Inp, MRI))
      return true;
  }
  return false;
}

bool CopyGeneration::processBlock(MachineBasicBlock &B,
      const RegisterSet &AVs) {
  if (!BT.reached(&B))
    return false;
  RegisterSet AVB(AVs);
  bool Changed = false;
  RegisterSet Defs;

  for (auto I = B.begin(), E = B.end(), NextI = I; I != E;
       ++I, AVB.insert(Defs)) {
    NextI = std::next(I);
    Defs.clear();
    HBS::getInstrDefs(*I, Defs);

    unsigned Opc = I->getOpcode();
    if (CopyPropagation::isCopyReg(Opc, false) ||
        ConstGeneration::isTfrConst(*I))
      continue;

    DebugLoc DL = I->getDebugLoc();
    auto At = I->isPHI() ? B.getFirstNonPHI() : I;

    for (unsigned R = Defs.find_first(); R; R = Defs.find_next(R)) {
      BitTracker::RegisterRef MR;
      auto *FRC = HBS::getFinalVRegClass(R, MRI);

      if (findMatch(R, MR, AVB)) {
        Register NewR = MRI.createVirtualRegister(FRC);
        BuildMI(B, At, DL, HII.get(TargetOpcode::COPY), NewR)
          .addReg(MR.Reg, 0, MR.Sub);
        BT.put(BitTracker::RegisterRef(NewR), BT.get(MR));
        HBS::replaceReg(R, NewR, MRI);
        Forbidden.insert(R);
        continue;
      }

      if (FRC == &Hexagon::DoubleRegsRegClass ||
          FRC == &Hexagon::HvxWRRegClass) {
        // Try to generate REG_SEQUENCE.
        unsigned SubLo = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_lo);
        unsigned SubHi = HRI.getHexagonSubRegIndex(*FRC, Hexagon::ps_sub_hi);
        BitTracker::RegisterRef TL = { R, SubLo };
        BitTracker::RegisterRef TH = { R, SubHi };
        BitTracker::RegisterRef ML, MH;
        if (findMatch(TL, ML, AVB) && findMatch(TH, MH, AVB)) {
          auto *FRC = HBS::getFinalVRegClass(R, MRI);
          Register NewR = MRI.createVirtualRegister(FRC);
          BuildMI(B, At, DL, HII.get(TargetOpcode::REG_SEQUENCE), NewR)
            .addReg(ML.Reg, 0, ML.Sub)
            .addImm(SubLo)
            .addReg(MH.Reg, 0, MH.Sub)
            .addImm(SubHi);
          BT.put(BitTracker::RegisterRef(NewR), BT.get(R));
          HBS::replaceReg(R, NewR, MRI);
          Forbidden.insert(R);
        }
      }
    }
  }

  return Changed;
}
1645
1646 bool CopyPropagation::isCopyReg(unsigned Opc, bool NoConv) {
1647 switch (Opc) {
1648 case TargetOpcode::COPY:
1649 case TargetOpcode::REG_SEQUENCE:
1650 case Hexagon::A4_combineir:
1651 case Hexagon::A4_combineri:
1652 return true;
1653 case Hexagon::A2_tfr:
1654 case Hexagon::A2_tfrp:
1655 case Hexagon::A2_combinew:
1656 case Hexagon::V6_vcombine:
1657 return NoConv;
1658 default:
1659 break;
1660 }
1661 return false;
1662 }
1663
1664 bool CopyPropagation::propagateRegCopy(MachineInstr &MI) {
1665 bool Changed = false;
1666 unsigned Opc = MI.getOpcode();
1667 BitTracker::RegisterRef RD = MI.getOperand(0);
1668 assert(MI.getOperand(0).getSubReg() == 0);
1669
1670 switch (Opc) {
1671 case TargetOpcode::COPY:
1672 case Hexagon::A2_tfr:
1673 case Hexagon::A2_tfrp: {
1674 BitTracker::RegisterRef RS = MI.getOperand(1);
1675 if (!HBS::isTransparentCopy(RD, RS, MRI))
1676 break;
1677 if (RS.Sub != 0)
1678 Changed = HBS::replaceRegWithSub(RD.Reg, RS.Reg, RS.Sub, MRI);
1679 else
1680 Changed = HBS::replaceReg(RD.Reg, RS.Reg, MRI);
1681 break;
1682 }
1683 case TargetOpcode::REG_SEQUENCE: {
1684 BitTracker::RegisterRef SL, SH;
1685 if (HBS::parseRegSequence(MI, SL, SH, MRI)) {
1686 const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
1687 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1688 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1689 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, SL.Reg, SL.Sub, MRI);
1690 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, SH.Reg, SH.Sub, MRI);
1691 }
1692 break;
1693 }
1694 case Hexagon::A2_combinew:
1695 case Hexagon::V6_vcombine: {
1696 const TargetRegisterClass &RC = *MRI.getRegClass(RD.Reg);
1697 unsigned SubLo = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_lo);
1698 unsigned SubHi = HRI.getHexagonSubRegIndex(RC, Hexagon::ps_sub_hi);
1699 BitTracker::RegisterRef RH = MI.getOperand(1), RL = MI.getOperand(2);
1700 Changed = HBS::replaceSubWithSub(RD.Reg, SubLo, RL.Reg, RL.Sub, MRI);
1701 Changed |= HBS::replaceSubWithSub(RD.Reg, SubHi, RH.Reg, RH.Sub, MRI);
1702 break;
1703 }
1704 case Hexagon::A4_combineir:
1705 case Hexagon::A4_combineri: {
1706 unsigned SrcX = (Opc == Hexagon::A4_combineir) ? 2 : 1;
1707 unsigned Sub = (Opc == Hexagon::A4_combineir) ? Hexagon::isub_lo
1708 : Hexagon::isub_hi;
1709 BitTracker::RegisterRef RS = MI.getOperand(SrcX);
1710 Changed = HBS::replaceSubWithSub(RD.Reg, Sub, RS.Reg, RS.Sub, MRI);
1711 break;
1712 }
1713 }
1714 return Changed;
1715 }
1716
1717 bool CopyPropagation::processBlock(MachineBasicBlock &B, const RegisterSet&) {
1718 std::vector<MachineInstr*> Instrs;
1719 for (auto I = B.rbegin(), E = B.rend(); I != E; ++I)
1720 Instrs.push_back(&*I);
1721
1722 bool Changed = false;
1723 for (auto I : Instrs) {
1724 unsigned Opc = I->getOpcode();
1725 if (!CopyPropagation::isCopyReg(Opc, true))
1726 continue;
1727 Changed |= propagateRegCopy(*I);
1728 }
1729
1730 return Changed;
1731 }
1732
1733 namespace {
1734
1735 // Recognize patterns that can be simplified and replace them with the
1736 // simpler forms.
1737 // This is by no means complete.
1738 class BitSimplification : public Transformation {
1739 public:
1740   BitSimplification(BitTracker &bt, const MachineDominatorTree &mdt,
1741 const HexagonInstrInfo &hii, const HexagonRegisterInfo &hri,
1742 MachineRegisterInfo &mri, MachineFunction &mf)
1743 : Transformation(true), MDT(mdt), HII(hii), HRI(hri), MRI(mri),
1744 MF(mf), BT(bt) {}
1745
1746 bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
1747
1748 private:
1749 struct RegHalf : public BitTracker::RegisterRef {
1750 bool Low; // Low/High halfword.
1751 };
1752
1753 bool matchHalf(unsigned SelfR, const BitTracker::RegisterCell &RC,
1754 unsigned B, RegHalf &RH);
1755 bool validateReg(BitTracker::RegisterRef R, unsigned Opc, unsigned OpNum);
1756
1757 bool matchPackhl(unsigned SelfR, const BitTracker::RegisterCell &RC,
1758 BitTracker::RegisterRef &Rs, BitTracker::RegisterRef &Rt);
1759 unsigned getCombineOpcode(bool HLow, bool LLow);
1760
1761 bool genStoreUpperHalf(MachineInstr *MI);
1762 bool genStoreImmediate(MachineInstr *MI);
1763 bool genPackhl(MachineInstr *MI, BitTracker::RegisterRef RD,
1764 const BitTracker::RegisterCell &RC);
1765 bool genExtractHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1766 const BitTracker::RegisterCell &RC);
1767 bool genCombineHalf(MachineInstr *MI, BitTracker::RegisterRef RD,
1768 const BitTracker::RegisterCell &RC);
1769 bool genExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1770 const BitTracker::RegisterCell &RC);
1771 bool genBitSplit(MachineInstr *MI, BitTracker::RegisterRef RD,
1772 const BitTracker::RegisterCell &RC, const RegisterSet &AVs);
1773 bool simplifyTstbit(MachineInstr *MI, BitTracker::RegisterRef RD,
1774 const BitTracker::RegisterCell &RC);
1775 bool simplifyExtractLow(MachineInstr *MI, BitTracker::RegisterRef RD,
1776 const BitTracker::RegisterCell &RC, const RegisterSet &AVs);
1777 bool simplifyRCmp0(MachineInstr *MI, BitTracker::RegisterRef RD);
1778
1779 // Cache of created instructions to avoid creating duplicates.
1780 // XXX Currently only used by genBitSplit.
1781 std::vector<MachineInstr*> NewMIs;
1782
1783 const MachineDominatorTree &MDT;
1784 const HexagonInstrInfo &HII;
1785 const HexagonRegisterInfo &HRI;
1786 MachineRegisterInfo &MRI;
1787 MachineFunction &MF;
1788 BitTracker &BT;
1789 };
1790
1791 } // end anonymous namespace
1792
1793 // Check if the bits [B..B+16) in register cell RC form a valid halfword,
1794 // i.e. [0..16), [16..32), etc. of some register. If so, return true and
1795 // set the information about the found register in RH.
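// For example (illustrative), if RC[B..B+15] are references to bits [16..31]
// of a 64-bit virtual register %7, the match is RH = { Reg = %7,
// Sub = isub_lo, Low = false }, i.e. the high halfword of %7's low word.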
1796 bool BitSimplification::matchHalf(unsigned SelfR,
1797 const BitTracker::RegisterCell &RC, unsigned B, RegHalf &RH) {
1798 // XXX This could be searching in the set of available registers, in case
1799 // the match is not exact.
1800
1801 // Match 16-bit chunks, where the RC[B..B+15] references exactly one
1802 // register and all the bits B..B+15 match between RC and the register.
1803 // This is meant to match "v1[0-15]", where v1 = { [0]:0 [1-15]:v1... },
1804 // and RC = { [0]:0 [1-15]:v1[1-15]... }.
1805 bool Low = false;
1806 unsigned I = B;
1807 while (I < B+16 && RC[I].num())
1808 I++;
1809 if (I == B+16)
1810 return false;
1811
1812 unsigned Reg = RC[I].RefI.Reg;
1813 unsigned P = RC[I].RefI.Pos; // The RefI.Pos will be advanced by I-B.
1814 if (P < I-B)
1815 return false;
1816 unsigned Pos = P - (I-B);
1817
1818 if (Reg == 0 || Reg == SelfR) // Don't match "self".
1819 return false;
1820 if (!Register::isVirtualRegister(Reg))
1821 return false;
1822 if (!BT.has(Reg))
1823 return false;
1824
1825 const BitTracker::RegisterCell &SC = BT.lookup(Reg);
1826 if (Pos+16 > SC.width())
1827 return false;
1828
1829 for (unsigned i = 0; i < 16; ++i) {
1830 const BitTracker::BitValue &RV = RC[i+B];
1831 if (RV.Type == BitTracker::BitValue::Ref) {
1832 if (RV.RefI.Reg != Reg)
1833 return false;
1834 if (RV.RefI.Pos != i+Pos)
1835 return false;
1836 continue;
1837 }
1838 if (RC[i+B] != SC[i+Pos])
1839 return false;
1840 }
1841
1842 unsigned Sub = 0;
1843 switch (Pos) {
1844 case 0:
1845 Sub = Hexagon::isub_lo;
1846 Low = true;
1847 break;
1848 case 16:
1849 Sub = Hexagon::isub_lo;
1850 Low = false;
1851 break;
1852 case 32:
1853 Sub = Hexagon::isub_hi;
1854 Low = true;
1855 break;
1856 case 48:
1857 Sub = Hexagon::isub_hi;
1858 Low = false;
1859 break;
1860 default:
1861 return false;
1862 }
1863
1864 RH.Reg = Reg;
1865 RH.Sub = Sub;
1866 RH.Low = Low;
1867 // If the subregister is not valid with the register, set it to 0.
1868 if (!HBS::getFinalVRegClass(RH, MRI))
1869 RH.Sub = 0;
1870
1871 return true;
1872 }
1873
1874 bool BitSimplification::validateReg(BitTracker::RegisterRef R, unsigned Opc,
1875 unsigned OpNum) {
1876 auto *OpRC = HII.getRegClass(HII.get(Opc), OpNum, &HRI, MF);
1877 auto *RRC = HBS::getFinalVRegClass(R, MRI);
1878 return OpRC->hasSubClassEq(RRC);
1879 }
1880
1881 // Check if RC matches the pattern of a S2_packhl. If so, return true and
1882 // set the inputs Rs and Rt.
1883 bool BitSimplification::matchPackhl(unsigned SelfR,
1884 const BitTracker::RegisterCell &RC, BitTracker::RegisterRef &Rs,
1885 BitTracker::RegisterRef &Rt) {
1886 RegHalf L1, H1, L2, H2;
1887
1888 if (!matchHalf(SelfR, RC, 0, L2) || !matchHalf(SelfR, RC, 16, L1))
1889 return false;
1890 if (!matchHalf(SelfR, RC, 32, H2) || !matchHalf(SelfR, RC, 48, H1))
1891 return false;
1892
1893 // Rs = H1.L1, Rt = H2.L2
1894 if (H1.Reg != L1.Reg || H1.Sub != L1.Sub || H1.Low || !L1.Low)
1895 return false;
1896 if (H2.Reg != L2.Reg || H2.Sub != L2.Sub || H2.Low || !L2.Low)
1897 return false;
1898
1899 Rs = H1;
1900 Rt = H2;
1901 return true;
1902 }
1903
1904 unsigned BitSimplification::getCombineOpcode(bool HLow, bool LLow) {
1905 return HLow ? LLow ? Hexagon::A2_combine_ll
1906 : Hexagon::A2_combine_lh
1907 : LLow ? Hexagon::A2_combine_hl
1908 : Hexagon::A2_combine_hh;
1909 }
1910
1911 // If MI stores the upper halfword of a register (potentially obtained via
1912 // shifts or extracts), replace it with a storerf instruction. This could
1913 // cause the "extraction" code to become dead.
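// For example (illustrative):
//   %1 = S2_lsr_i_r %0, 16
//   S2_storerh_io %2, 0, %1
// becomes
//   S2_storerf_io %2, 0, %0
// making the shift dead if it has no other uses.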
1914 bool BitSimplification::genStoreUpperHalf(MachineInstr *MI) {
1915 unsigned Opc = MI->getOpcode();
1916 if (Opc != Hexagon::S2_storerh_io)
1917 return false;
1918
1919 MachineOperand &ValOp = MI->getOperand(2);
1920 BitTracker::RegisterRef RS = ValOp;
1921 if (!BT.has(RS.Reg))
1922 return false;
1923 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
1924 RegHalf H;
1925 if (!matchHalf(0, RC, 0, H))
1926 return false;
1927 if (H.Low)
1928 return false;
1929 MI->setDesc(HII.get(Hexagon::S2_storerf_io));
1930 ValOp.setReg(H.Reg);
1931 ValOp.setSubReg(H.Sub);
1932 return true;
1933 }
1934
1935 // If MI stores a value known at compile-time, and the value is within a range
1936 // that avoids using constant-extenders, replace it with a store-immediate.
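// For example (illustrative):
//   %1 = A2_tfrsi 5
//   S2_storerb_io %0, 0, %1
// becomes
//   S4_storeirb_io %0, 0, 5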
1937 bool BitSimplification::genStoreImmediate(MachineInstr *MI) {
1938 unsigned Opc = MI->getOpcode();
1939 unsigned Align = 0;
1940 switch (Opc) {
1941 case Hexagon::S2_storeri_io:
1942 Align++;
1943 LLVM_FALLTHROUGH;
1944 case Hexagon::S2_storerh_io:
1945 Align++;
1946 LLVM_FALLTHROUGH;
1947 case Hexagon::S2_storerb_io:
1948 break;
1949 default:
1950 return false;
1951 }
1952
1953 // Avoid stores to frame-indices (due to an unknown offset).
1954 if (!MI->getOperand(0).isReg())
1955 return false;
1956 MachineOperand &OffOp = MI->getOperand(1);
1957 if (!OffOp.isImm())
1958 return false;
1959
1960 int64_t Off = OffOp.getImm();
1961   // Offset is u6:a. Sadly, there is no isShiftedUInt(n,x) for a run-time n.
1962 if (!isUIntN(6+Align, Off) || (Off & ((1<<Align)-1)))
1963 return false;
1964 // Source register:
1965 BitTracker::RegisterRef RS = MI->getOperand(2);
1966 if (!BT.has(RS.Reg))
1967 return false;
1968 const BitTracker::RegisterCell &RC = BT.lookup(RS.Reg);
1969 uint64_t U;
1970 if (!HBS::getConst(RC, 0, RC.width(), U))
1971 return false;
1972
1973 // Only consider 8-bit values to avoid constant-extenders.
1974 int V;
1975 switch (Opc) {
1976 case Hexagon::S2_storerb_io:
1977 V = int8_t(U);
1978 break;
1979 case Hexagon::S2_storerh_io:
1980 V = int16_t(U);
1981 break;
1982 case Hexagon::S2_storeri_io:
1983 V = int32_t(U);
1984 break;
1985 default:
1986 // Opc is already checked above to be one of the three store instructions.
1987 // This silences a -Wuninitialized false positive on GCC 5.4.
1988 llvm_unreachable("Unexpected store opcode");
1989 }
1990 if (!isInt<8>(V))
1991 return false;
1992
1993 MI->RemoveOperand(2);
1994 switch (Opc) {
1995 case Hexagon::S2_storerb_io:
1996 MI->setDesc(HII.get(Hexagon::S4_storeirb_io));
1997 break;
1998 case Hexagon::S2_storerh_io:
1999 MI->setDesc(HII.get(Hexagon::S4_storeirh_io));
2000 break;
2001 case Hexagon::S2_storeri_io:
2002 MI->setDesc(HII.get(Hexagon::S4_storeiri_io));
2003 break;
2004 }
2005 MI->addOperand(MachineOperand::CreateImm(V));
2006 return true;
2007 }
2008
2009 // If MI is equivalent to S2_packhl, generate the S2_packhl. MI could be the
2010 // last instruction in a sequence that results in something equivalent to
2011 // the pack-halfwords. The intent is to cause the entire sequence to become
2012 // dead.
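// For example (illustrative), if RD's cell is
//   [48..63] = %1.H, [32..47] = %2.H, [16..31] = %1.L, [0..15] = %2.L
// then the defining sequence can be replaced with
//   %n = S2_packhl %1, %2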
2013 bool BitSimplification::genPackhl(MachineInstr *MI,
2014 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2015 unsigned Opc = MI->getOpcode();
2016 if (Opc == Hexagon::S2_packhl)
2017 return false;
2018 BitTracker::RegisterRef Rs, Rt;
2019 if (!matchPackhl(RD.Reg, RC, Rs, Rt))
2020 return false;
2021 if (!validateReg(Rs, Hexagon::S2_packhl, 1) ||
2022 !validateReg(Rt, Hexagon::S2_packhl, 2))
2023 return false;
2024
2025 MachineBasicBlock &B = *MI->getParent();
2026 Register NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
2027 DebugLoc DL = MI->getDebugLoc();
2028 auto At = MI->isPHI() ? B.getFirstNonPHI()
2029 : MachineBasicBlock::iterator(MI);
2030 BuildMI(B, At, DL, HII.get(Hexagon::S2_packhl), NewR)
2031 .addReg(Rs.Reg, 0, Rs.Sub)
2032 .addReg(Rt.Reg, 0, Rt.Sub);
2033 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2034 BT.put(BitTracker::RegisterRef(NewR), RC);
2035 return true;
2036 }
2037
2038 // If MI produces a halfword of the input in the low half of the output,
2039 // replace it with a zero-extend or a shift-right by 16.
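// For example (illustrative), if the output's cell is the high halfword of %0
// in bits [0..15] and zeros in bits [16..31], the definition can become
//   %1 = S2_lsr_i_r %0, 16
// while the low-halfword case becomes
//   %1 = A2_zxth %0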
2040 bool BitSimplification::genExtractHalf(MachineInstr *MI,
2041 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2042 RegHalf L;
2043 // Check for halfword in low 16 bits, zeros elsewhere.
2044 if (!matchHalf(RD.Reg, RC, 0, L) || !HBS::isZero(RC, 16, 16))
2045 return false;
2046
2047 unsigned Opc = MI->getOpcode();
2048 MachineBasicBlock &B = *MI->getParent();
2049 DebugLoc DL = MI->getDebugLoc();
2050
2051 // Prefer zxth, since zxth can go in any slot, while extractu only in
2052 // slots 2 and 3.
2053 unsigned NewR = 0;
2054 auto At = MI->isPHI() ? B.getFirstNonPHI()
2055 : MachineBasicBlock::iterator(MI);
2056 if (L.Low && Opc != Hexagon::A2_zxth) {
2057 if (validateReg(L, Hexagon::A2_zxth, 1)) {
2058 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2059 BuildMI(B, At, DL, HII.get(Hexagon::A2_zxth), NewR)
2060 .addReg(L.Reg, 0, L.Sub);
2061 }
2062 } else if (!L.Low && Opc != Hexagon::S2_lsr_i_r) {
2063 if (validateReg(L, Hexagon::S2_lsr_i_r, 1)) {
2064 NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2065 BuildMI(B, MI, DL, HII.get(Hexagon::S2_lsr_i_r), NewR)
2066 .addReg(L.Reg, 0, L.Sub)
2067 .addImm(16);
2068 }
2069 }
2070 if (NewR == 0)
2071 return false;
2072 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2073 BT.put(BitTracker::RegisterRef(NewR), RC);
2074 return true;
2075 }
2076
2077 // If MI is equivalent to a combine(.L/.H, .L/.H), replace it with the
2078 // combine.
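// For example (illustrative), if the output's high halfword equals %1.H and
// its low halfword equals %0.L, the instruction can be replaced with
//   %2 = A2_combine_hl %1, %0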
2079 bool BitSimplification::genCombineHalf(MachineInstr *MI,
2080 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2081 RegHalf L, H;
2082 // Check for combine h/l
2083 if (!matchHalf(RD.Reg, RC, 0, L) || !matchHalf(RD.Reg, RC, 16, H))
2084 return false;
2085 // Do nothing if this is just a reg copy.
2086 if (L.Reg == H.Reg && L.Sub == H.Sub && !H.Low && L.Low)
2087 return false;
2088
2089 unsigned Opc = MI->getOpcode();
2090 unsigned COpc = getCombineOpcode(H.Low, L.Low);
2091 if (COpc == Opc)
2092 return false;
2093 if (!validateReg(H, COpc, 1) || !validateReg(L, COpc, 2))
2094 return false;
2095
2096 MachineBasicBlock &B = *MI->getParent();
2097 DebugLoc DL = MI->getDebugLoc();
2098 Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2099 auto At = MI->isPHI() ? B.getFirstNonPHI()
2100 : MachineBasicBlock::iterator(MI);
2101 BuildMI(B, At, DL, HII.get(COpc), NewR)
2102 .addReg(H.Reg, 0, H.Sub)
2103 .addReg(L.Reg, 0, L.Sub);
2104 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2105 BT.put(BitTracker::RegisterRef(NewR), RC);
2106 return true;
2107 }
2108
2109 // If MI resets high bits of a register and keeps the lower ones, replace it
2110 // with zero-extend byte/half, and-immediate, or extractu, as appropriate.
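// For example (illustrative), if the output is zero above bit 7 and its low
// 8 bits equal the low 8 bits of a source operand %0, the definition becomes
//   %1 = A2_zxtb %0
// A 12-bit field would instead become %1 = S2_extractu %0, 12, 0.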
2111 bool BitSimplification::genExtractLow(MachineInstr *MI,
2112 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2113 unsigned Opc = MI->getOpcode();
2114 switch (Opc) {
2115 case Hexagon::A2_zxtb:
2116 case Hexagon::A2_zxth:
2117 case Hexagon::S2_extractu:
2118 return false;
2119 }
2120 if (Opc == Hexagon::A2_andir && MI->getOperand(2).isImm()) {
2121 int32_t Imm = MI->getOperand(2).getImm();
2122 if (isInt<10>(Imm))
2123 return false;
2124 }
2125
2126 if (MI->hasUnmodeledSideEffects() || MI->isInlineAsm())
2127 return false;
2128 unsigned W = RC.width();
2129 while (W > 0 && RC[W-1].is(0))
2130 W--;
2131 if (W == 0 || W == RC.width())
2132 return false;
2133 unsigned NewOpc = (W == 8) ? Hexagon::A2_zxtb
2134 : (W == 16) ? Hexagon::A2_zxth
2135 : (W < 10) ? Hexagon::A2_andir
2136 : Hexagon::S2_extractu;
2137 MachineBasicBlock &B = *MI->getParent();
2138 DebugLoc DL = MI->getDebugLoc();
2139
2140 for (auto &Op : MI->uses()) {
2141 if (!Op.isReg())
2142 continue;
2143 BitTracker::RegisterRef RS = Op;
2144 if (!BT.has(RS.Reg))
2145 continue;
2146 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2147 unsigned BN, BW;
2148 if (!HBS::getSubregMask(RS, BN, BW, MRI))
2149 continue;
2150 if (BW < W || !HBS::isEqual(RC, 0, SC, BN, W))
2151 continue;
2152 if (!validateReg(RS, NewOpc, 1))
2153 continue;
2154
2155 Register NewR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
2156 auto At = MI->isPHI() ? B.getFirstNonPHI()
2157 : MachineBasicBlock::iterator(MI);
2158 auto MIB = BuildMI(B, At, DL, HII.get(NewOpc), NewR)
2159 .addReg(RS.Reg, 0, RS.Sub);
2160 if (NewOpc == Hexagon::A2_andir)
2161 MIB.addImm((1 << W) - 1);
2162 else if (NewOpc == Hexagon::S2_extractu)
2163 MIB.addImm(W).addImm(0);
2164 HBS::replaceSubWithSub(RD.Reg, RD.Sub, NewR, 0, MRI);
2165 BT.put(BitTracker::RegisterRef(NewR), RC);
2166 return true;
2167 }
2168 return false;
2169 }
2170
2171 bool BitSimplification::genBitSplit(MachineInstr *MI,
2172 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
2173 const RegisterSet &AVs) {
2174 if (!GenBitSplit)
2175 return false;
2176 if (MaxBitSplit.getNumOccurrences()) {
2177 if (CountBitSplit >= MaxBitSplit)
2178 return false;
2179 }
2180
2181 unsigned Opc = MI->getOpcode();
2182 switch (Opc) {
2183 case Hexagon::A4_bitsplit:
2184 case Hexagon::A4_bitspliti:
2185 return false;
2186 }
2187
2188 unsigned W = RC.width();
2189 if (W != 32)
2190 return false;
2191
2192 auto ctlz = [] (const BitTracker::RegisterCell &C) -> unsigned {
2193 unsigned Z = C.width();
2194 while (Z > 0 && C[Z-1].is(0))
2195 --Z;
2196 return C.width() - Z;
2197 };
2198
2199 // Count the number of leading zeros in the target RC.
2200 unsigned Z = ctlz(RC);
2201 if (Z == 0 || Z == W)
2202 return false;
2203
2204 // A simplistic analysis: assume the source register (the one being split)
2205 // is fully unknown, and that all its bits are self-references.
2206 const BitTracker::BitValue &B0 = RC[0];
2207 if (B0.Type != BitTracker::BitValue::Ref)
2208 return false;
2209
2210 unsigned SrcR = B0.RefI.Reg;
2211 unsigned SrcSR = 0;
2212 unsigned Pos = B0.RefI.Pos;
2213
2214 // All the non-zero bits should be consecutive bits from the same register.
2215 for (unsigned i = 1; i < W-Z; ++i) {
2216 const BitTracker::BitValue &V = RC[i];
2217 if (V.Type != BitTracker::BitValue::Ref)
2218 return false;
2219 if (V.RefI.Reg != SrcR || V.RefI.Pos != Pos+i)
2220 return false;
2221 }
2222
2223 // Now, find the other bitfield among AVs.
2224 for (unsigned S = AVs.find_first(); S; S = AVs.find_next(S)) {
2225 // The number of leading zeros here should be the number of trailing
2226 // non-zeros in RC.
2227 unsigned SRC = MRI.getRegClass(S)->getID();
2228 if (SRC != Hexagon::IntRegsRegClassID &&
2229 SRC != Hexagon::DoubleRegsRegClassID)
2230 continue;
2231 if (!BT.has(S))
2232 continue;
2233 const BitTracker::RegisterCell &SC = BT.lookup(S);
2234 if (SC.width() != W || ctlz(SC) != W-Z)
2235 continue;
2236 // The Z lower bits should now match SrcR.
2237 const BitTracker::BitValue &S0 = SC[0];
2238 if (S0.Type != BitTracker::BitValue::Ref || S0.RefI.Reg != SrcR)
2239 continue;
2240 unsigned P = S0.RefI.Pos;
2241
2242 if (Pos <= P && (Pos + W-Z) != P)
2243 continue;
2244 if (P < Pos && (P + Z) != Pos)
2245 continue;
2246 // The starting bitfield position must be at a subregister boundary.
2247 if (std::min(P, Pos) != 0 && std::min(P, Pos) != 32)
2248 continue;
2249
2250 unsigned I;
2251 for (I = 1; I < Z; ++I) {
2252 const BitTracker::BitValue &V = SC[I];
2253 if (V.Type != BitTracker::BitValue::Ref)
2254 break;
2255 if (V.RefI.Reg != SrcR || V.RefI.Pos != P+I)
2256 break;
2257 }
2258 if (I != Z)
2259 continue;
2260
2261 // Generate bitsplit where S is defined.
2262 if (MaxBitSplit.getNumOccurrences())
2263 CountBitSplit++;
2264 MachineInstr *DefS = MRI.getVRegDef(S);
2265 assert(DefS != nullptr);
2266 DebugLoc DL = DefS->getDebugLoc();
2267 MachineBasicBlock &B = *DefS->getParent();
2268 auto At = DefS->isPHI() ? B.getFirstNonPHI()
2269 : MachineBasicBlock::iterator(DefS);
2270 if (MRI.getRegClass(SrcR)->getID() == Hexagon::DoubleRegsRegClassID)
2271 SrcSR = (std::min(Pos, P) == 32) ? Hexagon::isub_hi : Hexagon::isub_lo;
2272 if (!validateReg({SrcR,SrcSR}, Hexagon::A4_bitspliti, 1))
2273 continue;
2274 unsigned ImmOp = Pos <= P ? W-Z : Z;
2275
2276 // Find an existing bitsplit instruction if one already exists.
2277 unsigned NewR = 0;
2278 for (MachineInstr *In : NewMIs) {
2279 if (In->getOpcode() != Hexagon::A4_bitspliti)
2280 continue;
2281 MachineOperand &Op1 = In->getOperand(1);
2282 if (Op1.getReg() != SrcR || Op1.getSubReg() != SrcSR)
2283 continue;
2284 if (In->getOperand(2).getImm() != ImmOp)
2285 continue;
2286 // Check if the target register is available here.
2287 MachineOperand &Op0 = In->getOperand(0);
2288 MachineInstr *DefI = MRI.getVRegDef(Op0.getReg());
2289 assert(DefI != nullptr);
2290 if (!MDT.dominates(DefI, &*At))
2291 continue;
2292
2293 // Found one that can be reused.
2294 assert(Op0.getSubReg() == 0);
2295 NewR = Op0.getReg();
2296 break;
2297 }
2298 if (!NewR) {
2299 NewR = MRI.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
2300 auto NewBS = BuildMI(B, At, DL, HII.get(Hexagon::A4_bitspliti), NewR)
2301 .addReg(SrcR, 0, SrcSR)
2302 .addImm(ImmOp);
2303 NewMIs.push_back(NewBS);
2304 }
2305 if (Pos <= P) {
2306 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_lo, MRI);
2307 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_hi, MRI);
2308 } else {
2309 HBS::replaceRegWithSub(S, NewR, Hexagon::isub_lo, MRI);
2310 HBS::replaceRegWithSub(RD.Reg, NewR, Hexagon::isub_hi, MRI);
2311 }
2312 return true;
2313 }
2314
2315 return false;
2316 }
2317
2318 // Check for tstbit simplification opportunity, where the bit being checked
2319 // can be tracked back to another register. For example:
2320 // %2 = S2_lsr_i_r %1, 5
2321 // %3 = S2_tstbit_i %2, 0
2322 // =>
2323 // %3 = S2_tstbit_i %1, 5
2324 bool BitSimplification::simplifyTstbit(MachineInstr *MI,
2325 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC) {
2326 unsigned Opc = MI->getOpcode();
2327 if (Opc != Hexagon::S2_tstbit_i)
2328 return false;
2329
2330 unsigned BN = MI->getOperand(2).getImm();
2331 BitTracker::RegisterRef RS = MI->getOperand(1);
2332 unsigned F, W;
2333 DebugLoc DL = MI->getDebugLoc();
2334 if (!BT.has(RS.Reg) || !HBS::getSubregMask(RS, F, W, MRI))
2335 return false;
2336 MachineBasicBlock &B = *MI->getParent();
2337 auto At = MI->isPHI() ? B.getFirstNonPHI()
2338 : MachineBasicBlock::iterator(MI);
2339
2340 const BitTracker::RegisterCell &SC = BT.lookup(RS.Reg);
2341 const BitTracker::BitValue &V = SC[F+BN];
2342 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg != RS.Reg) {
2343 const TargetRegisterClass *TC = MRI.getRegClass(V.RefI.Reg);
2344 // Need to map V.RefI.Reg to a 32-bit register, i.e. if it is
2345 // a double register, need to use a subregister and adjust bit
2346 // number.
2347 unsigned P = std::numeric_limits<unsigned>::max();
2348 BitTracker::RegisterRef RR(V.RefI.Reg, 0);
2349 if (TC == &Hexagon::DoubleRegsRegClass) {
2350 P = V.RefI.Pos;
2351 RR.Sub = Hexagon::isub_lo;
2352 if (P >= 32) {
2353 P -= 32;
2354 RR.Sub = Hexagon::isub_hi;
2355 }
2356 } else if (TC == &Hexagon::IntRegsRegClass) {
2357 P = V.RefI.Pos;
2358 }
2359 if (P != std::numeric_limits<unsigned>::max()) {
2360 unsigned NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2361 BuildMI(B, At, DL, HII.get(Hexagon::S2_tstbit_i), NewR)
2362 .addReg(RR.Reg, 0, RR.Sub)
2363 .addImm(P);
2364 HBS::replaceReg(RD.Reg, NewR, MRI);
2365 BT.put(NewR, RC);
2366 return true;
2367 }
2368 } else if (V.is(0) || V.is(1)) {
2369 Register NewR = MRI.createVirtualRegister(&Hexagon::PredRegsRegClass);
2370 unsigned NewOpc = V.is(0) ? Hexagon::PS_false : Hexagon::PS_true;
2371 BuildMI(B, At, DL, HII.get(NewOpc), NewR);
2372 HBS::replaceReg(RD.Reg, NewR, MRI);
2373 return true;
2374 }
2375
2376 return false;
2377 }
2378
2379 // Detect whether RD is a bitfield extract (sign- or zero-extended) of
2380 // some register from the AVs set. Create a new corresponding instruction
2381 // at the location of MI. The intent is to recognize situations where
2382 // a sequence of instructions performs an operation that is equivalent to
2383 // an extract operation, such as a shift left followed by a shift right.
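// For example (illustrative):
//   %1 = S2_asl_i_r %0, 24
//   %2 = S2_asr_i_r %1, 24
// is a sign-extension of the low 8 bits of %0, and can be rewritten as
//   %2 = A2_sxtb %0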
2384 bool BitSimplification::simplifyExtractLow(MachineInstr *MI,
2385 BitTracker::RegisterRef RD, const BitTracker::RegisterCell &RC,
2386 const RegisterSet &AVs) {
2387 if (!GenExtract)
2388 return false;
2389 if (MaxExtract.getNumOccurrences()) {
2390 if (CountExtract >= MaxExtract)
2391 return false;
2392 CountExtract++;
2393 }
2394
2395 unsigned W = RC.width();
2396 unsigned RW = W;
2397 unsigned Len;
2398 bool Signed;
2399
2400 // The code is mostly class-independent, except for the part that generates
2401 // the extract instruction, and establishes the source register (in case it
2402 // needs to use a subregister).
2403 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2404 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass)
2405 return false;
2406 assert(RD.Sub == 0);
2407
2408 // Observation:
2409 // If the cell has a form of 00..0xx..x with k zeros and n remaining
2410 // bits, this could be an extractu of the n bits, but it could also be
2411 // an extractu of a longer field which happens to have 0s in the top
2412 // bit positions.
2413 // The same logic applies to sign-extended fields.
2414 //
2415 // Do not check for the extended extracts, since it would expand the
2416 // search space quite a bit. The search may be expensive as it is.
2417
2418 const BitTracker::BitValue &TopV = RC[W-1];
2419
2420 // Eliminate candidates that have self-referential bits, since they
2421 // cannot be extracts from other registers. Also, skip registers that
2422 // have compile-time constant values.
2423 bool IsConst = true;
2424 for (unsigned I = 0; I != W; ++I) {
2425 const BitTracker::BitValue &V = RC[I];
2426 if (V.Type == BitTracker::BitValue::Ref && V.RefI.Reg == RD.Reg)
2427 return false;
2428 IsConst = IsConst && (V.is(0) || V.is(1));
2429 }
2430 if (IsConst)
2431 return false;
2432
2433 if (TopV.is(0) || TopV.is(1)) {
2434 bool S = TopV.is(1);
2435 for (--W; W > 0 && RC[W-1].is(S); --W)
2436 ;
2437 Len = W;
2438 Signed = S;
2439 // The sign bit must be a part of the field being extended.
2440 if (Signed)
2441 ++Len;
2442 } else {
2443 // This could still be a sign-extended extract.
2444 assert(TopV.Type == BitTracker::BitValue::Ref);
2445 if (TopV.RefI.Reg == RD.Reg || TopV.RefI.Pos == W-1)
2446 return false;
2447 for (--W; W > 0 && RC[W-1] == TopV; --W)
2448 ;
2449 // The top bits of RC are copies of TopV. One occurrence of TopV will
2450 // be a part of the field.
2451 Len = W + 1;
2452 Signed = true;
2453 }
2454
2455 // This would be just a copy. It should be handled elsewhere.
2456 if (Len == RW)
2457 return false;
2458
2459 LLVM_DEBUG({
2460 dbgs() << __func__ << " on reg: " << printReg(RD.Reg, &HRI, RD.Sub)
2461 << ", MI: " << *MI;
2462 dbgs() << "Cell: " << RC << '\n';
2463 dbgs() << "Expected bitfield size: " << Len << " bits, "
2464 << (Signed ? "sign" : "zero") << "-extended\n";
2465 });
2466
2467 bool Changed = false;
2468
2469 for (unsigned R = AVs.find_first(); R != 0; R = AVs.find_next(R)) {
2470 if (!BT.has(R))
2471 continue;
2472 const BitTracker::RegisterCell &SC = BT.lookup(R);
2473 unsigned SW = SC.width();
2474
2475 // The source can be longer than the destination, as long as its size is
2476 // a multiple of the size of the destination. Also, we would need to be
2477 // able to refer to the subregister in the source that would be of the
2478 // same size as the destination, but only check the sizes here.
2479 if (SW < RW || (SW % RW) != 0)
2480 continue;
2481
2482 // The field can start at any offset in SC as long as it contains Len
2483 // bits and does not cross subregister boundary (if the source register
2484 // is longer than the destination).
2485 unsigned Off = 0;
2486 while (Off <= SW-Len) {
2487 unsigned OE = (Off+Len)/RW;
2488 if (OE != Off/RW) {
2489 // The assumption here is that if the source (R) is longer than the
2490 // destination, then the destination is a sequence of words of
2491 // size RW, and each such word in R can be accessed via a subregister.
2492 //
2493 // If the beginning and the end of the field cross the subregister
2494 // boundary, advance to the next subregister.
2495 Off = OE*RW;
2496 continue;
2497 }
2498 if (HBS::isEqual(RC, 0, SC, Off, Len))
2499 break;
2500 ++Off;
2501 }
2502
2503 if (Off > SW-Len)
2504 continue;
2505
2506 // Found match.
2507 unsigned ExtOpc = 0;
2508 if (Off == 0) {
2509 if (Len == 8)
2510 ExtOpc = Signed ? Hexagon::A2_sxtb : Hexagon::A2_zxtb;
2511 else if (Len == 16)
2512 ExtOpc = Signed ? Hexagon::A2_sxth : Hexagon::A2_zxth;
2513 else if (Len < 10 && !Signed)
2514 ExtOpc = Hexagon::A2_andir;
2515 }
2516 if (ExtOpc == 0) {
2517 ExtOpc =
2518 Signed ? (RW == 32 ? Hexagon::S4_extract : Hexagon::S4_extractp)
2519 : (RW == 32 ? Hexagon::S2_extractu : Hexagon::S2_extractup);
2520 }
2521 unsigned SR = 0;
2522 // This only recognizes isub_lo and isub_hi.
2523 if (RW != SW && RW*2 != SW)
2524 continue;
2525 if (RW != SW)
2526 SR = (Off/RW == 0) ? Hexagon::isub_lo : Hexagon::isub_hi;
2527 Off = Off % RW;
2528
2529 if (!validateReg({R,SR}, ExtOpc, 1))
2530 continue;
2531
2532 // Don't generate the same instruction as the one being optimized.
2533 if (MI->getOpcode() == ExtOpc) {
2534 // All possible ExtOpc's have the source in operand(1).
2535 const MachineOperand &SrcOp = MI->getOperand(1);
2536 if (SrcOp.getReg() == R)
2537 continue;
2538 }
2539
2540 DebugLoc DL = MI->getDebugLoc();
2541 MachineBasicBlock &B = *MI->getParent();
2542 Register NewR = MRI.createVirtualRegister(FRC);
2543 auto At = MI->isPHI() ? B.getFirstNonPHI()
2544 : MachineBasicBlock::iterator(MI);
2545 auto MIB = BuildMI(B, At, DL, HII.get(ExtOpc), NewR)
2546 .addReg(R, 0, SR);
2547 switch (ExtOpc) {
2548 case Hexagon::A2_sxtb:
2549 case Hexagon::A2_zxtb:
2550 case Hexagon::A2_sxth:
2551 case Hexagon::A2_zxth:
2552 break;
2553 case Hexagon::A2_andir:
2554 MIB.addImm((1u << Len) - 1);
2555 break;
2556 case Hexagon::S4_extract:
2557 case Hexagon::S2_extractu:
2558 case Hexagon::S4_extractp:
2559 case Hexagon::S2_extractup:
2560 MIB.addImm(Len)
2561 .addImm(Off);
2562 break;
2563 default:
2564 llvm_unreachable("Unexpected opcode");
2565 }
2566
2567 HBS::replaceReg(RD.Reg, NewR, MRI);
2568 BT.put(BitTracker::RegisterRef(NewR), RC);
2569 Changed = true;
2570 break;
2571 }
2572
2573 return Changed;
2574 }
2575
2576 bool BitSimplification::simplifyRCmp0(MachineInstr *MI,
2577 BitTracker::RegisterRef RD) {
2578 unsigned Opc = MI->getOpcode();
2579 if (Opc != Hexagon::A4_rcmpeqi && Opc != Hexagon::A4_rcmpneqi)
2580 return false;
2581 MachineOperand &CmpOp = MI->getOperand(2);
2582 if (!CmpOp.isImm() || CmpOp.getImm() != 0)
2583 return false;
2584
2585 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2586 if (FRC != &Hexagon::IntRegsRegClass && FRC != &Hexagon::DoubleRegsRegClass)
2587 return false;
2588 assert(RD.Sub == 0);
2589
2590 MachineBasicBlock &B = *MI->getParent();
2591 const DebugLoc &DL = MI->getDebugLoc();
2592 auto At = MI->isPHI() ? B.getFirstNonPHI()
2593 : MachineBasicBlock::iterator(MI);
2594 bool KnownZ = true;
2595 bool KnownNZ = false;
2596
2597 BitTracker::RegisterRef SR = MI->getOperand(1);
2598 if (!BT.has(SR.Reg))
2599 return false;
2600 const BitTracker::RegisterCell &SC = BT.lookup(SR.Reg);
2601 unsigned F, W;
2602 if (!HBS::getSubregMask(SR, F, W, MRI))
2603 return false;
2604
2605 for (uint16_t I = F; I != F+W; ++I) {
2606 const BitTracker::BitValue &V = SC[I];
2607 if (!V.is(0))
2608 KnownZ = false;
2609 if (V.is(1))
2610 KnownNZ = true;
2611 }
2612
2613 auto ReplaceWithConst = [&](int C) {
2614 Register NewR = MRI.createVirtualRegister(FRC);
2615 BuildMI(B, At, DL, HII.get(Hexagon::A2_tfrsi), NewR)
2616 .addImm(C);
2617 HBS::replaceReg(RD.Reg, NewR, MRI);
2618 BitTracker::RegisterCell NewRC(W);
2619 for (uint16_t I = 0; I != W; ++I) {
2620 NewRC[I] = BitTracker::BitValue(C & 1);
2621 C = unsigned(C) >> 1;
2622 }
2623 BT.put(BitTracker::RegisterRef(NewR), NewRC);
2624 return true;
2625 };
2626
2627 auto IsNonZero = [] (const MachineOperand &Op) {
2628 if (Op.isGlobal() || Op.isBlockAddress())
2629 return true;
2630 if (Op.isImm())
2631 return Op.getImm() != 0;
2632 if (Op.isCImm())
2633 return !Op.getCImm()->isZero();
2634 if (Op.isFPImm())
2635 return !Op.getFPImm()->isZero();
2636 return false;
2637 };
2638
2639 auto IsZero = [] (const MachineOperand &Op) {
2640 if (Op.isGlobal() || Op.isBlockAddress())
2641 return false;
2642 if (Op.isImm())
2643 return Op.getImm() == 0;
2644 if (Op.isCImm())
2645 return Op.getCImm()->isZero();
2646 if (Op.isFPImm())
2647 return Op.getFPImm()->isZero();
2648 return false;
2649 };
2650
2651 // If the source register is known to be 0 or non-0, the comparison can
2652 // be folded to a load of a constant.
2653 if (KnownZ || KnownNZ) {
2654 assert(KnownZ != KnownNZ && "Register cannot be both 0 and non-0");
2655 return ReplaceWithConst(KnownZ == (Opc == Hexagon::A4_rcmpeqi));
2656 }
2657
2658 // Special case: if the compare comes from a C2_muxii, then we know the
2659 // two possible constants that can be the source value.
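  // For example (illustrative), with
  //   %1 = C2_muxii %p, 2, 3
  //   %2 = A4_rcmpeqi %1, 0
  // both possible source values are non-zero, so the compare can be replaced
  // with a transfer of the constant 0 (A2_tfrsi 0).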
2660 MachineInstr *InpDef = MRI.getVRegDef(SR.Reg);
2661 if (!InpDef)
2662 return false;
2663 if (SR.Sub == 0 && InpDef->getOpcode() == Hexagon::C2_muxii) {
2664 MachineOperand &Src1 = InpDef->getOperand(2);
2665 MachineOperand &Src2 = InpDef->getOperand(3);
2666 // Check if both are non-zero.
2667 bool KnownNZ1 = IsNonZero(Src1), KnownNZ2 = IsNonZero(Src2);
2668 if (KnownNZ1 && KnownNZ2)
2669 return ReplaceWithConst(Opc == Hexagon::A4_rcmpneqi);
2670 // Check if both are zero.
2671 bool KnownZ1 = IsZero(Src1), KnownZ2 = IsZero(Src2);
2672 if (KnownZ1 && KnownZ2)
2673 return ReplaceWithConst(Opc == Hexagon::A4_rcmpeqi);
2674
2675 // If for both operands we know that they are either 0 or non-0,
2676 // replace the comparison with a C2_muxii, using the same predicate
2677 // register, but with operands substituted with 0/1 accordingly.
2678 if ((KnownZ1 || KnownNZ1) && (KnownZ2 || KnownNZ2)) {
2679 Register NewR = MRI.createVirtualRegister(FRC);
2680 BuildMI(B, At, DL, HII.get(Hexagon::C2_muxii), NewR)
2681 .addReg(InpDef->getOperand(1).getReg())
2682 .addImm(KnownZ1 == (Opc == Hexagon::A4_rcmpeqi))
2683 .addImm(KnownZ2 == (Opc == Hexagon::A4_rcmpeqi));
2684 HBS::replaceReg(RD.Reg, NewR, MRI);
2685 // Create a new cell with only the least significant bit unknown.
2686 BitTracker::RegisterCell NewRC(W);
2687 NewRC[0] = BitTracker::BitValue::self();
2688 NewRC.fill(1, W, BitTracker::BitValue::Zero);
2689 BT.put(BitTracker::RegisterRef(NewR), NewRC);
2690 return true;
2691 }
2692 }
2693
2694 return false;
2695 }
2696
2697 bool BitSimplification::processBlock(MachineBasicBlock &B,
2698 const RegisterSet &AVs) {
2699 if (!BT.reached(&B))
2700 return false;
2701 bool Changed = false;
2702 RegisterSet AVB = AVs;
2703 RegisterSet Defs;
2704
2705 for (auto I = B.begin(), E = B.end(); I != E; ++I, AVB.insert(Defs)) {
2706 MachineInstr *MI = &*I;
2707 Defs.clear();
2708 HBS::getInstrDefs(*MI, Defs);
2709
2710 unsigned Opc = MI->getOpcode();
2711 if (Opc == TargetOpcode::COPY || Opc == TargetOpcode::REG_SEQUENCE)
2712 continue;
2713
2714 if (MI->mayStore()) {
2715 bool T = genStoreUpperHalf(MI);
2716 T = T || genStoreImmediate(MI);
2717 Changed |= T;
2718 continue;
2719 }
2720
2721 if (Defs.count() != 1)
2722 continue;
2723 const MachineOperand &Op0 = MI->getOperand(0);
2724 if (!Op0.isReg() || !Op0.isDef())
2725 continue;
2726 BitTracker::RegisterRef RD = Op0;
2727 if (!BT.has(RD.Reg))
2728 continue;
2729 const TargetRegisterClass *FRC = HBS::getFinalVRegClass(RD, MRI);
2730 const BitTracker::RegisterCell &RC = BT.lookup(RD.Reg);
2731
2732 if (FRC->getID() == Hexagon::DoubleRegsRegClassID) {
2733 bool T = genPackhl(MI, RD, RC);
2734 T = T || simplifyExtractLow(MI, RD, RC, AVB);
2735 Changed |= T;
2736 continue;
2737 }
2738
2739 if (FRC->getID() == Hexagon::IntRegsRegClassID) {
2740 bool T = genBitSplit(MI, RD, RC, AVB);
2741 T = T || simplifyExtractLow(MI, RD, RC, AVB);
2742 T = T || genExtractHalf(MI, RD, RC);
2743 T = T || genCombineHalf(MI, RD, RC);
2744 T = T || genExtractLow(MI, RD, RC);
2745 T = T || simplifyRCmp0(MI, RD);
2746 Changed |= T;
2747 continue;
2748 }
2749
2750 if (FRC->getID() == Hexagon::PredRegsRegClassID) {
2751 bool T = simplifyTstbit(MI, RD, RC);
2752 Changed |= T;
2753 continue;
2754 }
2755 }
2756 return Changed;
2757 }
2758
2759 bool HexagonBitSimplify::runOnMachineFunction(MachineFunction &MF) {
2760 if (skipFunction(MF.getFunction()))
2761 return false;
2762
2763 auto &HST = MF.getSubtarget<HexagonSubtarget>();
2764 auto &HRI = *HST.getRegisterInfo();
2765 auto &HII = *HST.getInstrInfo();
2766
2767 MDT = &getAnalysis<MachineDominatorTree>();
2768 MachineRegisterInfo &MRI = MF.getRegInfo();
2769 bool Changed;
2770
2771 Changed = DeadCodeElimination(MF, *MDT).run();
2772
2773 const HexagonEvaluator HE(HRI, MRI, HII, MF);
2774 BitTracker BT(HE, MF);
2775 LLVM_DEBUG(BT.trace(true));
2776 BT.run();
2777
2778 MachineBasicBlock &Entry = MF.front();
2779
2780 RegisterSet AIG; // Available registers for IG.
2781 ConstGeneration ImmG(BT, HII, MRI);
2782 Changed |= visitBlock(Entry, ImmG, AIG);
2783
2784 RegisterSet ARE; // Available registers for RIE.
2785 RedundantInstrElimination RIE(BT, HII, HRI, MRI);
2786 bool Ried = visitBlock(Entry, RIE, ARE);
2787 if (Ried) {
2788 Changed = true;
2789 BT.run();
2790 }
2791
2792 RegisterSet ACG; // Available registers for CG.
2793 CopyGeneration CopyG(BT, HII, HRI, MRI);
2794 Changed |= visitBlock(Entry, CopyG, ACG);
2795
2796 RegisterSet ACP; // Available registers for CP.
2797 CopyPropagation CopyP(HRI, MRI);
2798 Changed |= visitBlock(Entry, CopyP, ACP);
2799
2800 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2801
2802 BT.run();
2803 RegisterSet ABS; // Available registers for BS.
2804 BitSimplification BitS(BT, *MDT, HII, HRI, MRI, MF);
2805 Changed |= visitBlock(Entry, BitS, ABS);
2806
2807 Changed = DeadCodeElimination(MF, *MDT).run() || Changed;
2808
2809 if (Changed) {
2810 for (auto &B : MF)
2811 for (auto &I : B)
2812 I.clearKillInfo();
2813 DeadCodeElimination(MF, *MDT).run();
2814 }
2815 return Changed;
2816 }
2817
2818 // Recognize loops where the code at the end of the loop matches the code
2819 // before the entry of the loop, and the matching code is such that it can
2820 // be simplified. This pass relies on the bit simplification above and only
2821 // prepares code in a way that can be handled by the bit simplification.
2822 //
2823 // This is the motivating testcase (and explanation):
2824 //
2825 // {
2826 // loop0(.LBB0_2, r1) // %for.body.preheader
2827 // r5:4 = memd(r0++#8)
2828 // }
2829 // {
2830 // r3 = lsr(r4, #16)
2831 // r7:6 = combine(r5, r5)
2832 // }
2833 // {
2834 // r3 = insert(r5, #16, #16)
2835 // r7:6 = vlsrw(r7:6, #16)
2836 // }
2837 // .LBB0_2:
2838 // {
2839 // memh(r2+#4) = r5
2840 // memh(r2+#6) = r6 # R6 is really R5.H
2841 // }
2842 // {
2843 // r2 = add(r2, #8)
2844 // memh(r2+#0) = r4
2845 // memh(r2+#2) = r3 # R3 is really R4.H
2846 // }
2847 // {
2848 // r5:4 = memd(r0++#8)
2849 // }
2850 // { # "Shuffling" code that sets up R3 and R6
2851 // r3 = lsr(r4, #16) # so that their halves can be stored in the
2852 // r7:6 = combine(r5, r5) # next iteration. This could be folded into
2853 // } # the stores if the code was at the beginning
2854 // { # of the loop iteration. Since the same code
2855 // r3 = insert(r5, #16, #16) # precedes the loop, it can actually be moved
2856 // r7:6 = vlsrw(r7:6, #16) # there.
2857 // }:endloop0
2858 //
2859 //
2860 // The outcome:
2861 //
2862 // {
2863 // loop0(.LBB0_2, r1)
2864 // r5:4 = memd(r0++#8)
2865 // }
2866 // .LBB0_2:
2867 // {
2868 // memh(r2+#4) = r5
2869 // memh(r2+#6) = r5.h
2870 // }
2871 // {
2872 // r2 = add(r2, #8)
2873 // memh(r2+#0) = r4
2874 // memh(r2+#2) = r4.h
2875 // }
2876 // {
2877 // r5:4 = memd(r0++#8)
2878 // }:endloop0
2879
2880 namespace llvm {
2881
2882 FunctionPass *createHexagonLoopRescheduling();
2883 void initializeHexagonLoopReschedulingPass(PassRegistry&);
2884
2885 } // end namespace llvm
2886
2887 namespace {
2888
2889 class HexagonLoopRescheduling : public MachineFunctionPass {
2890 public:
2891 static char ID;
2892
2893   HexagonLoopRescheduling() : MachineFunctionPass(ID) {
2894 initializeHexagonLoopReschedulingPass(*PassRegistry::getPassRegistry());
2895 }
2896
2897 bool runOnMachineFunction(MachineFunction &MF) override;
2898
2899 private:
2900 const HexagonInstrInfo *HII = nullptr;
2901 const HexagonRegisterInfo *HRI = nullptr;
2902 MachineRegisterInfo *MRI = nullptr;
2903 BitTracker *BTP = nullptr;
2904
2905 struct LoopCand {
2906     LoopCand(MachineBasicBlock *lb, MachineBasicBlock *pb,
2907 MachineBasicBlock *eb) : LB(lb), PB(pb), EB(eb) {}
2908
2909 MachineBasicBlock *LB, *PB, *EB;
2910 };
2911 using InstrList = std::vector<MachineInstr *>;
2912 struct InstrGroup {
2913 BitTracker::RegisterRef Inp, Out;
2914 InstrList Ins;
2915 };
2916 struct PhiInfo {
2917 PhiInfo(MachineInstr &P, MachineBasicBlock &B);
2918
2919 unsigned DefR;
2920 BitTracker::RegisterRef LR, PR; // Loop Register, Preheader Register
2921 MachineBasicBlock *LB, *PB; // Loop Block, Preheader Block
2922 };
2923
2924 static unsigned getDefReg(const MachineInstr *MI);
2925 bool isConst(unsigned Reg) const;
2926 bool isBitShuffle(const MachineInstr *MI, unsigned DefR) const;
2927 bool isStoreInput(const MachineInstr *MI, unsigned DefR) const;
2928 bool isShuffleOf(unsigned OutR, unsigned InpR) const;
2929 bool isSameShuffle(unsigned OutR1, unsigned InpR1, unsigned OutR2,
2930 unsigned &InpR2) const;
2931 void moveGroup(InstrGroup &G, MachineBasicBlock &LB, MachineBasicBlock &PB,
2932 MachineBasicBlock::iterator At, unsigned OldPhiR, unsigned NewPredR);
2933 bool processLoop(LoopCand &C);
2934 };
2935
2936 } // end anonymous namespace
2937
2938 char HexagonLoopRescheduling::ID = 0;
2939
2940 INITIALIZE_PASS(HexagonLoopRescheduling, "hexagon-loop-resched",
2941 "Hexagon Loop Rescheduling", false, false)
2942
2943 HexagonLoopRescheduling::PhiInfo::PhiInfo(MachineInstr &P,
2944 MachineBasicBlock &B) {
2945 DefR = HexagonLoopRescheduling::getDefReg(&P);
2946 LB = &B;
2947 PB = nullptr;
2948 for (unsigned i = 1, n = P.getNumOperands(); i < n; i += 2) {
2949 const MachineOperand &OpB = P.getOperand(i+1);
2950 if (OpB.getMBB() == &B) {
2951 LR = P.getOperand(i);
2952 continue;
2953 }
2954 PB = OpB.getMBB();
2955 PR = P.getOperand(i);
2956 }
2957 }
2958
2959 unsigned HexagonLoopRescheduling::getDefReg(const MachineInstr *MI) {
2960 RegisterSet Defs;
2961 HBS::getInstrDefs(*MI, Defs);
2962 if (Defs.count() != 1)
2963 return 0;
2964 return Defs.find_first();
2965 }
2966
2967 bool HexagonLoopRescheduling::isConst(unsigned Reg) const {
2968 if (!BTP->has(Reg))
2969 return false;
2970 const BitTracker::RegisterCell &RC = BTP->lookup(Reg);
2971 for (unsigned i = 0, w = RC.width(); i < w; ++i) {
2972 const BitTracker::BitValue &V = RC[i];
2973 if (!V.is(0) && !V.is(1))
2974 return false;
2975 }
2976 return true;
2977 }
2978
2979 bool HexagonLoopRescheduling::isBitShuffle(const MachineInstr *MI,
2980 unsigned DefR) const {
2981 unsigned Opc = MI->getOpcode();
2982 switch (Opc) {
2983 case TargetOpcode::COPY:
2984 case Hexagon::S2_lsr_i_r:
2985 case Hexagon::S2_asr_i_r:
2986 case Hexagon::S2_asl_i_r:
2987 case Hexagon::S2_lsr_i_p:
2988 case Hexagon::S2_asr_i_p:
2989 case Hexagon::S2_asl_i_p:
2990 case Hexagon::S2_insert:
2991 case Hexagon::A2_or:
2992 case Hexagon::A2_orp:
2993 case Hexagon::A2_and:
2994 case Hexagon::A2_andp:
2995 case Hexagon::A2_combinew:
2996 case Hexagon::A4_combineri:
2997 case Hexagon::A4_combineir:
2998 case Hexagon::A2_combineii:
2999 case Hexagon::A4_combineii:
3000 case Hexagon::A2_combine_ll:
3001 case Hexagon::A2_combine_lh:
3002 case Hexagon::A2_combine_hl:
3003 case Hexagon::A2_combine_hh:
3004 return true;
3005 }
3006 return false;
3007 }
3008
3009 bool HexagonLoopRescheduling::isStoreInput(const MachineInstr *MI,
3010 unsigned InpR) const {
3011 for (unsigned i = 0, n = MI->getNumOperands(); i < n; ++i) {
3012 const MachineOperand &Op = MI->getOperand(i);
3013 if (!Op.isReg())
3014 continue;
3015 if (Op.getReg() == InpR)
3016 return i == n-1;
3017 }
3018 return false;
3019 }
3020
3021 bool HexagonLoopRescheduling::isShuffleOf(unsigned OutR, unsigned InpR) const {
3022 if (!BTP->has(OutR) || !BTP->has(InpR))
3023 return false;
3024 const BitTracker::RegisterCell &OutC = BTP->lookup(OutR);
3025 for (unsigned i = 0, w = OutC.width(); i < w; ++i) {
3026 const BitTracker::BitValue &V = OutC[i];
3027 if (V.Type != BitTracker::BitValue::Ref)
3028 continue;
3029 if (V.RefI.Reg != InpR)
3030 return false;
3031 }
3032 return true;
3033 }
3034
3035 bool HexagonLoopRescheduling::isSameShuffle(unsigned OutR1, unsigned InpR1,
3036 unsigned OutR2, unsigned &InpR2) const {
3037 if (!BTP->has(OutR1) || !BTP->has(InpR1) || !BTP->has(OutR2))
3038 return false;
3039 const BitTracker::RegisterCell &OutC1 = BTP->lookup(OutR1);
3040 const BitTracker::RegisterCell &OutC2 = BTP->lookup(OutR2);
3041 unsigned W = OutC1.width();
3042 unsigned MatchR = 0;
3043 if (W != OutC2.width())
3044 return false;
3045 for (unsigned i = 0; i < W; ++i) {
3046 const BitTracker::BitValue &V1 = OutC1[i], &V2 = OutC2[i];
3047 if (V1.Type != V2.Type || V1.Type == BitTracker::BitValue::One)
3048 return false;
3049 if (V1.Type != BitTracker::BitValue::Ref)
3050 continue;
3051 if (V1.RefI.Pos != V2.RefI.Pos)
3052 return false;
3053 if (V1.RefI.Reg != InpR1)
3054 return false;
3055 if (V2.RefI.Reg == 0 || V2.RefI.Reg == OutR2)
3056 return false;
3057 if (!MatchR)
3058 MatchR = V2.RefI.Reg;
3059 else if (V2.RefI.Reg != MatchR)
3060 return false;
3061 }
3062 InpR2 = MatchR;
3063 return true;
3064 }
3065
3066 void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB,
3067 MachineBasicBlock &PB, MachineBasicBlock::iterator At, unsigned OldPhiR,
3068 unsigned NewPredR) {
3069 DenseMap<unsigned,unsigned> RegMap;
3070
3071 const TargetRegisterClass *PhiRC = MRI->getRegClass(NewPredR);
3072 Register PhiR = MRI->createVirtualRegister(PhiRC);
3073 BuildMI(LB, At, At->getDebugLoc(), HII->get(TargetOpcode::PHI), PhiR)
3074 .addReg(NewPredR)
3075 .addMBB(&PB)
3076 .addReg(G.Inp.Reg)
3077 .addMBB(&LB);
3078 RegMap.insert(std::make_pair(G.Inp.Reg, PhiR));
3079
3080 for (unsigned i = G.Ins.size(); i > 0; --i) {
3081 const MachineInstr *SI = G.Ins[i-1];
3082 unsigned DR = getDefReg(SI);
3083 const TargetRegisterClass *RC = MRI->getRegClass(DR);
3084 Register NewDR = MRI->createVirtualRegister(RC);
3085 DebugLoc DL = SI->getDebugLoc();
3086
3087 auto MIB = BuildMI(LB, At, DL, HII->get(SI->getOpcode()), NewDR);
3088 for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) {
3089 const MachineOperand &Op = SI->getOperand(j);
3090 if (!Op.isReg()) {
3091 MIB.add(Op);
3092 continue;
3093 }
3094 if (!Op.isUse())
3095 continue;
3096 unsigned UseR = RegMap[Op.getReg()];
3097 MIB.addReg(UseR, 0, Op.getSubReg());
3098 }
3099 RegMap.insert(std::make_pair(DR, NewDR));
3100 }
3101
3102 HBS::replaceReg(OldPhiR, RegMap[G.Out.Reg], *MRI);
3103 }
3104
3105 bool HexagonLoopRescheduling::processLoop(LoopCand &C) {
3106 LLVM_DEBUG(dbgs() << "Processing loop in " << printMBBReference(*C.LB)
3107 << "\n");
3108 std::vector<PhiInfo> Phis;
3109 for (auto &I : *C.LB) {
3110 if (!I.isPHI())
3111 break;
3112 unsigned PR = getDefReg(&I);
3113 if (isConst(PR))
3114 continue;
3115 bool BadUse = false, GoodUse = false;
3116 for (auto UI = MRI->use_begin(PR), UE = MRI->use_end(); UI != UE; ++UI) {
3117 MachineInstr *UseI = UI->getParent();
3118 if (UseI->getParent() != C.LB) {
3119 BadUse = true;
3120 break;
3121 }
3122 if (isBitShuffle(UseI, PR) || isStoreInput(UseI, PR))
3123 GoodUse = true;
3124 }
3125 if (BadUse || !GoodUse)
3126 continue;
3127
3128 Phis.push_back(PhiInfo(I, *C.LB));
3129 }
3130
3131 LLVM_DEBUG({
3132 dbgs() << "Phis: {";
3133 for (auto &I : Phis) {
3134 dbgs() << ' ' << printReg(I.DefR, HRI) << "=phi("
3135 << printReg(I.PR.Reg, HRI, I.PR.Sub) << ":b" << I.PB->getNumber()
3136 << ',' << printReg(I.LR.Reg, HRI, I.LR.Sub) << ":b"
3137 << I.LB->getNumber() << ')';
3138 }
3139 dbgs() << " }\n";
3140 });
3141
3142 if (Phis.empty())
3143 return false;
3144
3145 bool Changed = false;
3146 InstrList ShufIns;
3147
3148 // Go backwards in the block: for each bit shuffling instruction, check
3149 // if that instruction could potentially be moved to the front of the loop:
3150 // the instruction's output cannot be used by a non-shuffling instruction
3151 // in this loop.
3152 for (auto I = C.LB->rbegin(), E = C.LB->rend(); I != E; ++I) {
3153 if (I->isTerminator())
3154 continue;
3155 if (I->isPHI())
3156 break;
3157
3158 RegisterSet Defs;
3159 HBS::getInstrDefs(*I, Defs);
3160 if (Defs.count() != 1)
3161 continue;
3162 unsigned DefR = Defs.find_first();
3163 if (!Register::isVirtualRegister(DefR))
3164 continue;
3165 if (!isBitShuffle(&*I, DefR))
3166 continue;
3167
3168 bool BadUse = false;
3169 for (auto UI = MRI->use_begin(DefR), UE = MRI->use_end(); UI != UE; ++UI) {
3170 MachineInstr *UseI = UI->getParent();
3171 if (UseI->getParent() == C.LB) {
3172 if (UseI->isPHI()) {
3173 // If the use is in a phi node in this loop, then it should be
3174 // the value corresponding to the back edge.
3175 unsigned Idx = UI.getOperandNo();
3176 if (UseI->getOperand(Idx+1).getMBB() != C.LB)
3177 BadUse = true;
3178 } else {
3179 auto F = find(ShufIns, UseI);
3180 if (F == ShufIns.end())
3181 BadUse = true;
3182 }
3183 } else {
3184 // There is a use outside of the loop, but there is no epilog block
3185 // suitable for a copy-out.
3186 if (C.EB == nullptr)
3187 BadUse = true;
3188 }
3189 if (BadUse)
3190 break;
3191 }
3192
3193 if (BadUse)
3194 continue;
3195 ShufIns.push_back(&*I);
3196 }
3197
3198 // Partition the list of shuffling instructions into instruction groups,
3199 // where each group has to be moved as a whole (i.e. a group is a chain of
3200 // dependent instructions). A group produces a single live output register,
3201 // which is meant to be the input of the loop phi node (although this is
3202 // not checked here yet). It also uses a single register as its input,
3203 // which is some value produced in the loop body. After moving the group
3204 // to the beginning of the loop, that input register would need to be
3205 // the loop-carried register (through a phi node) instead of the (currently
3206 // loop-carried) output register.
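// For example (illustrative), the chain
//   %3 = S2_lsr_i_p %2, 16
//   %4 = S2_asl_i_p %3, 16
// forms a single group with input %2 and output %4; after the group is moved
// to the top of the loop, %2 (rather than %4) becomes the loop-carried value.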
  using InstrGroupList = std::vector<InstrGroup>;
  InstrGroupList Groups;

  for (unsigned i = 0, n = ShufIns.size(); i < n; ++i) {
    MachineInstr *SI = ShufIns[i];
    if (SI == nullptr)
      continue;

    InstrGroup G;
    G.Ins.push_back(SI);
    G.Out.Reg = getDefReg(SI);
    RegisterSet Inputs;
    HBS::getInstrUses(*SI, Inputs);

    for (unsigned j = i+1; j < n; ++j) {
      MachineInstr *MI = ShufIns[j];
      if (MI == nullptr)
        continue;
      RegisterSet Defs;
      HBS::getInstrDefs(*MI, Defs);
      // If this instruction does not define any pending inputs, skip it.
      if (!Defs.intersects(Inputs))
        continue;
      // Otherwise, add it to the current group and remove the inputs that
      // are defined by MI.
      G.Ins.push_back(MI);
      Inputs.remove(Defs);
      // Then add all registers used by MI.
      HBS::getInstrUses(*MI, Inputs);
      ShufIns[j] = nullptr;
    }

    // Only add a group if it requires at most one register.
    if (Inputs.count() > 1)
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    if (llvm::find_if(Phis, LoopInpEq) == Phis.end())
      continue;

    G.Inp.Reg = Inputs.find_first();
    Groups.push_back(G);
  }

  LLVM_DEBUG({
    for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
      InstrGroup &G = Groups[i];
      dbgs() << "Group[" << i << "] inp: "
             << printReg(G.Inp.Reg, HRI, G.Inp.Sub)
             << " out: " << printReg(G.Out.Reg, HRI, G.Out.Sub) << "\n";
      for (unsigned j = 0, m = G.Ins.size(); j < m; ++j)
        dbgs() << "  " << *G.Ins[j];
    }
  });

  for (unsigned i = 0, n = Groups.size(); i < n; ++i) {
    InstrGroup &G = Groups[i];
    if (!isShuffleOf(G.Out.Reg, G.Inp.Reg))
      continue;
    auto LoopInpEq = [G] (const PhiInfo &P) -> bool {
      return G.Out.Reg == P.LR.Reg;
    };
    auto F = llvm::find_if(Phis, LoopInpEq);
    if (F == Phis.end())
      continue;
    unsigned PrehR = 0;
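    // If the preheader value is not already the same shuffle of some register,
    // it is still acceptable when it is a constant zero (A2_tfrsi/A2_tfrpi of
    // immediate 0); when the register classes differ, a fresh zero in the
    // class of the group's input is materialized in the preheader below.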
    if (!isSameShuffle(G.Out.Reg, G.Inp.Reg, F->PR.Reg, PrehR)) {
      const MachineInstr *DefPrehR = MRI->getVRegDef(F->PR.Reg);
      unsigned Opc = DefPrehR->getOpcode();
      if (Opc != Hexagon::A2_tfrsi && Opc != Hexagon::A2_tfrpi)
        continue;
      if (!DefPrehR->getOperand(1).isImm())
        continue;
      if (DefPrehR->getOperand(1).getImm() != 0)
        continue;
      const TargetRegisterClass *RC = MRI->getRegClass(G.Inp.Reg);
      if (RC != MRI->getRegClass(F->PR.Reg)) {
        PrehR = MRI->createVirtualRegister(RC);
        unsigned TfrI = (RC == &Hexagon::IntRegsRegClass) ? Hexagon::A2_tfrsi
                                                          : Hexagon::A2_tfrpi;
        auto T = C.PB->getFirstTerminator();
        DebugLoc DL = (T != C.PB->end()) ? T->getDebugLoc() : DebugLoc();
        BuildMI(*C.PB, T, DL, HII->get(TfrI), PrehR)
          .addImm(0);
      } else {
        PrehR = F->PR.Reg;
      }
    }
    // isSameShuffle could match with PrehR being of a wider class than
    // G.Inp.Reg, for example if G shuffles the low 32 bits of its input,
    // it would match for the input being a 32-bit register, and PrehR
    // being a 64-bit register (where the low 32 bits match). This could
    // be handled, but for now skip these cases.
    if (MRI->getRegClass(PrehR) != MRI->getRegClass(G.Inp.Reg))
      continue;
    moveGroup(G, *F->LB, *F->PB, F->LB->getFirstNonPHI(), F->DefR, PrehR);
    Changed = true;
  }

  return Changed;
}

bool HexagonLoopRescheduling::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  HII = HST.getInstrInfo();
  HRI = HST.getRegisterInfo();
  MRI = &MF.getRegInfo();
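  // Run the bit tracker over the function and keep its results available to
  // the rest of the pass through BTP.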
  const HexagonEvaluator HE(*HRI, *MRI, *HII, MF);
  BitTracker BT(HE, MF);
  LLVM_DEBUG(BT.trace(true));
  BT.run();
  BTP = &BT;

  std::vector<LoopCand> Cand;

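  // Collect candidate loops: single-block loops with exactly two predecessors
  // (the block itself via the back edge, and the preheader PB) and exactly two
  // successors (the block itself and an exit block, recorded as the epilog EB
  // only if that exit has a single predecessor).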
  for (auto &B : MF) {
    if (B.pred_size() != 2 || B.succ_size() != 2)
      continue;
    MachineBasicBlock *PB = nullptr;
    bool IsLoop = false;
    for (auto PI = B.pred_begin(), PE = B.pred_end(); PI != PE; ++PI) {
      if (*PI != &B)
        PB = *PI;
      else
        IsLoop = true;
    }
    if (!IsLoop)
      continue;

    MachineBasicBlock *EB = nullptr;
    for (auto SI = B.succ_begin(), SE = B.succ_end(); SI != SE; ++SI) {
      if (*SI == &B)
        continue;
      // Set EB to the epilog block, if it has only 1 predecessor (i.e. the
      // edge from B to EB is non-critical).
      if ((*SI)->pred_size() == 1)
        EB = *SI;
      break;
    }

    Cand.push_back(LoopCand(&B, PB, EB));
  }

  bool Changed = false;
  for (auto &C : Cand)
    Changed |= processLoop(C);

  return Changed;
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createHexagonLoopRescheduling() {
  return new HexagonLoopRescheduling();
}

FunctionPass *llvm::createHexagonBitSimplify() {
  return new HexagonBitSimplify();
}