//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "AArch64TargetTransformInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
                     cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
                    " integer instructions"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote "
                      "constant pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the"
                 " linker optimization hints (LOH)"), cl::init(true),
                 cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
                   " optimization pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
EnableCondOpt("aarch64-condopt",
              cl::desc("Enable the condition optimizer pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                   cl::desc("Work around Cortex-A53 erratum 835769"),
                   cl::init(false));

static cl::opt<bool>
EnableGEPOpt("aarch64-gep-opt", cl::Hidden,
             cl::desc("Enable optimizations on complex GEPs"),
             cl::init(false));

// FIXME: Unify control over GlobalMerge.
static cl::opt<cl::boolOrDefault>
EnableGlobalMerge("aarch64-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"));

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
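  // TheARM64Target covers the "arm64" spelling of the triple (historically the
  // Darwin name for this architecture); it reuses the little-endian target
  // machine.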
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering public interface.
//===----------------------------------------------------------------------===//
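// Pick the object-file lowering: Mach-O flavored for Mach-O triples, ELF for
// everything else.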
static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  if (TT.isOSBinFormatMachO())
    return make_unique<AArch64_MachoTargetObjectFile>();

  return make_unique<AArch64_ELFTargetObjectFile>();
}

// Helper function to build a DataLayout string
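// for the target. The components mean: "e"/"E" little-/big-endian, "m:o"/"m:e"
// Mach-O vs. ELF name mangling, "i64:64"/"i128:128" ABI alignments for i64 and
// i128, "n32:64" native integer widths of 32 and 64 bits, and "S128" a natural
// stack alignment of 128 bits.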
static std::string computeDataLayout(const Triple &TT, bool LittleEndian) {
  if (TT.isOSBinFormatMachO())
    return "e-m:o-i64:64-i128:128-n32:64-S128";
  if (LittleEndian)
    return "e-m:e-i64:64-i128:128-n32:64-S128";
  return "E-m:e-i64:64-i128:128-n32:64-S128";
}

/// TargetMachine ctor - Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, const Triple &TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL,
                                           bool LittleEndian)
    // This nested ternary is horrible, but DL needs to be properly
    // initialized before TLInfo is constructed.
    : LLVMTargetMachine(T, computeDataLayout(TT, LittleEndian), TT, CPU, FS,
                        Options, RM, CM, OL),
      TLOF(createTLOF(getTargetTriple())),
      isLittle(LittleEndian) {
  initAsmInfo();
}

AArch64TargetMachine::~AArch64TargetMachine() {}

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  Attribute CPUAttr = F.getFnAttribute("target-cpu");
  Attribute FSAttr = F.getFnAttribute("target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                        ? FSAttr.getValueAsString().str()
                        : TargetFS;

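  // Subtargets are cached on the concatenated CPU and feature strings, so
  // functions with identical "target-cpu"/"target-features" attributes share a
  // single AArch64Subtarget instance.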
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::AArch64leTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
    CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::AArch64beTargetMachine(
    const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
    const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM,
    CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
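    // When optimizing, run the post-RA MachineScheduler in place of the legacy
    // post-RA list scheduler.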
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addILPOpts() override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};
} // namespace

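// Expose the AArch64 cost model to IR-level passes (vectorizer, unroller,
// etc.) through the TargetTransformInfo analysis.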
TargetIRAnalysis AArch64TargetMachine::getTargetIRAnalysis() {
  return TargetIRAnalysis([this](const Function &F) {
    return TargetTransformInfo(AArch64TTIImpl(this, F));
  });
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether the operation succeeded. We can exploit existing
  // control-flow in ldrex/strex loops to simplify this, but it needs tidying
  // up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  TargetPassConfig::addIRPasses();

  // Match interleaved memory accesses to ldN/stN intrinsics.
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createInterleavedAccessPass(TM));

  if (TM->getOptLevel() == CodeGenOpt::Aggressive && EnableGEPOpt) {
    // Call the SeparateConstOffsetFromGEP pass to extract constants within
    // indices and lower a GEP with multiple indices to either arithmetic
    // operations or multiple GEPs with a single index.
    addPass(createSeparateConstOffsetFromGEPPass(TM, true));
    // Call the EarlyCSE pass to find and remove redundant subexpressions in
    // the lowered result.
    addPass(createEarlyCSEPass());
    // Do loop invariant code motion in case part of the lowered result is
    // invariant.
    addPass(createLICMPass());
  }
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  // FIXME: On AArch64, this depends on the type.
  // Basically, the addressable offsets are up to 4095 * Ty.getSizeInBytes(),
  // and the offset has to be a multiple of the related size in bytes.
  if ((TM->getOptLevel() != CodeGenOpt::None &&
       EnableGlobalMerge == cl::BOU_UNSET) ||
      EnableGlobalMerge == cl::BOU_TRUE) {
    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
                               (EnableGlobalMerge == cl::BOU_UNSET);
    addPass(createGlobalMergePass(TM, 4095, OnlyOptimizeForSize));
  }

  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getTargetTriple().isOSBinFormatELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}

bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

void AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
}

void AArch64PassConfig::addPostRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None && usingDefaultRegAlloc())
    // Improve performance for some FP/SIMD code for A57.
    addPass(createAArch64A57FPLoadBalancing());
}

void AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
}

void AArch64PassConfig::addPreEmitPass() {
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getTargetTriple().isOSBinFormatMachO())
    addPass(createAArch64CollectLOHPass());
}
