//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<unsigned> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option exists only to test whether strict FP operations are
// processed correctly, by preventing strict FP operations from being mutated
// into normal FP operations during development. Once the backends support
// strict FP operations, this option will be meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);

static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // Macos < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other darwin such as WatchOS/TvOS is new enough.
  return true;
}

// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way more often than this percentage
// is best represented as control flow. Therefore, the default value N should
// be set such that the win from N% correct executions is greater than the loss
// from (100 - N)% mispredicted executions for the majority of intended
// targets.
static cl::opt<int> MinPercentageForPredictableBranch(
    "min-predictable-branch", cl::init(99),
    cl::desc("Minimum percentage (0-100) that a condition must be either true "
             "or false to assume that the condition is predictable"),
    cl::Hidden);

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
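  // The HANDLE_LIBCALL expansion above assigns the default name to every
  // libcall listed in RuntimeLibcalls.def; it is equivalent to a long list of
  // calls such as (illustrative):
  //   setLibcallName(RTLIB::SHL_I16, "__ashlhi3");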
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.getArch() == Triple::ppc || TT.isPPC64()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS4CPU()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }
}

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
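/// For example, getFPEXT(MVT::f32, MVT::f64) returns FPEXT_F32_F64.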
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
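/// For example, getFPROUND(MVT::f64, MVT::f32) returns FPROUND_F64_F32.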
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
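/// For example, getFPTOSINT(MVT::f64, MVT::i32) returns FPTOSINT_F64_I32.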
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
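/// For example, getFPTOUINT(MVT::f32, MVT::i64) returns FPTOUINT_F32_I64.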
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
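/// For example, getSINTTOFP(MVT::i32, MVT::f64) returns SINTTOFP_I32_F64.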
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
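/// For example, getUINTTOFP(MVT::i64, MVT::f32) returns UINTTOFP_I64_F32.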
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

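/// Return the outline-atomics libcall for the given operation, memory
/// ordering, and type, or UNKNOWN_LIBCALL if there is none. For example
/// (derived from the size/ordering tables below),
/// getOUTLINE_ATOMIC(ISD::ATOMIC_SWAP, AtomicOrdering::Acquire, MVT::i32)
/// selects OUTLINE_ATOMIC_SWP4_ACQ.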
RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  unsigned ModeN, ModelN;
  switch (VT.SimpleTy) {
  case MVT::i8:
    ModeN = 0;
    break;
  case MVT::i16:
    ModeN = 1;
    break;
  case MVT::i32:
    ModeN = 2;
    break;
  case MVT::i64:
    ModeN = 3;
    break;
  case MVT::i128:
    ModeN = 4;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

#define LCALLS(A, B) \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A) \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return LC[ModeN][ModelN];
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return LC[ModeN][ModelN];
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}

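/// Return the __sync_* libcall for the given atomic operation and type, or
/// UNKNOWN_LIBCALL if there is none. For example,
/// getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) returns SYNC_FETCH_AND_ADD_4.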
RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
  case Name: \
    switch (VT.SimpleTy) { \
    default: \
      return UNKNOWN_LIBCALL; \
    case MVT::i8: \
      return Enum##_1; \
    case MVT::i16: \
      return Enum##_2; \
    case MVT::i32: \
      return Enum##_4; \
    case MVT::i64: \
      return Enum##_8; \
    case MVT::i128: \
      return Enum##_16; \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}

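/// Map an element size in bytes to the corresponding element-wise
/// unordered-atomic memcpy libcall; for example, an element size of 4 yields
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_4. The memmove and memset helpers below
/// follow the same pattern.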
RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  memset(CCs, ISD::SETCC_INVALID, sizeof(ISD::CondCode)*RTLIB::UNKNOWN_LIBCALL);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  // TODO: the default will be switched to 0 in the next commit, along
  // with the Target-specific changes necessary.
  MaxAtomicSizeInBitsSupported = 1024;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames),
            nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction(ISD::FGETSIGN, VT, Expand);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FMINNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMAXNUM_IEEE, VT, Expand);
    setOperationAction(ISD::FMINIMUM, VT, Expand);
    setOperationAction(ISD::FMAXIMUM, VT, Expand);
    setOperationAction(ISD::FMAD, VT, Expand);
    setOperationAction(ISD::SMIN, VT, Expand);
    setOperationAction(ISD::SMAX, VT, Expand);
    setOperationAction(ISD::UMIN, VT, Expand);
    setOperationAction(ISD::UMAX, VT, Expand);
    setOperationAction(ISD::ABS, VT, Expand);
    setOperationAction(ISD::FSHL, VT, Expand);
    setOperationAction(ISD::FSHR, VT, Expand);
    setOperationAction(ISD::SADDSAT, VT, Expand);
    setOperationAction(ISD::UADDSAT, VT, Expand);
    setOperationAction(ISD::SSUBSAT, VT, Expand);
    setOperationAction(ISD::USUBSAT, VT, Expand);
    setOperationAction(ISD::SSHLSAT, VT, Expand);
    setOperationAction(ISD::USHLSAT, VT, Expand);
    setOperationAction(ISD::SMULFIX, VT, Expand);
    setOperationAction(ISD::SMULFIXSAT, VT, Expand);
    setOperationAction(ISD::UMULFIX, VT, Expand);
    setOperationAction(ISD::UMULFIXSAT, VT, Expand);
    setOperationAction(ISD::SDIVFIX, VT, Expand);
    setOperationAction(ISD::SDIVFIXSAT, VT, Expand);
    setOperationAction(ISD::UDIVFIX, VT, Expand);
    setOperationAction(ISD::UDIVFIXSAT, VT, Expand);

    // Overflow operations default to expand
    setOperationAction(ISD::SADDO, VT, Expand);
    setOperationAction(ISD::SSUBO, VT, Expand);
    setOperationAction(ISD::UADDO, VT, Expand);
    setOperationAction(ISD::USUBO, VT, Expand);
    setOperationAction(ISD::SMULO, VT, Expand);
    setOperationAction(ISD::UMULO, VT, Expand);

    // ADDCARRY operations default to expand
    setOperationAction(ISD::ADDCARRY, VT, Expand);
    setOperationAction(ISD::SUBCARRY, VT, Expand);
    setOperationAction(ISD::SETCCCARRY, VT, Expand);
    setOperationAction(ISD::SADDO_CARRY, VT, Expand);
    setOperationAction(ISD::SSUBO_CARRY, VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);

    setOperationAction(ISD::BITREVERSE, VT, Expand);
    setOperationAction(ISD::PARITY, VT, Expand);

    // These library functions default to expand.
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);

    // These operations default to expand for vector types.
    if (VT.isVector()) {
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
      setOperationAction(ISD::SPLAT_VECTOR, VT, Expand);
    }

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reductions default to expand.
    setOperationAction(ISD::VECREDUCE_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_ADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Expand);
    setOperationAction(ISD::VECREDUCE_AND, VT, Expand);
    setOperationAction(ISD::VECREDUCE_OR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMAX, VT, Expand);
    setOperationAction(ISD::VECREDUCE_FMIN, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Expand);
    setOperationAction(ISD::VECREDUCE_SEQ_FMUL, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand.  Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP, MVT::f16, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
  setOperationAction(ISD::ConstantFP, MVT::f128, Expand);

  // These library functions default to expand.
  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
    setOperationAction(ISD::FCBRT, VT, Expand);
    setOperationAction(ISD::FLOG , VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP , VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FROUND, VT, Expand);
    setOperationAction(ISD::FROUNDEVEN, VT, Expand);
    setOperationAction(ISD::LROUND, VT, Expand);
    setOperationAction(ISD::LLROUND, VT, Expand);
    setOperationAction(ISD::LRINT, VT, Expand);
    setOperationAction(ISD::LLRINT, VT, Expand);
  }

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
}
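
// Targets refine these defaults in their own TargetLowering constructors; a
// minimal sketch (hypothetical target code, not part of this file):
//   setOperationAction(ISD::SMAX, MVT::i32, Legal);  // target has native smax
//   setOperationAction(ISD::FSHL, MVT::i64, Custom); // custom funnel shifts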

MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
                                               EVT) const {
  return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy, const DataLayout &DL,
                                         bool LegalTypes) const {
  assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
  if (LHSTy.isVector())
    return LHSTy;
  return LegalTypes ? getScalarShiftAmountTy(DL, LHSTy)
                    : getPointerTy(DL);
}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isFreeAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
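    // For example, i33 is rounded up to i64 and promoted, while i256 (already
    // a power of two) falls through and is expanded into two i128 halves.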
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    report_fatal_error("Cannot legalize this vector");

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger)
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64-bit elements on 32-bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type.  Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

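/// Helper for getVectorTypeBreakdown: break an illegal vector MVT into a
/// number of legal intermediate vectors and the register type used to hold
/// them. For example (illustrative), on a target whose widest legal vector
/// type is v4i32, v8i32 breaks down into two v4i32 intermediates.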
static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    llvm_unreachable(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end up with an EC that represents a scalar or a scalable scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  if (!isPowerOf2_32(LaneSizeInBits))
    LaneSizeInBits = NextPowerOf2(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT))    // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (!llvm::any_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, then the index of its
      // Def must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering.  This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG.  STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayCustomEvent(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_EVENT_CALL &&
         "Called emitXRayCustomEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
TargetLoweringBase::emitXRayTypedEvent(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  assert(MI.getOpcode() == TargetOpcode::PATCHABLE_TYPED_EVENT_CALL &&
         "Called emitXRayTypedEvent on the wrong MI!");
  auto &MF = *MI.getMF();
  auto MIB = BuildMI(MF, MI.getDebugLoc(), MI.getDesc());
  for (unsigned OpIdx = 0; OpIdx != MI.getNumOperands(); ++OpIdx)
    MIB.add(MI.getOperand(OpIdx));

  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI.eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
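  // For example, if i64 is the largest legal integer type, i128 is marked
  // TypeExpandInteger, transforms to i64, and needs twice as many registers
  // as i64 does.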
1307 for (unsigned ExpandedReg = LargestIntReg + 1;
1308 ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
1309 NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
1310 RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
1311 TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
1312 ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
1313 TypeExpandInteger);
1314 }
1315
1316 // Inspect all of the ValueType's smaller than the largest integer
1317 // register to see which ones need promotion.
1318 unsigned LegalIntReg = LargestIntReg;
1319 for (unsigned IntReg = LargestIntReg - 1;
1320 IntReg >= (unsigned)MVT::i1; --IntReg) {
1321 MVT IVT = (MVT::SimpleValueType)IntReg;
1322 if (isTypeLegal(IVT)) {
1323 LegalIntReg = IntReg;
1324 } else {
1325 RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
1326 (MVT::SimpleValueType)LegalIntReg;
1327 ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
1328 }
1329 }
1330
1331 // ppcf128 type is really two f64's.
1332 if (!isTypeLegal(MVT::ppcf128)) {
1333 if (isTypeLegal(MVT::f64)) {
1334 NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
1335 RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
1336 TransformToType[MVT::ppcf128] = MVT::f64;
1337 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
1338 } else {
1339 NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
1340 RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
1341 TransformToType[MVT::ppcf128] = MVT::i128;
1342 ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
1343 }
1344 }
1345
1346 // Decide how to handle f128. If the target does not have native f128 support,
1347 // expand it to i128 and we will be generating soft float library calls.
1348 if (!isTypeLegal(MVT::f128)) {
1349 NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
1350 RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
1351 TransformToType[MVT::f128] = MVT::i128;
1352 ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
1353 }
1354
1355 // Decide how to handle f64. If the target does not have native f64 support,
1356 // expand it to i64 and we will be generating soft float library calls.
1357 if (!isTypeLegal(MVT::f64)) {
1358 NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
1359 RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
1360 TransformToType[MVT::f64] = MVT::i64;
1361 ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
1362 }
1363
1364 // Decide how to handle f32. If the target does not have native f32 support,
1365 // expand it to i32 and we will be generating soft float library calls.
1366 if (!isTypeLegal(MVT::f32)) {
1367 NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
1368 RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
1369 TransformToType[MVT::f32] = MVT::i32;
1370 ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
1371 }
1372
1373 // Decide how to handle f16. If the target does not have native f16 support,
1374 // promote it to f32, because there are no f16 library calls (except for
1375 // conversions).
1376 if (!isTypeLegal(MVT::f16)) {
1377 // Allow targets to control how we legalize half.
1378 if (softPromoteHalfType()) {
1379 NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
1380 RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
1381 TransformToType[MVT::f16] = MVT::f32;
1382 ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
1383 } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
      TransformToType[MVT::f16] = MVT::f32;
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      LLVM_FALLTHROUGH;
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      LLVM_FALLTHROUGH;

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2.  The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class
  // for a group of value types. For example, on i386, the representative
  // class for i8, i16, and i32 would be GR32; on x86_64 it would be GR64.
  for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}

EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  assert(!VT.isVector() && "No default SetCC type for vectors!");
  return getPointerTy(DL).SimpleTy;
}

MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
  return MVT::i32; // return the default value
}

/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                                    EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (EltCnt.getKnownMinValue() != 1 &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalization of
  // these types as is done elsewhere in SelectionDAG.
  if (VT.isScalableVector() && !isPowerOf2_32(EltCnt.getKnownMinValue())) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    NumIntermediates = VT.getVectorElementCount().getKnownMinValue() /
                       PartVT.getVectorElementCount().getKnownMinValue();

    // FIXME: This code needs to be extended to handle more complex vector
    // breakdowns, like nxv7i64 -> nxv8i64 -> 4 x nxv2i64. Currently the only
    // supported cases are vectors that are broken down into equal parts
    // such as nxv6i64 -> 3 x nxv2i64.
    assert((PartVT.getVectorElementCount() * NumIntermediates) ==
               VT.getVectorElementCount() &&
           "Expected an integer multiple of PartVT");
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!isPowerOf2_32(NewVTSize.getKnownMinSize()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs * (NewVTSize / DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
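
// Illustrative sketch (not from the original source): on a target whose
// widest legal vector type is v4f32, breaking down MVT::v8f32 would give
//
//   EVT IntermediateVT;
//   MVT RegisterVT;
//   unsigned NumIntermediates;
//   unsigned NumRegs = TLI.getVectorTypeBreakdown(
//       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
//   // NumRegs == 2, NumIntermediates == 2,
//   // IntermediateVT == RegisterVT == MVT::v4f32.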

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but not
  // the minimum size. It would be nice if the minimum size check were also
  // folded into this function. Currently, it is performed in findJumpTable()
  // in SelectionDAGBuilder and getEstimatedNumberOfCaseClusters() in
  // BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
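
// Worked example (illustrative): a switch with NumCases = 40 spread over a
// case range of 100 has a density of 40%. With a minimum density threshold
// of, say, 10%, the check above evaluates as 4000 >= 1000, so the switch
// qualifies for a jump table as long as Range does not exceed the maximum
// table size (or the function is optimized for size, in which case the size
// limit is not applied).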

/// Get the EVTs and ArgFlags collections that represent the legalized return
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
                         AttributeList attr,
                         SmallVectorImpl<ISD::OutputArg> &Outs,
                         const TargetLowering &TLI, const DataLayout &DL) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0) return;

  for (unsigned j = 0, f = NumValues; j != f; ++j) {
    EVT VT = ValueVTs[j];
    ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      ExtendKind = ISD::SIGN_EXTEND;
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      ExtendKind = ISD::ZERO_EXTEND;

    // FIXME: C calling convention requires the return type to be promoted to
    // at least 32-bit. But this is not necessary for non-C calling
    // conventions. The frontend should mark functions whose return values
    // require promoting with signext or zeroext attributes.
    if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) {
      MVT MinVT = TLI.getRegisterType(ReturnType->getContext(), MVT::i32);
      if (VT.bitsLT(MinVT))
        VT = MinVT;
    }

    unsigned NumParts =
        TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
    MVT PartVT =
        TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);

    // 'inreg' on function refers to return value
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
      Flags.setInReg();

    // Propagate extension type if any
    if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
      Flags.setSExt();
    else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
      Flags.setZExt();

    for (unsigned i = 0; i < NumParts; ++i)
      Outs.push_back(ISD::OutputArg(Flags, PartVT, VT, /*isfixed=*/true, 0, 0));
  }
}
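
// Illustrative: for a "signext i8" return value under the C calling
// convention on a typical 32-bit target, VT is promoted to MVT::i32
// (MinVT), Flags.setSExt() is applied, and a single ISD::OutputArg part of
// MVT::i32 is pushed into Outs.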

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
                                                   const DataLayout &DL) const {
  return DL.getABITypeAlign(Ty).value();
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Check if the specified alignment is sufficient based on the data layout.
  // TODO: While using the data layout works in practice, a better solution
  // would be to implement this check directly (make this a virtual function).
  // For example, the ABI alignment may change based on software platform while
  // this function should only be affected by hardware implementation.
  Type *Ty = VT.getTypeForEVT(Context);
  if (Alignment >= DL.getABITypeAlign(Ty)) {
    // Assume that an access that meets the ABI-specified alignment is fast.
    if (Fast != nullptr)
      *Fast = true;
    return true;
  }

  // This is a misaligned access.
  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment.value(), Flags,
                                        Fast);
}

bool TargetLoweringBase::allowsMemoryAccessForAlignment(
    LLVMContext &Context, const DataLayout &DL, EVT VT,
    const MachineMemOperand &MMO, bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
                                        MMO.getAlign(), MMO.getFlags(), Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            unsigned AddrSpace, Align Alignment,
                                            MachineMemOperand::Flags Flags,
                                            bool *Fast) const {
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}

bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
                            MMO.getFlags(), Fast);
}

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
  return BranchProbability(MinPercentageForPredictableBranch, 100);
}

//===----------------------------------------------------------------------===//
//  TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//

int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
  enum InstructionOpcodes {
#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
#include "llvm/IR/Instruction.def"
  };
  switch (static_cast<InstructionOpcodes>(Opcode)) {
  case Ret:            return 0;
  case Br:             return 0;
  case Switch:         return 0;
  case IndirectBr:     return 0;
  case Invoke:         return 0;
  case CallBr:         return 0;
  case Resume:         return 0;
  case Unreachable:    return 0;
  case CleanupRet:     return 0;
  case CatchRet:       return 0;
  case CatchPad:       return 0;
  case CatchSwitch:    return 0;
  case CleanupPad:     return 0;
  case FNeg:           return ISD::FNEG;
  case Add:            return ISD::ADD;
  case FAdd:           return ISD::FADD;
  case Sub:            return ISD::SUB;
  case FSub:           return ISD::FSUB;
  case Mul:            return ISD::MUL;
  case FMul:           return ISD::FMUL;
  case UDiv:           return ISD::UDIV;
  case SDiv:           return ISD::SDIV;
  case FDiv:           return ISD::FDIV;
  case URem:           return ISD::UREM;
  case SRem:           return ISD::SREM;
  case FRem:           return ISD::FREM;
  case Shl:            return ISD::SHL;
  case LShr:           return ISD::SRL;
  case AShr:           return ISD::SRA;
  case And:            return ISD::AND;
  case Or:             return ISD::OR;
  case Xor:            return ISD::XOR;
  case Alloca:         return 0;
  case Load:           return ISD::LOAD;
  case Store:          return ISD::STORE;
  case GetElementPtr:  return 0;
  case Fence:          return 0;
  case AtomicCmpXchg:  return 0;
  case AtomicRMW:      return 0;
  case Trunc:          return ISD::TRUNCATE;
  case ZExt:           return ISD::ZERO_EXTEND;
  case SExt:           return ISD::SIGN_EXTEND;
  case FPToUI:         return ISD::FP_TO_UINT;
  case FPToSI:         return ISD::FP_TO_SINT;
  case UIToFP:         return ISD::UINT_TO_FP;
  case SIToFP:         return ISD::SINT_TO_FP;
  case FPTrunc:        return ISD::FP_ROUND;
  case FPExt:          return ISD::FP_EXTEND;
  case PtrToInt:       return ISD::BITCAST;
  case IntToPtr:       return ISD::BITCAST;
  case BitCast:        return ISD::BITCAST;
  case AddrSpaceCast:  return ISD::ADDRSPACECAST;
  case ICmp:           return ISD::SETCC;
  case FCmp:           return ISD::SETCC;
  case PHI:            return 0;
  case Call:           return 0;
  case Select:         return ISD::SELECT;
  case UserOp1:        return 0;
  case UserOp2:        return 0;
  case VAArg:          return 0;
  case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
  case InsertElement:  return ISD::INSERT_VECTOR_ELT;
  case ShuffleVector:  return ISD::VECTOR_SHUFFLE;
  case ExtractValue:   return ISD::MERGE_VALUES;
  case InsertValue:    return ISD::MERGE_VALUES;
  case LandingPad:     return 0;
  case Freeze:         return ISD::FREEZE;
  }

  llvm_unreachable("Unknown instruction type encountered!");
}

std::pair<int, MVT>
TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
                                            Type *Ty) const {
  LLVMContext &C = Ty->getContext();
  EVT MTy = getValueType(DL, Ty);

  int Cost = 1;
  // We keep legalizing the type until we find a legal kind. We assume that
  // the only operation that costs anything is the split. After splitting
  // we need to handle two types.
  while (true) {
    LegalizeKind LK = getTypeConversion(C, MTy);

    if (LK.first == TypeLegal)
      return std::make_pair(Cost, MTy.getSimpleVT());

    if (LK.first == TypeSplitVector || LK.first == TypeExpandInteger)
      Cost *= 2;

    // Do not loop with f128 type.
    if (MTy == LK.second)
      return std::make_pair(Cost, MTy.getSimpleVT());

    // Keep legalizing the type.
    MTy = LK.second;
  }
}
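
// Illustrative sketch (not from the original source): on a 64-bit target,
// i128 takes one TypeExpandInteger step down to i64, so
//
//   std::pair<int, MVT> C =
//       TLI.getTypeLegalizationCost(DL, IntegerType::get(Ctx, 128));
//   // C.first == 2 (the one split doubles the cost), C.second == MVT::i64.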

Value *TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilder<> &IRB,
                                                              bool UseTLS) const {
  // compiler-rt provides a variable with a magic name. Targets that do not
  // link with compiler-rt may also provide such a variable.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
  auto UnsafeStackPtr =
      dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));

  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());

  if (!UnsafeStackPtr) {
    auto TLSModel = UseTLS ?
        GlobalValue::InitialExecTLSModel :
        GlobalValue::NotThreadLocal;
    // The global variable is not defined yet, define it ourselves.
    // We use the initial-exec TLS model because we do not support the
    // variable living anywhere other than in the main executable.
    UnsafeStackPtr = new GlobalVariable(
        *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
        UnsafeStackPtrVar, nullptr, TLSModel);
  } else {
    // The variable exists, check its type and attributes.
    if (UnsafeStackPtr->getValueType() != StackPtrTy)
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
    if (UseTLS != UnsafeStackPtr->isThreadLocal())
      report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
                         (UseTLS ? "" : "not ") + "be thread-local");
  }
  return UnsafeStackPtr;
}

Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
  if (!TM.getTargetTriple().isAndroid())
    return getDefaultSafeStackPointerLocation(IRB, true);

  // Android provides a libc function to retrieve the address of the current
  // thread's unsafe stack pointer.
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
  FunctionCallee Fn = M->getOrInsertFunction("__safestack_pointer_address",
                                             StackPtrTy->getPointerTo(0));
  return IRB.CreateCall(Fn);
}

//===----------------------------------------------------------------------===//
//  Loop Strength Reduction hooks
//===----------------------------------------------------------------------===//

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
                                               const AddrMode &AM, Type *Ty,
                                               unsigned AS, Instruction *I) const {
  // The default implementation supports a conservative RISC-style addressing
  // mode: r+r and r+i.

  // Allows a sign-extended 16-bit immediate field.
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
    return false;

  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Only support r+r,
  switch (AM.Scale) {
  case 0:  // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
      return false;
    // Otherwise we have r+r or r+i.
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default: // Don't allow n * r
    return false;
  }

  return true;
}
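
// Examples under the default rules above (illustrative):
//   base + 12        -> legal   (r+i with a small immediate)
//   base + index     -> legal   (Scale == 1, no offset)
//   base + index + 4 -> illegal (r+r+i is rejected)
//   4*index + 8      -> illegal (only scales of 0, 1, or a lone 2*r are
//                                accepted)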

//===----------------------------------------------------------------------===//
//  Stack Protector
//===----------------------------------------------------------------------===//

// For OpenBSD return its special guard variable. Otherwise return nullptr,
// so that SelectionDAG handles SSP.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
    if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
      G->setVisibility(GlobalValue::HiddenVisibility);
    return C;
  }
  return nullptr;
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  if (!M.getNamedValue("__stack_chk_guard")) {
    auto *GV = new GlobalVariable(M, Type::getInt8PtrTy(M.getContext()), false,
                                  GlobalVariable::ExternalLinkage, nullptr,
                                  "__stack_chk_guard");
    if (TM.getRelocationModel() == Reloc::Static &&
        !TM.getTargetTriple().isWindowsGNUEnvironment())
      GV->setDSOLocal(true);
  }
}

// Currently only support "standard" __stack_chk_guard.
// TODO: add LOAD_STACK_GUARD support.
Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
  return M.getNamedValue("__stack_chk_guard");
}

Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
  return nullptr;
}

unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
  return MinimumJumpTableEntries;
}

void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
  MinimumJumpTableEntries = Val;
}

unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
  return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
}

unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
  return MaximumJumpTableSize;
}

void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
  MaximumJumpTableSize = Val;
}

bool TargetLoweringBase::isJumpTableRelative() const {
  return getTargetMachine().isPositionIndependent();
}

//===----------------------------------------------------------------------===//
//  Reciprocal Estimates
//===----------------------------------------------------------------------===//

/// Get the reciprocal estimate attribute string for a function that will
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
  const Function &F = MF.getFunction();
  return F.getFnAttribute("reciprocal-estimates").getValueAsString();
}

/// Construct a string for the given reciprocal operation of the given type.
/// This string should match the corresponding option to the front-end's
/// "-mrecip" flag assuming those strings have been passed through in an
/// attribute string. For example, "vec-divf" for a division of a vXf32.
static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
  std::string Name = VT.isVector() ? "vec-" : "";

  Name += IsSqrt ? "sqrt" : "div";

  // TODO: Handle "half" or other float types?
  if (VT.getScalarType() == MVT::f64) {
    Name += "d";
  } else {
    assert(VT.getScalarType() == MVT::f32 &&
           "Unexpected FP type for reciprocal estimate");
    Name += "f";
  }

  return Name;
}
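
// For example (illustrative): getReciprocalOpName(true, MVT::v4f32) produces
// "vec-sqrtf", and getReciprocalOpName(false, MVT::f64) produces "divd".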

/// Return true if the input string contains a customized refinement step
/// count, setting the character position of the ':' separator and the value
/// (a single numeric character) of the step count. Return false if there is
/// no customized refinement step count.
static bool parseRefinementStep(StringRef In, size_t &Position,
                                uint8_t &Value) {
  const char RefStepToken = ':';
  Position = In.find(RefStepToken);
  if (Position == StringRef::npos)
    return false;

  StringRef RefStepString = In.substr(Position + 1);
  // Allow exactly one numeric character for the additional refinement
  // step parameter.
  if (RefStepString.size() == 1) {
    char RefStepChar = RefStepString[0];
    if (RefStepChar >= '0' && RefStepChar <= '9') {
      Value = RefStepChar - '0';
      return true;
    }
  }
  report_fatal_error("Invalid refinement step for -recip.");
}
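
// For example (illustrative): parseRefinementStep("div:2", Pos, Val) returns
// true with Pos == 3 and Val == 2, while parseRefinementStep("div", Pos, Val)
// returns false because there is no ':' token in the input.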

/// For the input attribute string, return one of the ReciprocalEstimate enum
/// status values (enabled, disabled, or not specified) for this operation on
/// the specified data type.
static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "none", or "default" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(Override, RefPos, RefSteps)) {
      // Split the string for further processing.
      Override = Override.substr(0, RefPos);
    }

    // All reciprocal types are enabled.
    if (Override == "all")
      return TargetLoweringBase::ReciprocalEstimate::Enabled;

    // All reciprocal types are disabled.
    if (Override == "none")
      return TargetLoweringBase::ReciprocalEstimate::Disabled;

    // Target defaults for enablement are used.
    if (Override == "default")
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();
  static const char DisabledPrefix = '!';

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (parseRefinementStep(RecipType, RefPos, RefSteps))
      RecipType = RecipType.substr(0, RefPos);

    // Ignore the disablement token for string matching.
    bool IsDisabled = RecipType[0] == DisabledPrefix;
    if (IsDisabled)
      RecipType = RecipType.substr(1);

    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
                        : TargetLoweringBase::ReciprocalEstimate::Enabled;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
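
// Illustrative evaluation (not from the original source): given the
// attribute string "!sqrtf,vec-divf", getOpEnabled(true, MVT::f32, ...)
// returns Disabled (the "!sqrtf" entry matches with the disablement prefix),
// while getOpEnabled(false, MVT::v4f32, ...) returns Enabled (the "vec-divf"
// entry matches).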

/// For the input attribute string, return the customized refinement step count
/// for this operation on the specified data type. If the step count does not
/// exist, return the ReciprocalEstimate enum value for unspecified.
static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
  if (Override.empty())
    return TargetLoweringBase::ReciprocalEstimate::Unspecified;

  SmallVector<StringRef, 4> OverrideVector;
  Override.split(OverrideVector, ',');
  unsigned NumArgs = OverrideVector.size();

  // Check if "all", "default", or "none" was specified.
  if (NumArgs == 1) {
    // Look for an optional setting of the number of refinement steps needed
    // for this type of reciprocal operation.
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(Override, RefPos, RefSteps))
      return TargetLoweringBase::ReciprocalEstimate::Unspecified;

    // Split the string for further processing.
    Override = Override.substr(0, RefPos);
    assert(Override != "none" &&
           "Disabled reciprocals, but specified refinement steps?");

    // If this is a general override, return the specified number of steps.
    if (Override == "all" || Override == "default")
      return RefSteps;
  }

  // The attribute string may omit the size suffix ('f'/'d').
  std::string VTName = getReciprocalOpName(IsSqrt, VT);
  std::string VTNameNoSize = VTName;
  VTNameNoSize.pop_back();

  for (StringRef RecipType : OverrideVector) {
    size_t RefPos;
    uint8_t RefSteps;
    if (!parseRefinementStep(RecipType, RefPos, RefSteps))
      continue;

    RecipType = RecipType.substr(0, RefPos);
    if (RecipType.equals(VTName) || RecipType.equals(VTNameNoSize))
      return RefSteps;
  }

  return TargetLoweringBase::ReciprocalEstimate::Unspecified;
}
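
// Illustrative: with the attribute string "sqrtf:3,vec-divd:1",
// getOpRefinementSteps(true, MVT::f32, ...) returns 3 and
// getOpRefinementSteps(false, MVT::v2f64, ...) returns 1; an entry without
// a ':' step count yields ReciprocalEstimate::Unspecified.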

int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
                                                    MachineFunction &MF) const {
  return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
                                                   MachineFunction &MF) const {
  return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
                                               MachineFunction &MF) const {
  return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
}

int TargetLoweringBase::getDivRefinementSteps(EVT VT,
                                              MachineFunction &MF) const {
  return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}

void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
  MF.getRegInfo().freezeReservedRegs(MF);
}

MachineMemOperand::Flags
TargetLoweringBase::getLoadMemOperandFlags(const LoadInst &LI,
                                           const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
  if (LI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (LI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  if (LI.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;

  if (isDereferenceablePointer(LI.getPointerOperand(), LI.getType(), DL))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= getTargetMMOFlags(LI);
  return Flags;
}
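
// For example (illustrative): a volatile load tagged with !nontemporal
// metadata yields MOLoad | MOVolatile | MONonTemporal, plus whatever
// target-specific flags getTargetMMOFlags() contributes.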

MachineMemOperand::Flags
TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
                                            const DataLayout &DL) const {
  MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;

  if (SI.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;

  if (SI.hasMetadata(LLVMContext::MD_nontemporal))
    Flags |= MachineMemOperand::MONonTemporal;

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(SI);
  return Flags;
}

MachineMemOperand::Flags
TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
                                             const DataLayout &DL) const {
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
    if (RMW->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
    if (CmpX->isVolatile())
      Flags |= MachineMemOperand::MOVolatile;
  } else
    llvm_unreachable("not an atomic instruction");

  // FIXME: Not preserving dereferenceable
  Flags |= getTargetMMOFlags(AI);
  return Flags;
}

//===----------------------------------------------------------------------===//
//  GlobalISel Hooks
//===----------------------------------------------------------------------===//

bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
                                        const TargetTransformInfo *TTI) const {
  auto &MF = *MI.getMF();
  auto &MRI = MF.getRegInfo();
  // Assuming a spill and reload of a value has a cost of 1 instruction each,
  // this helper function computes the maximum number of uses we should
  // consider for remat. E.g. on arm64 global addresses take 2 insts to
  // materialize. We break even in terms of code size when the original MI
  // has 2 users vs choosing to potentially spill. Any more than 2 users and
  // we have a net code size increase. This doesn't take register pressure
  // into account.
  auto maxUses = [](unsigned RematCost) {
    // A cost of 1 means remats are basically free.
    if (RematCost == 1)
      return UINT_MAX;
    if (RematCost == 2)
      return 2U;

    // Remat is too expensive, only sink if there's one user.
    if (RematCost > 2)
      return 1U;
    llvm_unreachable("Unexpected remat cost");
  };

  // Helper to walk through uses and terminate if we've reached a limit. Saves
  // us spending time traversing uses if all we want to know is if it's >= min.
  auto isUsesAtMost = [&](unsigned Reg, unsigned MaxUses) {
    unsigned NumUses = 0;
    auto UI = MRI.use_instr_nodbg_begin(Reg), UE = MRI.use_instr_nodbg_end();
    for (; UI != UE && NumUses < MaxUses; ++UI) {
      NumUses++;
    }
    // If we haven't reached the end yet then there are more than MaxUses users.
    return UI == UE;
  };

  switch (MI.getOpcode()) {
  default:
    return false;
  // Constants-like instructions should be close to their users.
  // We don't want long live-ranges for them.
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_INTTOPTR:
    return true;
  case TargetOpcode::G_GLOBAL_VALUE: {
    unsigned RematCost = TTI->getGISelRematGlobalCost();
    Register Reg = MI.getOperand(0).getReg();
    unsigned MaxUses = maxUses(RematCost);
    if (MaxUses == UINT_MAX)
      return true; // Remats are "free" so always localize.
    bool B = isUsesAtMost(Reg, MaxUses);
    return B;
  }
  }
}
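
// Illustrative decision table for G_GLOBAL_VALUE (a sketch of the heuristic
// above): with getGISelRematGlobalCost() == 1, the value is always localized;
// with a cost of 2, it is localized when it has at most two non-debug users;
// with a cost above 2, only single-user values qualify.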