//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
  "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

static cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

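// Configure the legalize actions for a NEON vector type VT. Loads, stores
// and the bitwise operations are promoted to the equivalent-width types
// PromotedLdStVT and PromotedBitwiseVT, and the remaining operations are
// marked Custom or Expand as appropriate for NEON.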
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR,      VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE,    VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS,    VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT,            VT, Expand);
  setOperationAction(ISD::SELECT_CC,         VT, Expand);
  setOperationAction(ISD::VSELECT,           VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR,  VT, Promote);
    AddPromotedToType (ISD::OR,  VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

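// Make VT legal in the 64-bit NEON D registers, promoting its loads/stores
// to f64 and its bitwise operations to v2i32.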
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

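// Make VT legal in pairs of D registers (the 128-bit Q registers), promoting
// its loads/stores to v2f64 and its bitwise operations to v4i32.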
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

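// Mark every operation on VT as Expand, then re-mark the handful of
// operations (bitcast, load, store, undef) that remain trivially legal for
// any type that fits in a register.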
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

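// Apply Action to all three extending-load flavours (any-, zero- and
// sign-extend) that load a To from memory and produce a From.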
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD,  From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

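// Configure the legalize actions for the MVE vector types. All of them live
// in the 128-bit MQPR register class; for the f16/f32 vectors, what is legal
// beyond bitcast/load/store additionally depends on whether the MVE.fp
// extension is present (HasMVEFP).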
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only FP data processing on the FP
  // vector types is inhibited when MVE is integer-only.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }
  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v8i8 to v8i16, and from v4i8 or v4i16 to
  // v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8,  Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8,  Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  // Predicate types
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }
}

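// The constructor does all of the per-subtarget lowering setup: libcall
// names and calling conventions, the register class for each legal value
// type, and the legalize action for each (opcode, type) pair, before asking
// TargetLowering to compute the derived register properties.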
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp",    ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp",    ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp",    ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp",    ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp",    ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp",    ISD::SETNE },
        { RTLIB::UO_F32,  "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp",    ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp",    ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp",    ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp",    ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp",    ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp",    ISD::SETNE },
        { RTLIB::UO_F64,  "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp",    ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp",    ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp",  ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64,   "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp",    ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp",    ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

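      // Each comparison libcall above returns an int that is tested against
      // zero with the CondCode recorded here; e.g. __eqsf2vfp returns nonzero
      // when its operands compare equal, hence OEQ_F32 maps to SETNE.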
      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available on 32-bit targets.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  // RTLIB
  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64,  "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32,  "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64,   "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8,  "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv",     CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8,  "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv",    CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI-dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY,  "__aeabi_memcpy",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET,  "__aeabi_memset",  CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchOS platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create a unittest; in other words, find a way to make "copysign"
    // appear in the DAG with vector operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // Likewise for v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have
    // a FP_TO_[SU]INT instruction with a narrower destination than source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND,   MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND,  MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits.  However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP,      MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP,      MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ,       MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ,       MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from small integer vector types, e.g. from v4i8
    // to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions
    // which are present. However, no double-precision operations other than
    // moves, loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD,       MVT::f64, Expand);
    setOperationAction(ISD::FSUB,       MVT::f64, Expand);
    setOperationAction(ISD::FMUL,       MVT::f64, Expand);
    setOperationAction(ISD::FMA,        MVT::f64, Expand);
    setOperationAction(ISD::FDIV,       MVT::f64, Expand);
    setOperationAction(ISD::FREM,       MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN,  MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN,   MVT::f64, Expand);
    setOperationAction(ISD::FNEG,       MVT::f64, Expand);
    setOperationAction(ISD::FABS,       MVT::f64, Expand);
    setOperationAction(ISD::FSQRT,      MVT::f64, Expand);
    setOperationAction(ISD::FSIN,       MVT::f64, Expand);
    setOperationAction(ISD::FCOS,       MVT::f64, Expand);
    setOperationAction(ISD::FPOW,       MVT::f64, Expand);
    setOperationAction(ISD::FLOG,       MVT::f64, Expand);
    setOperationAction(ISD::FLOG2,      MVT::f64, Expand);
    setOperationAction(ISD::FLOG10,     MVT::f64, Expand);
    setOperationAction(ISD::FEXP,       MVT::f64, Expand);
    setOperationAction(ISD::FEXP2,      MVT::f64, Expand);
    setOperationAction(ISD::FCEIL,      MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC,     MVT::f64, Expand);
    setOperationAction(ISD::FRINT,      MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR,     MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND,   MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND,   MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND,  MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND,  MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND,  MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im,  MVT::i1,  Legal);
      setIndexedLoadAction(im,  MVT::i8,  Legal);
      setIndexedLoadAction(im,  MVT::i16, Legal);
      setIndexedLoadAction(im,  MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1,  Legal);
      setIndexedStoreAction(im, MVT::i8,  Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32,  Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32,  Legal);
  }

  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL,     MVT::i64, Expand);
  setOperationAction(ISD::MULHU,   MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL,       MVT::i64, Custom);
  setOperationAction(ISD::SRA,       MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);

  // MVE lowers 64-bit shifts to lsll and lsrl, assuming that ISD::SRL and
  // ISD::SRA of i64 are already marked custom.
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);

  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ,  MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the CPU doesn't have a HW divider.
1120     setOperationAction(ISD::SDIV,  MVT::i32, LibCall);
1121     setOperationAction(ISD::UDIV,  MVT::i32, LibCall);
1122   }
1123 
1124   if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
1125     setOperationAction(ISD::SDIV, MVT::i32, Custom);
1126     setOperationAction(ISD::UDIV, MVT::i32, Custom);
1127 
1128     setOperationAction(ISD::SDIV, MVT::i64, Custom);
1129     setOperationAction(ISD::UDIV, MVT::i64, Custom);
1130   }
1131 
1132   setOperationAction(ISD::SREM,  MVT::i32, Expand);
1133   setOperationAction(ISD::UREM,  MVT::i32, Expand);
1134 
1135   // Register based DivRem for AEABI (RTABI 4.2)
1136   if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
1137       Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
1138       Subtarget->isTargetWindows()) {
1139     setOperationAction(ISD::SREM, MVT::i64, Custom);
1140     setOperationAction(ISD::UREM, MVT::i64, Custom);
1141     HasStandaloneRem = false;
1142 
1143     if (Subtarget->isTargetWindows()) {
1144       const struct {
1145         const RTLIB::Libcall Op;
1146         const char * const Name;
1147         const CallingConv::ID CC;
1148       } LibraryCalls[] = {
1149         { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
1150         { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
1151         { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
1152         { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
1153 
1154         { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
1155         { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
1156         { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
1157         { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
1158       };
1159 
1160       for (const auto &LC : LibraryCalls) {
1161         setLibcallName(LC.Op, LC.Name);
1162         setLibcallCallingConv(LC.Op, LC.CC);
1163       }
1164     } else {
1165       const struct {
1166         const RTLIB::Libcall Op;
1167         const char * const Name;
1168         const CallingConv::ID CC;
1169       } LibraryCalls[] = {
1170         { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1171         { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1172         { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1173         { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1174 
1175         { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1176         { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1177         { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1178         { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1179       };
1180 
1181       for (const auto &LC : LibraryCalls) {
1182         setLibcallName(LC.Op, LC.Name);
1183         setLibcallCallingConv(LC.Op, LC.CC);
1184       }
1185     }
1186 
1187     setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1188     setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1189     setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1190     setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
1191   } else {
1192     setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1193     setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1194   }
1195 
1196   if (Subtarget->getTargetTriple().isOSMSVCRT()) {
1197     // MSVCRT doesn't have powi; fall back to pow
1198     setLibcallName(RTLIB::POWI_F32, nullptr);
1199     setLibcallName(RTLIB::POWI_F64, nullptr);
1200   }
1201 
1202   setOperationAction(ISD::GlobalAddress, MVT::i32,   Custom);
1203   setOperationAction(ISD::ConstantPool,  MVT::i32,   Custom);
1204   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1205   setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1206 
1207   setOperationAction(ISD::TRAP, MVT::Other, Legal);
1208   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
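       // Both are Legal here: TRAP selects to a permanently-undefined encoding
       // and DEBUGTRAP to BKPT, so neither needs expansion.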
1209 
1210   // Use the default implementation.
1211   setOperationAction(ISD::VASTART,            MVT::Other, Custom);
1212   setOperationAction(ISD::VAARG,              MVT::Other, Expand);
1213   setOperationAction(ISD::VACOPY,             MVT::Other, Expand);
1214   setOperationAction(ISD::VAEND,              MVT::Other, Expand);
1215   setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
1216   setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);
1217 
1218   if (Subtarget->isTargetWindows())
1219     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1220   else
1221     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1222 
1223   // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1224   // the default expansion.
1225   InsertFencesForAtomic = false;
1226   if (Subtarget->hasAnyDataBarrier() &&
1227       (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1228     // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1229     // to ldrex/strex loops already.
1230     setOperationAction(ISD::ATOMIC_FENCE,     MVT::Other, Custom);
1231     if (!Subtarget->isThumb() || !Subtarget->isMClass())
1232       setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i64, Custom);
1233 
1234     // On v8, we have particularly efficient implementations of atomic fences
1235     // if they can be combined with nearby atomic loads and stores.
1236     if (!Subtarget->hasAcquireRelease() ||
1237         getTargetMachine().getOptLevel() == 0) {
1238       // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1239       InsertFencesForAtomic = true;
1240     }
1241   } else {
1242     // If there's anything we can use as a barrier, go through custom lowering
1243     // for ATOMIC_FENCE.
1244     // If target has DMB in thumb, Fences can be inserted.
1245     if (Subtarget->hasDataBarrier())
1246       InsertFencesForAtomic = true;
1247 
1248     setOperationAction(ISD::ATOMIC_FENCE,   MVT::Other,
1249                        Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1250 
1251     // Set them all for expansion, which will force libcalls.
1252     setOperationAction(ISD::ATOMIC_CMP_SWAP,  MVT::i32, Expand);
1253     setOperationAction(ISD::ATOMIC_SWAP,      MVT::i32, Expand);
1254     setOperationAction(ISD::ATOMIC_LOAD_ADD,  MVT::i32, Expand);
1255     setOperationAction(ISD::ATOMIC_LOAD_SUB,  MVT::i32, Expand);
1256     setOperationAction(ISD::ATOMIC_LOAD_AND,  MVT::i32, Expand);
1257     setOperationAction(ISD::ATOMIC_LOAD_OR,   MVT::i32, Expand);
1258     setOperationAction(ISD::ATOMIC_LOAD_XOR,  MVT::i32, Expand);
1259     setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
1260     setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
1261     setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
1262     setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
1263     setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
1264     // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1265     // Unordered/Monotonic case.
1266     if (!InsertFencesForAtomic) {
1267       setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1268       setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1269     }
1270   }
1271 
1272   setOperationAction(ISD::PREFETCH,         MVT::Other, Custom);
1273 
1274   // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1275   if (!Subtarget->hasV6Ops()) {
1276     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1277     setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8,  Expand);
1278   }
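       // The i1 case is always expanded, typically into a shift-left /
       // arithmetic-shift-right pair.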
1279   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1280 
1281   if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
1282       !Subtarget->isThumb1Only()) {
1283     // Turn f64->i64 into VMOVRRD and i64->f64 into VMOVDRR
1284     // iff the target supports VFP2.
1285     setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1286     setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1287   }
1288 
1289   // We want to custom lower some of our intrinsics.
1290   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1291   setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1292   setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1293   setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1294   if (Subtarget->useSjLjEH())
1295     setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
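         // With SjLj exception handling, unwinding resumes through
         // _Unwind_SjLj_Resume rather than the default _Unwind_Resume.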
1296 
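       // ARM has no instruction that materializes a setcc result directly, so
       // SETCC is expanded; selects are custom-lowered to conditional moves
       // (ARMISD::CMOV).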
1297   setOperationAction(ISD::SETCC,     MVT::i32, Expand);
1298   setOperationAction(ISD::SETCC,     MVT::f32, Expand);
1299   setOperationAction(ISD::SETCC,     MVT::f64, Expand);
1300   setOperationAction(ISD::SELECT,    MVT::i32, Custom);
1301   setOperationAction(ISD::SELECT,    MVT::f32, Custom);
1302   setOperationAction(ISD::SELECT,    MVT::f64, Custom);
1303   setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1304   setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1305   setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1306   if (Subtarget->hasFullFP16()) {
1307     setOperationAction(ISD::SETCC,     MVT::f16, Expand);
1308     setOperationAction(ISD::SELECT,    MVT::f16, Custom);
1309     setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
1310   }
1311 
1312   setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);
1313 
1314   setOperationAction(ISD::BRCOND,    MVT::Other, Custom);
1315   setOperationAction(ISD::BR_CC,     MVT::i32,   Custom);
1316   if (Subtarget->hasFullFP16())
1317       setOperationAction(ISD::BR_CC, MVT::f16,   Custom);
1318   setOperationAction(ISD::BR_CC,     MVT::f32,   Custom);
1319   setOperationAction(ISD::BR_CC,     MVT::f64,   Custom);
1320   setOperationAction(ISD::BR_JT,     MVT::Other, Custom);
1321 
1322   // We don't support sin/cos/fmod/copysign/pow
1323   setOperationAction(ISD::FSIN,      MVT::f64, Expand);
1324   setOperationAction(ISD::FSIN,      MVT::f32, Expand);
1325   setOperationAction(ISD::FCOS,      MVT::f32, Expand);
1326   setOperationAction(ISD::FCOS,      MVT::f64, Expand);
1327   setOperationAction(ISD::FSINCOS,   MVT::f64, Expand);
1328   setOperationAction(ISD::FSINCOS,   MVT::f32, Expand);
1329   setOperationAction(ISD::FREM,      MVT::f64, Expand);
1330   setOperationAction(ISD::FREM,      MVT::f32, Expand);
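       // Expanding FREM produces a libcall to fmod/fmodf.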
1331   if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
1332       !Subtarget->isThumb1Only()) {
1333     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1334     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1335   }
1336   setOperationAction(ISD::FPOW,      MVT::f64, Expand);
1337   setOperationAction(ISD::FPOW,      MVT::f32, Expand);
1338 
1339   if (!Subtarget->hasVFP4Base()) {
1340     setOperationAction(ISD::FMA, MVT::f64, Expand);
1341     setOperationAction(ISD::FMA, MVT::f32, Expand);
1342   }
1343 
1344   // Various VFP goodness
1345   if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1346     // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1347     if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
1348       setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1349       setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1350     }
1351 
1352     // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1353     if (!Subtarget->hasFP16()) {
1354       setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1355       setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1356     }
1357 
1358     // Strict floating-point comparisons need custom lowering.
1359     setOperationAction(ISD::STRICT_FSETCC,  MVT::f16, Custom);
1360     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
1361     setOperationAction(ISD::STRICT_FSETCC,  MVT::f32, Custom);
1362     setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
1363     setOperationAction(ISD::STRICT_FSETCC,  MVT::f64, Custom);
1364     setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
1365   }
1366 
1367   // Use __sincos_stret if available.
1368   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1369       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1370     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1371     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1372   }
1373 
1374   // FP-ARMv8 implements a lot of rounding-like FP operations.
1375   if (Subtarget->hasFPARMv8Base()) {
1376     setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1377     setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1378     setOperationAction(ISD::FROUND, MVT::f32, Legal);
1379     setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1380     setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1381     setOperationAction(ISD::FRINT, MVT::f32, Legal);
1382     setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1383     setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1384     if (Subtarget->hasNEON()) {
1385       setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1386       setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1387       setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1388       setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1389     }
1390 
1391     if (Subtarget->hasFP64()) {
1392       setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1393       setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1394       setOperationAction(ISD::FROUND, MVT::f64, Legal);
1395       setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1396       setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1397       setOperationAction(ISD::FRINT, MVT::f64, Legal);
1398       setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1399       setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1400     }
1401   }
1402 
1403   // FP16 operations often need to be promoted to library calls.
1404   if (Subtarget->hasFullFP16()) {
1405     setOperationAction(ISD::FREM, MVT::f16, Promote);
1406     setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
1407     setOperationAction(ISD::FSIN, MVT::f16, Promote);
1408     setOperationAction(ISD::FCOS, MVT::f16, Promote);
1409     setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
1410     setOperationAction(ISD::FPOWI, MVT::f16, Promote);
1411     setOperationAction(ISD::FPOW, MVT::f16, Promote);
1412     setOperationAction(ISD::FEXP, MVT::f16, Promote);
1413     setOperationAction(ISD::FEXP2, MVT::f16, Promote);
1414     setOperationAction(ISD::FLOG, MVT::f16, Promote);
1415     setOperationAction(ISD::FLOG10, MVT::f16, Promote);
1416     setOperationAction(ISD::FLOG2, MVT::f16, Promote);
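         // Promote widens these to f32, calls the f32 library routine, and
         // truncates the result back to f16.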
1417 
1418     setOperationAction(ISD::FROUND, MVT::f16, Legal);
1419   }
1420 
1421   if (Subtarget->hasNEON()) {
1422     // vmin and vmax aren't available in a scalar form, so we use
1423     // a NEON instruction with an undef lane instead.
1424     setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
1425     setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
1426     setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
1427     setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
1428     setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
1429     setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
1430     setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
1431     setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
1432 
1433     if (Subtarget->hasFullFP16()) {
1434       setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
1435       setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
1436       setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
1437       setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
1438 
1439       setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
1440       setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
1441       setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
1442       setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
1443     }
1444   }
1445 
1446   // We have target-specific dag combine patterns for the following nodes:
1447   // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
1448   setTargetDAGCombine(ISD::ADD);
1449   setTargetDAGCombine(ISD::SUB);
1450   setTargetDAGCombine(ISD::MUL);
1451   setTargetDAGCombine(ISD::AND);
1452   setTargetDAGCombine(ISD::OR);
1453   setTargetDAGCombine(ISD::XOR);
1454 
1455   if (Subtarget->hasV6Ops())
1456     setTargetDAGCombine(ISD::SRL);
1457   if (Subtarget->isThumb1Only())
1458     setTargetDAGCombine(ISD::SHL);
1459 
1460   setStackPointerRegisterToSaveRestore(ARM::SP);
1461 
1462   if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1463       !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
1464     setSchedulingPreference(Sched::RegPressure);
1465   else
1466     setSchedulingPreference(Sched::Hybrid);
1467 
1468   //// temporary - rewrite interface to use type
1469   MaxStoresPerMemset = 8;
1470   MaxStoresPerMemsetOptSize = 4;
1471   MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1472   MaxStoresPerMemcpyOptSize = 2;
1473   MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1474   MaxStoresPerMemmoveOptSize = 2;
1475 
1476   // On ARM arguments smaller than 4 bytes are extended, so all arguments
1477   // are at least 4 bytes aligned.
1478   setMinStackArgumentAlignment(Align(4));
1479 
1480   // Prefer likely predicted branches to selects on out-of-order cores.
1481   PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1482 
1483   setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
1484 
1485   setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
1486 
1487   if (Subtarget->isThumb() || Subtarget->isThumb2())
1488     setTargetDAGCombine(ISD::ABS);
1489 }
1490 
1491 bool ARMTargetLowering::useSoftFloat() const {
1492   return Subtarget->useSoftFloat();
1493 }
1494 
1495 // FIXME: It might make sense to define the representative register class as the
1496 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
1497 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
1498 // SPR's representative would be DPR_VFP2. This should work well if register
1499 // pressure tracking were modified such that a register use would increment the
1500 // pressure of the register class's representative and all of its super
1501 // classes' representatives transitively. We have not implemented this because
1502 // of the difficulty prior to coalescing of modeling operand register classes
1503 // due to the common occurrence of cross class copies and subregister insertions
1504 // and extractions.
1505 std::pair<const TargetRegisterClass *, uint8_t>
1506 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1507                                            MVT VT) const {
1508   const TargetRegisterClass *RRC = nullptr;
1509   uint8_t Cost = 1;
1510   switch (VT.SimpleTy) {
1511   default:
1512     return TargetLowering::findRepresentativeClass(TRI, VT);
1513   // Use DPR as representative register class for all floating point
1514   // and vector types. Since there are 32 SPR registers and 32 DPR registers,
1515   // the cost is 1 for both f32 and f64.
1516   case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1517   case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1518     RRC = &ARM::DPRRegClass;
1519     // When NEON is used for SP, only half of the register file is available
1520     // because operations that define both SP and DP results will be constrained
1521     // to the VFP2 class (D0-D15). We currently model this constraint prior to
1522     // coalescing by double-counting the SP regs. See the FIXME above.
1523     if (Subtarget->useNEONForSinglePrecisionFP())
1524       Cost = 2;
1525     break;
1526   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1527   case MVT::v4f32: case MVT::v2f64:
1528     RRC = &ARM::DPRRegClass;
1529     Cost = 2;
1530     break;
1531   case MVT::v4i64:
1532     RRC = &ARM::DPRRegClass;
1533     Cost = 4;
1534     break;
1535   case MVT::v8i64:
1536     RRC = &ARM::DPRRegClass;
1537     Cost = 8;
1538     break;
1539   }
1540   return std::make_pair(RRC, Cost);
1541 }
1542 
1543 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1544   switch ((ARMISD::NodeType)Opcode) {
1545   case ARMISD::FIRST_NUMBER:  break;
1546   case ARMISD::Wrapper:       return "ARMISD::Wrapper";
1547   case ARMISD::WrapperPIC:    return "ARMISD::WrapperPIC";
1548   case ARMISD::WrapperJT:     return "ARMISD::WrapperJT";
1549   case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1550   case ARMISD::CALL:          return "ARMISD::CALL";
1551   case ARMISD::CALL_PRED:     return "ARMISD::CALL_PRED";
1552   case ARMISD::CALL_NOLINK:   return "ARMISD::CALL_NOLINK";
1553   case ARMISD::BRCOND:        return "ARMISD::BRCOND";
1554   case ARMISD::BR_JT:         return "ARMISD::BR_JT";
1555   case ARMISD::BR2_JT:        return "ARMISD::BR2_JT";
1556   case ARMISD::RET_FLAG:      return "ARMISD::RET_FLAG";
1557   case ARMISD::INTRET_FLAG:   return "ARMISD::INTRET_FLAG";
1558   case ARMISD::PIC_ADD:       return "ARMISD::PIC_ADD";
1559   case ARMISD::CMP:           return "ARMISD::CMP";
1560   case ARMISD::CMN:           return "ARMISD::CMN";
1561   case ARMISD::CMPZ:          return "ARMISD::CMPZ";
1562   case ARMISD::CMPFP:         return "ARMISD::CMPFP";
1563   case ARMISD::CMPFPE:        return "ARMISD::CMPFPE";
1564   case ARMISD::CMPFPw0:       return "ARMISD::CMPFPw0";
1565   case ARMISD::CMPFPEw0:      return "ARMISD::CMPFPEw0";
1566   case ARMISD::BCC_i64:       return "ARMISD::BCC_i64";
1567   case ARMISD::FMSTAT:        return "ARMISD::FMSTAT";
1568 
1569   case ARMISD::CMOV:          return "ARMISD::CMOV";
1570   case ARMISD::SUBS:          return "ARMISD::SUBS";
1571 
1572   case ARMISD::SSAT:          return "ARMISD::SSAT";
1573   case ARMISD::USAT:          return "ARMISD::USAT";
1574 
1575   case ARMISD::ASRL:          return "ARMISD::ASRL";
1576   case ARMISD::LSRL:          return "ARMISD::LSRL";
1577   case ARMISD::LSLL:          return "ARMISD::LSLL";
1578 
1579   case ARMISD::SRL_FLAG:      return "ARMISD::SRL_FLAG";
1580   case ARMISD::SRA_FLAG:      return "ARMISD::SRA_FLAG";
1581   case ARMISD::RRX:           return "ARMISD::RRX";
1582 
1583   case ARMISD::ADDC:          return "ARMISD::ADDC";
1584   case ARMISD::ADDE:          return "ARMISD::ADDE";
1585   case ARMISD::SUBC:          return "ARMISD::SUBC";
1586   case ARMISD::SUBE:          return "ARMISD::SUBE";
1587   case ARMISD::LSLS:          return "ARMISD::LSLS";
1588 
1589   case ARMISD::VMOVRRD:       return "ARMISD::VMOVRRD";
1590   case ARMISD::VMOVDRR:       return "ARMISD::VMOVDRR";
1591   case ARMISD::VMOVhr:        return "ARMISD::VMOVhr";
1592   case ARMISD::VMOVrh:        return "ARMISD::VMOVrh";
1593   case ARMISD::VMOVSR:        return "ARMISD::VMOVSR";
1594 
1595   case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1596   case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1597   case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1598 
1599   case ARMISD::TC_RETURN:     return "ARMISD::TC_RETURN";
1600 
1601   case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1602 
1603   case ARMISD::DYN_ALLOC:     return "ARMISD::DYN_ALLOC";
1604 
1605   case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1606 
1607   case ARMISD::PRELOAD:       return "ARMISD::PRELOAD";
1608 
1609   case ARMISD::WIN__CHKSTK:   return "ARMISD::WIN__CHKSTK";
1610   case ARMISD::WIN__DBZCHK:   return "ARMISD::WIN__DBZCHK";
1611 
1612   case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
1613   case ARMISD::VCMP:          return "ARMISD::VCMP";
1614   case ARMISD::VCMPZ:         return "ARMISD::VCMPZ";
1615   case ARMISD::VTST:          return "ARMISD::VTST";
1616 
1617   case ARMISD::VSHLs:         return "ARMISD::VSHLs";
1618   case ARMISD::VSHLu:         return "ARMISD::VSHLu";
1619   case ARMISD::VSHLIMM:       return "ARMISD::VSHLIMM";
1620   case ARMISD::VSHRsIMM:      return "ARMISD::VSHRsIMM";
1621   case ARMISD::VSHRuIMM:      return "ARMISD::VSHRuIMM";
1622   case ARMISD::VRSHRsIMM:     return "ARMISD::VRSHRsIMM";
1623   case ARMISD::VRSHRuIMM:     return "ARMISD::VRSHRuIMM";
1624   case ARMISD::VRSHRNIMM:     return "ARMISD::VRSHRNIMM";
1625   case ARMISD::VQSHLsIMM:     return "ARMISD::VQSHLsIMM";
1626   case ARMISD::VQSHLuIMM:     return "ARMISD::VQSHLuIMM";
1627   case ARMISD::VQSHLsuIMM:    return "ARMISD::VQSHLsuIMM";
1628   case ARMISD::VQSHRNsIMM:    return "ARMISD::VQSHRNsIMM";
1629   case ARMISD::VQSHRNuIMM:    return "ARMISD::VQSHRNuIMM";
1630   case ARMISD::VQSHRNsuIMM:   return "ARMISD::VQSHRNsuIMM";
1631   case ARMISD::VQRSHRNsIMM:   return "ARMISD::VQRSHRNsIMM";
1632   case ARMISD::VQRSHRNuIMM:   return "ARMISD::VQRSHRNuIMM";
1633   case ARMISD::VQRSHRNsuIMM:  return "ARMISD::VQRSHRNsuIMM";
1634   case ARMISD::VSLIIMM:       return "ARMISD::VSLIIMM";
1635   case ARMISD::VSRIIMM:       return "ARMISD::VSRIIMM";
1636   case ARMISD::VGETLANEu:     return "ARMISD::VGETLANEu";
1637   case ARMISD::VGETLANEs:     return "ARMISD::VGETLANEs";
1638   case ARMISD::VMOVIMM:       return "ARMISD::VMOVIMM";
1639   case ARMISD::VMVNIMM:       return "ARMISD::VMVNIMM";
1640   case ARMISD::VMOVFPIMM:     return "ARMISD::VMOVFPIMM";
1641   case ARMISD::VDUP:          return "ARMISD::VDUP";
1642   case ARMISD::VDUPLANE:      return "ARMISD::VDUPLANE";
1643   case ARMISD::VEXT:          return "ARMISD::VEXT";
1644   case ARMISD::VREV64:        return "ARMISD::VREV64";
1645   case ARMISD::VREV32:        return "ARMISD::VREV32";
1646   case ARMISD::VREV16:        return "ARMISD::VREV16";
1647   case ARMISD::VZIP:          return "ARMISD::VZIP";
1648   case ARMISD::VUZP:          return "ARMISD::VUZP";
1649   case ARMISD::VTRN:          return "ARMISD::VTRN";
1650   case ARMISD::VTBL1:         return "ARMISD::VTBL1";
1651   case ARMISD::VTBL2:         return "ARMISD::VTBL2";
1652   case ARMISD::VMOVN:         return "ARMISD::VMOVN";
1653   case ARMISD::VMULLs:        return "ARMISD::VMULLs";
1654   case ARMISD::VMULLu:        return "ARMISD::VMULLu";
1655   case ARMISD::UMAAL:         return "ARMISD::UMAAL";
1656   case ARMISD::UMLAL:         return "ARMISD::UMLAL";
1657   case ARMISD::SMLAL:         return "ARMISD::SMLAL";
1658   case ARMISD::SMLALBB:       return "ARMISD::SMLALBB";
1659   case ARMISD::SMLALBT:       return "ARMISD::SMLALBT";
1660   case ARMISD::SMLALTB:       return "ARMISD::SMLALTB";
1661   case ARMISD::SMLALTT:       return "ARMISD::SMLALTT";
1662   case ARMISD::SMULWB:        return "ARMISD::SMULWB";
1663   case ARMISD::SMULWT:        return "ARMISD::SMULWT";
1664   case ARMISD::SMLALD:        return "ARMISD::SMLALD";
1665   case ARMISD::SMLALDX:       return "ARMISD::SMLALDX";
1666   case ARMISD::SMLSLD:        return "ARMISD::SMLSLD";
1667   case ARMISD::SMLSLDX:       return "ARMISD::SMLSLDX";
1668   case ARMISD::SMMLAR:        return "ARMISD::SMMLAR";
1669   case ARMISD::SMMLSR:        return "ARMISD::SMMLSR";
1670   case ARMISD::QADD16b:       return "ARMISD::QADD16b";
1671   case ARMISD::QSUB16b:       return "ARMISD::QSUB16b";
1672   case ARMISD::QADD8b:        return "ARMISD::QADD8b";
1673   case ARMISD::QSUB8b:        return "ARMISD::QSUB8b";
1674   case ARMISD::BUILD_VECTOR:  return "ARMISD::BUILD_VECTOR";
1675   case ARMISD::BFI:           return "ARMISD::BFI";
1676   case ARMISD::VORRIMM:       return "ARMISD::VORRIMM";
1677   case ARMISD::VBICIMM:       return "ARMISD::VBICIMM";
1678   case ARMISD::VBSL:          return "ARMISD::VBSL";
1679   case ARMISD::MEMCPY:        return "ARMISD::MEMCPY";
1680   case ARMISD::VLD1DUP:       return "ARMISD::VLD1DUP";
1681   case ARMISD::VLD2DUP:       return "ARMISD::VLD2DUP";
1682   case ARMISD::VLD3DUP:       return "ARMISD::VLD3DUP";
1683   case ARMISD::VLD4DUP:       return "ARMISD::VLD4DUP";
1684   case ARMISD::VLD1_UPD:      return "ARMISD::VLD1_UPD";
1685   case ARMISD::VLD2_UPD:      return "ARMISD::VLD2_UPD";
1686   case ARMISD::VLD3_UPD:      return "ARMISD::VLD3_UPD";
1687   case ARMISD::VLD4_UPD:      return "ARMISD::VLD4_UPD";
1688   case ARMISD::VLD2LN_UPD:    return "ARMISD::VLD2LN_UPD";
1689   case ARMISD::VLD3LN_UPD:    return "ARMISD::VLD3LN_UPD";
1690   case ARMISD::VLD4LN_UPD:    return "ARMISD::VLD4LN_UPD";
1691   case ARMISD::VLD1DUP_UPD:   return "ARMISD::VLD1DUP_UPD";
1692   case ARMISD::VLD2DUP_UPD:   return "ARMISD::VLD2DUP_UPD";
1693   case ARMISD::VLD3DUP_UPD:   return "ARMISD::VLD3DUP_UPD";
1694   case ARMISD::VLD4DUP_UPD:   return "ARMISD::VLD4DUP_UPD";
1695   case ARMISD::VST1_UPD:      return "ARMISD::VST1_UPD";
1696   case ARMISD::VST2_UPD:      return "ARMISD::VST2_UPD";
1697   case ARMISD::VST3_UPD:      return "ARMISD::VST3_UPD";
1698   case ARMISD::VST4_UPD:      return "ARMISD::VST4_UPD";
1699   case ARMISD::VST2LN_UPD:    return "ARMISD::VST2LN_UPD";
1700   case ARMISD::VST3LN_UPD:    return "ARMISD::VST3LN_UPD";
1701   case ARMISD::VST4LN_UPD:    return "ARMISD::VST4LN_UPD";
1702   case ARMISD::WLS:           return "ARMISD::WLS";
1703   case ARMISD::LE:            return "ARMISD::LE";
1704   case ARMISD::LOOP_DEC:      return "ARMISD::LOOP_DEC";
1705   case ARMISD::CSINV:         return "ARMISD::CSINV";
1706   case ARMISD::CSNEG:         return "ARMISD::CSNEG";
1707   case ARMISD::CSINC:         return "ARMISD::CSINC";
1708   }
1709   return nullptr;
1710 }
1711 
1712 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1713                                           EVT VT) const {
1714   if (!VT.isVector())
1715     return getPointerTy(DL);
1716 
1717   // MVE has a predicate register.
1718   if (Subtarget->hasMVEIntegerOps() &&
1719       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
1720     return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1721   return VT.changeVectorElementTypeToInteger();
1722 }
1723 
1724 /// getRegClassFor - Return the register class that should be used for the
1725 /// specified value type.
1726 const TargetRegisterClass *
1727 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
1728   (void)isDivergent;
1729   // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1730   // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1731   // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
1732   // MVE Q registers.
1733   if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
1734     if (VT == MVT::v4i64)
1735       return &ARM::QQPRRegClass;
1736     if (VT == MVT::v8i64)
1737       return &ARM::QQQQPRRegClass;
1738   }
1739   return TargetLowering::getRegClassFor(VT);
1740 }
1741 
1742 // memcpy, and other memory intrinsics, typically try to use LDM/STM if the
1743 // source/dest is aligned and the copy size is large enough. We therefore want
1744 // to align such objects passed to memory intrinsics.
1745 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1746                                                unsigned &PrefAlign) const {
1747   if (!isa<MemIntrinsic>(CI))
1748     return false;
1749   MinSize = 8;
1750   // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1751   // cycle faster than 4-byte aligned LDM.
1752   PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1753   return true;
1754 }
1755 
1756 // Create a fast isel object.
1757 FastISel *
1758 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1759                                   const TargetLibraryInfo *libInfo) const {
1760   return ARM::createFastISel(funcInfo, libInfo);
1761 }
1762 
1763 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1764   unsigned NumVals = N->getNumValues();
1765   if (!NumVals)
1766     return Sched::RegPressure;
1767 
1768   for (unsigned i = 0; i != NumVals; ++i) {
1769     EVT VT = N->getValueType(i);
1770     if (VT == MVT::Glue || VT == MVT::Other)
1771       continue;
1772     if (VT.isFloatingPoint() || VT.isVector())
1773       return Sched::ILP;
1774   }
1775 
1776   if (!N->isMachineOpcode())
1777     return Sched::RegPressure;
1778 
1779   // Loads are scheduled for latency even if the instruction itinerary
1780   // is not available.
1781   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1782   const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1783 
1784   if (MCID.getNumDefs() == 0)
1785     return Sched::RegPressure;
1786   if (!Itins->isEmpty() &&
1787       Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1788     return Sched::ILP;
1789 
1790   return Sched::RegPressure;
1791 }
1792 
1793 //===----------------------------------------------------------------------===//
1794 // Lowering Code
1795 //===----------------------------------------------------------------------===//
1796 
1797 static bool isSRL16(const SDValue &Op) {
1798   if (Op.getOpcode() != ISD::SRL)
1799     return false;
1800   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1801     return Const->getZExtValue() == 16;
1802   return false;
1803 }
1804 
1805 static bool isSRA16(const SDValue &Op) {
1806   if (Op.getOpcode() != ISD::SRA)
1807     return false;
1808   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1809     return Const->getZExtValue() == 16;
1810   return false;
1811 }
1812 
1813 static bool isSHL16(const SDValue &Op) {
1814   if (Op.getOpcode() != ISD::SHL)
1815     return false;
1816   if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1817     return Const->getZExtValue() == 16;
1818   return false;
1819 }
1820 
1821 // Check for a signed 16-bit value. We special-case SRA because it simplifies
1822 // matters when also looking for SRAs that aren't sign-extending a
1823 // smaller value. Without the check, we'd need to take extra care with
1824 // checking order for some operations.
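     // For an i32 value, 17 known sign bits means the whole value fits in the
     // low 16 bits.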
1825 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1826   if (isSRA16(Op))
1827     return isSHL16(Op.getOperand(0));
1828   return DAG.ComputeNumSignBits(Op) == 17;
1829 }
1830 
1831 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1832 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1833   switch (CC) {
1834   default: llvm_unreachable("Unknown condition code!");
1835   case ISD::SETNE:  return ARMCC::NE;
1836   case ISD::SETEQ:  return ARMCC::EQ;
1837   case ISD::SETGT:  return ARMCC::GT;
1838   case ISD::SETGE:  return ARMCC::GE;
1839   case ISD::SETLT:  return ARMCC::LT;
1840   case ISD::SETLE:  return ARMCC::LE;
1841   case ISD::SETUGT: return ARMCC::HI;
1842   case ISD::SETUGE: return ARMCC::HS;
1843   case ISD::SETULT: return ARMCC::LO;
1844   case ISD::SETULE: return ARMCC::LS;
1845   }
1846 }
1847 
1848 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
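     /// Some conditions (e.g. SETONE, SETUEQ) cannot be tested with a single
     /// ARM condition, so a second predicate is returned in CondCode2;
     /// ARMCC::AL there means only CondCode is needed.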
1849 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1850                         ARMCC::CondCodes &CondCode2) {
1851   CondCode2 = ARMCC::AL;
1852   switch (CC) {
1853   default: llvm_unreachable("Unknown FP condition!");
1854   case ISD::SETEQ:
1855   case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
1856   case ISD::SETGT:
1857   case ISD::SETOGT: CondCode = ARMCC::GT; break;
1858   case ISD::SETGE:
1859   case ISD::SETOGE: CondCode = ARMCC::GE; break;
1860   case ISD::SETOLT: CondCode = ARMCC::MI; break;
1861   case ISD::SETOLE: CondCode = ARMCC::LS; break;
1862   case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
1863   case ISD::SETO:   CondCode = ARMCC::VC; break;
1864   case ISD::SETUO:  CondCode = ARMCC::VS; break;
1865   case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
1866   case ISD::SETUGT: CondCode = ARMCC::HI; break;
1867   case ISD::SETUGE: CondCode = ARMCC::PL; break;
1868   case ISD::SETLT:
1869   case ISD::SETULT: CondCode = ARMCC::LT; break;
1870   case ISD::SETLE:
1871   case ISD::SETULE: CondCode = ARMCC::LE; break;
1872   case ISD::SETNE:
1873   case ISD::SETUNE: CondCode = ARMCC::NE; break;
1874   }
1875 }
1876 
1877 //===----------------------------------------------------------------------===//
1878 //                      Calling Convention Implementation
1879 //===----------------------------------------------------------------------===//
1880 
1881 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1882 /// account presence of floating point hardware and calling convention
1883 /// limitations, such as support for variadic functions.
1884 CallingConv::ID
1885 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1886                                            bool isVarArg) const {
1887   switch (CC) {
1888   default:
1889     report_fatal_error("Unsupported calling convention");
1890   case CallingConv::ARM_AAPCS:
1891   case CallingConv::ARM_APCS:
1892   case CallingConv::GHC:
1893   case CallingConv::CFGuard_Check:
1894     return CC;
1895   case CallingConv::PreserveMost:
1896     return CallingConv::PreserveMost;
1897   case CallingConv::ARM_AAPCS_VFP:
1898   case CallingConv::Swift:
1899     return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1900   case CallingConv::C:
1901     if (!Subtarget->isAAPCS_ABI())
1902       return CallingConv::ARM_APCS;
1903     else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
1904              getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1905              !isVarArg)
1906       return CallingConv::ARM_AAPCS_VFP;
1907     else
1908       return CallingConv::ARM_AAPCS;
1909   case CallingConv::Fast:
1910   case CallingConv::CXX_FAST_TLS:
1911     if (!Subtarget->isAAPCS_ABI()) {
1912       if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
1913         return CallingConv::Fast;
1914       return CallingConv::ARM_APCS;
1915     } else if (Subtarget->hasVFP2Base() &&
1916                !Subtarget->isThumb1Only() && !isVarArg)
1917       return CallingConv::ARM_AAPCS_VFP;
1918     else
1919       return CallingConv::ARM_AAPCS;
1920   }
1921 }
1922 
1923 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1924                                                  bool isVarArg) const {
1925   return CCAssignFnForNode(CC, false, isVarArg);
1926 }
1927 
1928 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1929                                                    bool isVarArg) const {
1930   return CCAssignFnForNode(CC, true, isVarArg);
1931 }
1932 
1933 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1934 /// CallingConvention.
1935 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1936                                                  bool Return,
1937                                                  bool isVarArg) const {
1938   switch (getEffectiveCallingConv(CC, isVarArg)) {
1939   default:
1940     report_fatal_error("Unsupported calling convention");
1941   case CallingConv::ARM_APCS:
1942     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1943   case CallingConv::ARM_AAPCS:
1944     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1945   case CallingConv::ARM_AAPCS_VFP:
1946     return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1947   case CallingConv::Fast:
1948     return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1949   case CallingConv::GHC:
1950     return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1951   case CallingConv::PreserveMost:
1952     return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1953   case CallingConv::CFGuard_Check:
1954     return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
1955   }
1956 }
1957 
1958 /// LowerCallResult - Lower the result values of a call into the
1959 /// appropriate copies out of appropriate physical registers.
1960 SDValue ARMTargetLowering::LowerCallResult(
1961     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1962     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1963     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1964     SDValue ThisVal) const {
1965   // Assign locations to each value returned by this call.
1966   SmallVector<CCValAssign, 16> RVLocs;
1967   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1968                  *DAG.getContext());
1969   CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
1970 
1971   // Copy all of the result registers out of their specified physreg.
1972   for (unsigned i = 0; i != RVLocs.size(); ++i) {
1973     CCValAssign VA = RVLocs[i];
1974 
1975     // Pass 'this' value directly from the argument to return value, to avoid
1976     // reg unit interference
1977     if (i == 0 && isThisReturn) {
1978       assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
1979              "unexpected return calling convention register assignment");
1980       InVals.push_back(ThisVal);
1981       continue;
1982     }
1983 
1984     SDValue Val;
1985     if (VA.needsCustom()) {
1986       // Handle f64 or half of a v2f64.
1987       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1988                                       InFlag);
1989       Chain = Lo.getValue(1);
1990       InFlag = Lo.getValue(2);
1991       VA = RVLocs[++i]; // skip ahead to next loc
1992       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1993                                       InFlag);
1994       Chain = Hi.getValue(1);
1995       InFlag = Hi.getValue(2);
1996       if (!Subtarget->isLittle())
1997         std::swap (Lo, Hi);
1998       Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1999 
2000       if (VA.getLocVT() == MVT::v2f64) {
2001         SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2002         Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2003                           DAG.getConstant(0, dl, MVT::i32));
2004 
2005         VA = RVLocs[++i]; // skip ahead to next loc
2006         Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2007         Chain = Lo.getValue(1);
2008         InFlag = Lo.getValue(2);
2009         VA = RVLocs[++i]; // skip ahead to next loc
2010         Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2011         Chain = Hi.getValue(1);
2012         InFlag = Hi.getValue(2);
2013         if (!Subtarget->isLittle())
2014           std::swap (Lo, Hi);
2015         Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2016         Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2017                           DAG.getConstant(1, dl, MVT::i32));
2018       }
2019     } else {
2020       Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
2021                                InFlag);
2022       Chain = Val.getValue(1);
2023       InFlag = Val.getValue(2);
2024     }
2025 
2026     switch (VA.getLocInfo()) {
2027     default: llvm_unreachable("Unknown loc info!");
2028     case CCValAssign::Full: break;
2029     case CCValAssign::BCvt:
2030       Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
2031       break;
2032     }
2033 
2034     InVals.push_back(Val);
2035   }
2036 
2037   return Chain;
2038 }
2039 
2040 /// LowerMemOpCallTo - Store the argument to the stack.
2041 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
2042                                             SDValue Arg, const SDLoc &dl,
2043                                             SelectionDAG &DAG,
2044                                             const CCValAssign &VA,
2045                                             ISD::ArgFlagsTy Flags) const {
2046   unsigned LocMemOffset = VA.getLocMemOffset();
2047   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2048   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
2049                        StackPtr, PtrOff);
2050   return DAG.getStore(
2051       Chain, dl, Arg, PtrOff,
2052       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
2053 }
2054 
2055 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
2056                                          SDValue Chain, SDValue &Arg,
2057                                          RegsToPassVector &RegsToPass,
2058                                          CCValAssign &VA, CCValAssign &NextVA,
2059                                          SDValue &StackPtr,
2060                                          SmallVectorImpl<SDValue> &MemOpChains,
2061                                          ISD::ArgFlagsTy Flags) const {
2062   SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2063                               DAG.getVTList(MVT::i32, MVT::i32), Arg);
2064   unsigned id = Subtarget->isLittle() ? 0 : 1;
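       // VMOVRRD produces the two 32-bit halves of the f64; on big-endian
       // targets the high half must go into the first (lower-numbered) register.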
2065   RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
2066 
2067   if (NextVA.isRegLoc())
2068     RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
2069   else {
2070     assert(NextVA.isMemLoc());
2071     if (!StackPtr.getNode())
2072       StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
2073                                     getPointerTy(DAG.getDataLayout()));
2074 
2075     MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
2076                                            dl, DAG, NextVA,
2077                                            Flags));
2078   }
2079 }
2080 
2081 /// LowerCall - Lowering a call into a callseq_start <-
2082 /// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
2083 /// nodes.
2084 SDValue
2085 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2086                              SmallVectorImpl<SDValue> &InVals) const {
2087   SelectionDAG &DAG                     = CLI.DAG;
2088   SDLoc &dl                             = CLI.DL;
2089   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2090   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
2091   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
2092   SDValue Chain                         = CLI.Chain;
2093   SDValue Callee                        = CLI.Callee;
2094   bool &isTailCall                      = CLI.IsTailCall;
2095   CallingConv::ID CallConv              = CLI.CallConv;
2096   bool doesNotRet                       = CLI.DoesNotReturn;
2097   bool isVarArg                         = CLI.IsVarArg;
2098 
2099   MachineFunction &MF = DAG.getMachineFunction();
2100   MachineFunction::CallSiteInfo CSInfo;
2101   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
2102   bool isThisReturn = false;
2103   bool PreferIndirect = false;
2104 
2105   // Disable tail calls if they're not supported.
2106   if (!Subtarget->supportsTailCall())
2107     isTailCall = false;
2108 
2109   if (isa<GlobalAddressSDNode>(Callee)) {
2110     // If we're optimizing for minimum size and the function is called three or
2111     // more times in this block, we can improve codesize by calling indirectly
2112     // as BLXr has a 16-bit encoding.
2113     auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2114     if (CLI.CS) {
2115       auto *BB = CLI.CS.getParent();
2116       PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
2117                        count_if(GV->users(), [&BB](const User *U) {
2118                          return isa<Instruction>(U) &&
2119                                 cast<Instruction>(U)->getParent() == BB;
2120                        }) > 2;
2121     }
2122   }
2123   if (isTailCall) {
2124     // Check if it's really possible to do a tail call.
2125     isTailCall = IsEligibleForTailCallOptimization(
2126         Callee, CallConv, isVarArg, isStructRet,
2127         MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
2128         PreferIndirect);
2129     if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
2130       report_fatal_error("failed to perform tail call elimination on a call "
2131                          "site marked musttail");
2132     // We don't support GuaranteedTailCallOpt for ARM, only automatically
2133     // detected sibcalls.
2134     if (isTailCall)
2135       ++NumTailCalls;
2136   }
2137 
2138   // Analyze operands of the call, assigning locations to each operand.
2139   SmallVector<CCValAssign, 16> ArgLocs;
2140   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2141                  *DAG.getContext());
2142   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
2143 
2144   // Get a count of how many bytes are to be pushed on the stack.
2145   unsigned NumBytes = CCInfo.getNextStackOffset();
2146 
2147   if (isTailCall) {
2148     // For tail calls, memory operands are available in our caller's stack.
2149     NumBytes = 0;
2150   } else {
2151     // Adjust the stack pointer for the new arguments...
2152     // These operations are automatically eliminated by the prolog/epilog pass
2153     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
2154   }
2155 
2156   SDValue StackPtr =
2157       DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
2158 
2159   RegsToPassVector RegsToPass;
2160   SmallVector<SDValue, 8> MemOpChains;
2161 
2162   // Walk the register/memloc assignments, inserting copies/loads.  In the case
2163   // of tail call optimization, arguments are handled later.
2164   for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2165        i != e;
2166        ++i, ++realArgIdx) {
2167     CCValAssign &VA = ArgLocs[i];
2168     SDValue Arg = OutVals[realArgIdx];
2169     ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2170     bool isByVal = Flags.isByVal();
2171 
2172     // Promote the value if needed.
2173     switch (VA.getLocInfo()) {
2174     default: llvm_unreachable("Unknown loc info!");
2175     case CCValAssign::Full: break;
2176     case CCValAssign::SExt:
2177       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
2178       break;
2179     case CCValAssign::ZExt:
2180       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
2181       break;
2182     case CCValAssign::AExt:
2183       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
2184       break;
2185     case CCValAssign::BCvt:
2186       Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2187       break;
2188     }
2189 
2190     // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
2191     if (VA.needsCustom()) {
2192       if (VA.getLocVT() == MVT::v2f64) {
2193         SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2194                                   DAG.getConstant(0, dl, MVT::i32));
2195         SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2196                                   DAG.getConstant(1, dl, MVT::i32));
2197 
2198         PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
2199                          VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2200 
2201         VA = ArgLocs[++i]; // skip ahead to next loc
2202         if (VA.isRegLoc()) {
2203           PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
2204                            VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
2205         } else {
2206           assert(VA.isMemLoc());
2207 
2208           MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
2209                                                  dl, DAG, VA, Flags));
2210         }
2211       } else {
2212         PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2213                          StackPtr, MemOpChains, Flags);
2214       }
2215     } else if (VA.isRegLoc()) {
2216       if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2217           Outs[0].VT == MVT::i32) {
2218         assert(VA.getLocVT() == MVT::i32 &&
2219                "unexpected calling convention register assignment");
2220         assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
2221                "unexpected use of 'returned'");
2222         isThisReturn = true;
2223       }
2224       const TargetOptions &Options = DAG.getTarget().Options;
2225       if (Options.EnableDebugEntryValues)
2226         CSInfo.emplace_back(VA.getLocReg(), i);
2227       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2228     } else if (isByVal) {
2229       assert(VA.isMemLoc());
2230       unsigned offset = 0;
2231 
2232       // True if this byval aggregate will be split between registers
2233       // and memory.
2234       unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2235       unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2236 
2237       if (CurByValIdx < ByValArgsCount) {
2238 
2239         unsigned RegBegin, RegEnd;
2240         CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2241 
2242         EVT PtrVT =
2243             DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2244         unsigned int i, j;
2245         for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2246           SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
2247           SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2248           SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
2249                                      MachinePointerInfo(),
2250                                      DAG.InferPtrAlignment(AddArg));
2251           MemOpChains.push_back(Load.getValue(1));
2252           RegsToPass.push_back(std::make_pair(j, Load));
2253         }
2254 
2255         // If the parameter size exceeds the register area, the "offset" value
2256         // helps us calculate the stack slot for the remaining part properly.
2257         offset = RegEnd - RegBegin;
2258 
2259         CCInfo.nextInRegsParam();
2260       }
2261 
2262       if (Flags.getByValSize() > 4*offset) {
2263         auto PtrVT = getPointerTy(DAG.getDataLayout());
2264         unsigned LocMemOffset = VA.getLocMemOffset();
2265         SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2266         SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
2267         SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
2268         SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
2269         SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
2270                                            MVT::i32);
2271         SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
2272                                             MVT::i32);
2273 
2274         SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2275         SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2276         MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
2277                                           Ops));
2278       }
2279     } else if (!isTailCall) {
2280       assert(VA.isMemLoc());
2281 
2282       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2283                                              dl, DAG, VA, Flags));
2284     }
2285   }
2286 
2287   if (!MemOpChains.empty())
2288     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2289 
2290   // Build a sequence of copy-to-reg nodes chained together with token chain
2291   // and flag operands which copy the outgoing args into the appropriate regs.
2292   SDValue InFlag;
2293   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2294     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2295                              RegsToPass[i].second, InFlag);
2296     InFlag = Chain.getValue(1);
2297   }
2298 
2299   // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2300   // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2301   // node so that legalize doesn't hack it.
2302   bool isDirect = false;
2303 
2304   const TargetMachine &TM = getTargetMachine();
2305   const Module *Mod = MF.getFunction().getParent();
2306   const GlobalValue *GV = nullptr;
2307   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2308     GV = G->getGlobal();
2309   bool isStub =
2310       !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2311 
2312   bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2313   bool isLocalARMFunc = false;
2314   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2315   auto PtrVt = getPointerTy(DAG.getDataLayout());
2316 
2317   if (Subtarget->genLongCalls()) {
2318     assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2319            "long-calls codegen is not position independent!");
2320     // Handle a global address or an external symbol. If it's not one of
2321     // those, the target's already in a register, so we don't need to do
2322     // anything extra.
2323     if (isa<GlobalAddressSDNode>(Callee)) {
2324       // Create a constant pool entry for the callee address
2325       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2326       ARMConstantPoolValue *CPV =
2327         ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2328 
2329       // Get the address of the callee into a register
2330       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2331       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2332       Callee = DAG.getLoad(
2333           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2334           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2335     } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2336       const char *Sym = S->getSymbol();
2337 
2338       // Create a constant pool entry for the callee address
2339       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2340       ARMConstantPoolValue *CPV =
2341         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2342                                       ARMPCLabelIndex, 0);
2343       // Get the address of the callee into a register
2344       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2345       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2346       Callee = DAG.getLoad(
2347           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2348           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2349     }
2350   } else if (isa<GlobalAddressSDNode>(Callee)) {
2351     if (!PreferIndirect) {
2352       isDirect = true;
2353       bool isDef = GV->isStrongDefinitionForLinker();
2354 
2355       // ARM call to a local ARM function is predicable.
2356       isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2357       // tBX takes a register source operand.
2358       if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2359         assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2360         Callee = DAG.getNode(
2361             ARMISD::WrapperPIC, dl, PtrVt,
2362             DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2363         Callee = DAG.getLoad(
2364             PtrVt, dl, DAG.getEntryNode(), Callee,
2365             MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2366             /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2367                                      MachineMemOperand::MOInvariant);
2368       } else if (Subtarget->isTargetCOFF()) {
2369         assert(Subtarget->isTargetWindows() &&
2370                "Windows is the only supported COFF target");
2371         unsigned TargetFlags = ARMII::MO_NO_FLAG;
2372         if (GV->hasDLLImportStorageClass())
2373           TargetFlags = ARMII::MO_DLLIMPORT;
2374         else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
2375           TargetFlags = ARMII::MO_COFFSTUB;
2376         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
2377                                             TargetFlags);
2378         if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
2379           Callee =
2380               DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2381                           DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2382                           MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2383       } else {
2384         Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2385       }
2386     }
2387   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2388     isDirect = true;
2389     // tBX takes a register source operand.
2390     const char *Sym = S->getSymbol();
2391     if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2392       unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2393       ARMConstantPoolValue *CPV =
2394         ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2395                                       ARMPCLabelIndex, 4);
2396       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2397       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2398       Callee = DAG.getLoad(
2399           PtrVt, dl, DAG.getEntryNode(), CPAddr,
2400           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2401       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2402       Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2403     } else {
2404       Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2405     }
2406   }
2407 
2408   // FIXME: handle tail calls differently.
2409   unsigned CallOpc;
2410   if (Subtarget->isThumb()) {
2411     if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2412       CallOpc = ARMISD::CALL_NOLINK;
2413     else
2414       CallOpc = ARMISD::CALL;
2415   } else {
2416     if (!isDirect && !Subtarget->hasV5TOps())
2417       CallOpc = ARMISD::CALL_NOLINK;
2418     else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2419              // Emit regular call when code size is the priority
2420              !Subtarget->hasMinSize())
2421       // "mov lr, pc; b _foo" to avoid confusing the RSP
2422       CallOpc = ARMISD::CALL_NOLINK;
2423     else
2424       CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2425   }
2426 
2427   std::vector<SDValue> Ops;
2428   Ops.push_back(Chain);
2429   Ops.push_back(Callee);
2430 
2431   // Add argument registers to the end of the list so that they are known live
2432   // into the call.
2433   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2434     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2435                                   RegsToPass[i].second.getValueType()));
2436 
2437   // Add a register mask operand representing the call-preserved registers.
2438   if (!isTailCall) {
2439     const uint32_t *Mask;
2440     const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2441     if (isThisReturn) {
2442       // For 'this' returns, use the R0-preserving mask if applicable
2443       Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2444       if (!Mask) {
2445         // Set isThisReturn to false if the calling convention is not one that
2446         // allows 'returned' to be modeled in this way, so LowerCallResult does
2447         // not try to pass 'this' straight through
2448         isThisReturn = false;
2449         Mask = ARI->getCallPreservedMask(MF, CallConv);
2450       }
2451     } else
2452       Mask = ARI->getCallPreservedMask(MF, CallConv);
2453 
2454     assert(Mask && "Missing call preserved mask for calling convention");
2455     Ops.push_back(DAG.getRegisterMask(Mask));
2456   }
2457 
2458   if (InFlag.getNode())
2459     Ops.push_back(InFlag);
2460 
2461   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2462   if (isTailCall) {
2463     MF.getFrameInfo().setHasTailCall();
2464     SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2465     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
2466     return Ret;
2467   }
2468 
2469   // Returns a chain and a flag for retval copy to use.
2470   Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2471   InFlag = Chain.getValue(1);
2472   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
2473 
2474   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2475                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2476   if (!Ins.empty())
2477     InFlag = Chain.getValue(1);
2478 
2479   // Handle result values, copying them out of physregs into vregs that we
2480   // return.
2481   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2482                          InVals, isThisReturn,
2483                          isThisReturn ? OutVals[0] : SDValue());
2484 }
2485 
2486 /// HandleByVal - Every parameter *after* a byval parameter is passed
2487 /// on the stack.  Remember the next parameter register to allocate,
2488 /// and then confiscate the rest of the parameter registers to ensure
2489 /// this.
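/// As a hedged worked example of the logic below: assume r0 is already taken
/// and nothing has been pushed to the stack yet. A 12-byte byval with 8-byte
/// alignment then wastes r1 to reach an even register, binds r2-r3 to the
/// first 8 bytes, and leaves Size = 4 so the remaining tail is passed in
/// memory.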
2490 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2491                                     unsigned Align) const {
2492   // Byval (as with any stack) slots are always at least 4 byte aligned.
2493   Align = std::max(Align, 4U);
2494 
2495   unsigned Reg = State->AllocateReg(GPRArgRegs);
2496   if (!Reg)
2497     return;
2498 
2499   unsigned AlignInRegs = Align / 4;
2500   unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2501   for (unsigned i = 0; i < Waste; ++i)
2502     Reg = State->AllocateReg(GPRArgRegs);
2503 
2504   if (!Reg)
2505     return;
2506 
2507   unsigned Excess = 4 * (ARM::R4 - Reg);
2508 
2509   // Special case when NSAA != SP and the parameter size is greater than the
2510   // size of all remaining GPR regs. In that case we can't split the
2511   // parameter; we must send it to the stack. We must also set the NCRN to
2512   // R4, so waste all remaining registers.
2513   const unsigned NSAAOffset = State->getNextStackOffset();
2514   if (NSAAOffset != 0 && Size > Excess) {
2515     while (State->AllocateReg(GPRArgRegs))
2516       ;
2517     return;
2518   }
2519 
2520   // The first register for the byval parameter is the first register that
2521   // wasn't allocated before this method call, so it would be "reg".
2522   // If the parameter is small enough to be saved in the range [reg, r4),
2523   // then the end (one past the last) register would be
2524   // reg + param-size-in-regs; otherwise the parameter is split between
2525   // registers and stack, and the end register is r4 in that case.
2526   unsigned ByValRegBegin = Reg;
2527   unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2528   State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2529   // Note: the first register was already allocated at the beginning of this
2530   // function, so allocate only the remaining registers we need.
2531   for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2532     State->AllocateReg(GPRArgRegs);
2533   // A byval parameter that is split between registers and memory needs its
2534   // size truncated here.
2535   // In the case where the entire structure fits in registers, we set the
2536   // size in memory to zero.
2537   Size = std::max<int>(Size - Excess, 0);
2538 }
2539 
2540 /// MatchingStackOffset - Return true if the given stack call argument is
2541 /// already available in the same relative position of the caller's
2542 /// incoming argument stack.
2543 static
2544 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2545                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2546                          const TargetInstrInfo *TII) {
2547   unsigned Bytes = Arg.getValueSizeInBits() / 8;
2548   int FI = std::numeric_limits<int>::max();
2549   if (Arg.getOpcode() == ISD::CopyFromReg) {
2550     unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2551     if (!Register::isVirtualRegister(VR))
2552       return false;
2553     MachineInstr *Def = MRI->getVRegDef(VR);
2554     if (!Def)
2555       return false;
2556     if (!Flags.isByVal()) {
2557       if (!TII->isLoadFromStackSlot(*Def, FI))
2558         return false;
2559     } else {
2560       return false;
2561     }
2562   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2563     if (Flags.isByVal())
2564       // ByVal argument is passed in as a pointer but it's now being
2565       // dereferenced. e.g.
2566       // define @foo(%struct.X* %A) {
2567       //   tail call @bar(%struct.X* byval %A)
2568       // }
2569       return false;
2570     SDValue Ptr = Ld->getBasePtr();
2571     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2572     if (!FINode)
2573       return false;
2574     FI = FINode->getIndex();
2575   } else
2576     return false;
2577 
2578   assert(FI != std::numeric_limits<int>::max());
2579   if (!MFI.isFixedObjectIndex(FI))
2580     return false;
2581   return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2582 }
2583 
2584 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2585 /// for tail call optimization. Targets which want to do tail call
2586 /// optimization should implement this function.
2587 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2588     SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
2589     bool isCalleeStructRet, bool isCallerStructRet,
2590     const SmallVectorImpl<ISD::OutputArg> &Outs,
2591     const SmallVectorImpl<SDValue> &OutVals,
2592     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
2593     const bool isIndirect) const {
2594   MachineFunction &MF = DAG.getMachineFunction();
2595   const Function &CallerF = MF.getFunction();
2596   CallingConv::ID CallerCC = CallerF.getCallingConv();
2597 
2598   assert(Subtarget->supportsTailCall());
2599 
2600   // Indirect tail calls cannot be optimized for Thumb1 if the args
2601   // to the call take up r0-r3. The reason is that there are no legal registers
2602   // left to hold the pointer to the function to be called.
2603   if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
2604       (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
2605     return false;
2606 
2607   // Look for obvious safe cases to perform tail call optimization that do not
2608   // require ABI changes. This is what gcc calls sibcall.
2609 
2610   // Exception-handling functions need a special set of instructions to indicate
2611   // a return to the hardware. Tail-calling another function would probably
2612   // break this.
2613   if (CallerF.hasFnAttribute("interrupt"))
2614     return false;
2615 
2616   // Also avoid sibcall optimization if either caller or callee uses struct
2617   // return semantics.
2618   if (isCalleeStructRet || isCallerStructRet)
2619     return false;
2620 
2621   // Externally-defined functions with weak linkage should not be
2622   // tail-called on ARM when the OS does not support dynamic
2623   // pre-emption of symbols, as the AAELF spec requires normal calls
2624   // to undefined weak functions to be replaced with a NOP or jump to the
2625   // next instruction. The behaviour of branch instructions in this
2626   // situation (as used for tail calls) is implementation-defined, so we
2627   // cannot rely on the linker replacing the tail call with a return.
2628   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2629     const GlobalValue *GV = G->getGlobal();
2630     const Triple &TT = getTargetMachine().getTargetTriple();
2631     if (GV->hasExternalWeakLinkage() &&
2632         (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2633       return false;
2634   }
2635 
2636   // Check that the call results are passed in the same way.
2637   LLVMContext &C = *DAG.getContext();
2638   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
2639                                   CCAssignFnForReturn(CalleeCC, isVarArg),
2640                                   CCAssignFnForReturn(CallerCC, isVarArg)))
2641     return false;
2642   // The callee has to preserve all registers the caller needs to preserve.
2643   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2644   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2645   if (CalleeCC != CallerCC) {
2646     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2647     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2648       return false;
2649   }
2650 
2651   // If Caller's vararg or byval argument has been split between registers and
2652   // stack, do not perform tail call, since part of the argument is in caller's
2653   // local frame.
2654   const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2655   if (AFI_Caller->getArgRegsSaveSize())
2656     return false;
2657 
2658   // If the callee takes no arguments then go on to check the results of the
2659   // call.
2660   if (!Outs.empty()) {
2661     // Check if stack adjustment is needed. For now, do not do this if any
2662     // argument is passed on the stack.
2663     SmallVector<CCValAssign, 16> ArgLocs;
2664     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2665     CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2666     if (CCInfo.getNextStackOffset()) {
2667       // Check if the arguments are already laid out in the right way as
2668       // the caller's fixed stack objects.
2669       MachineFrameInfo &MFI = MF.getFrameInfo();
2670       const MachineRegisterInfo *MRI = &MF.getRegInfo();
2671       const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2672       for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2673            i != e;
2674            ++i, ++realArgIdx) {
2675         CCValAssign &VA = ArgLocs[i];
2676         EVT RegVT = VA.getLocVT();
2677         SDValue Arg = OutVals[realArgIdx];
2678         ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2679         if (VA.getLocInfo() == CCValAssign::Indirect)
2680           return false;
2681         if (VA.needsCustom()) {
2682           // f64 and vector types are split into multiple registers or
2683           // register/stack-slot combinations.  The types will not match
2684           // the registers; give up on memory f64 refs until we figure
2685           // out what to do about this.
2686           if (!VA.isRegLoc())
2687             return false;
2688           if (!ArgLocs[++i].isRegLoc())
2689             return false;
2690           if (RegVT == MVT::v2f64) {
2691             if (!ArgLocs[++i].isRegLoc())
2692               return false;
2693             if (!ArgLocs[++i].isRegLoc())
2694               return false;
2695           }
2696         } else if (!VA.isRegLoc()) {
2697           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2698                                    MFI, MRI, TII))
2699             return false;
2700         }
2701       }
2702     }
2703 
2704     const MachineRegisterInfo &MRI = MF.getRegInfo();
2705     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2706       return false;
2707   }
2708 
2709   return true;
2710 }
2711 
2712 bool
2713 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2714                                   MachineFunction &MF, bool isVarArg,
2715                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
2716                                   LLVMContext &Context) const {
2717   SmallVector<CCValAssign, 16> RVLocs;
2718   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2719   return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2720 }
2721 
2722 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2723                                     const SDLoc &DL, SelectionDAG &DAG) {
2724   const MachineFunction &MF = DAG.getMachineFunction();
2725   const Function &F = MF.getFunction();
2726 
2727   StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
2728 
2729   // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2730   // version of the "preferred return address". These offsets affect the return
2731   // instruction if this is a return from PL1 without hypervisor extensions.
2732   //    IRQ/FIQ: +4     "subs pc, lr, #4"
2733   //    SWI:     0      "subs pc, lr, #0"
2734   //    ABORT:   +4     "subs pc, lr, #4"
2735   //    UNDEF:   +4/+2  "subs pc, lr, #0"
2736   // UNDEF varies depending on whether the exception came from ARM or Thumb
2737   // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
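  // For instance (a hedged example, not tied to any particular OS): a handler
  // declared as
  //   __attribute__((interrupt("IRQ"))) void isr(void);
  // carries the "interrupt" attribute with value "IRQ", so LROffset below is
  // 4 and the function returns with "subs pc, lr, #4".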
2738 
2739   int64_t LROffset;
2740   if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2741       IntKind == "ABORT")
2742     LROffset = 4;
2743   else if (IntKind == "SWI" || IntKind == "UNDEF")
2744     LROffset = 0;
2745   else
2746     report_fatal_error("Unsupported interrupt attribute. If present, value "
2747                        "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2748 
2749   RetOps.insert(RetOps.begin() + 1,
2750                 DAG.getConstant(LROffset, DL, MVT::i32, false));
2751 
2752   return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2753 }
2754 
2755 SDValue
2756 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2757                                bool isVarArg,
2758                                const SmallVectorImpl<ISD::OutputArg> &Outs,
2759                                const SmallVectorImpl<SDValue> &OutVals,
2760                                const SDLoc &dl, SelectionDAG &DAG) const {
2761   // CCValAssign - represent the assignment of the return value to a location.
2762   SmallVector<CCValAssign, 16> RVLocs;
2763 
2764   // CCState - Info about the registers and stack slots.
2765   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2766                  *DAG.getContext());
2767 
2768   // Analyze outgoing return values.
2769   CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2770 
2771   SDValue Flag;
2772   SmallVector<SDValue, 4> RetOps;
2773   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2774   bool isLittleEndian = Subtarget->isLittle();
2775 
2776   MachineFunction &MF = DAG.getMachineFunction();
2777   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2778   AFI->setReturnRegsCount(RVLocs.size());
2779 
2780   // Copy the result values into the output registers.
2781   for (unsigned i = 0, realRVLocIdx = 0;
2782        i != RVLocs.size();
2783        ++i, ++realRVLocIdx) {
2784     CCValAssign &VA = RVLocs[i];
2785     assert(VA.isRegLoc() && "Can only return in registers!");
2786 
2787     SDValue Arg = OutVals[realRVLocIdx];
2788     bool ReturnF16 = false;
2789 
2790     if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
2791       // Half-precision return values can be returned like this:
2792       //
2793       // t11: f16 = fadd ...
2794       // t12: i16 = bitcast t11
2795       // t13: i32 = zero_extend t12
2796       // t14: f32 = bitcast t13  <~~~~~~~ Arg
2797       //
2798       // to avoid code generation for bitcasts, we simply set Arg to the node
2799       // that produces the f16 value, t11 in this case.
2800       //
2801       if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
2802         SDValue ZE = Arg.getOperand(0);
2803         if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
2804           SDValue BC = ZE.getOperand(0);
2805           if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
2806             Arg = BC.getOperand(0);
2807             ReturnF16 = true;
2808           }
2809         }
2810       }
2811     }
2812 
2813     switch (VA.getLocInfo()) {
2814     default: llvm_unreachable("Unknown loc info!");
2815     case CCValAssign::Full: break;
2816     case CCValAssign::BCvt:
2817       if (!ReturnF16)
2818         Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2819       break;
2820     }
2821 
2822     if (VA.needsCustom()) {
2823       if (VA.getLocVT() == MVT::v2f64) {
2824         // Extract the first half and return it in two registers.
2825         SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2826                                    DAG.getConstant(0, dl, MVT::i32));
2827         SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2828                                        DAG.getVTList(MVT::i32, MVT::i32), Half);
2829 
2830         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2831                                  HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2832                                  Flag);
2833         Flag = Chain.getValue(1);
2834         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2835         VA = RVLocs[++i]; // skip ahead to next loc
2836         Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2837                                  HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2838                                  Flag);
2839         Flag = Chain.getValue(1);
2840         RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2841         VA = RVLocs[++i]; // skip ahead to next loc
2842 
2843         // Extract the 2nd half and fall through to handle it as an f64 value.
2844         Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2845                           DAG.getConstant(1, dl, MVT::i32));
2846       }
2847       // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
2848       // available.
2849       SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2850                                   DAG.getVTList(MVT::i32, MVT::i32), Arg);
2851       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2852                                fmrrd.getValue(isLittleEndian ? 0 : 1),
2853                                Flag);
2854       Flag = Chain.getValue(1);
2855       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2856       VA = RVLocs[++i]; // skip ahead to next loc
2857       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2858                                fmrrd.getValue(isLittleEndian ? 1 : 0),
2859                                Flag);
2860     } else
2861       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2862 
2863     // Guarantee that all emitted copies are glued together, so that nothing
2864     // can be scheduled between them.
2865     Flag = Chain.getValue(1);
2866     RetOps.push_back(DAG.getRegister(VA.getLocReg(),
2867                                      ReturnF16 ? MVT::f16 : VA.getLocVT()));
2868   }
2869   const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2870   const MCPhysReg *I =
2871       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2872   if (I) {
2873     for (; *I; ++I) {
2874       if (ARM::GPRRegClass.contains(*I))
2875         RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2876       else if (ARM::DPRRegClass.contains(*I))
2877         RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2878       else
2879         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2880     }
2881   }
2882 
2883   // Update chain and glue.
2884   RetOps[0] = Chain;
2885   if (Flag.getNode())
2886     RetOps.push_back(Flag);
2887 
2888   // CPUs which aren't M-class use a special sequence to return from
2889   // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2890   // though we use "subs pc, lr, #N").
2891   //
2892   // M-class CPUs actually use a normal return sequence with a special
2893   // (hardware-provided) value in LR, so the normal code path works.
2894   if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
2895       !Subtarget->isMClass()) {
2896     if (Subtarget->isThumb1Only())
2897       report_fatal_error("interrupt attribute is not supported in Thumb1");
2898     return LowerInterruptReturn(RetOps, dl, DAG);
2899   }
2900 
2901   return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2902 }
2903 
2904 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2905   if (N->getNumValues() != 1)
2906     return false;
2907   if (!N->hasNUsesOfValue(1, 0))
2908     return false;
2909 
2910   SDValue TCChain = Chain;
2911   SDNode *Copy = *N->use_begin();
2912   if (Copy->getOpcode() == ISD::CopyToReg) {
2913     // If the copy has a glue operand, we conservatively assume it isn't safe to
2914     // perform a tail call.
2915     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2916       return false;
2917     TCChain = Copy->getOperand(0);
2918   } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
2919     SDNode *VMov = Copy;
2920     // f64 returned in a pair of GPRs.
2921     SmallPtrSet<SDNode*, 2> Copies;
2922     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2923          UI != UE; ++UI) {
2924       if (UI->getOpcode() != ISD::CopyToReg)
2925         return false;
2926       Copies.insert(*UI);
2927     }
2928     if (Copies.size() > 2)
2929       return false;
2930 
2931     for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2932          UI != UE; ++UI) {
2933       SDValue UseChain = UI->getOperand(0);
2934       if (Copies.count(UseChain.getNode()))
2935         // Second CopyToReg
2936         Copy = *UI;
2937       else {
2938         // We are at the top of this chain.
2939         // If the copy has a glue operand, we conservatively assume it
2940         // isn't safe to perform a tail call.
2941         if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
2942           return false;
2943         // First CopyToReg
2944         TCChain = UseChain;
2945       }
2946     }
2947   } else if (Copy->getOpcode() == ISD::BITCAST) {
2948     // f32 returned in a single GPR.
2949     if (!Copy->hasOneUse())
2950       return false;
2951     Copy = *Copy->use_begin();
2952     if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
2953       return false;
2954     // If the copy has a glue operand, we conservatively assume it isn't safe to
2955     // perform a tail call.
2956     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2957       return false;
2958     TCChain = Copy->getOperand(0);
2959   } else {
2960     return false;
2961   }
2962 
2963   bool HasRet = false;
2964   for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2965        UI != UE; ++UI) {
2966     if (UI->getOpcode() != ARMISD::RET_FLAG &&
2967         UI->getOpcode() != ARMISD::INTRET_FLAG)
2968       return false;
2969     HasRet = true;
2970   }
2971 
2972   if (!HasRet)
2973     return false;
2974 
2975   Chain = TCChain;
2976   return true;
2977 }
2978 
2979 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2980   if (!Subtarget->supportsTailCall())
2981     return false;
2982 
2983   if (!CI->isTailCall())
2984     return false;
2985 
2986   return true;
2987 }
2988 
2989 // Trying to write a 64-bit value, so we need to split it into two 32-bit
2990 // values first, and pass the low and high parts through.
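// A hedged IR-level sketch of what reaches this lowering: a call such as
//   call void @llvm.write_register.i64(metadata !{!"sp"}, i64 %v)
// arrives as an ISD::WRITE_REGISTER node with an i64 value operand, which we
// rebuild below as the same node taking two i32 operands (low, high).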
2991 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2992   SDLoc DL(Op);
2993   SDValue WriteValue = Op->getOperand(2);
2994 
2995   // This function is only supposed to be called for i64 type argument.
2996   assert(WriteValue.getValueType() == MVT::i64
2997           && "LowerWRITE_REGISTER called for non-i64 type argument.");
2998 
2999   SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3000                            DAG.getConstant(0, DL, MVT::i32));
3001   SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3002                            DAG.getConstant(1, DL, MVT::i32));
3003   SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
3004   return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
3005 }
3006 
3007 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3008 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
3009 // one of the above-mentioned nodes. It has to be wrapped because otherwise
3010 // Select(N) returns N. So raw TargetGlobalAddress nodes, etc. can only be
3011 // used to form an addressing mode. These wrapped nodes will be selected
3012 // into MOVi.
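// As a rough illustration: lowering a global @g yields
//   (ARMISD::Wrapper (TargetGlobalAddress @g))
// and it is the Wrapper, not the bare target node, that instruction selection
// matches and turns into a materializing move or constant-pool load.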
3013 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
3014                                              SelectionDAG &DAG) const {
3015   EVT PtrVT = Op.getValueType();
3016   // FIXME there is no actual debug info here
3017   SDLoc dl(Op);
3018   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3019   SDValue Res;
3020 
3021   // When generating execute-only code, constant pools must be promoted to
3022   // the global data section. It's a bit ugly that we can't share them across
3023   // basic blocks, but this way we guarantee that execute-only behaves
3024   // correctly with position-independent addressing modes.
3025   if (Subtarget->genExecuteOnly()) {
3026     auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
3027     auto T = const_cast<Type*>(CP->getType());
3028     auto C = const_cast<Constant*>(CP->getConstVal());
3029     auto M = const_cast<Module*>(DAG.getMachineFunction().
3030                                  getFunction().getParent());
3031     auto GV = new GlobalVariable(
3032                     *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
3033                     Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
3034                     Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
3035                     Twine(AFI->createPICLabelUId())
3036                   );
3037     SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
3038                                             dl, PtrVT);
3039     return LowerGlobalAddress(GA, DAG);
3040   }
3041 
3042   if (CP->isMachineConstantPoolEntry())
3043     Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
3044                                     CP->getAlignment());
3045   else
3046     Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
3047                                     CP->getAlignment());
3048   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
3049 }
3050 
3051 unsigned ARMTargetLowering::getJumpTableEncoding() const {
3052   return MachineJumpTableInfo::EK_Inline;
3053 }
3054 
3055 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
3056                                              SelectionDAG &DAG) const {
3057   MachineFunction &MF = DAG.getMachineFunction();
3058   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3059   unsigned ARMPCLabelIndex = 0;
3060   SDLoc DL(Op);
3061   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3062   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
3063   SDValue CPAddr;
3064   bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
3065   if (!IsPositionIndependent) {
3066     CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
3067   } else {
3068     unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3069     ARMPCLabelIndex = AFI->createPICLabelUId();
3070     ARMConstantPoolValue *CPV =
3071       ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
3072                                       ARMCP::CPBlockAddress, PCAdj);
3073     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3074   }
3075   CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
3076   SDValue Result = DAG.getLoad(
3077       PtrVT, DL, DAG.getEntryNode(), CPAddr,
3078       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3079   if (!IsPositionIndependent)
3080     return Result;
3081   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
3082   return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
3083 }
3084 
3085 /// Convert a TLS address reference into the correct sequence of loads
3086 /// and calls to compute the variable's address for Darwin, and return an
3087 /// SDValue containing the final node.
3088 
3089 /// Darwin only has one TLS scheme which must be capable of dealing with the
3090 /// fully general situation, in the worst case. This means:
3091 ///     + "extern __thread" declaration.
3092 ///     + Defined in a possibly unknown dynamic library.
3093 ///
3094 /// The general system is that each __thread variable has a [3 x i32] descriptor
3095 /// which contains information used by the runtime to calculate the address. The
3096 /// only part of this the compiler needs to know about is the first word, which
3097 /// contains a function pointer that must be called with the address of the
3098 /// entire descriptor in "r0".
3099 ///
3100 /// Since this descriptor may be in a different unit, in general access must
3101 /// proceed along the usual ARM rules. A common sequence to produce is:
3102 ///
3103 ///     movw rT1, :lower16:_var$non_lazy_ptr
3104 ///     movt rT1, :upper16:_var$non_lazy_ptr
3105 ///     ldr r0, [rT1]
3106 ///     ldr rT2, [r0]
3107 ///     blx rT2
3108 ///     [...address now in r0...]
3109 SDValue
3110 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
3111                                                SelectionDAG &DAG) const {
3112   assert(Subtarget->isTargetDarwin() &&
3113          "This function expects a Darwin target");
3114   SDLoc DL(Op);
3115 
3116   // The first step is to get the address of the actual global symbol. This is
3117   // where the TLS descriptor lives.
3118   SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3119 
3120   // The first entry in the descriptor is a function pointer that we must call
3121   // to obtain the address of the variable.
3122   SDValue Chain = DAG.getEntryNode();
3123   SDValue FuncTLVGet = DAG.getLoad(
3124       MVT::i32, DL, Chain, DescAddr,
3125       MachinePointerInfo::getGOT(DAG.getMachineFunction()),
3126       /* Alignment = */ 4,
3127       MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
3128           MachineMemOperand::MOInvariant);
3129   Chain = FuncTLVGet.getValue(1);
3130 
3131   MachineFunction &F = DAG.getMachineFunction();
3132   MachineFrameInfo &MFI = F.getFrameInfo();
3133   MFI.setAdjustsStack(true);
3134 
3135   // TLS calls preserve all registers except those that absolutely must be
3136   // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
3137   // silly).
3138   auto TRI =
3139       getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
3140   auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
3141   const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
3142 
3143   // Finally, we can make the call. This is just a degenerate version of a
3144   // normal ARM call node: r0 takes the address of the descriptor, and the
3145   // call returns the address of the variable in this thread.
3146   Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
3147   Chain =
3148       DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
3149                   Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
3150                   DAG.getRegisterMask(Mask), Chain.getValue(1));
3151   return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
3152 }
3153 
3154 SDValue
3155 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
3156                                                 SelectionDAG &DAG) const {
3157   assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
3158 
3159   SDValue Chain = DAG.getEntryNode();
3160   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3161   SDLoc DL(Op);
3162 
3163   // Load the current TEB (thread environment block)
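  // The constants passed to the intrinsic below encode a CP15 read; decoded
  // (as a hedged sketch) they correspond roughly to:
  //   mrc p15, #0, rN, c13, c0, #2   ; read TPIDRURW, which holds the TEB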
3164   SDValue Ops[] = {Chain,
3165                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
3166                    DAG.getTargetConstant(15, DL, MVT::i32),
3167                    DAG.getTargetConstant(0, DL, MVT::i32),
3168                    DAG.getTargetConstant(13, DL, MVT::i32),
3169                    DAG.getTargetConstant(0, DL, MVT::i32),
3170                    DAG.getTargetConstant(2, DL, MVT::i32)};
3171   SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
3172                                    DAG.getVTList(MVT::i32, MVT::Other), Ops);
3173 
3174   SDValue TEB = CurrentTEB.getValue(0);
3175   Chain = CurrentTEB.getValue(1);
3176 
3177   // Load the ThreadLocalStoragePointer from the TEB
3178   // A pointer to the TLS array is located at offset 0x2c from the TEB.
3179   SDValue TLSArray =
3180       DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
3181   TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
3182 
3183   // The pointer to the thread's TLS data area is found at the offset
3184   // (TLS index * 4) into the TLS array.
3185 
3186   // Load the TLS index from the C runtime
3187   SDValue TLSIndex =
3188       DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
3189   TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
3190   TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
3191 
3192   SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
3193                               DAG.getConstant(2, DL, MVT::i32));
3194   SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
3195                             DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
3196                             MachinePointerInfo());
3197 
3198   // Get the offset of the start of the .tls section (section base)
3199   const auto *GA = cast<GlobalAddressSDNode>(Op);
3200   auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
3201   SDValue Offset = DAG.getLoad(
3202       PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
3203                                     DAG.getTargetConstantPool(CPV, PtrVT, 4)),
3204       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3205 
3206   return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
3207 }
3208 
3209 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
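// A hedged sketch of the sequence this builds (PIC, ELF): materialize a
// constant-pool entry holding the TLSGD relocation for the variable, add the
// PIC label's PC to form the argument address, then call __tls_get_addr with
// that address in r0 and read the variable's address back from r0.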
3210 SDValue
3211 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
3212                                                  SelectionDAG &DAG) const {
3213   SDLoc dl(GA);
3214   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3215   unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3216   MachineFunction &MF = DAG.getMachineFunction();
3217   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3218   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3219   ARMConstantPoolValue *CPV =
3220     ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3221                                     ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
3222   SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3223   Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
3224   Argument = DAG.getLoad(
3225       PtrVT, dl, DAG.getEntryNode(), Argument,
3226       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3227   SDValue Chain = Argument.getValue(1);
3228 
3229   SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3230   Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
3231 
3232   // call __tls_get_addr.
3233   ArgListTy Args;
3234   ArgListEntry Entry;
3235   Entry.Node = Argument;
3236   Entry.Ty = Type::getInt32Ty(*DAG.getContext());
3237   Args.push_back(Entry);
3238 
3239   // FIXME: is there useful debug info available here?
3240   TargetLowering::CallLoweringInfo CLI(DAG);
3241   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3242       CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
3243       DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
3244 
3245   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3246   return CallResult.first;
3247 }
3248 
3249 // Lower ISD::GlobalTLSAddress using the "initial exec" or
3250 // "local exec" model.
3251 SDValue
3252 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
3253                                         SelectionDAG &DAG,
3254                                         TLSModel::Model model) const {
3255   const GlobalValue *GV = GA->getGlobal();
3256   SDLoc dl(GA);
3257   SDValue Offset;
3258   SDValue Chain = DAG.getEntryNode();
3259   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3260   // Get the Thread Pointer
3261   SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3262 
3263   if (model == TLSModel::InitialExec) {
3264     MachineFunction &MF = DAG.getMachineFunction();
3265     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3266     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3267     // Initial exec model.
3268     unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3269     ARMConstantPoolValue *CPV =
3270       ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3271                                       ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
3272                                       true);
3273     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3274     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3275     Offset = DAG.getLoad(
3276         PtrVT, dl, Chain, Offset,
3277         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3278     Chain = Offset.getValue(1);
3279 
3280     SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3281     Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
3282 
3283     Offset = DAG.getLoad(
3284         PtrVT, dl, Chain, Offset,
3285         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3286   } else {
3287     // local exec model
3288     assert(model == TLSModel::LocalExec);
3289     ARMConstantPoolValue *CPV =
3290       ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
3291     Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3292     Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3293     Offset = DAG.getLoad(
3294         PtrVT, dl, Chain, Offset,
3295         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3296   }
3297 
3298   // The address of the thread local variable is the add of the thread
3299   // pointer with the offset of the variable.
3300   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
3301 }
3302 
3303 SDValue
3304 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3305   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3306   if (DAG.getTarget().useEmulatedTLS())
3307     return LowerToTLSEmulatedModel(GA, DAG);
3308 
3309   if (Subtarget->isTargetDarwin())
3310     return LowerGlobalTLSAddressDarwin(Op, DAG);
3311 
3312   if (Subtarget->isTargetWindows())
3313     return LowerGlobalTLSAddressWindows(Op, DAG);
3314 
3315   // TODO: implement the "local dynamic" model
3316   assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3317   TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3318 
3319   switch (model) {
3320     case TLSModel::GeneralDynamic:
3321     case TLSModel::LocalDynamic:
3322       return LowerToTLSGeneralDynamicModel(GA, DAG);
3323     case TLSModel::InitialExec:
3324     case TLSModel::LocalExec:
3325       return LowerToTLSExecModels(GA, DAG, model);
3326   }
3327   llvm_unreachable("bogus TLS model");
3328 }
3329 
3330 /// Return true if all users of V are within function F, looking through
3331 /// ConstantExprs.
3332 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3333   SmallVector<const User*,4> Worklist;
3334   for (auto *U : V->users())
3335     Worklist.push_back(U);
3336   while (!Worklist.empty()) {
3337     auto *U = Worklist.pop_back_val();
3338     if (isa<ConstantExpr>(U)) {
3339       for (auto *UU : U->users())
3340         Worklist.push_back(UU);
3341       continue;
3342     }
3343 
3344     auto *I = dyn_cast<Instruction>(U);
3345     if (!I || I->getParent()->getParent() != F)
3346       return false;
3347   }
3348   return true;
3349 }
3350 
3351 static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
3352                                      const GlobalValue *GV, SelectionDAG &DAG,
3353                                      EVT PtrVT, const SDLoc &dl) {
3354   // If we're creating a pool entry for a constant global with unnamed address,
3355   // and the global is small enough, we can emit it inline into the constant pool
3356   // to save ourselves an indirection.
3357   //
3358   // This is a win if the constant is only used in one function (so it doesn't
3359   // need to be duplicated) or duplicating the constant wouldn't increase code
3360   // size (implying the constant is no larger than 4 bytes).
3361   const Function &F = DAG.getMachineFunction().getFunction();
3362 
3363   // We rely on this decision to inline being idempotent and unrelated to the
3364   // use-site. We know that if we inline a variable at one use site, we'll
3365   // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3366   // doesn't know about this optimization, so bail out if it's enabled;
3367   // otherwise we could decide to inline here (and thus never emit the GV)
3368   // while fast-isel generated code still requires the GV.
3369   if (!EnableConstpoolPromotion ||
3370       DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3371       return SDValue();
3372 
3373   auto *GVar = dyn_cast<GlobalVariable>(GV);
3374   if (!GVar || !GVar->hasInitializer() ||
3375       !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3376       !GVar->hasLocalLinkage())
3377     return SDValue();
3378 
3379   // If we inline a value that contains relocations, we move the relocations
3380   // from .data to .text. This is not allowed in position-independent code.
3381   auto *Init = GVar->getInitializer();
3382   if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
3383       Init->needsRelocation())
3384     return SDValue();
3385 
3386   // The constant islands pass can only really deal with alignment requests
3387   // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3388   // any type with alignment requirements greater than 4 bytes. We can also
3389   // only promote constants that are multiples of 4 bytes in size, or that
3390   // are paddable to a multiple of 4. Currently we only try to pad constants
3391   // that are strings, for simplicity.
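  // For example (illustrative numbers only): a 6-byte string initializer gets
  // two trailing zero bytes of padding to form an 8-byte pool entry, whereas
  // a 6-byte non-string initializer is rejected outright.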
3392   auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3393   unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3394   unsigned Align = DAG.getDataLayout().getPreferredAlignment(GVar);
3395   unsigned RequiredPadding = 4 - (Size % 4);
3396   bool PaddingPossible =
3397     RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3398   if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
3399       Size == 0)
3400     return SDValue();
3401 
3402   unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3403   MachineFunction &MF = DAG.getMachineFunction();
3404   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3405 
3406   // We can't bloat the constant pool too much, else the ConstantIslands pass
3407   // may fail to converge. If we haven't promoted this global yet (it may have
3408   // multiple uses), and promoting it would increase the constant pool size (Sz
3409   // > 4), ensure we have space to do so up to MaxTotal.
3410   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3411     if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3412         ConstpoolPromotionMaxTotal)
3413       return SDValue();
3414 
3415   // This is only valid if all users are in a single function; we can't clone
3416   // the constant in general. The LLVM IR unnamed_addr allows merging
3417   // constants, but not cloning them.
3418   //
3419   // We could potentially allow cloning if we could prove all uses of the
3420   // constant in the current function don't care about the address, like
3421   // printf format strings. But that isn't implemented for now.
3422   if (!allUsersAreInFunction(GVar, &F))
3423     return SDValue();
3424 
3425   // We're going to inline this global. Pad it out if needed.
3426   if (RequiredPadding != 4) {
3427     StringRef S = CDAInit->getAsString();
3428 
3429     SmallVector<uint8_t,16> V(S.size());
3430     std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3431     while (RequiredPadding--)
3432       V.push_back(0);
3433     Init = ConstantDataArray::get(*DAG.getContext(), V);
3434   }
3435 
3436   auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3437   SDValue CPAddr =
3438     DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
3439   if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3440     AFI->markGlobalAsPromotedToConstantPool(GVar);
3441     AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3442                                       PaddedSize - 4);
3443   }
3444   ++NumConstpoolPromoted;
3445   return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3446 }
3447 
3448 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
3449   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3450     if (!(GV = GA->getBaseObject()))
3451       return false;
3452   if (const auto *V = dyn_cast<GlobalVariable>(GV))
3453     return V->isConstant();
3454   return isa<Function>(GV);
3455 }
3456 
3457 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3458                                               SelectionDAG &DAG) const {
3459   switch (Subtarget->getTargetTriple().getObjectFormat()) {
3460   default: llvm_unreachable("unknown object format");
3461   case Triple::COFF:
3462     return LowerGlobalAddressWindows(Op, DAG);
3463   case Triple::ELF:
3464     return LowerGlobalAddressELF(Op, DAG);
3465   case Triple::MachO:
3466     return LowerGlobalAddressDarwin(Op, DAG);
3467   }
3468 }
3469 
3470 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3471                                                  SelectionDAG &DAG) const {
3472   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3473   SDLoc dl(Op);
3474   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3475   const TargetMachine &TM = getTargetMachine();
3476   bool IsRO = isReadOnly(GV);
3477 
3478   // promoteToConstantPool only if not generating an execute-only (XO) text section
3479   if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3480     if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
3481       return V;
3482 
3483   if (isPositionIndependent()) {
3484     bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3485     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3486                                            UseGOT_PREL ? ARMII::MO_GOT : 0);
3487     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3488     if (UseGOT_PREL)
3489       Result =
3490           DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3491                       MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3492     return Result;
3493   } else if (Subtarget->isROPI() && IsRO) {
3494     // PC-relative.
3495     SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3496     SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3497     return Result;
3498   } else if (Subtarget->isRWPI() && !IsRO) {
3499     // SB-relative.
3500     SDValue RelAddr;
3501     if (Subtarget->useMovt()) {
3502       ++NumMovwMovt;
3503       SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3504       RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3505     } else { // use literal pool for address constant
3506       ARMConstantPoolValue *CPV =
3507         ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3508       SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3509       CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3510       RelAddr = DAG.getLoad(
3511           PtrVT, dl, DAG.getEntryNode(), CPAddr,
3512           MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3513     }
3514     SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3515     SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3516     return Result;
3517   }
3518 
3519   // If we have T2 ops, we can materialize the address directly via a
3520   // movt/movw pair. This is always cheaper than loading from a constant pool.
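       // For example (illustrative), materializing the address of a symbol
       // 'sym' becomes:
       //   movw r0, :lower16:sym
       //   movt r0, :upper16:sym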
3521   if (Subtarget->useMovt()) {
3522     ++NumMovwMovt;
3523     // FIXME: Once remat is capable of dealing with instructions with register
3524     // operands, expand this into two nodes.
3525     return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3526                        DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3527   } else {
3528     SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
3529     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3530     return DAG.getLoad(
3531         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3532         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3533   }
3534 }
3535 
3536 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3537                                                     SelectionDAG &DAG) const {
3538   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3539          "ROPI/RWPI not currently supported for Darwin");
3540   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3541   SDLoc dl(Op);
3542   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3543 
3544   if (Subtarget->useMovt())
3545     ++NumMovwMovt;
3546 
3547   // FIXME: Once remat is capable of dealing with instructions with register
3548   // operands, expand this into multiple nodes
3549   unsigned Wrapper =
3550       isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3551 
3552   SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3553   SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3554 
3555   if (Subtarget->isGVIndirectSymbol(GV))
3556     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3557                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3558   return Result;
3559 }
3560 
3561 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3562                                                      SelectionDAG &DAG) const {
3563   assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3564   assert(Subtarget->useMovt() &&
3565          "Windows on ARM expects to use movw/movt");
3566   assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3567          "ROPI/RWPI not currently supported for Windows");
3568 
3569   const TargetMachine &TM = getTargetMachine();
3570   const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3571   ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
3572   if (GV->hasDLLImportStorageClass())
3573     TargetFlags = ARMII::MO_DLLIMPORT;
3574   else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
3575     TargetFlags = ARMII::MO_COFFSTUB;
3576   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3577   SDValue Result;
3578   SDLoc DL(Op);
3579 
3580   ++NumMovwMovt;
3581 
3582   // FIXME: Once remat is capable of dealing with instructions with register
3583   // operands, expand this into two nodes.
3584   Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3585                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
3586                                                   TargetFlags));
3587   if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
3588     Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3589                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3590   return Result;
3591 }
3592 
3593 SDValue
3594 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3595   SDLoc dl(Op);
3596   SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3597   return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3598                      DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3599                      Op.getOperand(1), Val);
3600 }
3601 
3602 SDValue
3603 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3604   SDLoc dl(Op);
3605   return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3606                      Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3607 }
3608 
3609 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3610                                                       SelectionDAG &DAG) const {
3611   SDLoc dl(Op);
3612   return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3613                      Op.getOperand(0));
3614 }
3615 
3616 SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
3617     SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
3618   unsigned IntNo =
3619       cast<ConstantSDNode>(
3620           Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
3621           ->getZExtValue();
3622   switch (IntNo) {
3623     default:
3624       return SDValue();  // Don't custom lower most intrinsics.
3625     case Intrinsic::arm_gnu_eabi_mcount: {
3626       MachineFunction &MF = DAG.getMachineFunction();
3627       EVT PtrVT = getPointerTy(DAG.getDataLayout());
3628       SDLoc dl(Op);
3629       SDValue Chain = Op.getOperand(0);
3630       // call "\01__gnu_mcount_nc"
3631       const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
3632       const uint32_t *Mask =
3633           ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
3634       assert(Mask && "Missing call preserved mask for calling convention");
3635       // Mark LR as an implicit live-in.
3636       unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3637       SDValue ReturnAddress =
3638           DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
3639       std::vector<EVT> ResultTys = {MVT::Other, MVT::Glue};
3640       SDValue Callee =
3641           DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
3642       SDValue RegisterMask = DAG.getRegisterMask(Mask);
3643       if (Subtarget->isThumb())
3644         return SDValue(
3645             DAG.getMachineNode(
3646                 ARM::tBL_PUSHLR, dl, ResultTys,
3647                 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
3648                  DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
3649             0);
3650       return SDValue(
3651           DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
3652                              {ReturnAddress, Callee, RegisterMask, Chain}),
3653           0);
3654     }
3655   }
3656 }
3657 
3658 SDValue
3659 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3660                                           const ARMSubtarget *Subtarget) const {
3661   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3662   SDLoc dl(Op);
3663   switch (IntNo) {
3664   default: return SDValue();    // Don't custom lower most intrinsics.
3665   case Intrinsic::thread_pointer: {
3666     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3667     return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3668   }
3669   case Intrinsic::arm_cls: {
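         // This case open-codes CLS (count leading sign bits) as
         //   cls(x) = ctlz((((x >> 31) ^ x) << 1) | 1),
         // where the arithmetic shift smears the sign bit across the word and
         // the xor clears the leading copies of it. Illustrative check: x = 1
         // gives ctlz(3) = 30, and cls(1) is indeed 30.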
3670     const SDValue &Operand = Op.getOperand(1);
3671     const EVT VTy = Op.getValueType();
3672     SDValue SRA =
3673         DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
3674     SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
3675     SDValue SHL =
3676         DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
3677     SDValue OR =
3678         DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
3679     SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
3680     return Result;
3681   }
3682   case Intrinsic::arm_cls64: {
3683     // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
3684     //          else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
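         //
         // Worked example (illustrative): for x = 0x000000000000FFFF, cls(hi)
         // is 31 and hi == 0, so the result is 31 + clz(lo) = 31 + 16 = 47.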
3685     const SDValue &Operand = Op.getOperand(1);
3686     const EVT VTy = Op.getValueType();
3687 
3688     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3689                              DAG.getConstant(1, dl, VTy));
3690     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3691                              DAG.getConstant(0, dl, VTy));
3692     SDValue Constant0 = DAG.getConstant(0, dl, VTy);
3693     SDValue Constant1 = DAG.getConstant(1, dl, VTy);
3694     SDValue Constant31 = DAG.getConstant(31, dl, VTy);
3695     SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
3696     SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
3697     SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
3698     SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
3699     SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
3700     SDValue CheckLo =
3701         DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
3702     SDValue HiIsZero =
3703         DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
3704     SDValue AdjustedLo =
3705         DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
3706     SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
3707     SDValue Result =
3708         DAG.getSelect(dl, VTy, CheckLo,
3709                       DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi);
3710     return Result;
3711   }
3712   case Intrinsic::eh_sjlj_lsda: {
3713     MachineFunction &MF = DAG.getMachineFunction();
3714     ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3715     unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3716     EVT PtrVT = getPointerTy(DAG.getDataLayout());
3717     SDValue CPAddr;
3718     bool IsPositionIndependent = isPositionIndependent();
3719     unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3720     ARMConstantPoolValue *CPV =
3721       ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
3722                                       ARMCP::CPLSDA, PCAdj);
3723     CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3724     CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3725     SDValue Result = DAG.getLoad(
3726         PtrVT, dl, DAG.getEntryNode(), CPAddr,
3727         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3728 
3729     if (IsPositionIndependent) {
3730       SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3731       Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3732     }
3733     return Result;
3734   }
3735   case Intrinsic::arm_neon_vabs:
3736     return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
3737                         Op.getOperand(1));
3738   case Intrinsic::arm_neon_vmulls:
3739   case Intrinsic::arm_neon_vmullu: {
3740     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3741       ? ARMISD::VMULLs : ARMISD::VMULLu;
3742     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3743                        Op.getOperand(1), Op.getOperand(2));
3744   }
3745   case Intrinsic::arm_neon_vminnm:
3746   case Intrinsic::arm_neon_vmaxnm: {
3747     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3748       ? ISD::FMINNUM : ISD::FMAXNUM;
3749     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3750                        Op.getOperand(1), Op.getOperand(2));
3751   }
3752   case Intrinsic::arm_neon_vminu:
3753   case Intrinsic::arm_neon_vmaxu: {
3754     if (Op.getValueType().isFloatingPoint())
3755       return SDValue();
3756     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3757       ? ISD::UMIN : ISD::UMAX;
3758     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3759                          Op.getOperand(1), Op.getOperand(2));
3760   }
3761   case Intrinsic::arm_neon_vmins:
3762   case Intrinsic::arm_neon_vmaxs: {
3763     // v{min,max}s is overloaded between signed integers and floats.
3764     if (!Op.getValueType().isFloatingPoint()) {
3765       unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3766         ? ISD::SMIN : ISD::SMAX;
3767       return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3768                          Op.getOperand(1), Op.getOperand(2));
3769     }
3770     unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3771       ? ISD::FMINIMUM : ISD::FMAXIMUM;
3772     return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3773                        Op.getOperand(1), Op.getOperand(2));
3774   }
3775   case Intrinsic::arm_neon_vtbl1:
3776     return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
3777                        Op.getOperand(1), Op.getOperand(2));
3778   case Intrinsic::arm_neon_vtbl2:
3779     return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
3780                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3781   case Intrinsic::arm_mve_pred_i2v:
3782   case Intrinsic::arm_mve_pred_v2i:
3783     return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
3784                        Op.getOperand(1));
3785   }
3786 }
3787 
3788 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3789                                  const ARMSubtarget *Subtarget) {
3790   SDLoc dl(Op);
3791   ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
3792   auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
3793   if (SSID == SyncScope::SingleThread)
3794     return Op;
3795 
3796   if (!Subtarget->hasDataBarrier()) {
3797     // Some ARMv6 CPUs can support data barriers with an mcr instruction.
3798     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3799     // here.
3800     assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3801            "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3802     return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3803                        DAG.getConstant(0, dl, MVT::i32));
3804   }
3805 
3806   ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3807   AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3808   ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3809   if (Subtarget->isMClass()) {
3810     // Only a full system barrier exists in the M-class architectures.
3811     Domain = ARM_MB::SY;
3812   } else if (Subtarget->preferISHSTBarriers() &&
3813              Ord == AtomicOrdering::Release) {
3814     // Swift happens to implement ISHST barriers in a way that's compatible with
3815     // Release semantics but weaker than ISH so we'd be fools not to use
3816     // it. Beware: other processors probably don't!
3817     Domain = ARM_MB::ISHST;
3818   }
3819 
3820   return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3821                      DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3822                      DAG.getConstant(Domain, dl, MVT::i32));
3823 }
3824 
3825 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3826                              const ARMSubtarget *Subtarget) {
3827   // ARM pre-v5TE and Thumb1 do not have preload instructions.
3828   if (!(Subtarget->isThumb2() ||
3829         (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3830     // Just preserve the chain.
3831     return Op.getOperand(0);
3832 
3833   SDLoc dl(Op);
3834   unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3835   if (!isRead &&
3836       (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3837     // ARMv7 with MP extension has PLDW.
3838     return Op.getOperand(0);
3839 
3840   unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3841   if (Subtarget->isThumb()) {
3842     // Invert the bits.
3843     isRead = ~isRead & 1;
3844     isData = ~isData & 1;
3845   }
3846 
3847   return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3848                      Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3849                      DAG.getConstant(isData, dl, MVT::i32));
3850 }
3851 
3852 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3853   MachineFunction &MF = DAG.getMachineFunction();
3854   ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3855 
3856   // vastart just stores the address of the VarArgsFrameIndex slot into the
3857   // memory location argument.
3858   SDLoc dl(Op);
3859   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3860   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3861   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3862   return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3863                       MachinePointerInfo(SV));
3864 }
3865 
3866 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3867                                                 CCValAssign &NextVA,
3868                                                 SDValue &Root,
3869                                                 SelectionDAG &DAG,
3870                                                 const SDLoc &dl) const {
3871   MachineFunction &MF = DAG.getMachineFunction();
3872   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3873 
3874   const TargetRegisterClass *RC;
3875   if (AFI->isThumb1OnlyFunction())
3876     RC = &ARM::tGPRRegClass;
3877   else
3878     RC = &ARM::GPRRegClass;
3879 
3880   // Transform the arguments stored in physical registers into virtual ones.
3881   unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3882   SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3883 
3884   SDValue ArgValue2;
3885   if (NextVA.isMemLoc()) {
3886     MachineFrameInfo &MFI = MF.getFrameInfo();
3887     int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3888 
3889     // Create load node to retrieve arguments from the stack.
3890     SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3891     ArgValue2 = DAG.getLoad(
3892         MVT::i32, dl, Root, FIN,
3893         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3894   } else {
3895     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3896     ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3897   }
3898   if (!Subtarget->isLittle())
3899     std::swap (ArgValue, ArgValue2);
3900   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3901 }
3902 
3903 // The remaining GPRs hold either the beginning of variable-argument
3904 // data, or the beginning of an aggregate passed by value (usually
3905 // byval).  Either way, we allocate stack slots adjacent to the data
3906 // provided by our caller, and store the unallocated registers there.
3907 // If this is a variadic function, the va_list pointer will begin with
3908 // these values; otherwise, this reassembles a (byval) structure that
3909 // was split between registers and memory.
3910 // Returns the frame index that the registers were stored into.
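     // For example (illustrative): for a 16-byte byval argument whose first
     // eight bytes were assigned to R2 and R3, this allocates stack slots
     // directly below the caller-provided half and stores R2/R3 there, so the
     // object ends up contiguous in memory.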
3911 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
3912                                       const SDLoc &dl, SDValue &Chain,
3913                                       const Value *OrigArg,
3914                                       unsigned InRegsParamRecordIdx,
3915                                       int ArgOffset, unsigned ArgSize) const {
3916   // Currently, two use cases are possible:
3917   // Case #1. Non-vararg function, and we meet the first byval parameter.
3918   //          Set up the first unallocated register as the first byval
3919   //          register and consume all remaining registers
3920   //          (these two actions are performed by the HandleByVal method).
3921   //          Then, here, we initialize the stack frame with
3922   //          "store-reg" instructions.
3923   // Case #2. Vararg function that doesn't contain byval parameters.
3924   //          The same: consume all remaining unallocated registers and
3925   //          initialize the stack frame.
3926 
3927   MachineFunction &MF = DAG.getMachineFunction();
3928   MachineFrameInfo &MFI = MF.getFrameInfo();
3929   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3930   unsigned RBegin, REnd;
3931   if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3932     CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3933   } else {
3934     unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3935     RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3936     REnd = ARM::R4;
3937   }
3938 
3939   if (REnd != RBegin)
3940     ArgOffset = -4 * (ARM::R4 - RBegin);
3941 
3942   auto PtrVT = getPointerTy(DAG.getDataLayout());
3943   int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
3944   SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3945 
3946   SmallVector<SDValue, 4> MemOps;
3947   const TargetRegisterClass *RC =
3948       AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3949 
3950   for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3951     unsigned VReg = MF.addLiveIn(Reg, RC);
3952     SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3953     SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3954                                  MachinePointerInfo(OrigArg, 4 * i));
3955     MemOps.push_back(Store);
3956     FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3957   }
3958 
3959   if (!MemOps.empty())
3960     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3961   return FrameIndex;
3962 }
3963 
3964 // Set up the stack frame that the va_list pointer will start from.
3965 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3966                                              const SDLoc &dl, SDValue &Chain,
3967                                              unsigned ArgOffset,
3968                                              unsigned TotalArgRegsSaveSize,
3969                                              bool ForceMutable) const {
3970   MachineFunction &MF = DAG.getMachineFunction();
3971   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3972 
3973   // Try to store any remaining integer argument regs
3974   // to their spots on the stack so that they may be loaded by dereferencing
3975   // the result of va_next.
3976   // If there are no regs to store, just point the address past the last
3977   // argument passed via the stack.
3978   int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3979                                   CCInfo.getInRegsParamsCount(),
3980                                   CCInfo.getNextStackOffset(),
3981                                   std::max(4U, TotalArgRegsSaveSize));
3982   AFI->setVarArgsFrameIndex(FrameIndex);
3983 }
3984 
3985 SDValue ARMTargetLowering::LowerFormalArguments(
3986     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3987     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3988     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3989   MachineFunction &MF = DAG.getMachineFunction();
3990   MachineFrameInfo &MFI = MF.getFrameInfo();
3991 
3992   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3993 
3994   // Assign locations to all of the incoming arguments.
3995   SmallVector<CCValAssign, 16> ArgLocs;
3996   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3997                  *DAG.getContext());
3998   CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3999 
4000   SmallVector<SDValue, 16> ArgValues;
4001   SDValue ArgValue;
4002   Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
4003   unsigned CurArgIdx = 0;
4004 
4005   // Initially ArgRegsSaveSize is zero.
4006   // Then we increase this value each time we meet a byval parameter.
4007   // We also increase this value in the case of a varargs function.
4008   AFI->setArgRegsSaveSize(0);
4009 
4010   // Calculate the amount of stack space that we need to allocate to store
4011   // byval and variadic arguments that are passed in registers.
4012   // We need to know this before we allocate the first byval or variadic
4013   // argument, as they will be allocated a stack slot below the CFA (Canonical
4014   // Frame Address, the stack pointer at entry to the function).
4015   unsigned ArgRegBegin = ARM::R4;
4016   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4017     if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
4018       break;
4019 
4020     CCValAssign &VA = ArgLocs[i];
4021     unsigned Index = VA.getValNo();
4022     ISD::ArgFlagsTy Flags = Ins[Index].Flags;
4023     if (!Flags.isByVal())
4024       continue;
4025 
4026     assert(VA.isMemLoc() && "unexpected byval pointer in reg");
4027     unsigned RBegin, REnd;
4028     CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
4029     ArgRegBegin = std::min(ArgRegBegin, RBegin);
4030 
4031     CCInfo.nextInRegsParam();
4032   }
4033   CCInfo.rewindByValRegsInfo();
4034 
4035   int lastInsIndex = -1;
4036   if (isVarArg && MFI.hasVAStart()) {
4037     unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4038     if (RegIdx != array_lengthof(GPRArgRegs))
4039       ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
4040   }
4041 
4042   unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
4043   AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
4044   auto PtrVT = getPointerTy(DAG.getDataLayout());
4045 
4046   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4047     CCValAssign &VA = ArgLocs[i];
4048     if (Ins[VA.getValNo()].isOrigArg()) {
4049       std::advance(CurOrigArg,
4050                    Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
4051       CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
4052     }
4053     // Arguments stored in registers.
4054     if (VA.isRegLoc()) {
4055       EVT RegVT = VA.getLocVT();
4056 
4057       if (VA.needsCustom()) {
4058         // f64 and vector types are split up into multiple registers or
4059         // combinations of registers and stack slots.
4060         if (VA.getLocVT() == MVT::v2f64) {
4061           SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
4062                                                    Chain, DAG, dl);
4063           VA = ArgLocs[++i]; // skip ahead to next loc
4064           SDValue ArgValue2;
4065           if (VA.isMemLoc()) {
4066             int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
4067             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4068             ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
4069                                     MachinePointerInfo::getFixedStack(
4070                                         DAG.getMachineFunction(), FI));
4071           } else {
4072             ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
4073                                              Chain, DAG, dl);
4074           }
4075           ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
4076           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
4077                                  ArgValue, ArgValue1,
4078                                  DAG.getIntPtrConstant(0, dl));
4079           ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
4080                                  ArgValue, ArgValue2,
4081                                  DAG.getIntPtrConstant(1, dl));
4082         } else
4083           ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4084       } else {
4085         const TargetRegisterClass *RC;
4086 
4087 
4088         if (RegVT == MVT::f16)
4089           RC = &ARM::HPRRegClass;
4090         else if (RegVT == MVT::f32)
4091           RC = &ARM::SPRRegClass;
4092         else if (RegVT == MVT::f64 || RegVT == MVT::v4f16)
4093           RC = &ARM::DPRRegClass;
4094         else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16)
4095           RC = &ARM::QPRRegClass;
4096         else if (RegVT == MVT::i32)
4097           RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
4098                                            : &ARM::GPRRegClass;
4099         else
4100           llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
4101 
4102         // Transform the arguments in physical registers into virtual ones.
4103         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4104         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4105 
4106         // If this value is passed in r0 and has the returned attribute (e.g.
4107         // C++ 'structors), record this fact for later use.
4108         if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
4109           AFI->setPreservesR0();
4110         }
4111       }
4112 
4113       // If this is an 8 or 16-bit value, it is really passed promoted
4114       // to 32 bits.  Insert an assert[sz]ext to capture this, then
4115       // truncate to the right size.
4116       switch (VA.getLocInfo()) {
4117       default: llvm_unreachable("Unknown loc info!");
4118       case CCValAssign::Full: break;
4119       case CCValAssign::BCvt:
4120         ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
4121         break;
4122       case CCValAssign::SExt:
4123         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4124                                DAG.getValueType(VA.getValVT()));
4125         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4126         break;
4127       case CCValAssign::ZExt:
4128         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4129                                DAG.getValueType(VA.getValVT()));
4130         ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4131         break;
4132       }
4133 
4134       InVals.push_back(ArgValue);
4135     } else { // !VA.isRegLoc()
4136       // sanity check
4137       assert(VA.isMemLoc());
4138       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
4139 
4140       int index = VA.getValNo();
4141 
4142       // Some Ins[] entries become multiple ArgLoc[] entries.
4143       // Process them only once.
4144       if (index != lastInsIndex)
4145         {
4146           ISD::ArgFlagsTy Flags = Ins[index].Flags;
4147           // FIXME: For now, all byval parameter objects are marked mutable.
4148           // This can be changed with more analysis.
4149           // In case of tail call optimization, mark all arguments mutable,
4150           // since they could be overwritten by the lowering of arguments
4151           // for a tail call.
4152           if (Flags.isByVal()) {
4153             assert(Ins[index].isOrigArg() &&
4154                    "Byval arguments cannot be implicit");
4155             unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
4156 
4157             int FrameIndex = StoreByValRegs(
4158                 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
4159                 VA.getLocMemOffset(), Flags.getByValSize());
4160             InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
4161             CCInfo.nextInRegsParam();
4162           } else {
4163             unsigned FIOffset = VA.getLocMemOffset();
4164             int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
4165                                            FIOffset, true);
4166 
4167             // Create load nodes to retrieve arguments from the stack.
4168             SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4169             InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
4170                                          MachinePointerInfo::getFixedStack(
4171                                              DAG.getMachineFunction(), FI)));
4172           }
4173           lastInsIndex = index;
4174         }
4175     }
4176   }
4177 
4178   // varargs
4179   if (isVarArg && MFI.hasVAStart())
4180     VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
4181                          CCInfo.getNextStackOffset(),
4182                          TotalArgRegsSaveSize);
4183 
4184   AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
4185 
4186   return Chain;
4187 }
4188 
4189 /// isFloatingPointZero - Return true if this is +0.0.
4190 static bool isFloatingPointZero(SDValue Op) {
4191   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
4192     return CFP->getValueAPF().isPosZero();
4193   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
4194     // Maybe this has already been legalized into the constant pool?
4195     if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
4196       SDValue WrapperOp = Op.getOperand(1).getOperand(0);
4197       if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
4198         if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
4199           return CFP->getValueAPF().isPosZero();
4200     }
4201   } else if (Op->getOpcode() == ISD::BITCAST &&
4202              Op->getValueType(0) == MVT::f64) {
4203     // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
4204     // created by LowerConstantFP().
4205     SDValue BitcastOp = Op->getOperand(0);
4206     if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
4207         isNullConstant(BitcastOp->getOperand(0)))
4208       return true;
4209   }
4210   return false;
4211 }
4212 
4213 /// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
4214 /// the given operands.
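     /// For example (illustrative, ARM mode): "x < 0x101" uses 0x101, which is
     /// not an encodable immediate, so it is rewritten as "x <= 0x100", which
     /// is.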
4215 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
4216                                      SDValue &ARMcc, SelectionDAG &DAG,
4217                                      const SDLoc &dl) const {
4218   if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
4219     unsigned C = RHSC->getZExtValue();
4220     if (!isLegalICmpImmediate((int32_t)C)) {
4221       // Constant does not fit, try adjusting it by one.
4222       switch (CC) {
4223       default: break;
4224       case ISD::SETLT:
4225       case ISD::SETGE:
4226         if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
4227           CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
4228           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4229         }
4230         break;
4231       case ISD::SETULT:
4232       case ISD::SETUGE:
4233         if (C != 0 && isLegalICmpImmediate(C-1)) {
4234           CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
4235           RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4236         }
4237         break;
4238       case ISD::SETLE:
4239       case ISD::SETGT:
4240         if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
4241           CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
4242           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4243         }
4244         break;
4245       case ISD::SETULE:
4246       case ISD::SETUGT:
4247         if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
4248           CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
4249           RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4250         }
4251         break;
4252       }
4253     }
4254   } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
4255              (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
4256     // In ARM and Thumb-2, the compare instructions can shift their second
4257     // operand.
4258     CC = ISD::getSetCCSwappedOperands(CC);
4259     std::swap(LHS, RHS);
4260   }
4261 
4262   // Thumb1 has very limited immediate modes, so turning an "and" into a
4263   // shift can save multiple instructions.
4264   //
4265   // If we have (x & C1), and C1 is an appropriate mask, we can transform it
4266   // into "((x << n) >> n)".  But that isn't necessarily profitable on its
4267   // own. If it's the operand to an unsigned comparison with an immediate,
4268   // we can eliminate one of the shifts: we transform
4269   // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
4270   //
4271   // We avoid transforming cases which aren't profitable due to encoding
4272   // details:
4273   //
4274   // 1. C2 fits into the immediate field of a cmp, and the transformed version
4275   // would not; in that case, we're essentially trading one immediate load for
4276   // another.
4277   // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
4278   // 3. C2 is zero; we have other code for this special case.
4279   //
4280   // FIXME: Figure out profitability for Thumb2; we usually can't save an
4281   // instruction, since the AND is always one instruction anyway, but we could
4282   // use narrow instructions in some cases.
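       // For example (illustrative): "(x & 0x1fffffff) == 16" becomes
       // "(x << 3) == 128", trading the unencodable mask for a shift and a
       // cmp whose immediate fits in 8 bits.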
4283   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
4284       LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4285       LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
4286       !isSignedIntSetCC(CC)) {
4287     unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
4288     auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
4289     uint64_t RHSV = RHSC->getZExtValue();
4290     if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4291       unsigned ShiftBits = countLeadingZeros(Mask);
4292       if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
4293         SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
4294         LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
4295         RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
4296       }
4297     }
4298   }
4299 
4300   // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
4301   // single "lsls x, c+1".  The shift sets the "C" and "Z" flags the same
4302   // way a cmp would.
4303   // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
4304   // some tweaks to the heuristics for the previous and->shift transform.
4305   // FIXME: Optimize cases where the LHS isn't a shift.
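       // For example (illustrative): "(x << 2) > 0x80000000U" becomes
       // "lsls x, 3" followed by a HI-conditioned use, with no separate cmp.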
4306   if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
4307       isa<ConstantSDNode>(RHS) &&
4308       cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4309       CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4310       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
4311     unsigned ShiftAmt =
4312       cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
4313     SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
4314                                 DAG.getVTList(MVT::i32, MVT::i32),
4315                                 LHS.getOperand(0),
4316                                 DAG.getConstant(ShiftAmt, dl, MVT::i32));
4317     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
4318                                      Shift.getValue(1), SDValue());
4319     ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
4320     return Chain.getValue(1);
4321   }
4322 
4323   ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4324 
4325   // If the RHS is a constant zero then the V (overflow) flag will never be
4326   // set. This can allow us to simplify GE to PL or LT to MI, which can be
4327   // simpler for other passes (like the peephole optimiser) to deal with.
4328   if (isNullConstant(RHS)) {
4329     switch (CondCode) {
4330       default: break;
4331       case ARMCC::GE:
4332         CondCode = ARMCC::PL;
4333         break;
4334       case ARMCC::LT:
4335         CondCode = ARMCC::MI;
4336         break;
4337     }
4338   }
4339 
4340   ARMISD::NodeType CompareType;
4341   switch (CondCode) {
4342   default:
4343     CompareType = ARMISD::CMP;
4344     break;
4345   case ARMCC::EQ:
4346   case ARMCC::NE:
4347     // Uses only Z Flag
4348     CompareType = ARMISD::CMPZ;
4349     break;
4350   }
4351   ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4352   return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
4353 }
4354 
4355 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4356 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4357                                      SelectionDAG &DAG, const SDLoc &dl,
4358                                      bool Signaling) const {
4359   assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4360   SDValue Cmp;
4361   if (!isFloatingPointZero(RHS))
4362     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
4363                       dl, MVT::Glue, LHS, RHS);
4364   else
4365     Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
4366                       dl, MVT::Glue, LHS);
4367   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4368 }
4369 
4370 /// duplicateCmp - Glue values can have only one use, so this function
4371 /// duplicates a comparison node.
4372 SDValue
4373 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4374   unsigned Opc = Cmp.getOpcode();
4375   SDLoc DL(Cmp);
4376   if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4377     return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4378 
4379   assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4380   Cmp = Cmp.getOperand(0);
4381   Opc = Cmp.getOpcode();
4382   if (Opc == ARMISD::CMPFP)
4383     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4384   else {
4385     assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4386     Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
4387   }
4388   return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4389 }
4390 
4391 // This function returns three things: the arithmetic computation itself
4392 // (Value), a comparison (OverflowCmp), and a condition code (ARMcc).  The
4393 // comparison and the condition code define the case in which the arithmetic
4394 // computation *does not* overflow.
4395 std::pair<SDValue, SDValue>
4396 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
4397                                  SDValue &ARMcc) const {
4398   assert(Op.getValueType() == MVT::i32 &&  "Unsupported value type");
4399 
4400   SDValue Value, OverflowCmp;
4401   SDValue LHS = Op.getOperand(0);
4402   SDValue RHS = Op.getOperand(1);
4403   SDLoc dl(Op);
4404 
4405   // FIXME: We are currently always generating CMPs because we don't support
4406   // generating CMN through the backend. This is not as good as the natural
4407   // CMP case because it causes a register dependency and cannot be folded
4408   // later.
4409 
4410   switch (Op.getOpcode()) {
4411   default:
4412     llvm_unreachable("Unknown overflow instruction!");
4413   case ISD::SADDO:
4414     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4415     Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
4416     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
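         // Illustrative note: this CMP computes Value - LHS (which is exactly
         // RHS modulo 2^32), and its V flag is set precisely when the original
         // addition overflowed, so VC selects the no-overflow case.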
4417     break;
4418   case ISD::UADDO:
4419     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4420     // We use ADDC here to correspond to its use in LowerUnsignedALUO.
4421     // We do not use it in the USUBO case as Value may not be used.
4422     Value = DAG.getNode(ARMISD::ADDC, dl,
4423                         DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
4424                 .getValue(0);
4425     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4426     break;
4427   case ISD::SSUBO:
4428     ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4429     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4430     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4431     break;
4432   case ISD::USUBO:
4433     ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4434     Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4435     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4436     break;
4437   case ISD::UMULO:
4438     // We generate a UMUL_LOHI and then check if the high word is 0.
4439     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4440     Value = DAG.getNode(ISD::UMUL_LOHI, dl,
4441                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4442                         LHS, RHS);
4443     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4444                               DAG.getConstant(0, dl, MVT::i32));
4445     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4446     break;
4447   case ISD::SMULO:
4448     // We generate a SMUL_LOHI and then check if all the bits of the high word
4449     // are the same as the sign bit of the low word.
4450     ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4451     Value = DAG.getNode(ISD::SMUL_LOHI, dl,
4452                         DAG.getVTList(Op.getValueType(), Op.getValueType()),
4453                         LHS, RHS);
4454     OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4455                               DAG.getNode(ISD::SRA, dl, Op.getValueType(),
4456                                           Value.getValue(0),
4457                                           DAG.getConstant(31, dl, MVT::i32)));
4458     Value = Value.getValue(0); // We only want the low 32 bits for the result.
4459     break;
4460   } // switch (...)
4461 
4462   return std::make_pair(Value, OverflowCmp);
4463 }
4464 
4465 SDValue
4466 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
4467   // Let legalize expand this if it isn't a legal type yet.
4468   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4469     return SDValue();
4470 
4471   SDValue Value, OverflowCmp;
4472   SDValue ARMcc;
4473   std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4474   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4475   SDLoc dl(Op);
4476   // We use 0 and 1 as false and true values.
4477   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
4478   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
4479   EVT VT = Op.getValueType();
4480 
4481   SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
4482                                  ARMcc, CCR, OverflowCmp);
4483 
4484   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
4485   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4486 }
4487 
4488 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
4489                                               SelectionDAG &DAG) {
4490   SDLoc DL(BoolCarry);
4491   EVT CarryVT = BoolCarry.getValueType();
4492 
4493   // This converts the boolean value carry into the carry flag by doing
4494   // ARMISD::SUBC Carry, 1
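       // (ARM subtraction sets C to NOT-borrow, so illustratively:
       //    BoolCarry == 1: 1 - 1 = 0 with no borrow -> C = 1
       //    BoolCarry == 0: 0 - 1 borrows            -> C = 0.)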
4495   SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
4496                               DAG.getVTList(CarryVT, MVT::i32),
4497                               BoolCarry, DAG.getConstant(1, DL, CarryVT));
4498   return Carry.getValue(1);
4499 }
4500 
4501 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
4502                                               SelectionDAG &DAG) {
4503   SDLoc DL(Flags);
4504 
4505   // Now convert the carry flag into a boolean carry. We do this
4506   // using ARMISD::ADDE 0, 0, Carry
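       // Illustratively, ADDE computes 0 + 0 + C, yielding 1 when the carry
       // flag is set and 0 when it is clear.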
4507   return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
4508                      DAG.getConstant(0, DL, MVT::i32),
4509                      DAG.getConstant(0, DL, MVT::i32), Flags);
4510 }
4511 
4512 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
4513                                              SelectionDAG &DAG) const {
4514   // Let legalize expand this if it isn't a legal type yet.
4515   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4516     return SDValue();
4517 
4518   SDValue LHS = Op.getOperand(0);
4519   SDValue RHS = Op.getOperand(1);
4520   SDLoc dl(Op);
4521 
4522   EVT VT = Op.getValueType();
4523   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4524   SDValue Value;
4525   SDValue Overflow;
4526   switch (Op.getOpcode()) {
4527   default:
4528     llvm_unreachable("Unknown overflow instruction!");
4529   case ISD::UADDO:
4530     Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
4531     // Convert the carry flag into a boolean value.
4532     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4533     break;
4534   case ISD::USUBO: {
4535     Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
4536     // Convert the carry flag into a boolean value.
4537     Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4538     // ARMISD::SUBC returns 0 when we have to borrow, so turn it into an
4539     // overflow value by computing 1 - C.
4540     Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
4541                            DAG.getConstant(1, dl, MVT::i32), Overflow);
4542     break;
4543   }
4544   }
4545 
4546   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4547 }
4548 
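     // Lower i8/i16 saturating add/sub to the DSP QADD8b/QSUB8b/QADD16b/
     // QSUB16b nodes. For example (illustrative), "saddsat i8 100, 100"
     // saturates to 127.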
4549 static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
4550                                const ARMSubtarget *Subtarget) {
4551   EVT VT = Op.getValueType();
4552   if (!Subtarget->hasDSP())
4553     return SDValue();
4554   if (!VT.isSimple())
4555     return SDValue();
4556 
4557   unsigned NewOpcode;
4558   bool IsAdd = Op->getOpcode() == ISD::SADDSAT;
4559   switch (VT.getSimpleVT().SimpleTy) {
4560   default:
4561     return SDValue();
4562   case MVT::i8:
4563     NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b;
4564     break;
4565   case MVT::i16:
4566     NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b;
4567     break;
4568   }
4569 
4570   SDLoc dl(Op);
4571   SDValue Add =
4572       DAG.getNode(NewOpcode, dl, MVT::i32,
4573                   DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
4574                   DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
4575   return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
4576 }
4577 
4578 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
4579   SDValue Cond = Op.getOperand(0);
4580   SDValue SelectTrue = Op.getOperand(1);
4581   SDValue SelectFalse = Op.getOperand(2);
4582   SDLoc dl(Op);
4583   unsigned Opc = Cond.getOpcode();
4584 
4585   if (Cond.getResNo() == 1 &&
4586       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
4587        Opc == ISD::USUBO)) {
4588     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
4589       return SDValue();
4590 
4591     SDValue Value, OverflowCmp;
4592     SDValue ARMcc;
4593     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4594     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4595     EVT VT = Op.getValueType();
4596 
4597     return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4598                    OverflowCmp, DAG);
4599   }
4600 
4601   // Convert:
4602   //
4603   //   (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4604   //   (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4605   //
4606   if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
4607     const ConstantSDNode *CMOVTrue =
4608       dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4609     const ConstantSDNode *CMOVFalse =
4610       dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4611 
4612     if (CMOVTrue && CMOVFalse) {
4613       unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
4614       unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4615 
4616       SDValue True;
4617       SDValue False;
4618       if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4619         True = SelectTrue;
4620         False = SelectFalse;
4621       } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4622         True = SelectFalse;
4623         False = SelectTrue;
4624       }
4625 
4626       if (True.getNode() && False.getNode()) {
4627         EVT VT = Op.getValueType();
4628         SDValue ARMcc = Cond.getOperand(2);
4629         SDValue CCR = Cond.getOperand(3);
4630         SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4631         assert(True.getValueType() == VT);
4632         return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4633       }
4634     }
4635   }
4636 
4637   // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4638   // undefined bits before doing a full-word comparison with zero.
4639   Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4640                      DAG.getConstant(1, dl, Cond.getValueType()));
4641 
4642   return DAG.getSelectCC(dl, Cond,
4643                          DAG.getConstant(0, dl, Cond.getValueType()),
4644                          SelectTrue, SelectFalse, ISD::SETNE);
4645 }
4646 
4647 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4648                                  bool &swpCmpOps, bool &swpVselOps) {
4649   // Start by selecting the GE condition code for opcodes that return true for
4650   // 'equality'
4651   if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4652       CC == ISD::SETULE || CC == ISD::SETGE  || CC == ISD::SETLE)
4653     CondCode = ARMCC::GE;
4654 
4655   // and GT for opcodes that return false for 'equality'.
4656   else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4657            CC == ISD::SETULT || CC == ISD::SETGT  || CC == ISD::SETLT)
4658     CondCode = ARMCC::GT;
4659 
4660   // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4661   // to swap the compare operands.
4662   if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4663       CC == ISD::SETULT || CC == ISD::SETLE  || CC == ISD::SETLT)
4664     swpCmpOps = true;
4665 
4666   // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4667   // If we have an unordered opcode, we need to swap the operands to the VSEL
4668   // instruction (effectively negating the condition).
4669   //
4670   // This also has the effect of swapping which one of 'less' or 'greater'
4671   // returns true, so we also swap the compare operands. It also switches
4672   // whether we return true for 'equality', so we compensate by picking the
4673   // opposite condition code to our original choice.
4674   if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4675       CC == ISD::SETUGT) {
4676     swpCmpOps = !swpCmpOps;
4677     swpVselOps = !swpVselOps;
4678     CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4679   }
4680 
4681   // 'ordered' is 'anything but unordered', so use the VS condition code and
4682   // swap the VSEL operands.
4683   if (CC == ISD::SETO) {
4684     CondCode = ARMCC::VS;
4685     swpVselOps = true;
4686   }
4687 
4688   // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4689   // code and swap the VSEL operands. Also do this if we don't care about the
4690   // unordered case.
4691   if (CC == ISD::SETUNE || CC == ISD::SETNE) {
4692     CondCode = ARMCC::EQ;
4693     swpVselOps = true;
4694   }
4695 }
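
// A worked trace of the constraint rules above (illustrative sketch, register
// names are placeholders): lowering 'select (setolt a, b), x, y' for f32 on
// an FPARMv8 target.
//
//   CC = SETOLT            -> 'false for equality' group, so CondCode = GT
//   SETOLT contains 'less' -> swpCmpOps = true (compare b, a instead of a, b)
//   SETOLT is ordered      -> no unordered fix-up, swpVselOps stays false
//
// The resulting sequence is roughly:
//
//   vcmp.f32   s1, s0             ; compare (b, a) -- operands swapped
//   vmrs       APSR_nzcv, fpscr
//   vselgt.f32 s2, sX, sY         ; GT on (b, a) fires exactly when a < b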
4696 
4697 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4698                                    SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4699                                    SDValue Cmp, SelectionDAG &DAG) const {
4700   if (!Subtarget->hasFP64() && VT == MVT::f64) {
4701     FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4702                            DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4703     TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4704                           DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4705 
4706     SDValue TrueLow = TrueVal.getValue(0);
4707     SDValue TrueHigh = TrueVal.getValue(1);
4708     SDValue FalseLow = FalseVal.getValue(0);
4709     SDValue FalseHigh = FalseVal.getValue(1);
4710 
4711     SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4712                               ARMcc, CCR, Cmp);
4713     SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4714                                ARMcc, CCR, duplicateCmp(Cmp, DAG));
4715 
4716     return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4717   } else {
4718     return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4719                        Cmp);
4720   }
4721 }
4722 
4723 static bool isGTorGE(ISD::CondCode CC) {
4724   return CC == ISD::SETGT || CC == ISD::SETGE;
4725 }
4726 
4727 static bool isLTorLE(ISD::CondCode CC) {
4728   return CC == ISD::SETLT || CC == ISD::SETLE;
4729 }
4730 
4731 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4732 // All of these conditions (and their <= and >= counterparts) will do:
4733 //          x < k ? k : x
4734 //          x > k ? x : k
4735 //          k < x ? x : k
4736 //          k > x ? k : x
4737 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4738                             const SDValue TrueVal, const SDValue FalseVal,
4739                             const ISD::CondCode CC, const SDValue K) {
4740   return (isGTorGE(CC) &&
4741           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4742          (isLTorLE(CC) &&
4743           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4744 }
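
// E.g. (illustrative) 'x > k ? x : k' arrives with LHS = x, RHS = k,
// TrueVal = x, FalseVal = k: CC is in the GT/GE group and K matches both RHS
// and FalseVal, so the first clause fires and the select is recognized as
// max(x, k), a lower-bound saturation at k.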
4745 
4746 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4747 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4748                             const SDValue TrueVal, const SDValue FalseVal,
4749                             const ISD::CondCode CC, const SDValue K) {
4750   return (isGTorGE(CC) &&
4751           ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4752          (isLTorLE(CC) &&
4753           ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4754 }
4755 
4756 // Check if two chained conditionals could be converted into SSAT or USAT.
4757 //
4758 // SSAT can replace a set of two conditional selectors that bound a number to an
4759 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
4760 //
4761 //     x < -k ? -k : (x > k ? k : x)
4762 //     x < -k ? -k : (x < k ? x : k)
4763 //     x > -k ? (x > k ? k : x) : -k
4764 //     x < k ? (x < -k ? -k : x) : k
4765 //     etc.
4766 //
4767 // USAT works similarly to SSAT, but bounds to the interval [0, k], where k + 1 is
4768 // a power of 2.
4769 //
4770 // It returns true if the conversion can be done, false otherwise.
4771 // Additionally, the variable is returned in parameter V, the constant in K,
4772 // and usat is set to true if the conditional represents an unsigned saturation.
4773 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
4774                                     uint64_t &K, bool &usat) {
4775   SDValue LHS1 = Op.getOperand(0);
4776   SDValue RHS1 = Op.getOperand(1);
4777   SDValue TrueVal1 = Op.getOperand(2);
4778   SDValue FalseVal1 = Op.getOperand(3);
4779   ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4780 
4781   const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4782   if (Op2.getOpcode() != ISD::SELECT_CC)
4783     return false;
4784 
4785   SDValue LHS2 = Op2.getOperand(0);
4786   SDValue RHS2 = Op2.getOperand(1);
4787   SDValue TrueVal2 = Op2.getOperand(2);
4788   SDValue FalseVal2 = Op2.getOperand(3);
4789   ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
4790 
4791   // Find out which are the constants and which are the variables
4792   // in each conditional
4793   SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4794                                                         ? &RHS1
4795                                                         : nullptr;
4796   SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4797                                                         ? &RHS2
4798                                                         : nullptr;
4799   SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4800   SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4801   SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4802   SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4803 
4804   // We must detect cases where the original operations worked with 16- or
4805   // 8-bit values. In such a case, V2Tmp != V2 because the comparison operations
4806   // must work with sign-extended values but the select operations return
4807   // the original non-extended value.
4808   SDValue V2TmpReg = V2Tmp;
4809   if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
4810     V2TmpReg = V2Tmp->getOperand(0);
4811 
4812   // Check that the registers and the constants have the correct values
4813   // in both conditionals
4814   if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4815       V2TmpReg != V2)
4816     return false;
4817 
4818   // Figure out which conditional is saturating the lower/upper bound.
4819   const SDValue *LowerCheckOp =
4820       isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4821           ? &Op
4822           : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4823                 ? &Op2
4824                 : nullptr;
4825   const SDValue *UpperCheckOp =
4826       isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4827           ? &Op
4828           : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
4829                 ? &Op2
4830                 : nullptr;
4831 
4832   if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4833     return false;
4834 
4835   // Check that the constant in the lower-bound check is
4836   // the opposite of the constant in the upper-bound check
4837   // in 1's complement.
4838   int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4839   int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4840   int64_t PosVal = std::max(Val1, Val2);
4841   int64_t NegVal = std::min(Val1, Val2);
4842 
4843   if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4844        (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4845       isPowerOf2_64(PosVal + 1)) {
4846 
4847     // Handle the difference between USAT (unsigned) and SSAT (signed) saturation
4848     if (Val1 == ~Val2)
4849       usat = false;
4850     else if (NegVal == 0)
4851       usat = true;
4852     else
4853       return false;
4854 
4855     V = V2;
4856     K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
4857 
4858     return true;
4859   }
4860 
4861   return false;
4862 }
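
// A concrete instance of the pattern this matches (illustrative sketch):
// clamping a 32-bit value to a signed 8-bit range,
//
//   %lo  = select (x < -128), -128, x      ; lower-bound check
//   %res = select (%lo > 127), 127, %lo    ; upper-bound check
//
// Here Val1 = -128 and Val2 = 127 satisfy Val1 == ~Val2, and PosVal + 1 == 128
// is a power of two, so usat = false and the pair can lower to
// 'ssat r0, #8, r1' (saturating to [-2^7, 2^7 - 1]). With bounds [0, 255]
// instead, NegVal == 0, so usat = true and the pair can lower to
// 'usat r0, #8, r1' (saturating to [0, 2^8 - 1]).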
4863 
4864 // Check if a condition of the type x < k ? k : x can be converted into a
4865 // bit operation instead of conditional moves.
4866 // Currently this is allowed given:
4867 // - The conditions and values match up
4868 // - k is 0 or -1 (all ones)
4869 // This function will not check the last condition; that's up to the caller.
4870 // It returns true if the transformation can be made, and in such case
4871 // returns x in V, and k in SatK.
4872 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
4873                                          SDValue &SatK)
4874 {
4875   SDValue LHS = Op.getOperand(0);
4876   SDValue RHS = Op.getOperand(1);
4877   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4878   SDValue TrueVal = Op.getOperand(2);
4879   SDValue FalseVal = Op.getOperand(3);
4880 
4881   SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
4882                                                ? &RHS
4883                                                : nullptr;
4884 
4885   // No constant operand in the comparison, early out.
4886   if (!K)
4887     return false;
4888 
4889   SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
4890   V = (KTmp == TrueVal) ? FalseVal : TrueVal;
4891   SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
4892 
4893   // If the constant in the comparison does not match the constant in the
4894   // select, or likewise for the variable, early out.
4895   if (*K != KTmp || V != VTmp)
4896     return false;
4897 
4898   if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
4899     SatK = *K;
4900     return true;
4901   }
4902 
4903   return false;
4904 }
4905 
4906 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
4907   if (VT == MVT::f32)
4908     return !Subtarget->hasVFP2Base();
4909   if (VT == MVT::f64)
4910     return !Subtarget->hasFP64();
4911   if (VT == MVT::f16)
4912     return !Subtarget->hasFullFP16();
4913   return false;
4914 }
4915 
4916 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4917   EVT VT = Op.getValueType();
4918   SDLoc dl(Op);
4919 
4920   // Try to convert two saturating conditional selects into a single SSAT or USAT.
4921   SDValue SatValue;
4922   uint64_t SatConstant;
4923   bool SatUSat;
4924   if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
4925       isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
4926     if (SatUSat)
4927       return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
4928                          DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4929     else
4930       return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
4931                          DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4932   }
4933 
4934   // Try to convert expressions of the form x < k ? k : x (and similar forms)
4935   // into more efficient bit operations, which is possible when k is 0 or -1.
4936   // On ARM and Thumb-2, which have a flexible second operand, this results in
4937   // a single instruction. On Thumb-1 the shift and the bit operation take two
4938   // instructions.
4939   // Only allow this transformation on full-width (32-bit) operations.
4940   SDValue LowerSatConstant;
4941   if (VT == MVT::i32 &&
4942       isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
4943     SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
4944                                  DAG.getConstant(31, dl, VT));
4945     if (isNullConstant(LowerSatConstant)) {
4946       SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
4947                                       DAG.getAllOnesConstant(dl, VT));
4948       return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
4949     } else if (isAllOnesConstant(LowerSatConstant))
4950       return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
4951   }
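
  // For instance (illustrative): with k == 0 the select is max(x, 0), and the
  // code above emits x & ~(x >> 31) -- the arithmetic shift replicates the
  // sign bit, so the mask is all-ones for non-negative x and zero otherwise.
  // On ARM this folds into a single 'bic r0, r0, r0, asr #31'. With k == -1
  // the select is max(x, -1), emitted as x | (x >> 31), i.e.
  // 'orr r0, r0, r0, asr #31'.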
4952 
4953   SDValue LHS = Op.getOperand(0);
4954   SDValue RHS = Op.getOperand(1);
4955   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4956   SDValue TrueVal = Op.getOperand(2);
4957   SDValue FalseVal = Op.getOperand(3);
4958   ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
4959   ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);
4960 
4961   if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
4962       LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
4963     unsigned TVal = CTVal->getZExtValue();
4964     unsigned FVal = CFVal->getZExtValue();
4965     unsigned Opcode = 0;
4966 
4967     if (TVal == ~FVal) {
4968       Opcode = ARMISD::CSINV;
4969     } else if (TVal == ~FVal + 1) {
4970       Opcode = ARMISD::CSNEG;
4971     } else if (TVal + 1 == FVal) {
4972       Opcode = ARMISD::CSINC;
4973     } else if (TVal == FVal + 1) {
4974       Opcode = ARMISD::CSINC;
4975       std::swap(TrueVal, FalseVal);
4976       std::swap(TVal, FVal);
4977       CC = ISD::getSetCCInverse(CC, LHS.getValueType());
4978     }
4979 
4980     if (Opcode) {
4981       // If one of the constants is cheaper than another, materialise the
4982       // cheaper one and let the csel generate the other.
4983       if (Opcode != ARMISD::CSINC &&
4984           HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
4985         std::swap(TrueVal, FalseVal);
4986         std::swap(TVal, FVal);
4987         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
4988       }
4989 
4990       // Attempt to use ZR checking TVal is 0, possibly inverting the condition
4991       // to get there. CSINC is not invertible like the other two (~(~a) == a,
4992       // -(-a) == a, but (a+1)+1 != a).
4993       if (FVal == 0 && Opcode != ARMISD::CSINC) {
4994         std::swap(TrueVal, FalseVal);
4995         std::swap(TVal, FVal);
4996         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
4997       }
4998       if (TVal == 0)
4999         TrueVal = DAG.getRegister(ARM::ZR, MVT::i32);
5000 
5001       // Drops F's value because we can get it by inverting/negating TVal.
5002       FalseVal = TrueVal;
5003 
5004       SDValue ARMcc;
5005       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5006       EVT VT = TrueVal.getValueType();
5007       return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
5008     }
5009   }
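
  // As an illustration of the opcode choice above (a sketch, register names
  // are placeholders): on v8.1-M, 'select (a == b), x, x + 1' has
  // TVal + 1 == FVal, so Opcode = CSINC and FalseVal is replaced by TrueVal,
  // giving roughly
  //
  //   cmp   ra, rb
  //   csinc rd, rx, rx, eq    ; rd = (a == b) ? x : x + 1
  //
  // Likewise TVal == ~FVal selects CSINV and TVal == -FVal selects CSNEG.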
5010 
5011   if (isUnsupportedFloatingType(LHS.getValueType())) {
5012     DAG.getTargetLoweringInfo().softenSetCCOperands(
5013         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5014 
5015     // If softenSetCCOperands only returned one value, we should compare it to
5016     // zero.
5017     if (!RHS.getNode()) {
5018       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5019       CC = ISD::SETNE;
5020     }
5021   }
5022 
5023   if (LHS.getValueType() == MVT::i32) {
5024     // Try to generate VSEL on ARMv8.
5025     // The VSEL instruction can't use all the usual ARM condition
5026     // codes: it only has two bits to select the condition code, so it's
5027     // constrained to use only GE, GT, VS and EQ.
5028     //
5029     // To implement all the various ISD::SETXXX opcodes, we sometimes need to
5030     // swap the operands of the previous compare instruction (effectively
5031     // inverting the compare condition, swapping 'less' and 'greater') and
5032     // sometimes need to swap the operands to the VSEL (which inverts the
5033     // condition in the sense of firing whenever the previous condition didn't)
5034     if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
5035                                         TrueVal.getValueType() == MVT::f32 ||
5036                                         TrueVal.getValueType() == MVT::f64)) {
5037       ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5038       if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
5039           CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
5040         CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5041         std::swap(TrueVal, FalseVal);
5042       }
5043     }
5044 
5045     SDValue ARMcc;
5046     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5047     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5048     // Choose GE over PL, which vsel does not support.
5049     if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
5050       ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
5051     return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5052   }
5053 
5054   ARMCC::CondCodes CondCode, CondCode2;
5055   FPCCToARMCC(CC, CondCode, CondCode2);
5056 
5057   // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
5058   // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
5059   // must use VSEL (limited condition codes), due to not having conditional f16
5060   // moves.
5061   if (Subtarget->hasFPARMv8Base() &&
5062       !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
5063       (TrueVal.getValueType() == MVT::f16 ||
5064        TrueVal.getValueType() == MVT::f32 ||
5065        TrueVal.getValueType() == MVT::f64)) {
5066     bool swpCmpOps = false;
5067     bool swpVselOps = false;
5068     checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
5069 
5070     if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
5071         CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
5072       if (swpCmpOps)
5073         std::swap(LHS, RHS);
5074       if (swpVselOps)
5075         std::swap(TrueVal, FalseVal);
5076     }
5077   }
5078 
5079   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5080   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5081   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5082   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5083   if (CondCode2 != ARMCC::AL) {
5084     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
5085     // FIXME: Needs another CMP because flag can have but one use.
5086     SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5087     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
5088   }
5089   return Result;
5090 }
5091 
5092 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
5093 /// to morph to an integer compare sequence.
5094 static bool canChangeToInt(SDValue Op, bool &SeenZero,
5095                            const ARMSubtarget *Subtarget) {
5096   SDNode *N = Op.getNode();
5097   if (!N->hasOneUse())
5098     // Otherwise it requires moving the value from fp to integer registers.
5099     return false;
5100   if (!N->getNumValues())
5101     return false;
5102   EVT VT = Op.getValueType();
5103   if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
5104     // f32 case is generally profitable. f64 case only makes sense when vcmpe +
5105     // vmrs are very slow, e.g. cortex-a8.
5106     return false;
5107 
5108   if (isFloatingPointZero(Op)) {
5109     SeenZero = true;
5110     return true;
5111   }
5112   return ISD::isNormalLoad(N);
5113 }
5114 
5115 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
5116   if (isFloatingPointZero(Op))
5117     return DAG.getConstant(0, SDLoc(Op), MVT::i32);
5118 
5119   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
5120     return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
5121                        Ld->getPointerInfo(), Ld->getAlignment(),
5122                        Ld->getMemOperand()->getFlags());
5123 
5124   llvm_unreachable("Unknown VFP cmp argument!");
5125 }
5126 
5127 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
5128                            SDValue &RetVal1, SDValue &RetVal2) {
5129   SDLoc dl(Op);
5130 
5131   if (isFloatingPointZero(Op)) {
5132     RetVal1 = DAG.getConstant(0, dl, MVT::i32);
5133     RetVal2 = DAG.getConstant(0, dl, MVT::i32);
5134     return;
5135   }
5136 
5137   if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
5138     SDValue Ptr = Ld->getBasePtr();
5139     RetVal1 =
5140         DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
5141                     Ld->getAlignment(), Ld->getMemOperand()->getFlags());
5142 
5143     EVT PtrType = Ptr.getValueType();
5144     unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
5145     SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
5146                                  PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
5147     RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
5148                           Ld->getPointerInfo().getWithOffset(4), NewAlign,
5149                           Ld->getMemOperand()->getFlags());
5150     return;
5151   }
5152 
5153   llvm_unreachable("Unknown VFP cmp argument!");
5154 }
5155 
5156 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
5157 /// f32 and even f64 comparisons to integer ones.
5158 SDValue
5159 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
5160   SDValue Chain = Op.getOperand(0);
5161   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5162   SDValue LHS = Op.getOperand(2);
5163   SDValue RHS = Op.getOperand(3);
5164   SDValue Dest = Op.getOperand(4);
5165   SDLoc dl(Op);
5166 
5167   bool LHSSeenZero = false;
5168   bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
5169   bool RHSSeenZero = false;
5170   bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
5171   if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
5172     // If unsafe fp math optimization is enabled and there are no other uses of
5173     // the CMP operands, and the condition code is EQ or NE, we can optimize it
5174     // to an integer comparison.
5175     if (CC == ISD::SETOEQ)
5176       CC = ISD::SETEQ;
5177     else if (CC == ISD::SETUNE)
5178       CC = ISD::SETNE;
5179 
5180     SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5181     SDValue ARMcc;
5182     if (LHS.getValueType() == MVT::f32) {
5183       LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5184                         bitcastf32Toi32(LHS, DAG), Mask);
5185       RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5186                         bitcastf32Toi32(RHS, DAG), Mask);
5187       SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5188       SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5189       return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5190                          Chain, Dest, ARMcc, CCR, Cmp);
5191     }
5192 
5193     SDValue LHS1, LHS2;
5194     SDValue RHS1, RHS2;
5195     expandf64Toi32(LHS, DAG, LHS1, LHS2);
5196     expandf64Toi32(RHS, DAG, RHS1, RHS2);
5197     LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
5198     RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
5199     ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5200     ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5201     SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5202     SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
5203     return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
5204   }
5205 
5206   return SDValue();
5207 }
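
// A bit-level sketch of why the 0x7fffffff masking above is sound for the
// EQ/NE-against-zero case: bits(+0.0f) = 0x00000000 and bits(-0.0f) =
// 0x80000000, so after masking both zeroes become integer 0, while any
// nonzero magnitude keeps a nonzero bit pattern. E.g.
//
//   bits(-0.0f) & 0x7fffffff = 0x00000000   ; equals +0.0 -> branch taken
//   bits(1.0f)  & 0x7fffffff = 0x3f800000   ; nonzero     -> branch not taken
//
// matching the fp semantics of EQ/NE against zero under unsafe-fp-math.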
5208 
5209 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
5210   SDValue Chain = Op.getOperand(0);
5211   SDValue Cond = Op.getOperand(1);
5212   SDValue Dest = Op.getOperand(2);
5213   SDLoc dl(Op);
5214 
5215   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5216   // instruction.
5217   unsigned Opc = Cond.getOpcode();
5218   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5219                       !Subtarget->isThumb1Only();
5220   if (Cond.getResNo() == 1 &&
5221       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5222        Opc == ISD::USUBO || OptimizeMul)) {
5223     // Only lower legal XALUO ops.
5224     if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
5225       return SDValue();
5226 
5227     // The actual operation with overflow check.
5228     SDValue Value, OverflowCmp;
5229     SDValue ARMcc;
5230     std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5231 
5232     // Reverse the condition code.
5233     ARMCC::CondCodes CondCode =
5234         (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5235     CondCode = ARMCC::getOppositeCondition(CondCode);
5236     ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5237     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5238 
5239     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5240                        OverflowCmp);
5241   }
5242 
5243   return SDValue();
5244 }
5245 
5246 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
5247   SDValue Chain = Op.getOperand(0);
5248   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5249   SDValue LHS = Op.getOperand(2);
5250   SDValue RHS = Op.getOperand(3);
5251   SDValue Dest = Op.getOperand(4);
5252   SDLoc dl(Op);
5253 
5254   if (isUnsupportedFloatingType(LHS.getValueType())) {
5255     DAG.getTargetLoweringInfo().softenSetCCOperands(
5256         DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5257 
5258     // If softenSetCCOperands only returned one value, we should compare it to
5259     // zero.
5260     if (!RHS.getNode()) {
5261       RHS = DAG.getConstant(0, dl, LHS.getValueType());
5262       CC = ISD::SETNE;
5263     }
5264   }
5265 
5266   // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5267   // instruction.
5268   unsigned Opc = LHS.getOpcode();
5269   bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5270                       !Subtarget->isThumb1Only();
5271   if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
5272       (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5273        Opc == ISD::USUBO || OptimizeMul) &&
5274       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
5275     // Only lower legal XALUO ops.
5276     if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
5277       return SDValue();
5278 
5279     // The actual operation with overflow check.
5280     SDValue Value, OverflowCmp;
5281     SDValue ARMcc;
5282     std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);
5283 
5284     if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
5285       // Reverse the condition code.
5286       ARMCC::CondCodes CondCode =
5287           (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5288       CondCode = ARMCC::getOppositeCondition(CondCode);
5289       ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5290     }
5291     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5292 
5293     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5294                        OverflowCmp);
5295   }
5296 
5297   if (LHS.getValueType() == MVT::i32) {
5298     SDValue ARMcc;
5299     SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5300     SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5301     return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5302                        Chain, Dest, ARMcc, CCR, Cmp);
5303   }
5304 
5305   if (getTargetMachine().Options.UnsafeFPMath &&
5306       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
5307        CC == ISD::SETNE || CC == ISD::SETUNE)) {
5308     if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
5309       return Result;
5310   }
5311 
5312   ARMCC::CondCodes CondCode, CondCode2;
5313   FPCCToARMCC(CC, CondCode, CondCode2);
5314 
5315   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5316   SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5317   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5318   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5319   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
5320   SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5321   if (CondCode2 != ARMCC::AL) {
5322     ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
5323     SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
5324     Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5325   }
5326   return Res;
5327 }
5328 
5329 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
5330   SDValue Chain = Op.getOperand(0);
5331   SDValue Table = Op.getOperand(1);
5332   SDValue Index = Op.getOperand(2);
5333   SDLoc dl(Op);
5334 
5335   EVT PTy = getPointerTy(DAG.getDataLayout());
5336   JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
5337   SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
5338   Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
5339   Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
5340   SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
5341   if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
5342     // Thumb2 and ARMv8-M use a two-level jump. That is, the branch jumps into
5343     // the jump table, which then jumps to the destination. This also makes it
5344     // easier to translate to TBB / TBH later (Thumb2 only).
5345     // FIXME: This might not work if the function is extremely large.
5346     return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
5347                        Addr, Op.getOperand(2), JTI);
5348   }
5349   if (isPositionIndependent() || Subtarget->isROPI()) {
5350     Addr =
5351         DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
5352                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5353     Chain = Addr.getValue(1);
5354     Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
5355     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5356   } else {
5357     Addr =
5358         DAG.getLoad(PTy, dl, Chain, Addr,
5359                     MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5360     Chain = Addr.getValue(1);
5361     return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5362   }
5363 }
5364 
5365 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
5366   EVT VT = Op.getValueType();
5367   SDLoc dl(Op);
5368 
5369   if (Op.getValueType().getVectorElementType() == MVT::i32) {
5370     if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
5371       return Op;
5372     return DAG.UnrollVectorOp(Op.getNode());
5373   }
5374 
5375   const bool HasFullFP16 =
5376     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5377 
5378   EVT NewTy;
5379   const EVT OpTy = Op.getOperand(0).getValueType();
5380   if (OpTy == MVT::v4f32)
5381     NewTy = MVT::v4i32;
5382   else if (OpTy == MVT::v4f16 && HasFullFP16)
5383     NewTy = MVT::v4i16;
5384   else if (OpTy == MVT::v8f16 && HasFullFP16)
5385     NewTy = MVT::v8i16;
5386   else
5387     llvm_unreachable("Invalid type for custom lowering!");
5388 
5389   if (VT != MVT::v4i16 && VT != MVT::v8i16)
5390     return DAG.UnrollVectorOp(Op.getNode());
5391 
5392   Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5393   return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
5394 }
5395 
5396 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
5397   EVT VT = Op.getValueType();
5398   if (VT.isVector())
5399     return LowerVectorFP_TO_INT(Op, DAG);
5400 
5401   bool IsStrict = Op->isStrictFPOpcode();
5402   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
5403 
5404   if (isUnsupportedFloatingType(SrcVal.getValueType())) {
5405     RTLIB::Libcall LC;
5406     if (Op.getOpcode() == ISD::FP_TO_SINT ||
5407         Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
5408       LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
5409                               Op.getValueType());
5410     else
5411       LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
5412                               Op.getValueType());
5413     SDLoc Loc(Op);
5414     MakeLibCallOptions CallOptions;
5415     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
5416     SDValue Result;
5417     std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
5418                                           CallOptions, Loc, Chain);
5419     return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
5420   }
5421 
5422   // FIXME: Remove this when we have strict fp instruction selection patterns
5423   if (IsStrict) {
5424     SDLoc Loc(Op);
5425     SDValue Result =
5426         DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
5427                                                              : ISD::FP_TO_UINT,
5428                     Loc, Op.getValueType(), SrcVal);
5429     return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
5430   }
5431 
5432   return Op;
5433 }
5434 
5435 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
5436   EVT VT = Op.getValueType();
5437   SDLoc dl(Op);
5438 
5439   if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
5440     if (VT.getVectorElementType() == MVT::f32)
5441       return Op;
5442     return DAG.UnrollVectorOp(Op.getNode());
5443   }
5444 
5445   assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
5446           Op.getOperand(0).getValueType() == MVT::v8i16) &&
5447          "Invalid type for custom lowering!");
5448 
5449   const bool HasFullFP16 =
5450     static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5451 
5452   EVT DestVecType;
5453   if (VT == MVT::v4f32)
5454     DestVecType = MVT::v4i32;
5455   else if (VT == MVT::v4f16 && HasFullFP16)
5456     DestVecType = MVT::v4i16;
5457   else if (VT == MVT::v8f16 && HasFullFP16)
5458     DestVecType = MVT::v8i16;
5459   else
5460     return DAG.UnrollVectorOp(Op.getNode());
5461 
5462   unsigned CastOpc;
5463   unsigned Opc;
5464   switch (Op.getOpcode()) {
5465   default: llvm_unreachable("Invalid opcode!");
5466   case ISD::SINT_TO_FP:
5467     CastOpc = ISD::SIGN_EXTEND;
5468     Opc = ISD::SINT_TO_FP;
5469     break;
5470   case ISD::UINT_TO_FP:
5471     CastOpc = ISD::ZERO_EXTEND;
5472     Opc = ISD::UINT_TO_FP;
5473     break;
5474   }
5475 
5476   Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5477   return DAG.getNode(Opc, dl, VT, Op);
5478 }
5479 
5480 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
5481   EVT VT = Op.getValueType();
5482   if (VT.isVector())
5483     return LowerVectorINT_TO_FP(Op, DAG);
5484   if (isUnsupportedFloatingType(VT)) {
5485     RTLIB::Libcall LC;
5486     if (Op.getOpcode() == ISD::SINT_TO_FP)
5487       LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
5488                               Op.getValueType());
5489     else
5490       LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
5491                               Op.getValueType());
5492     MakeLibCallOptions CallOptions;
5493     return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
5494                        CallOptions, SDLoc(Op)).first;
5495   }
5496 
5497   return Op;
5498 }
5499 
5500 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
5501   // Implement fcopysign with a fabs and a conditional fneg.
5502   SDValue Tmp0 = Op.getOperand(0);
5503   SDValue Tmp1 = Op.getOperand(1);
5504   SDLoc dl(Op);
5505   EVT VT = Op.getValueType();
5506   EVT SrcVT = Tmp1.getValueType();
5507   bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
5508     Tmp0.getOpcode() == ARMISD::VMOVDRR;
5509   bool UseNEON = !InGPR && Subtarget->hasNEON();
5510 
5511   if (UseNEON) {
5512     // Use VBSL to copy the sign bit.
5513     unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
5514     SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
5515                                DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
5516     EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
5517     if (VT == MVT::f64)
5518       Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5519                          DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
5520                          DAG.getConstant(32, dl, MVT::i32));
5521     else /*if (VT == MVT::f32)*/
5522       Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
5523     if (SrcVT == MVT::f32) {
5524       Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
5525       if (VT == MVT::f64)
5526         Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5527                            DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
5528                            DAG.getConstant(32, dl, MVT::i32));
5529     } else if (VT == MVT::f32)
5530       Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
5531                          DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
5532                          DAG.getConstant(32, dl, MVT::i32));
5533     Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
5534     Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
5535 
5536     SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
5537                                             dl, MVT::i32);
5538     AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
5539     SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
5540                                   DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
5541 
5542     SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
5543                               DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
5544                               DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
5545     if (VT == MVT::f32) {
5546       Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
5547       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
5548                         DAG.getConstant(0, dl, MVT::i32));
5549     } else {
5550       Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
5551     }
5552 
5553     return Res;
5554   }
5555 
5556   // Bitcast operand 1 to i32.
5557   if (SrcVT == MVT::f64)
5558     Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5559                        Tmp1).getValue(1);
5560   Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
5561 
5562   // Or in the signbit with integer operations.
5563   SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
5564   SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5565   Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
5566   if (VT == MVT::f32) {
5567     Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
5568                        DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
5569     return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5570                        DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
5571   }
5572 
5573   // f64: Or the high part with signbit and then combine two parts.
5574   Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5575                      Tmp0);
5576   SDValue Lo = Tmp0.getValue(0);
5577   SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
5578   Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
5579   return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
5580 }
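
// A worked instance of the integer path above (illustrative): for f32,
// copysign(1.0f, -2.0f) computes
//
//   bits(-2.0f) & 0x80000000 = 0xc0000000 & 0x80000000 = 0x80000000  ; sign
//   bits(1.0f)  & 0x7fffffff = 0x3f800000 & 0x7fffffff = 0x3f800000  ; magnitude
//   0x80000000 | 0x3f800000  = 0xbf800000                            ; == -1.0f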
5581 
5582 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
5583   MachineFunction &MF = DAG.getMachineFunction();
5584   MachineFrameInfo &MFI = MF.getFrameInfo();
5585   MFI.setReturnAddressIsTaken(true);
5586 
5587   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
5588     return SDValue();
5589 
5590   EVT VT = Op.getValueType();
5591   SDLoc dl(Op);
5592   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5593   if (Depth) {
5594     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5595     SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
5596     return DAG.getLoad(VT, dl, DAG.getEntryNode(),
5597                        DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
5598                        MachinePointerInfo());
5599   }
5600 
5601   // Return LR, which contains the return address. Mark it an implicit live-in.
5602   unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
5603   return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
5604 }
5605 
5606 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
5607   const ARMBaseRegisterInfo &ARI =
5608     *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
5609   MachineFunction &MF = DAG.getMachineFunction();
5610   MachineFrameInfo &MFI = MF.getFrameInfo();
5611   MFI.setFrameAddressIsTaken(true);
5612 
5613   EVT VT = Op.getValueType();
5614   SDLoc dl(Op);  // FIXME probably not meaningful
5615   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5616   Register FrameReg = ARI.getFrameRegister(MF);
5617   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
5618   while (Depth--)
5619     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
5620                             MachinePointerInfo());
5621   return FrameAddr;
5622 }
5623 
5624 // FIXME? Maybe this could be a TableGen attribute on some registers and
5625 // this table could be generated automatically from RegInfo.
5626 Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
5627                                               const MachineFunction &MF) const {
5628   Register Reg = StringSwitch<unsigned>(RegName)
5629                        .Case("sp", ARM::SP)
5630                        .Default(0);
5631   if (Reg)
5632     return Reg;
5633   report_fatal_error(Twine("Invalid register name \""
5634                               + StringRef(RegName)  + "\"."));
5635 }
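
// Only "sp" is recognized above; everything else is rejected. A minimal IR
// usage sketch (hypothetical module, shown only to illustrate how this hook
// is reached):
//
//   declare i32 @llvm.read_register.i32(metadata)
//   ...
//   %sp = call i32 @llvm.read_register.i32(metadata !0)
//   !0 = !{!"sp"}
//
// Any other register name reaches report_fatal_error at compile time.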
5636 
5637 // Result is 64 bit value so split into two 32 bit values and return as a
5638 // pair of values.
5639 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
5640                                 SelectionDAG &DAG) {
5641   SDLoc DL(N);
5642 
5643   // This function is only supposed to be called for i64 type destination.
5644   assert(N->getValueType(0) == MVT::i64
5645           && "ExpandREAD_REGISTER called for non-i64 type result.");
5646 
5647   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
5648                              DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
5649                              N->getOperand(0),
5650                              N->getOperand(1));
5651 
5652   Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
5653                     Read.getValue(1)));
5654   Results.push_back(Read.getOperand(0));
5655 }
5656 
5657 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
5658 /// When \p DstVT, the destination type of \p BC, is on the vector
5659 /// register bank and the source of the bitcast, \p Op, operates on the same bank,
5660 /// it might be possible to combine them, such that everything stays on the
5661 /// vector register bank.
5662 /// \returns The node that would replace \p BC, if the combine
5663 /// is possible.
5664 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
5665                                                 SelectionDAG &DAG) {
5666   SDValue Op = BC->getOperand(0);
5667   EVT DstVT = BC->getValueType(0);
5668 
5669   // The only vector instruction that can produce a scalar (remember,
5670   // since the bitcast was about to be turned into VMOVDRR, the source
5671   // type is i64) from a vector is EXTRACT_VECTOR_ELT.
5672   // Moreover, we can do this combine only if there is one use.
5673   // Finally, if the destination type is not a vector, there is not
5674   // much point in forcing everything on the vector bank.
5675   if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5676       !Op.hasOneUse())
5677     return SDValue();
5678 
5679   // If the index is not constant, we will introduce an additional
5680   // multiply that will stick.
5681   // Give up in that case.
5682   ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5683   if (!Index)
5684     return SDValue();
5685   unsigned DstNumElt = DstVT.getVectorNumElements();
5686 
5687   // Compute the new index.
5688   const APInt &APIntIndex = Index->getAPIntValue();
5689   APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
5690   NewIndex *= APIntIndex;
5691   // Check if the new constant index fits into i32.
5692   if (NewIndex.getBitWidth() > 32)
5693     return SDValue();
5694 
5695   // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
5696   // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
5697   SDLoc dl(Op);
5698   SDValue ExtractSrc = Op.getOperand(0);
5699   EVT VecVT = EVT::getVectorVT(
5700       *DAG.getContext(), DstVT.getScalarType(),
5701       ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
5702   SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
5703   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
5704                      DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
5705 }
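
// Index arithmetic example for the combine above (illustrative): bitcasting
// 'i64 extractelt <2 x i64> %src, 1' to v2f32 gives DstNumElt = 2, so
// NewIndex = 1 * 2 = 2 and VecVT = v4f32; the result is
// 'v2f32 extract_subvector (v4f32 bitcast %src), 2', which stays entirely on
// the vector register bank.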
5706 
5707 /// ExpandBITCAST - If the target supports VFP, this function is called to
5708 /// expand a bit convert where either the source or destination type is i64 to
5709 /// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
5710 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
5711 /// vectors), since the legalizer won't know what to do with that.
5712 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
5713                              const ARMSubtarget *Subtarget) {
5714   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5715   SDLoc dl(N);
5716   SDValue Op = N->getOperand(0);
5717 
5718   // This function is only supposed to be called for i64 types, either as the
5719   // source or destination of the bit convert.
5720   EVT SrcVT = Op.getValueType();
5721   EVT DstVT = N->getValueType(0);
5722   const bool HasFullFP16 = Subtarget->hasFullFP16();
5723 
5724   if (SrcVT == MVT::f32 && DstVT == MVT::i32) {
5725      // FullFP16: half values are passed in S-registers, and we don't
5726      // need any of the bitcast and moves:
5727      //
5728      // t2: f32,ch = CopyFromReg t0, Register:f32 %0
5729      //   t5: i32 = bitcast t2
5730      // t18: f16 = ARMISD::VMOVhr t5
5731      if (Op.getOpcode() != ISD::CopyFromReg ||
5732          Op.getValueType() != MVT::f32)
5733        return SDValue();
5734 
5735      auto Move = N->use_begin();
5736      if (Move->getOpcode() != ARMISD::VMOVhr)
5737        return SDValue();
5738 
5739      SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
5740      SDValue Copy = DAG.getNode(ISD::CopyFromReg, SDLoc(Op), MVT::f16, Ops);
5741      DAG.ReplaceAllUsesWith(*Move, &Copy);
5742      return Copy;
5743   }
5744 
5745   if (SrcVT == MVT::i16 && DstVT == MVT::f16) {
5746     if (!HasFullFP16)
5747       return SDValue();
5748     // SoftFP: read half-precision arguments:
5749     //
5750     // t2: i32,ch = ...
5751     //        t7: i16 = truncate t2 <~~~~ Op
5752     //      t8: f16 = bitcast t7    <~~~~ N
5753     //
5754     if (Op.getOperand(0).getValueType() == MVT::i32)
5755       return DAG.getNode(ARMISD::VMOVhr, SDLoc(Op),
5756                          MVT::f16, Op.getOperand(0));
5757 
5758     return SDValue();
5759   }
5760 
5761   // Half-precision return values
5762   if (SrcVT == MVT::f16 && DstVT == MVT::i16) {
5763     if (!HasFullFP16)
5764       return SDValue();
5765     //
5766     //          t11: f16 = fadd t8, t10
5767     //        t12: i16 = bitcast t11       <~~~ SDNode N
5768     //      t13: i32 = zero_extend t12
5769     //    t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13
5770     //  t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1
5771     //
5772     // transform this into:
5773     //
5774     //    t20: i32 = ARMISD::VMOVrh t11
5775     //  t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20
5776     //
5777     auto ZeroExtend = N->use_begin();
5778     if (N->use_size() != 1 || ZeroExtend->getOpcode() != ISD::ZERO_EXTEND ||
5779         ZeroExtend->getValueType(0) != MVT::i32)
5780       return SDValue();
5781 
5782     auto Copy = ZeroExtend->use_begin();
5783     if (Copy->getOpcode() == ISD::CopyToReg &&
5784         Copy->use_begin()->getOpcode() == ARMISD::RET_FLAG) {
5785       SDValue Cvt = DAG.getNode(ARMISD::VMOVrh, SDLoc(Op), MVT::i32, Op);
5786       DAG.ReplaceAllUsesWith(*ZeroExtend, &Cvt);
5787       return Cvt;
5788     }
5789     return SDValue();
5790   }
5791 
5792   if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
5793     return SDValue();
5794 
5795   // Turn i64->f64 into VMOVDRR.
5796   if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
5797     // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
5798     // if we can combine the bitcast with its source.
5799     if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
5800       return Val;
5801 
5802     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5803                              DAG.getConstant(0, dl, MVT::i32));
5804     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5805                              DAG.getConstant(1, dl, MVT::i32));
5806     return DAG.getNode(ISD::BITCAST, dl, DstVT,
5807                        DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
5808   }
5809 
5810   // Turn f64->i64 into VMOVRRD.
5811   if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
5812     SDValue Cvt;
5813     if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
5814         SrcVT.getVectorNumElements() > 1)
5815       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
5816                         DAG.getVTList(MVT::i32, MVT::i32),
5817                         DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
5818     else
5819       Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
5820                         DAG.getVTList(MVT::i32, MVT::i32), Op);
5821     // Merge the pieces into a single i64 value.
5822     return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
5823   }
5824 
5825   return SDValue();
5826 }
5827 
5828 /// getZeroVector - Returns a vector of specified type with all zero elements.
5829 /// Zero vectors are used to represent vector negation and in those cases
5830 /// will be implemented with the NEON VNEG instruction.  However, VNEG does
5831 /// not support i64 elements, so sometimes the zero vectors will need to be
5832 /// explicitly constructed.  Regardless, use a canonical VMOV to create the
5833 /// zero vector.
5834 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5835   assert(VT.isVector() && "Expected a vector type");
5836   // The canonical modified immediate encoding of a zero vector is....0!
5837   SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
5838   EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
5839   SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
5840   return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
5841 }
5842 
5843 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
5844 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
5845 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
5846                                                 SelectionDAG &DAG) const {
5847   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
5848   EVT VT = Op.getValueType();
5849   unsigned VTBits = VT.getSizeInBits();
5850   SDLoc dl(Op);
5851   SDValue ShOpLo = Op.getOperand(0);
5852   SDValue ShOpHi = Op.getOperand(1);
5853   SDValue ShAmt  = Op.getOperand(2);
5854   SDValue ARMcc;
5855   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5856   unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
5857 
5858   assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
5859 
5860   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
5861                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
5862   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
5863   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
5864                                    DAG.getConstant(VTBits, dl, MVT::i32));
5865   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
5866   SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
5867   SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
5868   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
5869                             ISD::SETGE, ARMcc, DAG, dl);
5870   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
5871                            ARMcc, CCR, CmpLo);
5872 
5873   SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
5874   SDValue HiBigShift = Opc == ISD::SRA
5875                            ? DAG.getNode(Opc, dl, VT, ShOpHi,
5876                                          DAG.getConstant(VTBits - 1, dl, VT))
5877                            : DAG.getConstant(0, dl, VT);
5878   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
5879                             ISD::SETGE, ARMcc, DAG, dl);
5880   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
5881                            ARMcc, CCR, CmpHi);
5882 
5883   SDValue Ops[2] = { Lo, Hi };
5884   return DAG.getMergeValues(Ops, dl);
5885 }
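
// A worked example of the decomposition above (illustrative): for a 64-bit
// logical right shift by 40 (>= 32), ExtraShAmt = 40 - 32 = 8 is
// non-negative, so the GE-predicated CMOVs pick the "big shift" operands:
//
//   Lo = Hi_in >> 8      ; LoBigShift
//   Hi = 0               ; HiBigShift for SRL (sign-fill for SRA)
//
// For a shift by 8 (< 32) they pick the "small shift" operands instead:
//
//   Lo = (Lo_in >> 8) | (Hi_in << 24)
//   Hi = Hi_in >> 8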
5886 
5887 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
5888 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
5889 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
5890                                                SelectionDAG &DAG) const {
5891   assert(Op.getNumOperands() == 3 && "Not a double-shift!");
5892   EVT VT = Op.getValueType();
5893   unsigned VTBits = VT.getSizeInBits();
5894   SDLoc dl(Op);
5895   SDValue ShOpLo = Op.getOperand(0);
5896   SDValue ShOpHi = Op.getOperand(1);
5897   SDValue ShAmt  = Op.getOperand(2);
5898   SDValue ARMcc;
5899   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5900 
5901   assert(Op.getOpcode() == ISD::SHL_PARTS);
5902   SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
5903                                  DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
5904   SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
5905   SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
5906   SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
5907 
5908   SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
5909                                    DAG.getConstant(VTBits, dl, MVT::i32));
5910   SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
5911   SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
5912                             ISD::SETGE, ARMcc, DAG, dl);
5913   SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
5914                            ARMcc, CCR, CmpHi);
5915 
5916   SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
5917                           ISD::SETGE, ARMcc, DAG, dl);
5918   SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
5919   SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
5920                            DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
5921 
5922   SDValue Ops[2] = { Lo, Hi };
5923   return DAG.getMergeValues(Ops, dl);
5924 }

SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
  // so that the shift and the AND get folded into a bitfield extract.
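  // e.g. an FPSCR rounding mode of 0b01 (round toward plus infinity) has 1 in
  // bits 23:22; adding 1 << 22 and extracting yields (1 + 1) & 3 = 2, the
  // FLT_ROUNDS value for round-toward-positive-infinity.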
  SDLoc dl(Op);
  SDValue Ops[] = { DAG.getEntryNode(),
                    DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };

  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                     DAG.getConstant(3, dl, MVT::i32));
}

static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector() && ST->hasNEON()) {

    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
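    // e.g. X = 0b0110100: -X = ...1001100, so X & -X = 0b0000100 isolates the
    // lowest set bit, and cttz(X) = ctpop(0b0000100 - 1) = ctpop(0b11) = 2.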

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
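      // e.g. for an i32 lane whose lowest set bit is at position 5,
      // ctlz(lsb) = 26 and (32 - 1) - 26 = 5 == cttz(x).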
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)

    // Compute LSB - 1.
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load the constant 0xffff'ffff'ffff'ffff to a register (0x1eff is the
      // encoded VMOV modified immediate: Op=1, Cmode=1110, Imm=0xff, i.e.
      // every byte set), then compute LSB - 1 as LSB + (-1).
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}

static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
  Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
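  // e.g. for VT = v4i32: take the popcount as v16i8, then vpaddlu
  // v16i8 -> v8i16 and vpaddlu v8i16 -> v4i32, folding the per-byte counts
  // of each 32-bit lane into that lane.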
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())));
    Ops.push_back(Res);

    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
  }

  return Res;
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN ||
      !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                            ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the value must be negative.  The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
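/// e.g. a right shift by 3 arriving via an intrinsic is encoded as a splat
/// of -3; on success Cnt is normalized to the positive 3.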
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
  if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
    Cnt = -Cnt;
    return true;
  }
  return false;
}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  int64_t Cnt;

  if (!VT.isVector())
    return SDValue();

  // We essentially have two forms here: shift by an immediate and shift by a
  // vector register (there is also shift by a GPR, but that is just handled
  // with a tablegen pattern). We cannot easily match shift by an immediate in
  // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
  // For shifting by a vector, we don't have VSHR, only VSHL (which can be
  // signed or unsigned, and a negative shift indicates a shift right).
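  // e.g. a variable srl becomes VSHLu(X, 0 - Amt), since a left shift by a
  // negative per-lane amount is how the hardware expresses a right shift.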
  if (N->getOpcode() == ISD::SHL) {
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
                       N->getOperand(1));
  }

  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "unexpected vector shift opcode");

  if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
    unsigned VShiftOpc =
        (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
    return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  // Other right shifts we don't have operations for (we use a shift left by a
  // negative number).
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(
      ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
  unsigned VShiftOpc =
      (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
  return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
}

static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SHL) &&
         "Unknown shift to lower!");

  unsigned ShOpc = N->getOpcode();
  if (ST->hasMVEIntegerOps()) {
    SDValue ShAmt = N->getOperand(1);
    unsigned ShPartsOpc = ARMISD::LSLL;
    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);

    // If the shift amount is zero or at least 32, or its type is wider than
    // 64 bits, fall back to the default expansion.
    if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
        (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
      return SDValue();

    // Extract the lower 32 bits of the shift amount if it's not an i32
    if (ShAmt->getValueType(0) != MVT::i32)
      ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);

    if (ShOpc == ISD::SRL) {
      if (!Con)
        // There is no t2LSRLr instruction so negate and perform an lsll if the
        // shift amount is in a register, emulating a right shift.
        ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                            DAG.getConstant(0, dl, MVT::i32), ShAmt);
      else
        // Else generate an lsrl on the immediate shift amount
        ShPartsOpc = ARMISD::LSRL;
    } else if (ShOpc == ISD::SRA)
      ShPartsOpc = ARMISD::ASRL;

    // Lower 32 bits of the destination/source
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, dl, MVT::i32));
    // Upper 32 bits of the destination/source
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, dl, MVT::i32));

    // Generate the shift operation as computed above
    Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
                     ShAmt);
    // The upper 32 bits come from the second return value of lsll
    Hi = SDValue(Lo.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
    return SDValue();

  // If we are in Thumb1 mode, we don't have RRX.
  if (ST->isThumb1Only())
    return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1.  Lower this to an RRX expr.
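  // e.g. for srl i64 %x, 1: the top word is shifted right by one with its
  // bit 0 captured in the carry flag, and RRX rotates that carry into bit 31
  // of the low word, completing the 64-bit shift in two instructions.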
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc =
      N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG : ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {
  bool Invert = false;
  bool Swap = false;
  unsigned Opc = ARMCC::AL;

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  EVT VT = Op.getValueType();
  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
  SDLoc dl(Op);

  EVT CmpVT;
  if (ST->hasNEON())
    CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
  else {
    assert(ST->hasMVEIntegerOps() &&
           "No hardware support for integer vector comparison!");

    if (Op.getValueType().getVectorElementType() != MVT::i1)
      return SDValue();

    // Make sure we expand floating point setcc to scalar if we do not have
    // mve.fp, so that we can handle them from there.
    if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps())
      return SDValue();

    CmpVT = VT;
  }

  if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
      (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
    // Special-case integer 64-bit equality comparisons. They aren't legal,
    // but they can be lowered with a few vector instructions.
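    // e.g. for v2i64 a == b: compare as v4i32 (per-32-bit-word equality
    // masks), VREV64 swaps each word's mask with its partner's, and the AND
    // is all-ones only where both halves of a 64-bit lane compared equal.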
    unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
    EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
    SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
    SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
    SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
                              DAG.getCondCode(ISD::SETEQ));
    SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
    SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
    Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
    if (SetCCOpcode == ISD::SETNE)
      Merged = DAG.getNOT(dl, Merged, CmpVT);
    Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
    return Merged;
  }

  if (CmpVT.getVectorElementType() == MVT::i64)
    // 64-bit comparisons are not legal in general.
    return SDValue();

  if (Op1.getValueType().isFloatingPoint()) {
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal FP comparison");
    case ISD::SETUNE:
    case ISD::SETNE:
      if (ST->hasMVEFloatOps()) {
        Opc = ARMCC::NE; break;
      } else {
        Invert = true; LLVM_FALLTHROUGH;
      }
    case ISD::SETOEQ:
    case ISD::SETEQ:  Opc = ARMCC::EQ; break;
    case ISD::SETOLT:
    case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGT:
    case ISD::SETGT:  Opc = ARMCC::GT; break;
    case ISD::SETOLE:
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETOGE:
    case ISD::SETGE: Opc = ARMCC::GE; break;
    case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
    case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
    case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETONE: {
      // Expand this to (OLT | OGT).
      SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
      if (Invert)
        Result = DAG.getNOT(dl, Result, VT);
      return Result;
    }
    case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETO: {
      // Expand this to (OLT | OGE).
      SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
                                   DAG.getConstant(ARMCC::GT, dl, MVT::i32));
      SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                                   DAG.getConstant(ARMCC::GE, dl, MVT::i32));
      SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
      if (Invert)
        Result = DAG.getNOT(dl, Result, VT);
      return Result;
    }
    }
  } else {
    // Integer comparisons.
    switch (SetCCOpcode) {
    default: llvm_unreachable("Illegal integer comparison");
    case ISD::SETNE:
      if (ST->hasMVEIntegerOps()) {
        Opc = ARMCC::NE; break;
      } else {
        Invert = true; LLVM_FALLTHROUGH;
      }
    case ISD::SETEQ:  Opc = ARMCC::EQ; break;
    case ISD::SETLT:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGT:  Opc = ARMCC::GT; break;
    case ISD::SETLE:  Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETGE:  Opc = ARMCC::GE; break;
    case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGT: Opc = ARMCC::HI; break;
    case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ARMCC::HS; break;
    }

    // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
    if (ST->hasNEON() && Opc == ARMCC::EQ) {
      SDValue AndOp;
      if (ISD::isBuildVectorAllZeros(Op1.getNode()))
        AndOp = Op0;
      else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
        AndOp = Op1;

      // Ignore bitconvert.
      if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
        AndOp = AndOp.getOperand(0);

      if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
        Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
        Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
        SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1);
        if (!Invert)
          Result = DAG.getNOT(dl, Result, VT);
        return Result;
      }
    }
  }

  if (Swap)
    std::swap(Op0, Op1);

  // If one of the operands is a constant vector zero, attempt to fold the
  // comparison to a specialized compare-against-zero form.
  SDValue SingleOp;
  if (ISD::isBuildVectorAllZeros(Op1.getNode()))
    SingleOp = Op0;
  else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
    if (Opc == ARMCC::GE)
      Opc = ARMCC::LE;
    else if (Opc == ARMCC::GT)
      Opc = ARMCC::LT;
    SingleOp = Op1;
  }

  SDValue Result;
  if (SingleOp.getNode()) {
    Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp,
                         DAG.getConstant(Opc, dl, MVT::i32));
  } else {
    Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
                         DAG.getConstant(Opc, dl, MVT::i32));
  }

  Result = DAG.getSExtOrTrunc(Result, dl, VT);

  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");

  // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
  // have to invert the carry first.
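  // e.g. an incoming borrow of 0 (no borrow) becomes an ARM carry of 1,
  // matching the SBC convention: Carry = 1 - Borrow.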
  Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                      DAG.getConstant(1, DL, MVT::i32), Carry);
  // This converts the boolean value carry into the carry flag.
  Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);

  SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
  SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
  SDValue ARMcc = DAG.getConstant(
      IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
                                   Cmp.getValue(1), SDValue());
  return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
                     CCR, Chain.getValue(1));
}

/// isVMOVModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON or MVE instruction with a "modified
/// immediate" operand (e.g., VMOV).  If so, return the encoded value.
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 const SDLoc &dl, EVT &VT, bool is128Bits,
                                 VMOVModImmType type) {
  unsigned OpCmode, Imm;

  // SplatBitSize is set to the smallest size that splats the vector, so a
  // zero vector will always have SplatBitSize == 8.  However, NEON modified
  // immediate instructions other than VMOV do not support the 8-bit encoding
  // of a zero vector, and the default encoding of zero is supposed to be the
  // 32-bit version.
  if (SplatBits == 0)
    SplatBitSize = 32;

  switch (SplatBitSize) {
  case 8:
    if (type != VMOVModImm)
      return SDValue();
    // Any 1-byte value is OK.  Op=0, Cmode=1110.
    assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
    OpCmode = 0xe;
    Imm = SplatBits;
    VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
    break;

  case 16:
    // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
    VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x00nn: Op=x, Cmode=100x.
      OpCmode = 0x8;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0xnn00: Op=x, Cmode=101x.
      OpCmode = 0xa;
      Imm = SplatBits >> 8;
      break;
    }
    return SDValue();

  case 32:
    // NEON's 32-bit VMOV supports splat values where:
    // * only one byte is nonzero, or
    // * the least significant byte is 0xff and the second byte is nonzero, or
    // * the least significant 2 bytes are 0xff and the third is nonzero.
    VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
    if ((SplatBits & ~0xff) == 0) {
      // Value = 0x000000nn: Op=x, Cmode=000x.
      OpCmode = 0;
      Imm = SplatBits;
      break;
    }
    if ((SplatBits & ~0xff00) == 0) {
      // Value = 0x0000nn00: Op=x, Cmode=001x.
      OpCmode = 0x2;
      Imm = SplatBits >> 8;
      break;
    }
    if ((SplatBits & ~0xff0000) == 0) {
      // Value = 0x00nn0000: Op=x, Cmode=010x.
      OpCmode = 0x4;
      Imm = SplatBits >> 16;
      break;
    }
    if ((SplatBits & ~0xff000000) == 0) {
      // Value = 0xnn000000: Op=x, Cmode=011x.
      OpCmode = 0x6;
      Imm = SplatBits >> 24;
      break;
    }

    // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
    if (type == OtherModImm) return SDValue();

    if ((SplatBits & ~0xffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xff) == 0xff) {
      // Value = 0x0000nnff: Op=x, Cmode=1100.
      OpCmode = 0xc;
      Imm = SplatBits >> 8;
      break;
    }

    // cmode == 0b1101 is not supported for MVE VMVN
    if (type == MVEVMVNModImm)
      return SDValue();

    if ((SplatBits & ~0xffffff) == 0 &&
        ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
      // Value = 0x00nnffff: Op=x, Cmode=1101.
      OpCmode = 0xd;
      Imm = SplatBits >> 16;
      break;
    }

    // Note: there are a few 32-bit splat values (specifically: 00ffff00,
    // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
    // VMOV.I32.  A (very) minor optimization would be to replicate the value
    // and fall through here to test for a valid 64-bit splat.  But, then the
    // caller would also need to check and handle the change in size.
    return SDValue();

  case 64: {
    if (type != VMOVModImm)
      return SDValue();
    // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
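    // e.g. the splat 0x00ff00ff00ff00ff encodes as Imm = 0b01010101: one
    // immediate bit per byte, set exactly where that byte is 0xff.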
    uint64_t BitMask = 0xff;
    unsigned ImmMask = 1;
    Imm = 0;
    for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
      if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
        Imm |= ImmMask;
      } else if ((SplatBits & BitMask) != 0) {
        return SDValue();
      }
      BitMask <<= 8;
      ImmMask <<= 1;
    }

    if (DAG.getDataLayout().isBigEndian())
      // Swap the higher and lower 32-bit words.
      Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);

    // Op=1, Cmode=1110.
    OpCmode = 0x1e;
    VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
    break;
  }

  default:
    llvm_unreachable("unexpected size for isVMOVModifiedImm");
  }

  unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm);
  return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
}

SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) const {
  EVT VT = Op.getValueType();
  bool IsDouble = (VT == MVT::f64);
  ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
  const APFloat &FPVal = CFP->getValueAPF();

  // Prevent floating-point constants from using literal loads
  // when execute-only is enabled.
  if (ST->genExecuteOnly()) {
    // If we can represent the constant as an immediate, don't lower it
    if (isFPImmLegal(FPVal, VT))
      return Op;
    // Otherwise, construct as integer, and move to float register
    APInt INTVal = FPVal.bitcastToAPInt();
    SDLoc DL(CFP);
    switch (VT.getSimpleVT().SimpleTy) {
      default:
        llvm_unreachable("Unknown floating point type!");
        break;
      case MVT::f64: {
        SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
        SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
        if (!ST->isLittle())
          std::swap(Lo, Hi);
        return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
      }
      case MVT::f32:
        return DAG.getNode(ARMISD::VMOVSR, DL, VT,
                           DAG.getConstant(INTVal, DL, MVT::i32));
    }
  }

  if (!ST->hasVFP3Base())
    return SDValue();

  // Use the default (constant pool) lowering for double constants when we have
  // an SP-only FPU.
  if (IsDouble && !Subtarget->hasFP64())
    return SDValue();

  // Try splatting with a VMOV.f32...
  int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);

  if (ImmVal != -1) {
    if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
      // We have code in place to select a valid ConstantFP already, no need to
      // do any mangling.
      return Op;
    }

    // It's a float and we are trying to use NEON operations where
    // possible. Lower it to a splat followed by an extract.
    SDLoc DL(Op);
    SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
                                      NewVal);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // The rest of our options are NEON only; make sure that's allowed before
  // proceeding.
  if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
    return SDValue();

  EVT VMovVT;
  uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();

  // It wouldn't really be worth bothering for doubles except for one very
  // important value, which does happen to match: 0.0. So make sure we don't do
  // anything stupid.
  if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
    return SDValue();

  // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
  SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
                                     VMovVT, false, VMOVModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
                                      NewVal);
    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  // Finally, try a VMVN.i32.
  NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
                             false, VMVNModImm);
  if (NewVal != SDValue()) {
    SDLoc DL(Op);
    SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);

    if (IsDouble)
      return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);

    // It's a float: cast and extract a vector element.
    SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
                                       VecConstant);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
                       DAG.getConstant(0, DL, MVT::i32));
  }

  return SDValue();
}

// Check if a VEXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
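// e.g. for v8i8, the mask <2,3,4,5,6,7,0,1> is a VEXT of the vector with
// itself with Imm = 2; indices wrap around past the last element.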
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, just follow it
    // back to index zero and keep going.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  return true;
}

static bool isVEXTMask(ArrayRef<int> M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();
  ReverseVEXT = false;

  // Assume that the first shuffle index is not UNDEF.  Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element.  The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index.  If it wraps around, it may still be
    // a VEXT but the source vectors must be swapped.
    ExpectedElt += 1;
    if (ExpectedElt == NumElts * 2) {
      ExpectedElt = 0;
      ReverseVEXT = true;
    }

    if (M[i] < 0) continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  // Adjust the index value if the source operands will be swapped.
  if (ReverseVEXT)
    Imm -= NumElts;

  return true;
}

/// isVREVMask - Check if a vector shuffle corresponds to a VREV
/// instruction with the specified blocksize.  (The order of the elements
/// within each block of the vector is reversed.)
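/// e.g. a VREV64.16 on v8i16 reverses 16-bit elements within each 64-bit
/// block (BlockElts = 4) and matches the mask <3,2,1,0,7,6,5,4>.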
static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
         "Only possible block sizes for VREV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue; // ignore UNDEF indices
    if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
      return false;
  }

  return true;
}

static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
  // range, then 0 is placed into the resulting vector. So pretty much any mask
  // of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}

static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
                               unsigned Index) {
  if (Mask.size() == Elements * 2)
    return Index / Elements;
  return Mask[Index] == 0 ? 0 : 1;
}

// Checks whether the shuffle mask represents a vector transpose (VTRN) by
// checking that pairs of elements in the shuffle mask represent the same index
// in each vector, incrementing the expected index by 2 at each step.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
//  v2={e,f,g,h}
// WhichResult gives the offset for each element in the mask based on which
// of the two results it belongs to.
//
// The transpose can be represented either as:
// result1 = shufflevector v1, v2, result1_shuffle_mask
// result2 = shufflevector v1, v2, result2_shuffle_mask
// where v1/v2 and the shuffle masks have the same number of elements
// (here WhichResult (see below) indicates which result is being checked)
//
// or as:
// results = shufflevector v1, v2, shuffle_mask
// where both results are returned in one vector and the shuffle mask has twice
// as many elements as v1/v2 (here WhichResult will always be 0 if true); here
// we want to check the low half and high half of the shuffle mask as if each
// were a mask of the first form.
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  // If the mask is twice as long as the input vector then we need to check the
  // upper and lower parts of the mask with a matching value for WhichResult
  // FIXME: A mask with only even values will be rejected in case the first
  // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
  // M[0] is used to determine WhichResult
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  return true;
}

// Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
// that the mask elements are either all even and in steps of size 2 or all odd
// and in steps of size 2.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
//  v2={e,f,g,h}
// Requires checks similar to isVTRNMask with respect to how the results are
// returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; ++j) {
      if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
        return false;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  unsigned Half = NumElts / 2;
  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    for (unsigned j = 0; j < NumElts; j += Half) {
      unsigned Idx = WhichResult;
      for (unsigned k = 0; k < Half; ++k) {
        int MIdx = M[i + j + k];
        if (MIdx >= 0 && (unsigned) MIdx != Idx)
          return false;
        Idx += 2;
      }
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

// Checks whether the shuffle mask represents a vector zip (VZIP) by checking
// that pairs of elements of the shufflemask represent the same index in each
// vector incrementing sequentially through the vectors.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
//  v2={e,f,g,h}
// Requires checks similar to isVTRNMask with respect to how the results are
// returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  if (M.size() != NumElts && M.size() != NumElts*2)
    return false;

  for (unsigned i = 0; i < M.size(); i += NumElts) {
    WhichResult = SelectPairHalf(NumElts, M, i);
    unsigned Idx = WhichResult * NumElts / 2;
    for (unsigned j = 0; j < NumElts; j += 2) {
      if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
          (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
        return false;
      Idx += 1;
    }
  }

  if (M.size() == NumElts*2)
    WhichResult = 0;

  // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
  if (VT.is64BitVector() && EltSz == 32)
    return false;

  return true;
}

/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
                                           unsigned &WhichResult,
                                           bool &isV_UNDEF) {
  isV_UNDEF = false;
  if (isVTRNMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIPMask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  isV_UNDEF = true;
  if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VTRN;
  if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VUZP;
  if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
    return ARMISD::VZIP;

  return 0;
}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}

static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
    return false;

  // If Top
  //   Look for <0, N, 2, N+2, 4, N+4, ..>.
  //   This inserts Input2 into Input1
  // else if not Top
  //   Look for <0, N+1, 2, N+3, 4, N+5, ..>
  //   This inserts Input1 into Input2
  unsigned Offset = Top ? 0 : 1;
  for (unsigned i = 0; i < NumElts; i+=2) {
    if (M[i] >= 0 && M[i] != (int)i)
      return false;
    if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset))
      return false;
  }

  return true;
}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}

static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BoolMask;
  unsigned BitsPerBool;
  if (NumElts == 4) {
    BitsPerBool = 4;
    BoolMask = 0xf;
  } else if (NumElts == 8) {
    BitsPerBool = 2;
    BoolMask = 0x3;
  } else if (NumElts == 16) {
    BitsPerBool = 1;
    BoolMask = 0x1;
  } else
    return SDValue();

  // If this is a single value copied into all lanes (a splat), we can just sign
  // extend that single value
  SDValue FirstOp = Op.getOperand(0);
  if (!isa<ConstantSDNode>(FirstOp) &&
      std::all_of(std::next(Op->op_begin()), Op->op_end(),
                  [&FirstOp](SDUse &U) {
                    return U.get().isUndef() || U.get() == FirstOp;
                  })) {
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
                              DAG.getValueType(MVT::i1));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
  }

  // First create base with bits set where known
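  // e.g. for v4i1 <1,0,1,1>: BitsPerBool = 4, so Bits32 = 0xf | (0xf << 8) |
  // (0xf << 12) = 0xff0f, replicating each known-true bool across its nibble.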
  unsigned Bits32 = 0;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (!isa<ConstantSDNode>(V) && !V.isUndef())
      continue;
    bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
    if (BitSet)
      Bits32 |= BoolMask << (i * BitsPerBool);
  }

  // Add in unknown nodes
  SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
                             DAG.getConstant(Bits32, dl, MVT::i32));
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (isa<ConstantSDNode>(V) || V.isUndef())
      continue;
    Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
                       DAG.getConstant(i, dl, MVT::i32));
  }

  return Base;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {
  BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
  SDLoc dl(Op);
  EVT VT = Op.getValueType();

  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerBUILD_VECTOR_i1(Op, DAG, ST);

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatUndef.isAllOnesValue())
      return DAG.getUNDEF(VT);

    if ((ST->hasNEON() && SplatBitSize <= 64) ||
        (ST->hasMVEIntegerOps() && SplatBitSize <= 32)) {
      // Check if an immediate VMOV works.
      EVT VmovVT;
      SDValue Val = isVMOVModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VmovVT, VT.is128BitVector(),
                                      VMOVModImm);

      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Try an immediate VMVN.
      uint64_t NegatedImm = (~SplatBits).getZExtValue();
      Val = isVMOVModifiedImm(
          NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
          DAG, dl, VmovVT, VT.is128BitVector(),
          ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
      if (Val.getNode()) {
        SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
      }

      // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
      if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
        int ImmVal = ARM_AM::getFP32Imm(SplatBits);
        if (ImmVal != -1) {
          SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
          return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
        }
      }
    }
  }

  // Scan through the operands to see if only one value is used.
  //
  // As an optimisation, even if more than one value is used it may be more
  // profitable to splat with one value then change some lanes.
  //
  // Heuristically we decide to do this if the vector has a "dominant" value,
  // defined as splatted to more than half of the lanes.
  unsigned NumElts = VT.getVectorNumElements();
  bool isOnlyLowElement = true;
  bool usesOnlyOneValue = true;
  bool hasDominantValue = false;
  bool isConstant = true;

  // Map of the number of times a particular SDValue appears in the
  // element list.
  DenseMap<SDValue, unsigned> ValueCounts;
  SDValue Value;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    if (i > 0)
      isOnlyLowElement = false;
    if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
      isConstant = false;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    // Is this value dominant? (takes up more than half of the lanes)
    if (++Count > (NumElts / 2)) {
      hasDominantValue = true;
      Value = V;
    }
  }
  if (ValueCounts.size() != 1)
    usesOnlyOneValue = false;
  if (!Value.getNode() && !ValueCounts.empty())
    Value = ValueCounts.begin()->first;

  if (ValueCounts.empty())
    return DAG.getUNDEF(VT);

  // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
  // Keep going if we hit this case.
7244   if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
7245     return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
7246 
7247   unsigned EltSize = VT.getScalarSizeInBits();
7248 
7249   // Use VDUP for non-constant splats.  For f32 constant splats, reduce to
7250   // i32 and try again.
7251   if (hasDominantValue && EltSize <= 32) {
7252     if (!isConstant) {
7253       SDValue N;
7254 
7255       // If we are VDUPing a value that comes directly from a vector, that will
7256       // cause an unnecessary move to and from a GPR, where instead we could
7257       // just use VDUPLANE. We can only do this if the lane being extracted
7258       // is at a constant index, as the VDUP from lane instructions only have
7259       // constant-index forms.
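           // E.g. VDUP(extract_vector_elt(V, 2)) can instead be emitted as
           // VDUPLANE(V, 2) when the index 2 is constant, avoiding the round
           // trip through a GPR.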
7260       ConstantSDNode *constIndex;
7261       if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7262           (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
7263         // We need to create a new undef vector to use for the VDUPLANE if the
7264         // size of the vector from which we get the value is different than the
7265         // size of the vector that we need to create. We will insert the element
7266         // such that the register coalescer will remove unnecessary copies.
7267         if (VT != Value->getOperand(0).getValueType()) {
7268           unsigned index = constIndex->getAPIntValue().getLimitedValue() %
7269                              VT.getVectorNumElements();
7270           N =  DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7271                  DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
7272                         Value, DAG.getConstant(index, dl, MVT::i32)),
7273                            DAG.getConstant(index, dl, MVT::i32));
7274         } else
7275           N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7276                         Value->getOperand(0), Value->getOperand(1));
7277       } else
7278         N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
7279 
7280       if (!usesOnlyOneValue) {
7281         // The dominant value was splatted as 'N', but we now have to insert
7282         // all differing elements.
7283         for (unsigned I = 0; I < NumElts; ++I) {
7284           if (Op.getOperand(I) == Value)
7285             continue;
7286           SmallVector<SDValue, 3> Ops;
7287           Ops.push_back(N);
7288           Ops.push_back(Op.getOperand(I));
7289           Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
7290           N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
7291         }
7292       }
7293       return N;
7294     }
7295     if (VT.getVectorElementType().isFloatingPoint()) {
7296       SmallVector<SDValue, 8> Ops;
7297       MVT FVT = VT.getVectorElementType().getSimpleVT();
7298       assert(FVT == MVT::f32 || FVT == MVT::f16);
7299       MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
7300       for (unsigned i = 0; i < NumElts; ++i)
7301         Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
7302                                   Op.getOperand(i)));
7303       EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
7304       SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
7305       Val = LowerBUILD_VECTOR(Val, DAG, ST);
7306       if (Val.getNode())
7307         return DAG.getNode(ISD::BITCAST, dl, VT, Val);
7308     }
7309     if (usesOnlyOneValue) {
7310       SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
7311       if (isConstant && Val.getNode())
7312         return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
7313     }
7314   }
7315 
7316   // If all elements are constants and the case above didn't get hit, fall back
7317   // to the default expansion, which will generate a load from the constant
7318   // pool.
7319   if (isConstant)
7320     return SDValue();
7321 
7322   // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
7323   if (NumElts >= 4) {
7324     SDValue shuffle = ReconstructShuffle(Op, DAG);
7325     if (shuffle != SDValue())
7326       return shuffle;
7327   }
7328 
7329   if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
7330     // If we haven't found an efficient lowering, try splitting a 128-bit vector
7331     // into two 64-bit vectors; we might discover a better way to lower it.
7332     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
7333     EVT ExtVT = VT.getVectorElementType();
7334     EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
7335     SDValue Lower =
7336         DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
7337     if (Lower.getOpcode() == ISD::BUILD_VECTOR)
7338       Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
7339     SDValue Upper = DAG.getBuildVector(
7340         HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
7341     if (Upper.getOpcode() == ISD::BUILD_VECTOR)
7342       Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
7343     if (Lower && Upper)
7344       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
7345   }
7346 
7347   // Vectors with 32- or 64-bit elements can be built by directly assigning
7348   // the subregisters.  Lower it to an ARMISD::BUILD_VECTOR so the operands
7349   // will be legalized.
7350   if (EltSize >= 32) {
7351     // Do the expansion with floating-point types, since that is what the VFP
7352     // registers are defined to use, and since i64 is not legal.
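         // E.g. a v2i64 BUILD_VECTOR is emitted here as an ARMISD::BUILD_VECTOR
         // of two f64 values (bitcasts of the i64 operands), then bitcast back
         // to v2i64.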
7353     EVT EltVT = EVT::getFloatingPointVT(EltSize);
7354     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
7355     SmallVector<SDValue, 8> Ops;
7356     for (unsigned i = 0; i < NumElts; ++i)
7357       Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
7358     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
7359     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
7360   }
7361 
7362   // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
7363   // know the default expansion would otherwise fall back on something even
7364   // worse. For a vector with one or two non-undef values, that's
7365   // scalar_to_vector for the elements followed by a shuffle (provided the
7366   // shuffle is valid for the target) and materialization element by element
7367   // on the stack followed by a load for everything else.
7368   if (!isConstant && !usesOnlyOneValue) {
7369     SDValue Vec = DAG.getUNDEF(VT);
7370     for (unsigned i = 0 ; i < NumElts; ++i) {
7371       SDValue V = Op.getOperand(i);
7372       if (V.isUndef())
7373         continue;
7374       SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
7375       Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
7376     }
7377     return Vec;
7378   }
7379 
7380   return SDValue();
7381 }
7382 
7383 // Gather data to see if the operation can be modelled as a
7384 // shuffle in combination with VEXTs.
7385 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
7386                                               SelectionDAG &DAG) const {
7387   assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7388   SDLoc dl(Op);
7389   EVT VT = Op.getValueType();
7390   unsigned NumElts = VT.getVectorNumElements();
7391 
7392   struct ShuffleSourceInfo {
7393     SDValue Vec;
7394     unsigned MinElt = std::numeric_limits<unsigned>::max();
7395     unsigned MaxElt = 0;
7396 
7397     // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
7398     // be compatible with the shuffle we intend to construct. As a result
7399     // ShuffleVec will be some sliding window into the original Vec.
7400     SDValue ShuffleVec;
7401 
7402     // Code should guarantee that element i in Vec starts at element
7403     // "WindowBase + i * WindowScale" in ShuffleVec.
7404     int WindowBase = 0;
7405     int WindowScale = 1;
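         // Illustrative: with WindowScale = 2 and WindowBase = -4, element 3 of
         // Vec is found at lane 2 * 3 - 4 = 2 of ShuffleVec.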
7406 
7407     ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}
7408 
7409     bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
7410   };
7411 
7412   // First gather all vectors used as an immediate source for this BUILD_VECTOR
7413   // node.
7414   SmallVector<ShuffleSourceInfo, 2> Sources;
7415   for (unsigned i = 0; i < NumElts; ++i) {
7416     SDValue V = Op.getOperand(i);
7417     if (V.isUndef())
7418       continue;
7419     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
7420       // A shuffle can only come from building a vector from various
7421       // elements of other vectors.
7422       return SDValue();
7423     } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
7424       // Furthermore, shuffles require a constant mask, whereas extractelts
7425       // accept variable indices.
7426       return SDValue();
7427     }
7428 
7429     // Add this element source to the list if it's not already there.
7430     SDValue SourceVec = V.getOperand(0);
7431     auto Source = llvm::find(Sources, SourceVec);
7432     if (Source == Sources.end())
7433       Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
7434 
7435     // Update the minimum and maximum lane number seen.
7436     unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
7437     Source->MinElt = std::min(Source->MinElt, EltNo);
7438     Source->MaxElt = std::max(Source->MaxElt, EltNo);
7439   }
7440 
7441   // Currently only do something sane when at most two source vectors
7442   // are involved.
7443   if (Sources.size() > 2)
7444     return SDValue();
7445 
7446   // Find out the smallest element size among result and two sources, and use
7447   // it as element size to build the shuffle_vector.
7448   EVT SmallestEltTy = VT.getVectorElementType();
7449   for (auto &Source : Sources) {
7450     EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
7451     if (SrcEltTy.bitsLT(SmallestEltTy))
7452       SmallestEltTy = SrcEltTy;
7453   }
7454   unsigned ResMultiplier =
7455       VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
7456   NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
7457   EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
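       // Worked example: for a v4i32 result built from v8i16 sources,
       // SmallestEltTy is i16, ResMultiplier = 32 / 16 = 2, NumElts becomes
       // 128 / 16 = 8, and the shuffle is constructed as a v8i16 operation.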
7458 
7459   // If the source vector is too wide or too narrow, we may nevertheless be able
7460   // to construct a compatible shuffle either by concatenating it with UNDEF or
7461   // extracting a suitable range of elements.
7462   for (auto &Src : Sources) {
7463     EVT SrcVT = Src.ShuffleVec.getValueType();
7464 
7465     if (SrcVT.getSizeInBits() == VT.getSizeInBits())
7466       continue;
7467 
7468     // This stage of the search produces a source with the same element type as
7469     // the original, but with a total width matching the BUILD_VECTOR output.
7470     EVT EltVT = SrcVT.getVectorElementType();
7471     unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
7472     EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
7473 
7474     if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
7475       if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
7476         return SDValue();
7477       // We can pad out the smaller vector for free, so if it's part of a
7478       // shuffle...
7479       Src.ShuffleVec =
7480           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
7481                       DAG.getUNDEF(Src.ShuffleVec.getValueType()));
7482       continue;
7483     }
7484 
7485     if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
7486       return SDValue();
7487 
7488     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
7489       // Span too large for a VEXT to cope
7490       return SDValue();
7491     }
7492 
7493     if (Src.MinElt >= NumSrcElts) {
7494       // The extraction can just take the second half
7495       Src.ShuffleVec =
7496           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7497                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
7498       Src.WindowBase = -NumSrcElts;
7499     } else if (Src.MaxElt < NumSrcElts) {
7500       // The extraction can just take the first half
7501       Src.ShuffleVec =
7502           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7503                       DAG.getConstant(0, dl, MVT::i32));
7504     } else {
7505       // An actual VEXT is needed
7506       SDValue VEXTSrc1 =
7507           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7508                       DAG.getConstant(0, dl, MVT::i32));
7509       SDValue VEXTSrc2 =
7510           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
7511                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
7512 
7513       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
7514                                    VEXTSrc2,
7515                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
7516       Src.WindowBase = -Src.MinElt;
7517     }
7518   }
7519 
7520   // Another possible incompatibility occurs from the vector element types. We
7521   // can fix this by bitcasting the source vectors to the same type we intend
7522   // for the shuffle.
7523   for (auto &Src : Sources) {
7524     EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
7525     if (SrcEltTy == SmallestEltTy)
7526       continue;
7527     assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
7528     Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
7529     Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
7530     Src.WindowBase *= Src.WindowScale;
7531   }
7532 
7533   // Final sanity check before we try to actually produce a shuffle.
7534   LLVM_DEBUG(for (auto Src : Sources)
7535                  assert(Src.ShuffleVec.getValueType() == ShuffleVT););
7537 
7538   // The stars all align, our next step is to produce the mask for the shuffle.
7539   SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
7540   int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
7541   for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
7542     SDValue Entry = Op.getOperand(i);
7543     if (Entry.isUndef())
7544       continue;
7545 
7546     auto Src = llvm::find(Sources, Entry.getOperand(0));
7547     int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
7548 
7549     // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
7550     // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
7551     // segment.
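         // E.g. extracting an i16 element into an i32 BUILD_VECTOR lane defines
         // only min(16, 32) = 16 bits; with 8-bit shuffle lanes that is
         // 16 / 8 = 2 defined lanes.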
7552     EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
7553     int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
7554                                VT.getScalarSizeInBits());
7555     int LanesDefined = BitsDefined / BitsPerShuffleLane;
7556 
7557     // This source is expected to fill ResMultiplier lanes of the final shuffle,
7558     // starting at the appropriate offset.
7559     int *LaneMask = &Mask[i * ResMultiplier];
7560 
7561     int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
7562     ExtractBase += NumElts * (Src - Sources.begin());
7563     for (int j = 0; j < LanesDefined; ++j)
7564       LaneMask[j] = ExtractBase + j;
7565   }
7566 
7567 
7568   // We can't handle more than two sources. This should have already
7569   // been checked before this point.
7570   assert(Sources.size() <= 2 && "Too many sources!");
7571 
7572   SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
7573   for (unsigned i = 0; i < Sources.size(); ++i)
7574     ShuffleOps[i] = Sources[i].ShuffleVec;
7575 
7576   SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
7577                                             ShuffleOps[1], Mask, DAG);
7578   if (!Shuffle)
7579     return SDValue();
7580   return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
7581 }
7582 
7583 enum ShuffleOpCodes {
7584   OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
7585   OP_VREV,
7586   OP_VDUP0,
7587   OP_VDUP1,
7588   OP_VDUP2,
7589   OP_VDUP3,
7590   OP_VEXT1,
7591   OP_VEXT2,
7592   OP_VEXT3,
7593   OP_VUZPL, // VUZP, left result
7594   OP_VUZPR, // VUZP, right result
7595   OP_VZIPL, // VZIP, left result
7596   OP_VZIPR, // VZIP, right result
7597   OP_VTRNL, // VTRN, left result
7598   OP_VTRNR  // VTRN, right result
7599 };
7600 
7601 static bool isLegalMVEShuffleOp(unsigned PFEntry) {
7602   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7603   switch (OpNum) {
7604   case OP_COPY:
7605   case OP_VREV:
7606   case OP_VDUP0:
7607   case OP_VDUP1:
7608   case OP_VDUP2:
7609   case OP_VDUP3:
7610     return true;
7611   }
7612   return false;
7613 }
7614 
7615 /// isShuffleMaskLegal - Targets can use this to indicate that they only
7616 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
7617 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
7618 /// are assumed to be legal.
7619 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
7620   if (VT.getVectorNumElements() == 4 &&
7621       (VT.is128BitVector() || VT.is64BitVector())) {
7622     unsigned PFIndexes[4];
7623     for (unsigned i = 0; i != 4; ++i) {
7624       if (M[i] < 0)
7625         PFIndexes[i] = 8;
7626       else
7627         PFIndexes[i] = M[i];
7628     }
7629 
7630     // Compute the index in the perfect shuffle table.
7631     unsigned PFTableIndex =
7632       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
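         // Each index is a base-9 digit (0-7 for lanes, 8 for undef), so e.g.
         // the mask <0,1,2,3> yields ((0*9 + 1)*9 + 2)*9 + 3 = 102.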
7633     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
7634     unsigned Cost = (PFEntry >> 30);
7635 
7636     if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
7637       return true;
7638   }
7639 
7640   bool ReverseVEXT, isV_UNDEF;
7641   unsigned Imm, WhichResult;
7642 
7643   unsigned EltSize = VT.getScalarSizeInBits();
7644   if (EltSize >= 32 ||
7645       ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
7646       ShuffleVectorInst::isIdentityMask(M) ||
7647       isVREVMask(M, VT, 64) ||
7648       isVREVMask(M, VT, 32) ||
7649       isVREVMask(M, VT, 16))
7650     return true;
7651   else if (Subtarget->hasNEON() &&
7652            (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
7653             isVTBLMask(M, VT) ||
7654             isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
7655     return true;
7656   else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
7657            isReverseMask(M, VT))
7658     return true;
7659   else if (Subtarget->hasMVEIntegerOps() &&
7660            (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1)))
7661     return true;
7662   else
7663     return false;
7664 }
7665 
7666 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
7667 /// the specified operations to build the shuffle.
7668 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
7669                                       SDValue RHS, SelectionDAG &DAG,
7670                                       const SDLoc &dl) {
7671   unsigned OpNum = (PFEntry >> 26) & 0x0F;
7672   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7673   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
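       // As decoded above, a PFEntry packs: bits [29:26] = opcode, bits
       // [25:13] = LHS table id, bits [12:0] = RHS table id; the top two bits
       // hold the cost and are consumed by the callers.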
7674 
7675   if (OpNum == OP_COPY) {
7676     if (LHSID == (1*9+2)*9+3) return LHS;
7677     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
7678     return RHS;
7679   }
7680 
7681   SDValue OpLHS, OpRHS;
7682   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
7683   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
7684   EVT VT = OpLHS.getValueType();
7685 
7686   switch (OpNum) {
7687   default: llvm_unreachable("Unknown shuffle opcode!");
7688   case OP_VREV:
7689     // VREV divides the vector in half and swaps within the half.
7690     if (VT.getVectorElementType() == MVT::i32 ||
7691         VT.getVectorElementType() == MVT::f32)
7692       return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
7693     // vrev <4 x i16> -> VREV32
7694     if (VT.getVectorElementType() == MVT::i16)
7695       return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
7696     // vrev <4 x i8> -> VREV16
7697     assert(VT.getVectorElementType() == MVT::i8);
7698     return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
7699   case OP_VDUP0:
7700   case OP_VDUP1:
7701   case OP_VDUP2:
7702   case OP_VDUP3:
7703     return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7704                        OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
7705   case OP_VEXT1:
7706   case OP_VEXT2:
7707   case OP_VEXT3:
7708     return DAG.getNode(ARMISD::VEXT, dl, VT,
7709                        OpLHS, OpRHS,
7710                        DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
7711   case OP_VUZPL:
7712   case OP_VUZPR:
7713     return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
7714                        OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
7715   case OP_VZIPL:
7716   case OP_VZIPR:
7717     return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
7718                        OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
7719   case OP_VTRNL:
7720   case OP_VTRNR:
7721     return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
7722                        OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
7723   }
7724 }
7725 
7726 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
7727                                        ArrayRef<int> ShuffleMask,
7728                                        SelectionDAG &DAG) {
7729   // Check to see if we can use the VTBL instruction.
7730   SDValue V1 = Op.getOperand(0);
7731   SDValue V2 = Op.getOperand(1);
7732   SDLoc DL(Op);
7733 
7734   SmallVector<SDValue, 8> VTBLMask;
7735   for (ArrayRef<int>::iterator
7736          I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
7737     VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
7738 
7739   if (V2.getNode()->isUndef())
7740     return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
7741                        DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
7742 
7743   return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
7744                      DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
7745 }
7746 
7747 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
7748                                                       SelectionDAG &DAG) {
7749   SDLoc DL(Op);
7750   SDValue OpLHS = Op.getOperand(0);
7751   EVT VT = OpLHS.getValueType();
7752 
7753   assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
7754          "Expect an v8i16/v16i8 type");
7755   OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
7756   // For a v16i8 type: After the VREV, we have got <7, ..., 0, 15, ..., 8>. Now,
7757   // extract the first 8 bytes into the top double word and the last 8 bytes
7758   // into the bottom double word. The v8i16 case is similar.
7759   unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
7760   return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
7761                      DAG.getConstant(ExtractNum, DL, MVT::i32));
7762 }
7763 
7764 static EVT getVectorTyFromPredicateVector(EVT VT) {
7765   switch (VT.getSimpleVT().SimpleTy) {
7766   case MVT::v4i1:
7767     return MVT::v4i32;
7768   case MVT::v8i1:
7769     return MVT::v8i16;
7770   case MVT::v16i1:
7771     return MVT::v16i8;
7772   default:
7773     llvm_unreachable("Unexpected vector predicate type");
7774   }
7775 }
7776 
7777 static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
7778                                     SelectionDAG &DAG) {
7779   // Converting from boolean predicates to integers involves creating a vector
7780   // of all ones or all zeroes and selecting the lanes based upon the real
7781   // predicate.
7782   SDValue AllOnes =
7783       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
7784   AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);
7785 
7786   SDValue AllZeroes =
7787       DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
7788   AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);
7789 
7790   // Get full vector type from predicate type
7791   EVT NewVT = getVectorTyFromPredicateVector(VT);
7792 
7793   SDValue RecastV1;
7794   // If the real predicate is a v8i1 or v4i1 (not v16i1) then we need to recast
7795   // this to a v16i1. This cannot be done with an ordinary bitcast because the
7796   // sizes are not the same. We have to use an MVE-specific PREDICATE_CAST node,
7797   // since we know in hardware the sizes are really the same.
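       // E.g. a v4i1 predicate is reinterpreted as v16i1; each logical v4i1
       // lane then corresponds to four of the sixteen predicate bits.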
7798   if (VT != MVT::v16i1)
7799     RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
7800   else
7801     RecastV1 = Pred;
7802 
7803   // Select either all ones or zeroes depending upon the real predicate bits.
7804   SDValue PredAsVector =
7805       DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);
7806 
7807   // Recast our new predicate-as-integer v16i8 vector into something
7808   // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
7809   return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
7810 }
7811 
7812 static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
7813                                       const ARMSubtarget *ST) {
7814   EVT VT = Op.getValueType();
7815   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
7816   ArrayRef<int> ShuffleMask = SVN->getMask();
7817 
7818   assert(ST->hasMVEIntegerOps() &&
7819          "No support for vector shuffle of boolean predicates");
7820 
7821   SDValue V1 = Op.getOperand(0);
7822   SDLoc dl(Op);
7823   if (isReverseMask(ShuffleMask, VT)) {
7824     SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
7825     SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
7826     SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
7827                               DAG.getConstant(16, dl, MVT::i32));
7828     return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
7829   }
7830 
7831   // Until we can come up with optimised cases for every single vector
7832   // shuffle in existence we have chosen the least painful strategy. This is
7833   // to essentially promote the boolean predicate to an 8-bit integer, where
7834   // each predicate represents a byte. Then we fall back on a normal integer
7835   // vector shuffle and convert the result back into a predicate vector. In
7836   // many cases the generated code might be even better than scalar code
7837   // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
7838   // fields in a register into 8 other arbitrary 2-bit fields!
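       // Illustrative flow: a v8i1 mask is promoted to v8i16 (via a v16i8
       // select of all-ones/all-zeroes), shuffled as ordinary integers, and
       // finally compared against zero to recover a v8i1 predicate.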
7839   SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
7840   EVT NewVT = PredAsVector.getValueType();
7841 
7842   // Do the shuffle!
7843   SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
7844                                           DAG.getUNDEF(NewVT), ShuffleMask);
7845 
7846   // Now return the result of comparing the shuffled vector with zero,
7847   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
7848   return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
7849                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
7850 }
7851 
7852 static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
7853                                             ArrayRef<int> ShuffleMask,
7854                                             SelectionDAG &DAG) {
7855   // Attempt to lower the vector shuffle using as many whole register movs as
7856   // possible. This is useful for types smaller than 32 bits, which would
7857   // often otherwise become a series of GPR movs.
7858   SDLoc dl(Op);
7859   EVT VT = Op.getValueType();
7860   if (VT.getScalarSizeInBits() >= 32)
7861     return SDValue();
7862 
7863   assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
7864          "Unexpected vector type");
7865   int NumElts = VT.getVectorNumElements();
7866   int QuarterSize = NumElts / 4;
7867   // The four final parts of the vector, as i32's
7868   SDValue Parts[4];
7869 
7870   // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc. (but not
7871   // <u,u,u,u>), returning the vmov lane index.
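       // E.g. for v8i16 (Length = 2), the mask segment <4,5> maps to 32-bit
       // lane 4 / 2 = 2; <u,5> also maps to lane 2, since leading undefs are
       // skipped and the remaining entries are checked for consistency.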
7872   auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
7873     // Detect which mov lane this would be from the first non-undef element.
7874     int MovIdx = -1;
7875     for (int i = 0; i < Length; i++) {
7876       if (ShuffleMask[Start + i] >= 0) {
7877         if (ShuffleMask[Start + i] % Length != i)
7878           return -1;
7879         MovIdx = ShuffleMask[Start + i] / Length;
7880         break;
7881       }
7882     }
7883     // If all items are undef, leave this for other combines
7884     if (MovIdx == -1)
7885       return -1;
7886     // Check the remaining values are the correct part of the same mov
7887     for (int i = 1; i < Length; i++) {
7888       if (ShuffleMask[Start + i] >= 0 &&
7889           (ShuffleMask[Start + i] / Length != MovIdx ||
7890            ShuffleMask[Start + i] % Length != i))
7891         return -1;
7892     }
7893     return MovIdx;
7894   };
7895 
7896   for (int Part = 0; Part < 4; ++Part) {
7897     // Does this part look like a mov
7898     int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
7899     if (Elt != -1) {
7900       SDValue Input = Op->getOperand(0);
7901       if (Elt >= 4) {
7902         Input = Op->getOperand(1);
7903         Elt -= 4;
7904       }
7905       SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input);
7906       Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast,
7907                                 DAG.getConstant(Elt, dl, MVT::i32));
7908     }
7909   }
7910 
7911   // Nothing interesting found, just return
7912   if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
7913     return SDValue();
7914 
7915   // The other parts need to be built with the old shuffle vector, cast to a
7916   // v4i32 and extract_vector_elts
7917   if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
7918     SmallVector<int, 16> NewShuffleMask;
7919     for (int Part = 0; Part < 4; ++Part)
7920       for (int i = 0; i < QuarterSize; i++)
7921         NewShuffleMask.push_back(
7922             Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
7923     SDValue NewShuffle = DAG.getVectorShuffle(
7924         VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
7925     SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle);
7926 
7927     for (int Part = 0; Part < 4; ++Part)
7928       if (!Parts[Part])
7929         Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
7930                                   BitCast, DAG.getConstant(Part, dl, MVT::i32));
7931   }
7932   // Build a vector out of the various parts and bitcast it back to the original
7933   // type.
7934   SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts);
7935   return DAG.getBitcast(VT, NewVec);
7936 }
7937 
7938 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
7939                                    const ARMSubtarget *ST) {
7940   SDValue V1 = Op.getOperand(0);
7941   SDValue V2 = Op.getOperand(1);
7942   SDLoc dl(Op);
7943   EVT VT = Op.getValueType();
7944   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
7945   unsigned EltSize = VT.getScalarSizeInBits();
7946 
7947   if (ST->hasMVEIntegerOps() && EltSize == 1)
7948     return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);
7949 
7950   // Convert shuffles that are directly supported on NEON to target-specific
7951   // DAG nodes, instead of keeping them as shuffles and matching them again
7952   // during code selection.  This is more efficient and avoids the possibility
7953   // of inconsistencies between legalization and selection.
7954   // FIXME: floating-point vectors should be canonicalized to integer vectors
7955   // of the same size so that they get CSEd properly.
7956   ArrayRef<int> ShuffleMask = SVN->getMask();
7957 
7958   if (EltSize <= 32) {
7959     if (SVN->isSplat()) {
7960       int Lane = SVN->getSplatIndex();
7961       // If this is undef splat, generate it via "just" vdup, if possible.
7962       if (Lane == -1) Lane = 0;
7963 
7964       // Test if V1 is a SCALAR_TO_VECTOR.
7965       if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
7966         return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
7967       }
7968       // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
7969       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
7970       // reaches it).
7971       if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
7972           !isa<ConstantSDNode>(V1.getOperand(0))) {
7973         bool IsScalarToVector = true;
7974         for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
7975           if (!V1.getOperand(i).isUndef()) {
7976             IsScalarToVector = false;
7977             break;
7978           }
7979         if (IsScalarToVector)
7980           return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
7981       }
7982       return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
7983                          DAG.getConstant(Lane, dl, MVT::i32));
7984     }
7985 
7986     bool ReverseVEXT = false;
7987     unsigned Imm = 0;
7988     if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
7989       if (ReverseVEXT)
7990         std::swap(V1, V2);
7991       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
7992                          DAG.getConstant(Imm, dl, MVT::i32));
7993     }
7994 
7995     if (isVREVMask(ShuffleMask, VT, 64))
7996       return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
7997     if (isVREVMask(ShuffleMask, VT, 32))
7998       return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
7999     if (isVREVMask(ShuffleMask, VT, 16))
8000       return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
8001 
8002     if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
8003       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
8004                          DAG.getConstant(Imm, dl, MVT::i32));
8005     }
8006 
8007     // Check for Neon shuffles that modify both input vectors in place.
8008     // If both results are used, i.e., if there are two shuffles with the same
8009     // source operands and with masks corresponding to both results of one of
8010     // these operations, DAG memoization will ensure that a single node is
8011     // used for both shuffles.
8012     unsigned WhichResult = 0;
8013     bool isV_UNDEF = false;
8014     if (ST->hasNEON()) {
8015       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8016               ShuffleMask, VT, WhichResult, isV_UNDEF)) {
8017         if (isV_UNDEF)
8018           V2 = V1;
8019         return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
8020             .getValue(WhichResult);
8021       }
8022     }
8023     if (ST->hasMVEIntegerOps()) {
8024       if (isVMOVNMask(ShuffleMask, VT, 0))
8025         return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
8026                            DAG.getConstant(0, dl, MVT::i32));
8027       if (isVMOVNMask(ShuffleMask, VT, 1))
8028         return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
8029                            DAG.getConstant(1, dl, MVT::i32));
8030     }
8031 
8032     // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
8033     // shuffles that produce a result larger than their operands with:
8034     //   shuffle(concat(v1, undef), concat(v2, undef))
8035     // ->
8036     //   shuffle(concat(v1, v2), undef)
8037     // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
8038     //
8039     // This is useful in the general case, but there are special cases where
8040     // native shuffles produce larger results: the two-result ops.
8041     //
8042     // Look through the concat when lowering them:
8043     //   shuffle(concat(v1, v2), undef)
8044     // ->
8045     //   concat(VZIP(v1, v2):0, :1)
8046     //
8047     if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
8048       SDValue SubV1 = V1->getOperand(0);
8049       SDValue SubV2 = V1->getOperand(1);
8050       EVT SubVT = SubV1.getValueType();
8051 
8052       // We expect these to have been canonicalized to -1.
8053       assert(llvm::all_of(ShuffleMask, [&](int i) {
8054         return i < (int)VT.getVectorNumElements();
8055       }) && "Unexpected shuffle index into UNDEF operand!");
8056 
8057       if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8058               ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
8059         if (isV_UNDEF)
8060           SubV2 = SubV1;
8061         assert((WhichResult == 0) &&
8062                "In-place shuffle of concat can only have one result!");
8063         SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
8064                                   SubV1, SubV2);
8065         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
8066                            Res.getValue(1));
8067       }
8068     }
8069   }
8070 
8071   // If the shuffle is not directly supported and it has 4 elements, use
8072   // the PerfectShuffle-generated table to synthesize it from other shuffles.
8073   unsigned NumElts = VT.getVectorNumElements();
8074   if (NumElts == 4) {
8075     unsigned PFIndexes[4];
8076     for (unsigned i = 0; i != 4; ++i) {
8077       if (ShuffleMask[i] < 0)
8078         PFIndexes[i] = 8;
8079       else
8080         PFIndexes[i] = ShuffleMask[i];
8081     }
8082 
8083     // Compute the index in the perfect shuffle table.
8084     unsigned PFTableIndex =
8085       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8086     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8087     unsigned Cost = (PFEntry >> 30);
8088 
8089     if (Cost <= 4) {
8090       if (ST->hasNEON())
8091         return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8092       else if (isLegalMVEShuffleOp(PFEntry)) {
8093         unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8094         unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
8095         unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
8096         unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
8097         if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
8098           return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8099       }
8100     }
8101   }
8102 
8103   // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
8104   if (EltSize >= 32) {
8105     // Do the expansion with floating-point types, since that is what the VFP
8106     // registers are defined to use, and since i64 is not legal.
8107     EVT EltVT = EVT::getFloatingPointVT(EltSize);
8108     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
8109     V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
8110     V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
8111     SmallVector<SDValue, 8> Ops;
8112     for (unsigned i = 0; i < NumElts; ++i) {
8113       if (ShuffleMask[i] < 0)
8114         Ops.push_back(DAG.getUNDEF(EltVT));
8115       else
8116         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
8117                                   ShuffleMask[i] < (int)NumElts ? V1 : V2,
8118                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
8119                                                   dl, MVT::i32)));
8120     }
8121     SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
8122     return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8123   }
8124 
8125   if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
8126     return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
8127 
8128   if (ST->hasNEON() && VT == MVT::v8i8)
8129     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
8130       return NewOp;
8131 
8132   if (ST->hasMVEIntegerOps())
8133     if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
8134       return NewOp;
8135 
8136   return SDValue();
8137 }
8138 
8139 static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8140                                          const ARMSubtarget *ST) {
8141   EVT VecVT = Op.getOperand(0).getValueType();
8142   SDLoc dl(Op);
8143 
8144   assert(ST->hasMVEIntegerOps() &&
8145          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8146 
8147   SDValue Conv =
8148       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8149   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
8150   unsigned LaneWidth =
8151       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8152   unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
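       // E.g. for a v8i1 predicate, LaneWidth = 16 / 8 = 2, so inserting into
       // lane 3 rewrites predicate bits 7:6 (Mask = 0x3 << 6) via the BFI
       // below.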
8153   SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
8154                             Op.getOperand(1), DAG.getValueType(MVT::i1));
8155   SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
8156                             DAG.getConstant(~Mask, dl, MVT::i32));
8157   return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
8158 }
8159 
8160 SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
8161                                                   SelectionDAG &DAG) const {
8162   // INSERT_VECTOR_ELT is legal only for immediate indexes.
8163   SDValue Lane = Op.getOperand(2);
8164   if (!isa<ConstantSDNode>(Lane))
8165     return SDValue();
8166 
8167   SDValue Elt = Op.getOperand(1);
8168   EVT EltVT = Elt.getValueType();
8169 
8170   if (Subtarget->hasMVEIntegerOps() &&
8171       Op.getValueType().getScalarSizeInBits() == 1)
8172     return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);
8173 
8174   if (getTypeAction(*DAG.getContext(), EltVT) ==
8175       TargetLowering::TypePromoteFloat) {
8176     // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
8177     // but the type system will try to do that if we don't intervene.
8178     // Reinterpret any such vector-element insertion as one with the
8179     // corresponding integer types.
8180 
8181     SDLoc dl(Op);
8182 
8183     EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
8184     assert(getTypeAction(*DAG.getContext(), IEltVT) !=
8185            TargetLowering::TypePromoteFloat);
8186 
8187     SDValue VecIn = Op.getOperand(0);
8188     EVT VecVT = VecIn.getValueType();
8189     EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
8190                                   VecVT.getVectorNumElements());
8191 
8192     SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
8193     SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
8194     SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
8195                                   IVecIn, IElt, Lane);
8196     return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
8197   }
8198 
8199   return Op;
8200 }
8201 
8202 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8203                                           const ARMSubtarget *ST) {
8204   EVT VecVT = Op.getOperand(0).getValueType();
8205   SDLoc dl(Op);
8206 
8207   assert(ST->hasMVEIntegerOps() &&
8208          "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8209 
8210   SDValue Conv =
8211       DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8212   unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
8213   unsigned LaneWidth =
8214       getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8215   SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
8216                               DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
8217   return Shift;
8218 }
8219 
8220 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
8221                                        const ARMSubtarget *ST) {
8222   // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
8223   SDValue Lane = Op.getOperand(1);
8224   if (!isa<ConstantSDNode>(Lane))
8225     return SDValue();
8226 
8227   SDValue Vec = Op.getOperand(0);
8228   EVT VT = Vec.getValueType();
8229 
8230   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8231     return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);
8232 
8233   if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
8234     SDLoc dl(Op);
8235     return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
8236   }
8237 
8238   return Op;
8239 }
8240 
8241 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
8242                                       const ARMSubtarget *ST) {
8243   SDValue V1 = Op.getOperand(0);
8244   SDValue V2 = Op.getOperand(1);
8245   SDLoc dl(Op);
8246   EVT VT = Op.getValueType();
8247   EVT Op1VT = V1.getValueType();
8248   EVT Op2VT = V2.getValueType();
8249   unsigned NumElts = VT.getVectorNumElements();
8250 
8251   assert(Op1VT == Op2VT && "Operand types don't match!");
8252   assert(VT.getScalarSizeInBits() == 1 &&
8253          "Unexpected custom CONCAT_VECTORS lowering");
8254   assert(ST->hasMVEIntegerOps() &&
8255          "CONCAT_VECTORS lowering only supported for MVE");
8256 
8257   SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8258   SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG);
8259 
8260   // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
8261   // promoted to v8i16, etc.
8262 
8263   MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8264 
8265   // Extract the vector elements from Op1 and Op2 one by one and truncate them
8266   // to be the right size for the destination. For example, if Op1 is v4i1 then
8267   // the promoted vector is v4i32. The result of concatenation gives a v8i1,
8268   // which when promoted is v8i16. That means each i32 element from Op1 needs
8269   // truncating to i16 and inserting in the result.
8270   EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
8271   SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT);
8272   auto ExtractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
8273     EVT NewVT = NewV.getValueType();
8274     EVT ConcatVT = ConVec.getValueType();
8275     for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) {
8276       SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV,
8277                                 DAG.getIntPtrConstant(i, dl));
8278       ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt,
8279                            DAG.getConstant(j, dl, MVT::i32));
8280     }
8281     return ConVec;
8282   };
8283   unsigned j = 0;
8284   ConVec = ExtractInto(NewV1, ConVec, j);
8285   ConVec = ExtractInto(NewV2, ConVec, j);
8286 
8287   // Now return the result of comparing the subvector with zero,
8288   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8289   return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
8290                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8291 }
8292 
8293 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
8294                                    const ARMSubtarget *ST) {
8295   EVT VT = Op->getValueType(0);
8296   if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8297     return LowerCONCAT_VECTORS_i1(Op, DAG, ST);
8298 
8299   // The only time a CONCAT_VECTORS operation can have legal types is when
8300   // two 64-bit vectors are concatenated to a 128-bit vector.
8301   assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
8302          "unexpected CONCAT_VECTORS");
8303   SDLoc dl(Op);
8304   SDValue Val = DAG.getUNDEF(MVT::v2f64);
8305   SDValue Op0 = Op.getOperand(0);
8306   SDValue Op1 = Op.getOperand(1);
8307   if (!Op0.isUndef())
8308     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8309                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
8310                       DAG.getIntPtrConstant(0, dl));
8311   if (!Op1.isUndef())
8312     Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8313                       DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
8314                       DAG.getIntPtrConstant(1, dl));
8315   return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
8316 }
8317 
8318 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
8319                                       const ARMSubtarget *ST) {
8320   SDValue V1 = Op.getOperand(0);
8321   SDValue V2 = Op.getOperand(1);
8322   SDLoc dl(Op);
8323   EVT VT = Op.getValueType();
8324   EVT Op1VT = V1.getValueType();
8325   unsigned NumElts = VT.getVectorNumElements();
8326   unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
8327 
8328   assert(VT.getScalarSizeInBits() == 1 &&
8329          "Unexpected custom EXTRACT_SUBVECTOR lowering");
8330   assert(ST->hasMVEIntegerOps() &&
8331          "EXTRACT_SUBVECTOR lowering only supported for MVE");
8332 
8333   SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8334 
8335   // We now have Op1 promoted to a vector of integers, where v8i1 gets
8336   // promoted to v8i16, etc.
8337 
8338   MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8339 
8340   EVT SubVT = MVT::getVectorVT(ElType, NumElts);
8341   SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
8342   for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
8343     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
8344                               DAG.getIntPtrConstant(i, dl));
8345     SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
8346                          DAG.getConstant(j, dl, MVT::i32));
8347   }
8348 
8349   // Now return the result of comparing the subvector with zero,
8350   // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8351   return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
8352                      DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8353 }
8354 
8355 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
8356 /// element has been zero/sign-extended, depending on the isSigned parameter,
8357 /// from an integer type half its size.
8358 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
8359                                    bool isSigned) {
8360   // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
8361   EVT VT = N->getValueType(0);
8362   if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
8363     SDNode *BVN = N->getOperand(0).getNode();
8364     if (BVN->getValueType(0) != MVT::v4i32 ||
8365         BVN->getOpcode() != ISD::BUILD_VECTOR)
8366       return false;
8367     unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8368     unsigned HiElt = 1 - LoElt;
8369     ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
8370     ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
8371     ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
8372     ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
8373     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
8374       return false;
8375     if (isSigned) {
8376       if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
8377           Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
8378         return true;
8379     } else {
8380       if (Hi0->isNullValue() && Hi1->isNullValue())
8381         return true;
8382     }
8383     return false;
8384   }
8385 
8386   if (N->getOpcode() != ISD::BUILD_VECTOR)
8387     return false;
8388 
8389   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
8390     SDNode *Elt = N->getOperand(i).getNode();
8391     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
8392       unsigned EltSize = VT.getScalarSizeInBits();
8393       unsigned HalfSize = EltSize / 2;
8394       if (isSigned) {
8395         if (!isIntN(HalfSize, C->getSExtValue()))
8396           return false;
8397       } else {
8398         if (!isUIntN(HalfSize, C->getZExtValue()))
8399           return false;
8400       }
8401       continue;
8402     }
8403     return false;
8404   }
8405 
8406   return true;
8407 }
8408 
8409 /// isSignExtended - Check if a node is a vector value that is sign-extended
8410 /// or a constant BUILD_VECTOR with sign-extended elements.
8411 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
8412   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
8413     return true;
8414   if (isExtendedBUILD_VECTOR(N, DAG, true))
8415     return true;
8416   return false;
8417 }
8418 
8419 /// isZeroExtended - Check if a node is a vector value that is zero-extended
8420 /// or a constant BUILD_VECTOR with zero-extended elements.
8421 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
8422   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
8423     return true;
8424   if (isExtendedBUILD_VECTOR(N, DAG, false))
8425     return true;
8426   return false;
8427 }
8428 
8429 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
8430   if (OrigVT.getSizeInBits() >= 64)
8431     return OrigVT;
8432 
8433   assert(OrigVT.isSimple() && "Expecting a simple value type");
8434 
8435   MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
8436   switch (OrigSimpleTy) {
8437   default: llvm_unreachable("Unexpected Vector Type");
8438   case MVT::v2i8:
8439   case MVT::v2i16:
8440     return MVT::v2i32;
8441   case MVT::v4i8:
8442     return MVT::v4i16;
8443   }
8444 }
8445 
8446 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
8447 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
8448 /// We insert the required extension here to get the vector to fill a D register.
8449 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
8450                                             const EVT &OrigTy,
8451                                             const EVT &ExtTy,
8452                                             unsigned ExtOpcode) {
8453   // The vector originally had a size of OrigTy. It was then extended to ExtTy.
8454   // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
8455   // 64-bits we need to insert a new extension so that it will be 64-bits.
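       // E.g. an operand that was v2i8 (16 bits) before being extended is
       // re-extended to v2i32 (64 bits) so that it fills a D register.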
8456   assert(ExtTy.is128BitVector() && "Unexpected extension size");
8457   if (OrigTy.getSizeInBits() >= 64)
8458     return N;
8459 
8460   // Must extend size to at least 64 bits to be used as an operand for VMULL.
8461   EVT NewVT = getExtensionTo64Bits(OrigTy);
8462 
8463   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
8464 }
8465 
8466 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
8467 /// does not do any sign/zero extension. If the original vector is less
8468 /// than 64 bits, an appropriate extension will be added after the load to
8469 /// reach a total size of 64 bits. We have to add the extension separately
8470 /// because ARM does not have a sign/zero extending load for vectors.
8471 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
8472   EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
8473 
8474   // The load already has the right type.
8475   if (ExtendedTy == LD->getMemoryVT())
8476     return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
8477                        LD->getBasePtr(), LD->getPointerInfo(),
8478                        LD->getAlignment(), LD->getMemOperand()->getFlags());
8479 
8480   // We need to create a zextload/sextload. We cannot just create a load
8481   // followed by a zext/zext node because LowerMUL is also run during normal
8482   // operation legalization where we can't create illegal types.
8483   return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
8484                         LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
8485                         LD->getMemoryVT(), LD->getAlignment(),
8486                         LD->getMemOperand()->getFlags());
8487 }
8488 
8489 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
8490 /// extending load, or BUILD_VECTOR with extended elements, return the
8491 /// unextended value. The unextended vector should be 64 bits so that it can
8492 /// be used as an operand to a VMULL instruction. If the original vector size
8493 /// before extension is less than 64 bits we add a an extension to resize
8494 /// the vector to 64 bits.
SkipExtensionForVMULL(SDNode * N,SelectionDAG & DAG)8495 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
8496   if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
8497     return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
8498                                         N->getOperand(0)->getValueType(0),
8499                                         N->getValueType(0),
8500                                         N->getOpcode());
8501 
8502   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8503     assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
8504            "Expected extending load");
8505 
8506     SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
8507     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
8508     unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8509     SDValue extLoad =
8510         DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
8511     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
8512 
8513     return newLoad;
8514   }
8515 
8516   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
8517   // have been legalized as a BITCAST from v4i32.
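  // For example (illustrative, little-endian): (v2i64 (bitcast (v4i32
  // build_vector a, b, c, d))) keeps its two low halves in elements 0 and 2,
  // which is what the v2i32 build_vector below extracts.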
  if (N->getOpcode() == ISD::BITCAST) {
    SDNode *BVN = N->getOperand(0).getNode();
    assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
           BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
    unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
    return DAG.getBuildVector(
        MVT::v2i32, SDLoc(N),
        {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
  }
  // Construct a new BUILD_VECTOR with elements truncated to half the size.
  assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
  EVT VT = N->getValueType(0);
  unsigned EltSize = VT.getScalarSizeInBits() / 2;
  unsigned NumElts = VT.getVectorNumElements();
  MVT TruncVT = MVT::getIntegerVT(EltSize);
  SmallVector<SDValue, 8> Ops;
  SDLoc dl(N);
  for (unsigned i = 0; i != NumElts; ++i) {
    ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
    const APInt &CInt = C->getAPIntValue();
    // Element types smaller than 32 bits are not legal, so use i32 elements.
    // The values are implicitly truncated so sext vs. zext doesn't matter.
    Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
  }
  return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
      isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}

static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
  // Multiplications are only custom-lowered for 128-bit vectors so that
  // VMULL can be detected.  Otherwise v2i64 multiplications are not legal.
  EVT VT = Op.getValueType();
  assert(VT.is128BitVector() && VT.isInteger() &&
         "unexpected type for custom-lowering ISD::MUL");
  SDNode *N0 = Op.getOperand(0).getNode();
  SDNode *N1 = Op.getOperand(1).getNode();
  unsigned NewOpc = 0;
  bool isMLA = false;
  bool isN0SExt = isSignExtended(N0, DAG);
  bool isN1SExt = isSignExtended(N1, DAG);
  if (isN0SExt && isN1SExt)
    NewOpc = ARMISD::VMULLs;
  else {
    bool isN0ZExt = isZeroExtended(N0, DAG);
    bool isN1ZExt = isZeroExtended(N1, DAG);
    if (isN0ZExt && isN1ZExt)
      NewOpc = ARMISD::VMULLu;
    else if (isN1SExt || isN1ZExt) {
      // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
      // into (s/zext A * s/zext C) + (s/zext B * s/zext C).
      if (isN1SExt && isAddSubSExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLs;
        isMLA = true;
      } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
        std::swap(N0, N1);
        NewOpc = ARMISD::VMULLu;
        isMLA = true;
      }
    }

    if (!NewOpc) {
      if (VT == MVT::v2i64)
        // Fall through to expand this.  It is not legal.
        return SDValue();
      else
        // Other vector multiplications are legal.
        return Op;
    }
  }

  // Legalize to a VMULL instruction.
  SDLoc DL(Op);
  SDValue Op0;
  SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
  if (!isMLA) {
    Op0 = SkipExtensionForVMULL(N0, DAG);
    assert(Op0.getValueType().is64BitVector() &&
           Op1.getValueType().is64BitVector() &&
           "unexpected types for extended operands to VMULL");
    return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
  }

  // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
  // isel lowering to take advantage of no-stall back to back vmul + vmla.
  //   vmull q0, d4, d6
  //   vmlal q0, d5, d6
  // is faster than
  //   vaddl q0, d4, d5
  //   vmovl q1, d6
  //   vmul  q0, q0, q1
  SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
  SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
  EVT Op1VT = Op1.getValueType();
  return DAG.getNode(N0->getOpcode(), DL, VT,
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
                     DAG.getNode(NewOpc, DL, VT,
                               DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
}

static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
                              SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  // Convert to float.
  // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
  // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
  X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
  Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
  X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
  Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
  // Get reciprocal estimate.
  // float4 recip = vrecpeq_f32(yf);
  Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   Y);
  // Because char has a smaller range than uchar, we can actually get away
  // without any Newton steps.  This requires that we use a weird bias
  // of 0xb000, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
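  // Adding a constant to the integer representation of the product bumps it
  // up by a fixed number of ulps before truncation, compensating for the
  // reciprocal estimate reading low (illustrative reasoning; the constant
  // itself comes from the exhaustive testing noted above).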
  X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
  Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
  X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
  X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
  // Convert back to short.
  X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
  X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
  return X;
}

static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
                               SelectionDAG &DAG) {
  // TODO: Should this propagate fast-math-flags?

  SDValue N2;
  // Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_s16(y));
  // float4 xf = vcvt_f32_s32(vmovl_s16(x));
  N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and one refinement step.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
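  // Note (illustrative): VRECPS computes 2.0f - a * b, so multiplying the
  // estimate by vrecps(y, recip) performs one Newton-Raphson iteration
  // towards 1/y.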
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   N1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   N1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Because short has a smaller range than ushort, we can actually get away
  // with only a single Newton step.  This requires that we use a weird bias
  // of 0x89, however (again, this has been exhaustively tested).
  // float4 result = as_float4(as_int4(xf*recip) + 0x89);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::SDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

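  // For v8i8 (a summary of the code below): widen both operands to v8i16,
  // split each into two v4i16 halves, divide the halves independently, then
  // concatenate and truncate back to v8i8.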
  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

    N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG, ST);

    N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
    return N0;
  }
  return LowerSDIV_v4i16(N0, N1, dl, DAG);
}

static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  // TODO: Should this propagate fast-math-flags?
  EVT VT = Op.getValueType();
  assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
         "unexpected type for custom-lowering ISD::UDIV");

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2, N3;

  if (VT == MVT::v8i8) {
    N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
    N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);

    N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(4, dl));
    N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(4, dl));
    N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
                     DAG.getIntPtrConstant(0, dl));
    N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
                     DAG.getIntPtrConstant(0, dl));

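    // The halves are non-negative after zero-extension, so the signed v4i16
    // division helper below also computes the correct unsigned quotient.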
    N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
    N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16

    N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
    N0 = LowerCONCAT_VECTORS(N0, DAG, ST);

    N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
                     DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
                                     MVT::i32),
                     N0);
    return N0;
  }

  // v4i16 udiv ... Convert to float.
  // float4 yf = vcvt_f32_s32(vmovl_u16(y));
  // float4 xf = vcvt_f32_s32(vmovl_u16(x));
  N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
  N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
  N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
  SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);

  // Use reciprocal estimate and two refinement steps.
  // float4 recip = vrecpeq_f32(yf);
  // recip *= vrecpsq_f32(yf, recip);
  // recip *= vrecpsq_f32(yf, recip);
  N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
                   BN1);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
                   DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
                   BN1, N2);
  N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
  // Simply multiplying by the reciprocal estimate can leave us a few ulps
  // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
  // and that it will never cause us to return an answer too large).
  // float4 result = as_float4(as_int4(xf*recip) + 2);
  N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
  N1 = DAG.getConstant(2, dl, MVT::v4i32);
  N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
  N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
  // Convert back to integer and return.
  // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}

static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
  SDNode *N = Op.getNode();
  EVT VT = N->getValueType(0);
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  SDValue Carry = Op.getOperand(2);

  SDLoc DL(Op);

  SDValue Result;
  if (Op.getOpcode() == ISD::ADDCARRY) {
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the addition proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
  } else {
    // ARMISD::SUBE expects a carry, not a borrow as ISD::SUBCARRY provides,
    // so we have to invert the incoming carry first.
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
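    // (On ARM, SBC subtracts an extra 1 - C, i.e. a set carry means "no
    // borrow", so the incoming borrow b corresponds to a carry of 1 - b.)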
    // This converts the boolean value carry into the carry flag.
    Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

    // Do the subtraction proper using the carry flag we wanted.
    Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
                         Op.getOperand(1), Carry);

    // Now convert the carry flag into a boolean value.
    Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
    // But the carry returned by ARMISD::SUBE is not a borrow as expected
    // by ISD::SUBCARRY, so compute 1 - C.
    Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                        DAG.getConstant(1, DL, MVT::i32), Carry);
  }

  // Return both values.
  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
}

SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin());

  // For iOS, we want to call an alternative entry point: __sincos_stret;
  // the return values are passed via sret.
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Pair of floats / doubles used to pass the result.
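  // Conceptually (illustrative): struct { T sin; T cos; }. Under APCS the
  // callee writes it through an sret pointer; otherwise the pair is returned
  // directly as the call result (see ShouldUseSRet below).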
  Type *RetTy = StructType::get(ArgTy, ArgTy);
  auto &DL = DAG.getDataLayout();

  ArgListTy Args;
  bool ShouldUseSRet = Subtarget->isAPCS_ABI();
  SDValue SRet;
  if (ShouldUseSRet) {
    // Create stack object for sret.
    const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
    const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
    int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
    SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));

    ArgListEntry Entry;
    Entry.Node = SRet;
    Entry.Ty = RetTy->getPointerTo();
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsSRet = true;
    Args.push_back(Entry);
    RetTy = Type::getVoidTy(*DAG.getContext());
  }

  ArgListEntry Entry;
  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.IsSExt = false;
  Entry.IsZExt = false;
  Args.push_back(Entry);

  RTLIB::Libcall LC =
      (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
  const char *LibcallName = getLibcallName(LC);
  CallingConv::ID CC = getLibcallCallingConv(LC);
  SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(DAG.getEntryNode())
      .setCallee(CC, RetTy, Callee, std::move(Args))
      .setDiscardResult(ShouldUseSRet);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  if (!ShouldUseSRet)
    return CallResult.first;

  SDValue LoadSin =
      DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());

  // Address of cos field.
  SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
                            DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
  SDValue LoadCos =
      DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());

  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
                     LoadSin.getValue(0), LoadCos.getValue(0));
}

SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
                                                  bool Signed,
                                                  SDValue &Chain) const {
  EVT VT = Op.getValueType();
  assert((VT == MVT::i32 || VT == MVT::i64) &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  const char *Name = nullptr;
  if (Signed)
    Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
  else
    Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";

  SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));

  ARMTargetLowering::ArgListTy Args;

  // Push the divisor (operand 1) first; these helpers take it as their first
  // argument.
  for (auto AI : {1, 0}) {
    ArgListEntry Arg;
    Arg.Node = Op.getOperand(AI);
    Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Arg);
  }

  CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
    .setChain(Chain)
    .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
               ES, std::move(Args));

  return LowerCallTo(CLI).first;
}

// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the
// SDIV to be expanded in DAGCombine.
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // TODO: Support SREM
  if (N->getOpcode() != ISD::SDIV)
    return SDValue();

  const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
  const bool MinSize = ST.hasMinSize();
  const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
                                      : ST.hasDivideInARMMode();

  // Don't touch vector types; rewriting this may lead to scalarizing
  // the int divs.
  if (N->getOperand(0).getValueType().isVector())
    return SDValue();

  // Bail unless both MinSize is set and hardware divide is available; in
  // both ARM and Thumb mode we need hwdiv support for this to be really
  // profitable.
  if (!(MinSize && HasDivide))
    return SDValue();

  // ARM mode is a bit simpler than Thumb: we can handle large power
  // of 2 immediates with 1 mov instruction; no further checks required,
  // just return the sdiv node.
  if (!ST.isThumb())
    return SDValue(N, 0);

  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
  // and thus lose the code size benefits of a MOVS that requires only 2 bytes.
  // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
  // but as it's doing exactly this, it's not worth the trouble to get TTI.
  if (Divisor.sgt(128))
    return SDValue();

  return SDValue(N, 0);
}

SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {
  assert(Op.getValueType() == MVT::i32 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
                               DAG.getEntryNode(), Op.getOperand(1));

  return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
}

static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {
  SDLoc DL(N);
  SDValue Op = N->getOperand(1);
  if (N->getValueType(0) == MVT::i32)
    return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
                           DAG.getConstant(1, DL, MVT::i32));
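  // A 64-bit denominator is zero iff (Lo | Hi) is zero, so a single i32
  // check on the OR of the two halves suffices.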
  return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
                     DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
}

void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {
  const auto &DL = DAG.getDataLayout();
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Op.getValueType() == MVT::i64 &&
         "unexpected type for custom lowering DIV");
  SDLoc dl(Op);

  SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());

  SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);

  SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
  SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
                              DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
  Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);

  Results.push_back(Lower);
  Results.push_back(Upper);
}

static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
  EVT MemVT = LD->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == Op.getValueType());
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected an unindexed load");

  // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16-bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that 8/4 bits are actually loaded into the correct
  // place, which means loading the value and then shuffling the values into
  // the bottom bits of the predicate.
  // Equally, VLDR for a v16i1 will actually load 32 bits (so will be incorrect
  // for BE).
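  // For example (illustrative): a v4i1 is loaded below as a 4-bit integer
  // (any-extended to i32), cast to a full v16i1 predicate, and the v4i1 is
  // then extracted from its bottom lanes, rather than using VLDR directly.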

  SDLoc dl(Op);
  SDValue Load = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      LD->getMemOperand());
  SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
  if (MemVT != MVT::v16i1)
    Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
                       DAG.getConstant(0, dl, MVT::i32));
  return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
}

static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-extending store");
  assert(ST->isUnindexed() && "Expected an unindexed store");

  // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits
  // unset and a scalar store.
  SDLoc dl(Op);
  SDValue Build = ST->getValue();
  if (MemVT != MVT::v16i1) {
    SmallVector<SDValue, 16> Ops;
    for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
                                DAG.getConstant(I, dl, MVT::i32)));
    for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
      Ops.push_back(DAG.getUNDEF(MVT::i32));
    Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
  }
  SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
  return DAG.getTruncStore(
      ST->getChain(), dl, GRP, ST->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      ST->getMemOperand());
}

static bool isZeroVector(SDValue N) {
  return (ISD::isBuildVectorAllZeros(N.getNode()) ||
          (N->getOpcode() == ARMISD::VMOVIMM &&
           isNullConstant(N->getOperand(0))));
}

static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  if (isZeroVector(PassThru))
    return Op;

  // MVE masked loads use zero as the passthru value. Here we convert undef to
  // zero too, and other values are lowered to a select.
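  // That is (illustrative):
  //   masked_load(ptr, mask, passthru)
  //     --> vselect(mask, masked_load(ptr, mask, zerovec), passthru)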
  SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(0, dl, MVT::i32));
  SDValue NewLoad = DAG.getMaskedLoad(
      VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
      N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
      N->getExtensionType(), N->isExpandingLoad());
  SDValue Combo = NewLoad;
  if (!PassThru.isUndef() &&
      (PassThru.getOpcode() != ISD::BITCAST ||
       !isZeroVector(PassThru->getOperand(0))))
    Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
  return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
}

static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}

static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {
  SDLoc DL(N);
  // Under Power Management extensions, the cycle-count is:
  //    mrc p15, #0, <Rt>, c9, c13, #0
  SDValue Ops[] = { N->getOperand(0), // Chain
                    DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                    DAG.getTargetConstant(15, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32),
                    DAG.getTargetConstant(9, DL, MVT::i32),
                    DAG.getTargetConstant(13, DL, MVT::i32),
                    DAG.getTargetConstant(0, DL, MVT::i32)
  };

  SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                 DAG.getVTList(MVT::i32, MVT::Other), Ops);
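  // Only a 32-bit cycle count is available here, so widen to the i64 result
  // by pairing the low word with a zero high word.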
  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
                                DAG.getConstant(0, DL, MVT::i32)));
  Results.push_back(Cycles32.getValue(1));
}

static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  bool isBigEndian = DAG.getDataLayout().isBigEndian();
  if (isBigEndian)
    std::swap(VLo, VHi);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}

static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(SDValue(CmpSwap, 2));
}

SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
  bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;

  // If we don't have instructions of this float type then soften to a libcall
  // and use SETCC instead.
  if (isUnsupportedFloatingType(LHS.getValueType())) {
    DAG.getTargetLoweringInfo().softenSetCCOperands(
      DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
    SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
                                 DAG.getCondCode(CC));
    return DAG.getMergeValues({Result, Chain}, dl);
  }

  ARMCC::CondCodes CondCode, CondCode2;
  FPCCToARMCC(CC, CondCode, CondCode2);

  // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
  // in CMPFP and CMPFPE, but instead it should be made explicit by these
  // instructions using a chain instead of glue. This would also fix the problem
  // here (and also in LowerSELECT_CC) where we generate two comparisons when
  // CondCode2 != AL.
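  // For example (illustrative): SETUEQ maps to EQ plus a second VS check, so
  // the CMOV chain below produces true when the operands are equal or
  // unordered.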
  SDValue True = DAG.getConstant(1, dl, VT);
  SDValue False = DAG.getConstant(0, dl, VT);
  SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
  SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
  if (CondCode2 != ARMCC::AL) {
    ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
    Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
    Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
  }
  return DAG.getMergeValues({Result, Chain}, dl);
}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress:  return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:        return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC:     return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND:        return LowerBRCOND(Op, DAG);
  case ISD::BR_CC:         return LowerBR_CC(Op, DAG);
  case ISD::BR_JT:         return LowerBR_JT(Op, DAG);
  case ISD::VASTART:       return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE:  return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH:      return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:    return LowerINT_TO_FP(Op, DAG);
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:    return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN:     return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR:    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:     return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST:       return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA:           return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::UREM:          return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS:     return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS:     return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP:         return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC:         return LowerVSETCC(Op, DAG, Subtarget);
  case ISD::SETCCCARRY:    return LowerSETCCCARRY(Op, DAG);
  case ISD::ConstantFP:    return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR:  return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
  case ISD::FLT_ROUNDS_:   return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL:           return LowerMUL(Op, DAG);
  case ISD::SDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG, Subtarget);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG, Subtarget);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:      return LowerADDSUBCARRY(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
    return LowerSignedALUO(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:
    return LowerUnsignedALUO(Op, DAG);
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    return LowerSADDSUBSAT(Op, DAG, Subtarget);
  case ISD::LOAD:
    return LowerPredicateLoad(Op, DAG);
  case ISD::STORE:
    return LowerPredicateStore(Op, DAG);
  case ISD::MLOAD:
    return LowerMLOAD(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE:  return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS:       return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->isTargetWindows())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::STRICT_FP_ROUND:
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::STRICT_FP_EXTEND:
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::STRICT_FSETCC:
  case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG);
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}

static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned Opc = 0;
  if (IntNo == Intrinsic::arm_smlald)
    Opc = ARMISD::SMLALD;
  else if (IntNo == Intrinsic::arm_smlaldx)
    Opc = ARMISD::SMLALDX;
  else if (IntNo == Intrinsic::arm_smlsld)
    Opc = ARMISD::SMLSLD;
  else if (IntNo == Intrinsic::arm_smlsldx)
    Opc = ARMISD::SMLSLDX;
  else
    return;

  SDLoc dl(N);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(1, dl, MVT::i32));

  SDValue LongMul = DAG.getNode(Opc, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                N->getOperand(1), N->getOperand(2),
                                Lo, Hi);
  Results.push_back(LongMul.getValue(0));
  Results.push_back(LongMul.getValue(1));
}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::READ_REGISTER:
    ExpandREAD_REGISTER(N, Results, DAG);
    break;
  case ISD::BITCAST:
    Res = ExpandBITCAST(N, DAG, Subtarget);
    break;
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    Res = Expand64BitShift(N, DAG, Subtarget);
    break;
  case ISD::SREM:
  case ISD::UREM:
    Res = LowerREM(N, DAG);
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    Res = LowerDivRem(SDValue(N, 0), DAG);
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    return;
  case ISD::SADDSAT:
  case ISD::SSUBSAT:
    Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget);
    break;
  case ISD::READCYCLECOUNTER:
    ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
    return;
  case ISD::UDIV:
  case ISD::SDIV:
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
    return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
                             Results);
  case ISD::ATOMIC_CMP_SWAP:
    ReplaceCMP_SWAP_64Results(N, Results, DAG);
    return;
  case ISD::INTRINSIC_WO_CHAIN:
    return ReplaceLongIntrinsic(N, Results, DAG);
  case ISD::ABS:
    lowerABS(N, Results, DAG);
    return;
  }
  if (Res.getNode())
    Results.push_back(Res);
}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported with SjLj");
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  MachineConstantPool *MCP = MF->getConstantPool();
  ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
  const Function &F = MF->getFunction();

  bool isThumb = Subtarget->isThumb();
  bool isThumb2 = Subtarget->isThumb2();

  unsigned PCLabelId = AFI->createPICLabelUId();
  unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
  ARMConstantPoolValue *CPV =
    ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
  unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);

  const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;

  // Grab constant pool and fixed stack memory operands.
  MachineMemOperand *CPMMO =
      MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                               MachineMemOperand::MOLoad, 4, 4);

  MachineMemOperand *FIMMOSt =
      MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                               MachineMemOperand::MOStore, 4, 4);

  // Load the address of the dispatch MBB into the jump buffer.
  if (isThumb2) {
    // Incoming value: jbuf
    //   ldr.n  r5, LCPI1_1
    //   orr    r5, r5, #1
    //   add    r5, pc
    //   str    r5, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    // Set the low bit because of thumb mode.
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(0x01)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
      .addReg(NewVReg2, RegState::Kill)
      .addImm(PCLabelId);
    BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
        .addReg(NewVReg3, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else if (isThumb) {
    // Incoming value: jbuf
    //   ldr.n  r1, LCPI1_4
    //   add    r1, pc
    //   mov    r2, #1
    //   orrs   r1, r2
    //   add    r2, $jbuf, #+4 ; &jbuf[1]
    //   str    r1, [r2]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
      .addReg(NewVReg1, RegState::Kill)
      .addImm(PCLabelId);
    // Set the low bit because of thumb mode.
    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
        .addReg(ARM::CPSR, RegState::Define)
        .addImm(1)
        .add(predOps(ARMCC::AL));
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3, RegState::Kill)
        .add(predOps(ARMCC::AL));
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
            .addFrameIndex(FI)
            .addImm(36); // &jbuf[1] :: pc
    BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
        .addReg(NewVReg4, RegState::Kill)
        .addReg(NewVReg5, RegState::Kill)
        .addImm(0)
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  } else {
    // Incoming value: jbuf
    //   ldr  r1, LCPI1_1
    //   add  r1, pc, r1
    //   str  r1, [$jbuf, #+4] ; &jbuf[1]
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addConstantPoolIndex(CPI)
        .addImm(0)
        .addMemOperand(CPMMO)
        .add(predOps(ARMCC::AL));
    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
        .addReg(NewVReg1, RegState::Kill)
        .addImm(PCLabelId)
        .add(predOps(ARMCC::AL));
    BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
        .addReg(NewVReg2, RegState::Kill)
        .addFrameIndex(FI)
        .addImm(36) // &jbuf[1] :: pc
        .addMemOperand(FIMMOSt)
        .add(predOps(ARMCC::AL));
  }
}
9606 
EmitSjLjDispatchBlock(MachineInstr & MI,MachineBasicBlock * MBB) const9607 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
9608                                               MachineBasicBlock *MBB) const {
9609   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
9610   DebugLoc dl = MI.getDebugLoc();
9611   MachineFunction *MF = MBB->getParent();
9612   MachineRegisterInfo *MRI = &MF->getRegInfo();
9613   MachineFrameInfo &MFI = MF->getFrameInfo();
9614   int FI = MFI.getFunctionContextIndex();
9615 
9616   const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
9617                                                         : &ARM::GPRnopcRegClass;
9618 
9619   // Get a mapping of the call site numbers to all of the landing pads they're
9620   // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
       ++BB) {
    if (!BB->isEHPad()) continue;

    // FIXME: We should assert that the EH_LABEL is the first MI in the landing
    // pad.
    for (MachineBasicBlock::iterator
           II = BB->begin(), IE = BB->end(); II != IE; ++II) {
      if (!II->isEHLabel()) continue;

      MCSymbol *Sym = II->getOperand(0).getMCSymbol();
      if (!MF->hasCallSiteLandingPad(Sym)) continue;

      SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
      for (SmallVectorImpl<unsigned>::iterator
             CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
           CSI != CSE; ++CSI) {
        CallSiteNumToLPad[*CSI].push_back(&*BB);
        MaxCSNum = std::max(MaxCSNum, *CSI);
      }
      break;
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock*> LPadList;
  SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());
  for (unsigned I = 1; I <= MaxCSNum; ++I) {
    SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
    for (SmallVectorImpl<MachineBasicBlock*>::iterator
           II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
      LPadList.push_back(*II);
      InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the jump table and associated information.
  MachineJumpTableInfo *JTI =
    MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad();

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  unsigned trap_opcode;
  if (Subtarget->isThumb())
    trap_opcode = ARM::tTRAP;
  else
    trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;

  BuildMI(TrapBB, dl, TII->get(trap_opcode));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert the MBBs into the function.
  MF->insert(MF->end(), DispatchBB);
  MF->insert(MF->end(), DispContBB);
  MF->insert(MF->end(), TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);

  MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
      MachinePointerInfo::getFixedStack(*MF, FI),
      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);

  MachineInstrBuilder MIB;
  MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));

  const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
  const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();

  // Add a register mask with no preserved registers.  This results in all
  // registers being marked as clobbered. This can't work if the dispatch block
  // is in a Thumb1 function and is linked with ARM code which uses the FP
  // registers, as there is no way to preserve the FP registers in Thumb1 mode.
  MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));

  bool IsPositionIndependent = isPositionIndependent();
  unsigned NumLPads = LPadList.size();
  if (Subtarget->isThumb2()) {
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
          .addReg(NewVReg1)
          .addImm(LPadList.size())
          .add(predOps(ARMCC::AL));
    } else {
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    }
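    // Note: the NumLPads < 256 test above is a conservative guard for the
    // immediate form of the compare; larger counts are materialized with a
    // movw/movt pair and compared register-to-register instead.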

    BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
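    // NewVReg4 = NewVReg3 + (NewVReg1 << 2); the lsl #2 scales the call-site
    // index by the 4-byte width of an inline jump-table entry.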

    BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
      .addReg(NewVReg4, RegState::Kill)
      .addReg(NewVReg1)
      .addJumpTableIndex(MJTI);
  } else if (Subtarget->isThumb()) {
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
        .addFrameIndex(FI)
        .addImm(1)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));
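    // Note: tLDRspi scales its immediate by 4, so addImm(1) reads the slot at
    // FI + 4, matching the addImm(4) used with t2LDRi12/LDRi12 in the other
    // branches.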

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
          .addReg(NewVReg1)
          .addReg(VReg1)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg2 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg1)
        .addImm(2)
        .add(predOps(ARMCC::AL));

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
        .addReg(ARM::CPSR, RegState::Define)
        .addReg(NewVReg2, RegState::Kill)
        .addReg(NewVReg3)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);

    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
        .addReg(NewVReg4, RegState::Kill)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    unsigned NewVReg6 = NewVReg5;
    if (IsPositionIndependent) {
      NewVReg6 = MRI->createVirtualRegister(TRC);
      BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
          .addReg(ARM::CPSR, RegState::Define)
          .addReg(NewVReg5, RegState::Kill)
          .addReg(NewVReg3)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
      .addReg(NewVReg6, RegState::Kill)
      .addJumpTableIndex(MJTI);
  } else {
    Register NewVReg1 = MRI->createVirtualRegister(TRC);
    BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
        .addFrameIndex(FI)
        .addImm(4)
        .addMemOperand(FIMMOLd)
        .add(predOps(ARMCC::AL));

    if (NumLPads < 256) {
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
          .addReg(NewVReg1)
          .addImm(NumLPads)
          .add(predOps(ARMCC::AL));
    } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
          .addImm(NumLPads & 0xFFFF)
          .add(predOps(ARMCC::AL));

      unsigned VReg2 = VReg1;
      if ((NumLPads & 0xFFFF0000) != 0) {
        VReg2 = MRI->createVirtualRegister(TRC);
        BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
            .addReg(VReg1)
            .addImm(NumLPads >> 16)
            .add(predOps(ARMCC::AL));
      }

      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg2)
          .add(predOps(ARMCC::AL));
    } else {
      MachineConstantPool *ConstantPool = MF->getConstantPool();
      Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
      const Constant *C = ConstantInt::get(Int32Ty, NumLPads);

      // MachineConstantPool wants an explicit alignment.
      unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
      if (Align == 0)
        Align = MF->getDataLayout().getTypeAllocSize(C->getType());
      unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);

      Register VReg1 = MRI->createVirtualRegister(TRC);
      BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
          .addReg(VReg1, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
          .addReg(NewVReg1)
          .addReg(VReg1, RegState::Kill)
          .add(predOps(ARMCC::AL));
    }

    BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::HI)
      .addReg(ARM::CPSR);

    Register NewVReg3 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
        .addReg(NewVReg1)
        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    Register NewVReg4 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
        .addJumpTableIndex(MJTI)
        .add(predOps(ARMCC::AL));

    MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
        MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
    Register NewVReg5 = MRI->createVirtualRegister(TRC);
    BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
        .addReg(NewVReg3, RegState::Kill)
        .addReg(NewVReg4)
        .addImm(0)
        .addMemOperand(JTMMOLd)
        .add(predOps(ARMCC::AL));

    if (IsPositionIndependent) {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
        .addReg(NewVReg5, RegState::Kill)
        .addReg(NewVReg4)
        .addJumpTableIndex(MJTI);
    } else {
      BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
        .addReg(NewVReg5, RegState::Kill)
        .addJumpTableIndex(MJTI);
    }
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
  for (std::vector<MachineBasicBlock*>::iterator
         I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
    MachineBasicBlock *CurMBB = *I;
    if (SeenMBBs.insert(CurMBB).second)
      DispContBB->addSuccessor(CurMBB);
  }

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
  SmallVector<MachineBasicBlock*, 64> MBBLPads;
  for (MachineBasicBlock *BB : InvokeBBs) {

    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
                                                  BB->succ_end());
    while (!Successors.empty()) {
      MachineBasicBlock *SMBB = Successors.pop_back_val();
      if (SMBB->isEHPad()) {
        BB->removeSuccessor(SMBB);
        MBBLPads.push_back(SMBB);
      }
    }

    BB->addSuccessor(DispatchBB, BranchProbability::getZero());
    BB->normalizeSuccProbs();

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (MachineBasicBlock::reverse_iterator
           II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
      if (!II->isCall()) continue;

      DenseMap<unsigned, bool> DefRegs;
      for (MachineInstr::mop_iterator
             OI = II->operands_begin(), OE = II->operands_end();
           OI != OE; ++OI) {
        if (!OI->isReg()) continue;
        DefRegs[OI->getReg()] = true;
      }

      MachineInstrBuilder MIB(*MF, &*II);

      for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
        unsigned Reg = SavedRegs[i];
        if (Subtarget->isThumb2() &&
            !ARM::tGPRRegClass.contains(Reg) &&
            !ARM::hGPRRegClass.contains(Reg))
          continue;
        if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
          continue;
        if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
          continue;
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (SmallVectorImpl<MachineBasicBlock*>::iterator
         I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
    (*I)->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
}

static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}

/// Return the load opcode for a given load size. If the load size is >= 8, a
/// NEON opcode will be returned.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                                     : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                                     : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                                   : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If the store size is >= 8,
/// a NEON opcode will be returned.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                                     : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                                     : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                                   : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}
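// For instance (worked examples of the tables above):
//   getLdOpcode(4, /*IsThumb1=*/false, /*IsThumb2=*/true) == ARM::t2LDR_POST
//   getStOpcode(16, /*IsThumb1=*/false, /*IsThumb2=*/false) == ARM::VST1q32wb_fixed
// Unsupported sizes fall through to 0, which the callers assert against.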

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
  assert(LdOpc != 0 && "Should have a load opcode");
  if (LdSize >= 8) {
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // load + update AddrIn
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
        .addReg(AddrOut, RegState::Define)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(LdSize)
        .add(predOps(ARMCC::AL));
  }
}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
  unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
  assert(StOpc != 0 && "Should have a store opcode");
  if (StSize >= 8) {
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(AddrIn)
        .addImm(0)
        .addReg(Data)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb1) {
    // store + update AddrIn
    BuildMI(*BB, Pos, dl, TII->get(StOpc))
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(0)
        .add(predOps(ARMCC::AL));
    BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
        .add(t1CondCodeOp())
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else if (IsThumb2) {
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  } else { // arm
    BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
        .addReg(Data)
        .addReg(AddrIn)
        .addReg(0)
        .addImm(StSize)
        .add(predOps(ARMCC::AL));
  }
}

MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {
  // This pseudo instruction has 4 operands: dst, src, size, alignment.
  // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
  // Otherwise, we will generate unrolled scalar copies.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  Register dest = MI.getOperand(0).getReg();
  Register src = MI.getOperand(1).getReg();
  unsigned SizeVal = MI.getOperand(2).getImm();
  unsigned Align = MI.getOperand(3).getImm();
  DebugLoc dl = MI.getDebugLoc();

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnitSize = 0;
  const TargetRegisterClass *TRC = nullptr;
  const TargetRegisterClass *VecTRC = nullptr;

  bool IsThumb1 = Subtarget->isThumb1Only();
  bool IsThumb2 = Subtarget->isThumb2();
  bool IsThumb = Subtarget->isThumb();

  if (Align & 1) {
    UnitSize = 1;
  } else if (Align & 2) {
    UnitSize = 2;
  } else {
    // Check whether we can use NEON instructions.
    if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
        Subtarget->hasNEON()) {
      if ((Align % 16 == 0) && SizeVal >= 16)
        UnitSize = 16;
      else if ((Align % 8 == 0) && SizeVal >= 8)
        UnitSize = 8;
    }
    // Can't use NEON instructions.
    if (UnitSize == 0)
      UnitSize = 4;
  }

  // Select the correct opcode and register class for unit size load/store.
  bool IsNeon = UnitSize >= 8;
  TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
  if (IsNeon)
    VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
                            : UnitSize == 8 ? &ARM::DPRRegClass
                                            : nullptr;

  unsigned BytesLeft = SizeVal % UnitSize;
  unsigned LoopSize = SizeVal - BytesLeft;
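  // Worked example (illustrative values): SizeVal = 18 with 8-byte alignment
  // on a NEON-capable target gives UnitSize = 8, LoopSize = 16 and
  // BytesLeft = 2, i.e. two 8-byte copies plus a 2-byte scalar tail.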

  if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
    // Use LDR and STR to copy.
    // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
    // [destOut] = STR_POST(scratch, destIn, UnitSize)
    unsigned srcIn = src;
    unsigned destIn = dest;
    for (unsigned i = 0; i < LoopSize; i += UnitSize) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
      emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }

    // Handle the leftover bytes with LDRB and STRB.
    // [scratch, srcOut] = LDRB_POST(srcIn, 1)
    // [destOut] = STRB_POST(scratch, destIn, 1)
    for (unsigned i = 0; i < BytesLeft; i++) {
      Register srcOut = MRI.createVirtualRegister(TRC);
      Register destOut = MRI.createVirtualRegister(TRC);
      Register scratch = MRI.createVirtualRegister(TRC);
      emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
                 IsThumb1, IsThumb2);
      emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
                 IsThumb1, IsThumb2);
      srcIn = srcOut;
      destIn = destOut;
    }
    MI.eraseFromParent(); // The instruction is gone now.
    return BB;
  }

  // Expand the pseudo op to a loop.
  // thisMBB:
  //   ...
  //   movw varEnd, # --> with thumb2
  //   movt varEnd, #
  //   ldrcp varEnd, idx --> without thumb2
  //   fallthrough --> loopMBB
  // loopMBB:
  //   PHI varPhi, varEnd, varLoop
  //   PHI srcPhi, src, srcLoop
  //   PHI destPhi, dst, destLoop
  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  //   subs varLoop, varPhi, #UnitSize
  //   bne loopMBB
  //   fallthrough --> exitMBB
  // exitMBB:
  //   epilogue to handle left-over bytes
  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Load an immediate to varEnd.
  Register varEnd = MRI.createVirtualRegister(TRC);
  if (Subtarget->useMovt()) {
    unsigned Vtmp = varEnd;
    if ((LoopSize & 0xFFFF0000) != 0)
      Vtmp = MRI.createVirtualRegister(TRC);
    BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
        .addImm(LoopSize & 0xFFFF)
        .add(predOps(ARMCC::AL));

    if ((LoopSize & 0xFFFF0000) != 0)
      BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
          .addReg(Vtmp)
          .addImm(LoopSize >> 16)
          .add(predOps(ARMCC::AL));
  } else {
    MachineConstantPool *ConstantPool = MF->getConstantPool();
    Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
    const Constant *C = ConstantInt::get(Int32Ty, LoopSize);

    // MachineConstantPool wants an explicit alignment.
    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
    if (Align == 0)
      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
    unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
    MachineMemOperand *CPMMO =
        MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
                                 MachineMemOperand::MOLoad, 4, 4);

    if (IsThumb)
      BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
    else
      BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
          .addReg(varEnd, RegState::Define)
          .addConstantPoolIndex(Idx)
          .addImm(0)
          .add(predOps(ARMCC::AL))
          .addMemOperand(CPMMO);
  }
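  // Illustrative example: LoopSize = 0x12340 is materialized above as
  //   movw Vtmp, #0x2340 ; movt varEnd, #0x1
  // on movt-capable targets, and as a single constant-pool load otherwise.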
  BB->addSuccessor(loopMBB);

  // Generate the loop body:
  //   varPhi = PHI(varLoop, varEnd)
  //   srcPhi = PHI(srcLoop, src)
  //   destPhi = PHI(destLoop, dst)
  MachineBasicBlock *entryBB = BB;
  BB = loopMBB;
  Register varLoop = MRI.createVirtualRegister(TRC);
  Register varPhi = MRI.createVirtualRegister(TRC);
  Register srcLoop = MRI.createVirtualRegister(TRC);
  Register srcPhi = MRI.createVirtualRegister(TRC);
  Register destLoop = MRI.createVirtualRegister(TRC);
  Register destPhi = MRI.createVirtualRegister(TRC);

  BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
    .addReg(varLoop).addMBB(loopMBB)
    .addReg(varEnd).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
    .addReg(srcLoop).addMBB(loopMBB)
    .addReg(src).addMBB(entryBB);
  BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
    .addReg(destLoop).addMBB(loopMBB)
    .addReg(dest).addMBB(entryBB);

  //   [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
  //   [destLoop] = STR_POST(scratch, destPhi, UnitSize)
  Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
  emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
             IsThumb1, IsThumb2);
  emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
             IsThumb1, IsThumb2);

  // Decrement loop variable by UnitSize.
  if (IsThumb1) {
    BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
        .add(t1CondCodeOp())
        .addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL));
  } else {
    MachineInstrBuilder MIB =
        BuildMI(*BB, BB->end(), dl,
                TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
    MIB.addReg(varPhi)
        .addImm(UnitSize)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());
    MIB->getOperand(5).setReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  }
  BuildMI(*BB, BB->end(), dl,
          TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);

  // loopMBB can loop back to loopMBB or fall through to exitMBB.
  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  // Add epilogue to handle BytesLeft.
  BB = exitMBB;
  auto StartOfExit = exitMBB->begin();

  //   [scratch, srcOut] = LDRB_POST(srcLoop, 1)
  //   [destOut] = STRB_POST(scratch, destLoop, 1)
  unsigned srcIn = srcLoop;
  unsigned destIn = destLoop;
  for (unsigned i = 0; i < BytesLeft; i++) {
    Register srcOut = MRI.createVirtualRegister(TRC);
    Register destOut = MRI.createVirtualRegister(TRC);
    Register scratch = MRI.createVirtualRegister(TRC);
    emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
               IsThumb1, IsThumb2);
    emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
               IsThumb1, IsThumb2);
    srcIn = srcOut;
    destIn = destOut;
  }

  MI.eraseFromParent(); // The instruction is gone now.
  return BB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  const TargetMachine &TM = getTargetMachine();
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  assert(Subtarget->isTargetWindows() &&
         "__chkstk is only supported on Windows");
  assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");

  // __chkstk takes the number of words to allocate on the stack in R4, and
  // returns the stack adjustment in number of bytes in R4.  This will not
  // clobber any other registers (other than the obvious lr).
  //
  // Although, technically, IP should be considered a register which may be
  // clobbered, the call itself will not touch it.  Windows on ARM is a pure
  // Thumb-2 environment, so there is no interworking required.  As a result,
  // we do not expect a veneer to be emitted by the linker, clobbering IP.
  //
  // Each module receives its own copy of __chkstk, so no import thunk is
  // required, again, ensuring that IP is not clobbered.
  //
  // Finally, although some linkers may theoretically provide a trampoline for
  // out of range calls (which is quite common due to a 32M range limitation of
  // branches for Thumb), we can generate the long-call version via
  // -mcmodel=large, alleviating the need for the trampoline which may clobber
  // IP.

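  // For example (illustrative numbers): to allocate 4096 bytes, R4 holds 1024
  // (words) at the call, __chkstk returns with R4 = 4096 (bytes), and the
  // t2SUBrr at the end of this function subtracts that from SP.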
  switch (TM.getCodeModel()) {
  case CodeModel::Tiny:
    llvm_unreachable("Tiny code model not available on ARM.");
  case CodeModel::Small:
  case CodeModel::Medium:
  case CodeModel::Kernel:
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
        .add(predOps(ARMCC::AL))
        .addExternalSymbol("__chkstk")
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  case CodeModel::Large: {
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
    Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);

    BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
      .addExternalSymbol("__chkstk");
    BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
        .add(predOps(ARMCC::AL))
        .addReg(Reg, RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
        .addReg(ARM::R4, RegState::Implicit | RegState::Define)
        .addReg(ARM::R12,
                RegState::Implicit | RegState::Define | RegState::Dead)
        .addReg(ARM::CPSR,
                RegState::Implicit | RegState::Define | RegState::Dead);
    break;
  }
  }

  BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
      .addReg(ARM::SP, RegState::Kill)
      .addReg(ARM::R4, RegState::Kill)
      .setMIFlags(MachineInstr::FrameSetup)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());

  MI.eraseFromParent();
  return MBB;
}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
  MF->insert(++MBB->getIterator(), ContBB);
  ContBB->splice(ContBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  ContBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(ContBB);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
  MF->push_back(TrapBB);
  MBB->addSuccessor(TrapBB);
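  // The check itself: compare the operand against zero and branch to the
  // __brkdiv0 trap on equality; otherwise execution falls through the
  // conditional branch into ContBB.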

  BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
      .addReg(MI.getOperand(0).getReg())
      .addImm(0)
      .add(predOps(ARMCC::AL));
  BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
      .addMBB(TrapBB)
      .addImm(ARMCC::EQ)
      .addReg(ARM::CPSR);

  MI.eraseFromParent();
  return ContBB;
}

// The CPSR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CPSR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
                                   MachineBasicBlock* BB,
                                   const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of CPSR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(ARM::CPSR))
      return false;
    if (mi.definesRegister(ARM::CPSR))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CPSR is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(ARM::CPSR))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and CPSR wasn't live
  // out. SelectMI should have a kill flag on CPSR.
  SelectItr->addRegisterKilled(ARM::CPSR, TRI);
  return true;
}

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc dl = MI.getDebugLoc();
  bool isThumb2 = Subtarget->isThumb2();
  switch (MI.getOpcode()) {
  default: {
    MI.print(errs());
    llvm_unreachable("Unexpected instr type to insert");
  }

  // Thumb1 post-indexed loads are really just single-register LDMs.
  case ARM::tLDR_postidx: {
    MachineOperand Def(MI.getOperand(1));
    BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
        .add(Def)  // Rn_wb
        .add(MI.getOperand(2))  // Rn
        .add(MI.getOperand(3))  // PredImm
        .add(MI.getOperand(4))  // PredReg
        .add(MI.getOperand(0))  // Rt
        .cloneMemRefs(MI);
    MI.eraseFromParent();
    return BB;
  }

  // The Thumb2 pre-indexed stores have the same MI operands, they just
  // define them differently in the .td files from the isel patterns, so
  // they need pseudos.
  case ARM::t2STR_preidx:
    MI.setDesc(TII->get(ARM::t2STR_PRE));
    return BB;
  case ARM::t2STRB_preidx:
    MI.setDesc(TII->get(ARM::t2STRB_PRE));
    return BB;
  case ARM::t2STRH_preidx:
    MI.setDesc(TII->get(ARM::t2STRH_PRE));
    return BB;

  case ARM::STRi_preidx:
  case ARM::STRBi_preidx: {
    unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
                                                         : ARM::STRB_PRE_IMM;
    // Decode the offset.
    unsigned Offset = MI.getOperand(4).getImm();
    bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
    Offset = ARM_AM::getAM2Offset(Offset);
    if (isSub)
      Offset = -Offset;
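    // E.g. (illustrative) an addrmode2 immediate encoding "sub #4" decodes to
    // isSub == true and Offset == 4, which becomes -4 here before being
    // re-emitted as a plain signed immediate on the _PRE_IMM instruction.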

    MachineMemOperand *MMO = *MI.memoperands_begin();
    BuildMI(*BB, MI, dl, TII->get(NewOpc))
        .add(MI.getOperand(0)) // Rn_wb
        .add(MI.getOperand(1)) // Rt
        .add(MI.getOperand(2)) // Rn
        .addImm(Offset)        // offset (skip GPR==zero_reg)
        .add(MI.getOperand(5)) // pred
        .add(MI.getOperand(6))
        .addMemOperand(MMO);
    MI.eraseFromParent();
    return BB;
  }
  case ARM::STRr_preidx:
  case ARM::STRBr_preidx:
  case ARM::STRH_preidx: {
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
    case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
    case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
    }
    MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
    for (unsigned i = 0; i < MI.getNumOperands(); ++i)
      MIB.add(MI.getOperand(i));
    MI.eraseFromParent();
    return BB;
  }

  case ARM::tMOVCCr_pseudo: {
    // To "insert" a SELECT_CC instruction, we actually have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // destination vreg to set, the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator It = ++BB->getIterator();

    //  thisMBB:
    //  ...
    //   TrueVal = ...
    //   cmpTY ccX, r1, r2
    //   bCC copy1MBB
    //   fallthrough --> copy0MBB
    MachineBasicBlock *thisMBB  = BB;
    MachineFunction *F = BB->getParent();
    MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *sinkMBB  = F->CreateMachineBasicBlock(LLVM_BB);
    F->insert(It, copy0MBB);
    F->insert(It, sinkMBB);

    // Check whether CPSR is live past the tMOVCCr_pseudo.
    const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
    if (!MI.killsRegister(ARM::CPSR) &&
        !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
      copy0MBB->addLiveIn(ARM::CPSR);
      sinkMBB->addLiveIn(ARM::CPSR);
    }

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    sinkMBB->splice(sinkMBB->begin(), BB,
                    std::next(MachineBasicBlock::iterator(MI)), BB->end());
    sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(copy0MBB);
    BB->addSuccessor(sinkMBB);

    BuildMI(BB, dl, TII->get(ARM::tBcc))
        .addMBB(sinkMBB)
        .addImm(MI.getOperand(3).getImm())
        .addReg(MI.getOperand(4).getReg());

    //  copy0MBB:
    //   %FalseValue = ...
    //   # fallthrough to sinkMBB
    BB = copy0MBB;

    // Update machine-CFG edges
    BB->addSuccessor(sinkMBB);

    //  sinkMBB:
    //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
    //  ...
    BB = sinkMBB;
    BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
        .addReg(MI.getOperand(1).getReg())
        .addMBB(copy0MBB)
        .addReg(MI.getOperand(2).getReg())
        .addMBB(thisMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::BCCi64:
  case ARM::BCCZi64: {
    // If there is an unconditional branch to the other successor, remove it.
    BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());

    // Compare both parts that make up the double comparison separately for
    // equality.
    bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;

    Register LHS1 = MI.getOperand(1).getReg();
    Register LHS2 = MI.getOperand(2).getReg();
    if (RHSisZero) {
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
          .addReg(LHS1)
          .addImm(0)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(LHS2).addImm(0)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    } else {
      Register RHS1 = MI.getOperand(3).getReg();
      Register RHS2 = MI.getOperand(4).getReg();
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
          .addReg(LHS1)
          .addReg(RHS1)
          .add(predOps(ARMCC::AL));
      BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
        .addReg(LHS2).addReg(RHS2)
        .addImm(ARMCC::EQ).addReg(ARM::CPSR);
    }

    MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
    MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
    if (MI.getOperand(0).getImm() == ARMCC::NE)
      std::swap(destMBB, exitMBB);

    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
      .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
    if (isThumb2)
      BuildMI(BB, dl, TII->get(ARM::t2B))
          .addMBB(exitMBB)
          .add(predOps(ARMCC::AL));
    else
      BuildMI(BB, dl, TII->get(ARM::B)).addMBB(exitMBB);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }

  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return BB;

  case ARM::Int_eh_sjlj_setup_dispatch:
    EmitSjLjDispatchBlock(MI, BB);
    return BB;

  case ARM::ABS:
  case ARM::t2ABS: {
    // To insert an ABS instruction, we have to insert the
    // diamond control-flow pattern.  The incoming instruction knows the
    // source vreg to test against 0, the destination vreg to set,
    // the condition code register to branch on, the
    // true/false values to select between, and a branch opcode to use.
    // It transforms
    //     V1 = ABS V0
    // into
    //     V2 = MOVS V0
    //     BCC                      (branch to SinkBB if V0 >= 0)
    //     RSBBB: V3 = RSBri V2, 0  (compute ABS if V2 < 0)
    //     SinkBB: V1 = PHI(V2, V3)
    const BasicBlock *LLVM_BB = BB->getBasicBlock();
    MachineFunction::iterator BBI = ++BB->getIterator();
    MachineFunction *Fn = BB->getParent();
    MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
    MachineBasicBlock *SinkBB  = Fn->CreateMachineBasicBlock(LLVM_BB);
    Fn->insert(BBI, RSBBB);
    Fn->insert(BBI, SinkBB);

    Register ABSSrcReg = MI.getOperand(1).getReg();
    Register ABSDstReg = MI.getOperand(0).getReg();
    bool ABSSrcKill = MI.getOperand(1).isKill();
    bool isThumb2 = Subtarget->isThumb2();
    MachineRegisterInfo &MRI = Fn->getRegInfo();
    // In Thumb mode S must not be specified if source register is the SP or
    // PC and if destination register is the SP, so restrict register class
    Register NewRsbDstReg = MRI.createVirtualRegister(
        isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);

    // Transfer the remainder of BB and its successor edges to sinkMBB.
    SinkBB->splice(SinkBB->begin(), BB,
                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
    SinkBB->transferSuccessorsAndUpdatePHIs(BB);

    BB->addSuccessor(RSBBB);
    BB->addSuccessor(SinkBB);

    // fall through to SinkMBB
    RSBBB->addSuccessor(SinkBB);

    // insert a cmp at the end of BB
    BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
        .addReg(ABSSrcReg)
        .addImm(0)
        .add(predOps(ARMCC::AL));

    // insert a bcc with opposite CC to ARMCC::MI at the end of BB
    BuildMI(BB, dl,
      TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
      .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);

    // insert rsbri in RSBBB
    // Note: BCC and rsbri will be converted into predicated rsbmi
    // by if-conversion pass
    BuildMI(*RSBBB, RSBBB->begin(), dl,
            TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
        .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
        .addImm(0)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    // insert PHI in SinkBB,
    // reuse ABSDstReg to not change uses of ABS instruction
    BuildMI(*SinkBB, SinkBB->begin(), dl,
      TII->get(ARM::PHI), ABSDstReg)
      .addReg(NewRsbDstReg).addMBB(RSBBB)
      .addReg(ABSSrcReg).addMBB(BB);

    // remove ABS instruction
    MI.eraseFromParent();

    // return last added BB
    return SinkBB;
  }
  case ARM::COPY_STRUCT_BYVAL_I32:
    ++NumLoopByVals;
    return EmitStructByval(MI, BB);
  case ARM::WIN__CHKSTK:
    return EmitLowered__chkstk(MI, BB);
  case ARM::WIN__DBZCHK:
    return EmitLowered__dbzchk(MI, BB);
  }
}

/// Attaches vregs to MEMCPY that it will use as scratch registers when it is
/// expanded into LDM/STM. This is done as a post-isel lowering instead of as
/// a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused, mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI.getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI.getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
    Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
  }
}

void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust instructions that potentially set the 's' bit after isel, i.e.
  // ADC, SBC, RSB, RSC. Coming out of isel, they have an implicit CPSR def,
  // but the optional operand is still set to noreg. If needed, set the
  // optional operand's register to CPSR, and remove the redundant implicit
  // def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
        && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}
11026 // Combine a constant select operand into its use:
11027 //
11028 //   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
11029 //   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
11030 //   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
11031 //   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
11032 //   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
11033 //
11034 // The transform is rejected if the select doesn't have a constant operand that
11035 // is null, or all ones when AllOnes is set.
11036 //
11037 // Also recognize sext/zext from i1:
11038 //
11039 //   (add (zext cc), x) -> (select cc (add x, 1), x)
11040 //   (add (sext cc), x) -> (select cc (add x, -1), x)
11041 //
11042 // These transformations eventually create predicated instructions.
11043 //
11044 // @param N       The node to transform.
11045 // @param Slct    The N operand that is a select.
11046 // @param OtherOp The other N operand (x above).
11047 // @param DCI     Context.
11048 // @param AllOnes Require the select constant to be all ones instead of null.
11049 // @returns The new node, or SDValue() on failure.
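// For example (a worked instance of the first pattern above, assuming cc is
// an i1 value): (add (select cc, 0, 4), x) becomes (select cc, x, (add x, 4)),
// which can later be turned into a single predicated add.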
11050 static
11051 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
11052                             TargetLowering::DAGCombinerInfo &DCI,
11053                             bool AllOnes = false) {
11054   SelectionDAG &DAG = DCI.DAG;
11055   EVT VT = N->getValueType(0);
11056   SDValue NonConstantVal;
11057   SDValue CCOp;
11058   bool SwapSelectOps;
11059   if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
11060                                   NonConstantVal, DAG))
11061     return SDValue();
11062 
11063   // Slct is now known to be the desired identity constant when CC is true.
11064   SDValue TrueVal = OtherOp;
11065   SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
11066                                  OtherOp, NonConstantVal);
11067   // Unless SwapSelectOps says CC should be false.
11068   if (SwapSelectOps)
11069     std::swap(TrueVal, FalseVal);
11070 
11071   return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
11072                      CCOp, TrueVal, FalseVal);
11073 }
11074 
11075 // Attempt combineSelectAndUse on each operand of a commutative operator N.
11076 static
11077 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
11078                                        TargetLowering::DAGCombinerInfo &DCI) {
11079   SDValue N0 = N->getOperand(0);
11080   SDValue N1 = N->getOperand(1);
11081   if (N0.getNode()->hasOneUse())
11082     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
11083       return Result;
11084   if (N1.getNode()->hasOneUse())
11085     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
11086       return Result;
11087   return SDValue();
11088 }
11089 
11090 static bool IsVUZPShuffleNode(SDNode *N) {
11091   // VUZP shuffle node.
11092   if (N->getOpcode() == ARMISD::VUZP)
11093     return true;
11094 
11095   // "VUZP" on i32 is an alias for VTRN.
11096   if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
11097     return true;
11098 
11099   return false;
11100 }
11101 
11102 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
11103                                  TargetLowering::DAGCombinerInfo &DCI,
11104                                  const ARMSubtarget *Subtarget) {
11105   // Look for ADD(VUZP.0, VUZP.1).
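  // For example (illustrative), with v4i16 operands a and b:
  //   vuzp(a,b).0 = <a0,a2,b0,b2> and vuzp(a,b).1 = <a1,a3,b1,b3>,
  // so their lane-wise sum is <a0+a1,a2+a3,b0+b1,b2+b3>, i.e. vpadd(a,b).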
11106   if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
11107       N0 == N1)
11108     return SDValue();
11109 
11110   // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
11111   if (!N->getValueType(0).is64BitVector())
11112     return SDValue();
11113 
11114   // Generate vpadd.
11115   SelectionDAG &DAG = DCI.DAG;
11116   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11117   SDLoc dl(N);
11118   SDNode *Unzip = N0.getNode();
11119   EVT VT = N->getValueType(0);
11120 
11121   SmallVector<SDValue, 8> Ops;
11122   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
11123                                 TLI.getPointerTy(DAG.getDataLayout())));
11124   Ops.push_back(Unzip->getOperand(0));
11125   Ops.push_back(Unzip->getOperand(1));
11126 
11127   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11128 }
11129 
11130 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11131                                       TargetLowering::DAGCombinerInfo &DCI,
11132                                       const ARMSubtarget *Subtarget) {
11133   // Check for two extended operands.
11134   if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
11135         N1.getOpcode() == ISD::SIGN_EXTEND) &&
11136       !(N0.getOpcode() == ISD::ZERO_EXTEND &&
11137         N1.getOpcode() == ISD::ZERO_EXTEND))
11138     return SDValue();
11139 
11140   SDValue N00 = N0.getOperand(0);
11141   SDValue N10 = N1.getOperand(0);
11142 
11143   // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
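  // For example (illustrative): with a,b: v4i16 unzipped and sign-extended to
  // v4i32, the sum is <a0+a1,a2+a3,b0+b1,b2+b3>, which is vpaddl.s16 applied
  // to concat(a,b); that is the node we build below.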
11144   if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
11145       N00 == N10)
11146     return SDValue();
11147 
11148   // We only recognize Q register paddl here; this can't be reached until
11149   // after type legalization.
11150   if (!N00.getValueType().is64BitVector() ||
11151       !N0.getValueType().is128BitVector())
11152     return SDValue();
11153 
11154   // Generate vpaddl.
11155   SelectionDAG &DAG = DCI.DAG;
11156   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11157   SDLoc dl(N);
11158   EVT VT = N->getValueType(0);
11159 
11160   SmallVector<SDValue, 8> Ops;
11161   // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
11162   unsigned Opcode;
11163   if (N0.getOpcode() == ISD::SIGN_EXTEND)
11164     Opcode = Intrinsic::arm_neon_vpaddls;
11165   else
11166     Opcode = Intrinsic::arm_neon_vpaddlu;
11167   Ops.push_back(DAG.getConstant(Opcode, dl,
11168                                 TLI.getPointerTy(DAG.getDataLayout())));
11169   EVT ElemTy = N00.getValueType().getVectorElementType();
11170   unsigned NumElts = VT.getVectorNumElements();
11171   EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
11172   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
11173                                N00.getOperand(0), N00.getOperand(1));
11174   Ops.push_back(Concat);
11175 
11176   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11177 }
11178 
11179 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
11180 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
11181 // much easier to match.
11182 static SDValue
11183 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11184                                TargetLowering::DAGCombinerInfo &DCI,
11185                                const ARMSubtarget *Subtarget) {
11186   // Only perform the optimization after legalization, and only if NEON is
11187   // available. We also expect both operands to be BUILD_VECTORs.
11188   if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
11189       || N0.getOpcode() != ISD::BUILD_VECTOR
11190       || N1.getOpcode() != ISD::BUILD_VECTOR)
11191     return SDValue();
11192 
11193   // Check output type since VPADDL operand elements can only be 8, 16, or 32.
11194   EVT VT = N->getValueType(0);
11195   if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
11196     return SDValue();
11197 
11198   // Check that the vector operands are of the right form.
11199   // N0 and N1 are BUILD_VECTOR nodes with N EXTRACT_VECTOR operands,
11200   // where N is the size of the formed vector.
11201   // Each EXTRACT_VECTOR should have the same input vector and an odd or
11202   // even index such that we have a pairwise add pattern.
11203 
11204   // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
11205   if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11206     return SDValue();
11207   SDValue Vec = N0->getOperand(0)->getOperand(0);
11208   SDNode *V = Vec.getNode();
11209   unsigned nextIndex = 0;
11210 
11211   // For each operand of the ADD (both of which are BUILD_VECTORs),
11212   // check that each of their operands is an EXTRACT_VECTOR with
11213   // the same input vector and the appropriate index.
11214   for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
11215     if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
11216         && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11217 
11218       SDValue ExtVec0 = N0->getOperand(i);
11219       SDValue ExtVec1 = N1->getOperand(i);
11220 
11221       // First operand is the vector; verify it's the same.
11222       if (V != ExtVec0->getOperand(0).getNode() ||
11223           V != ExtVec1->getOperand(0).getNode())
11224         return SDValue();
11225 
11226       // Second is the constant; verify it's correct.
11227       ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
11228       ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
11229 
11230       // For the constant, we want to see all the even or all the odd indices.
11231       if (!C0 || !C1 || C0->getZExtValue() != nextIndex
11232           || C1->getZExtValue() != nextIndex+1)
11233         return SDValue();
11234 
11235       // Increment index.
11236       nextIndex += 2;
11237     } else
11238       return SDValue();
11239   }
11240 
11241   // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
11242   // we're using the entire input vector, otherwise there's a size/legality
11243   // mismatch somewhere.
11244   if (nextIndex != Vec.getValueType().getVectorNumElements() ||
11245       Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
11246     return SDValue();
11247 
11248   // Create VPADDL node.
11249   SelectionDAG &DAG = DCI.DAG;
11250   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11251 
11252   SDLoc dl(N);
11253 
11254   // Build operand list.
11255   SmallVector<SDValue, 8> Ops;
11256   Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
11257                                 TLI.getPointerTy(DAG.getDataLayout())));
11258 
11259   // Input is the vector.
11260   Ops.push_back(Vec);
11261 
11262   // Get widened type and narrowed type.
11263   MVT widenType;
11264   unsigned numElem = VT.getVectorNumElements();
11265 
11266   EVT inputLaneType = Vec.getValueType().getVectorElementType();
11267   switch (inputLaneType.getSimpleVT().SimpleTy) {
11268     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
11269     case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
11270     case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
11271     default:
11272       llvm_unreachable("Invalid vector element type for padd optimization.");
11273   }
11274 
11275   SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
11276   unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
11277   return DAG.getNode(ExtOp, dl, VT, tmp);
11278 }
11279 
11280 static SDValue findMUL_LOHI(SDValue V) {
11281   if (V->getOpcode() == ISD::UMUL_LOHI ||
11282       V->getOpcode() == ISD::SMUL_LOHI)
11283     return V;
11284   return SDValue();
11285 }
11286 
11287 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
11288                                         TargetLowering::DAGCombinerInfo &DCI,
11289                                         const ARMSubtarget *Subtarget) {
11290   if (!Subtarget->hasBaseDSP())
11291     return SDValue();
11292 
11293   // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
11294   // accumulate the product into a 64-bit value. The 16-bit values will
11295   // be sign extended somehow or SRA'd into 32-bit values
11296   // (addc (adde (mul 16bit, 16bit), lo), hi)
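  // For example (illustrative), SMLALBT rdlo, rdhi, rn, rm computes
  //   rdhi:rdlo += sext(rn[15:0]) * sext(rm[31:16]),
  // matching a mul of a sign-extended bottom half and an SRA'd top half
  // accumulated into the 64-bit lo/hi pair.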
11297   SDValue Mul = AddcNode->getOperand(0);
11298   SDValue Lo = AddcNode->getOperand(1);
11299   if (Mul.getOpcode() != ISD::MUL) {
11300     Lo = AddcNode->getOperand(0);
11301     Mul = AddcNode->getOperand(1);
11302     if (Mul.getOpcode() != ISD::MUL)
11303       return SDValue();
11304   }
11305 
11306   SDValue SRA = AddeNode->getOperand(0);
11307   SDValue Hi = AddeNode->getOperand(1);
11308   if (SRA.getOpcode() != ISD::SRA) {
11309     SRA = AddeNode->getOperand(1);
11310     Hi = AddeNode->getOperand(0);
11311     if (SRA.getOpcode() != ISD::SRA)
11312       return SDValue();
11313   }
11314   if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
11315     if (Const->getZExtValue() != 31)
11316       return SDValue();
11317   } else
11318     return SDValue();
11319 
11320   if (SRA.getOperand(0) != Mul)
11321     return SDValue();
11322 
11323   SelectionDAG &DAG = DCI.DAG;
11324   SDLoc dl(AddcNode);
11325   unsigned Opcode = 0;
11326   SDValue Op0;
11327   SDValue Op1;
11328 
11329   if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
11330     Opcode = ARMISD::SMLALBB;
11331     Op0 = Mul.getOperand(0);
11332     Op1 = Mul.getOperand(1);
11333   } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
11334     Opcode = ARMISD::SMLALBT;
11335     Op0 = Mul.getOperand(0);
11336     Op1 = Mul.getOperand(1).getOperand(0);
11337   } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
11338     Opcode = ARMISD::SMLALTB;
11339     Op0 = Mul.getOperand(0).getOperand(0);
11340     Op1 = Mul.getOperand(1);
11341   } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
11342     Opcode = ARMISD::SMLALTT;
11343     Op0 = Mul->getOperand(0).getOperand(0);
11344     Op1 = Mul->getOperand(1).getOperand(0);
11345   }
11346 
11347   if (!Op0 || !Op1)
11348     return SDValue();
11349 
11350   SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
11351                               Op0, Op1, Lo, Hi);
11352   // Replace the ADD nodes' uses with the MLAL node's values.
11353   SDValue HiMLALResult(SMLAL.getNode(), 1);
11354   SDValue LoMLALResult(SMLAL.getNode(), 0);
11355 
11356   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
11357   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
11358 
11359   // Return original node to notify the driver to stop replacing.
11360   SDValue resNode(AddcNode, 0);
11361   return resNode;
11362 }
11363 
11364 static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
11365                                      TargetLowering::DAGCombinerInfo &DCI,
11366                                      const ARMSubtarget *Subtarget) {
11367   // Look for multiply-add opportunities.
11368   // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
11369   // each add node consumes a value from the ISD::UMUL_LOHI and there is
11370   // a glue link from the first add to the second add.
11371   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE
11372   // with a single S/UMLAL instruction.
11373   //                  UMUL_LOHI
11374   //                 / :lo    \ :hi
11375   //                V          \          [no multiline comment]
11376   //    loAdd ->  ADDC         |
11377   //                 \ :carry /
11378   //                  V      V
11379   //                    ADDE   <- hiAdd
11380   //
11381   // In the special case where only the higher part of a signed result is used
11382   // and the add to the low part of the ISD::UMUL_LOHI result adds or subtracts
11383   // a constant with the exact value 0x80000000, we recognize we are dealing
11384   // with a "rounded multiply and add" (or subtract) and transform it into
11385   // either an ARMISD::SMMLAR or an ARMISD::SMMLSR, respectively.
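  // For example (illustrative), C source such as
  //   r = (int)(((long long)a * b + 0x80000000LL) >> 32);
  // only uses the high half of the product and maps onto the "rounded"
  // SMMULR/SMMLAR forms.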
11386 
11387   assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
11388           AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
11389          "Expect an ADDE or SUBE");
11390 
11391   assert(AddeSubeNode->getNumOperands() == 3 &&
11392          AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
11393          "ADDE node has the wrong inputs");
11394 
11395   // Check that we are chained to the right ADDC or SUBC node.
11396   SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
11397   if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11398        AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
11399       (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
11400        AddcSubcNode->getOpcode() != ARMISD::SUBC))
11401     return SDValue();
11402 
11403   SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
11404   SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);
11405 
11406   // Check if the two operands are from the same mul_lohi node.
11407   if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
11408     return SDValue();
11409 
11410   assert(AddcSubcNode->getNumValues() == 2 &&
11411          AddcSubcNode->getValueType(0) == MVT::i32 &&
11412          "Expect ADDC with two result values. First: i32");
11413 
11414   // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
11415   // may be an SMLAL which multiplies two 16-bit values.
11416   if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11417       AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
11418       AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
11419       AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
11420       AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
11421     return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);
11422 
11423   // Check for the triangle shape.
11424   SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
11425   SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);
11426 
11427   // Make sure that the ADDE/SUBE operands are not coming from the same node.
11428   if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
11429     return SDValue();
11430 
11431   // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
11432   bool IsLeftOperandMUL = false;
11433   SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
11434   if (MULOp == SDValue())
11435     MULOp = findMUL_LOHI(AddeSubeOp1);
11436   else
11437     IsLeftOperandMUL = true;
11438   if (MULOp == SDValue())
11439     return SDValue();
11440 
11441   // Figure out the right opcode.
11442   unsigned Opc = MULOp->getOpcode();
11443   unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
11444 
11445   // Figure out the high and low input values to the MLAL node.
11446   SDValue *HiAddSub = nullptr;
11447   SDValue *LoMul = nullptr;
11448   SDValue *LowAddSub = nullptr;
11449 
11450   // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
11451   if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
11452     return SDValue();
11453 
11454   if (IsLeftOperandMUL)
11455     HiAddSub = &AddeSubeOp1;
11456   else
11457     HiAddSub = &AddeSubeOp0;
11458 
11459   // Ensure that LoMul and LowAddSub are taken from the correct ISD::SMUL_LOHI
11460   // node whose low result is fed to the ADDC/SUBC we are checking.
11461 
11462   if (AddcSubcOp0 == MULOp.getValue(0)) {
11463     LoMul = &AddcSubcOp0;
11464     LowAddSub = &AddcSubcOp1;
11465   }
11466   if (AddcSubcOp1 == MULOp.getValue(0)) {
11467     LoMul = &AddcSubcOp1;
11468     LowAddSub = &AddcSubcOp0;
11469   }
11470 
11471   if (!LoMul)
11472     return SDValue();
11473 
11474   // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
11475   // the replacement below will create a cycle.
11476   if (AddcSubcNode == HiAddSub->getNode() ||
11477       AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
11478     return SDValue();
11479 
11480   // Create the merged node.
11481   SelectionDAG &DAG = DCI.DAG;
11482 
11483   // Start building operand list.
11484   SmallVector<SDValue, 8> Ops;
11485   Ops.push_back(LoMul->getOperand(0));
11486   Ops.push_back(LoMul->getOperand(1));
11487 
11488   // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
11489   // the case, we must be doing signed multiplication and only use the higher
11490   // part of the result of the MLAL; furthermore, the LowAddSub must be a
11491   // constant addition or subtraction with the value 0x80000000.
11492   if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
11493       FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
11494       LowAddSub->getNode()->getOpcode() == ISD::Constant &&
11495       static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
11496           0x80000000) {
11497     Ops.push_back(*HiAddSub);
11498     if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
11499       FinalOpc = ARMISD::SMMLSR;
11500     } else {
11501       FinalOpc = ARMISD::SMMLAR;
11502     }
11503     SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
11504     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);
11505 
11506     return SDValue(AddeSubeNode, 0);
11507   } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
11508     // SMMLS is generated during instruction selection and the rest of this
11509     // function cannot handle the case where AddcSubcNode is a SUBC.
11510     return SDValue();
11511 
11512   // Finish building the operand list for {U/S}MLAL
11513   Ops.push_back(*LowAddSub);
11514   Ops.push_back(*HiAddSub);
11515 
11516   SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
11517                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
11518 
11519   // Replace the ADD nodes' uses with the MLAL node's values.
11520   SDValue HiMLALResult(MLALNode.getNode(), 1);
11521   DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);
11522 
11523   SDValue LoMLALResult(MLALNode.getNode(), 0);
11524   DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);
11525 
11526   // Return original node to notify the driver to stop replacing.
11527   return SDValue(AddeSubeNode, 0);
11528 }
11529 
11530 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
11531                                       TargetLowering::DAGCombinerInfo &DCI,
11532                                       const ARMSubtarget *Subtarget) {
11533   // UMAAL is similar to UMLAL except that it adds two unsigned values.
11534   // While trying to combine for the other MLAL nodes, first search for the
11535   // chance to use UMAAL. Check if Addc uses a node which has already
11536   // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
11537   // as the addend, and it's handled in PerformUMLALCombine.
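  // For reference (ARM ARM semantics): UMAAL RdLo, RdHi, Rn, Rm computes
  //   RdHi:RdLo = Rn * Rm + RdLo + RdHi,
  // a 64-bit result that cannot overflow, which is why the ADDC/ADDE carries
  // can be folded away here.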
11538 
11539   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
11540     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11541 
11542   // Check that we have a glued ADDC node.
11543   SDNode* AddcNode = AddeNode->getOperand(2).getNode();
11544   if (AddcNode->getOpcode() != ARMISD::ADDC)
11545     return SDValue();
11546 
11547   // Find the converted UMAAL or quit if it doesn't exist.
11548   SDNode *UmlalNode = nullptr;
11549   SDValue AddHi;
11550   if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
11551     UmlalNode = AddcNode->getOperand(0).getNode();
11552     AddHi = AddcNode->getOperand(1);
11553   } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
11554     UmlalNode = AddcNode->getOperand(1).getNode();
11555     AddHi = AddcNode->getOperand(0);
11556   } else {
11557     return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11558   }
11559 
11560   // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
11561   // the ADDC as well as Zero.
11562   if (!isNullConstant(UmlalNode->getOperand(3)))
11563     return SDValue();
11564 
11565   if ((isNullConstant(AddeNode->getOperand(0)) &&
11566        AddeNode->getOperand(1).getNode() == UmlalNode) ||
11567       (AddeNode->getOperand(0).getNode() == UmlalNode &&
11568        isNullConstant(AddeNode->getOperand(1)))) {
11569     SelectionDAG &DAG = DCI.DAG;
11570     SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
11571                       UmlalNode->getOperand(2), AddHi };
11572     SDValue UMAAL =  DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
11573                                  DAG.getVTList(MVT::i32, MVT::i32), Ops);
11574 
11575     // Replace the ADD nodes' uses with the UMAAL node's values.
11576     DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
11577     DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
11578 
11579     // Return original node to notify the driver to stop replacing.
11580     return SDValue(AddeNode, 0);
11581   }
11582   return SDValue();
11583 }
11584 
11585 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
11586                                    const ARMSubtarget *Subtarget) {
11587   if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
11588     return SDValue();
11589 
11590   // Check that we have a pair of ADDC and ADDE as operands.
11591   // Both addends of the ADDE must be zero.
11592   SDNode* AddcNode = N->getOperand(2).getNode();
11593   SDNode* AddeNode = N->getOperand(3).getNode();
11594   if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
11595       (AddeNode->getOpcode() == ARMISD::ADDE) &&
11596       isNullConstant(AddeNode->getOperand(0)) &&
11597       isNullConstant(AddeNode->getOperand(1)) &&
11598       (AddeNode->getOperand(2).getNode() == AddcNode))
11599     return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
11600                        DAG.getVTList(MVT::i32, MVT::i32),
11601                        {N->getOperand(0), N->getOperand(1),
11602                         AddcNode->getOperand(0), AddcNode->getOperand(1)});
11603   else
11604     return SDValue();
11605 }
11606 
11607 static SDValue PerformAddcSubcCombine(SDNode *N,
11608                                       TargetLowering::DAGCombinerInfo &DCI,
11609                                       const ARMSubtarget *Subtarget) {
11610   SelectionDAG &DAG(DCI.DAG);
11611 
11612   if (N->getOpcode() == ARMISD::SUBC) {
11613     // (SUBC (ADDE 0, 0, C), 1) -> C
11614     SDValue LHS = N->getOperand(0);
11615     SDValue RHS = N->getOperand(1);
11616     if (LHS->getOpcode() == ARMISD::ADDE &&
11617         isNullConstant(LHS->getOperand(0)) &&
11618         isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
11619       return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
11620     }
11621   }
11622 
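  // On Thumb1, negative immediates are not directly encodable, so (as an
  // illustrative example of the rewrite below) (ADDC x, -8) becomes
  // (SUBC x, 8) and (SUBC x, -8) becomes (ADDC x, 8).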
11623   if (Subtarget->isThumb1Only()) {
11624     SDValue RHS = N->getOperand(1);
11625     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
11626       int32_t imm = C->getSExtValue();
11627       if (imm < 0 && imm > std::numeric_limits<int>::min()) {
11628         SDLoc DL(N);
11629         RHS = DAG.getConstant(-imm, DL, MVT::i32);
11630         unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
11631                                                            : ARMISD::ADDC;
11632         return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
11633       }
11634     }
11635   }
11636 
11637   return SDValue();
11638 }
11639 
11640 static SDValue PerformAddeSubeCombine(SDNode *N,
11641                                       TargetLowering::DAGCombinerInfo &DCI,
11642                                       const ARMSubtarget *Subtarget) {
11643   if (Subtarget->isThumb1Only()) {
11644     SelectionDAG &DAG = DCI.DAG;
11645     SDValue RHS = N->getOperand(1);
11646     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
11647       int64_t imm = C->getSExtValue();
11648       if (imm < 0) {
11649         SDLoc DL(N);
11650 
11651         // The with-carry-in form matches bitwise not instead of the negation.
11652         // Effectively, the inverse interpretation of the carry flag already
11653         // accounts for part of the negation.
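        // For example (illustrative): ADDE(x, -5, c) = x - 5 + c, and since
        // SBC computes x - k - 1 + c, choosing k = ~(-5) = 4 yields the same
        // value, so we emit SUBE(x, 4, c).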
11654         RHS = DAG.getConstant(~imm, DL, MVT::i32);
11655 
11656         unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
11657                                                            : ARMISD::ADDE;
11658         return DAG.getNode(Opcode, DL, N->getVTList(),
11659                            N->getOperand(0), RHS, N->getOperand(2));
11660       }
11661     }
11662   } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
11663     return AddCombineTo64bitMLAL(N, DCI, Subtarget);
11664   }
11665   return SDValue();
11666 }
11667 
11668 static SDValue PerformABSCombine(SDNode *N,
11669                                   TargetLowering::DAGCombinerInfo &DCI,
11670                                   const ARMSubtarget *Subtarget) {
11671   SDValue res;
11672   SelectionDAG &DAG = DCI.DAG;
11673   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11674 
11675   if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
11676     return SDValue();
11677 
11678   if (!TLI.expandABS(N, res, DAG))
11679     return SDValue();
11680 
11681   return res;
11682 }
11683 
11684 /// PerformADDECombine - Target-specific dag combine transform from
11685 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::{U,S}MUL_LOHI to MLAL, or from
11686 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
11687 static SDValue PerformADDECombine(SDNode *N,
11688                                   TargetLowering::DAGCombinerInfo &DCI,
11689                                   const ARMSubtarget *Subtarget) {
11690   // Only ARM and Thumb2 support UMLAL/SMLAL.
11691   if (Subtarget->isThumb1Only())
11692     return PerformAddeSubeCombine(N, DCI, Subtarget);
11693 
11694   // Only perform the checks after legalize when the pattern is available.
11695   if (DCI.isBeforeLegalize()) return SDValue();
11696 
11697   return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
11698 }
11699 
11700 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
11701 /// operands N0 and N1.  This is a helper for PerformADDCombine that is
11702 /// called with the default operands, and if that fails, with commuted
11703 /// operands.
11704 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
11705                                           TargetLowering::DAGCombinerInfo &DCI,
11706                                           const ARMSubtarget *Subtarget){
11707   // Attempt to create vpadd for this add.
11708   if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
11709     return Result;
11710 
11711   // Attempt to create vpaddl for this add.
11712   if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
11713     return Result;
11714   if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
11715                                                       Subtarget))
11716     return Result;
11717 
11718   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
11719   if (N0.getNode()->hasOneUse())
11720     if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
11721       return Result;
11722   return SDValue();
11723 }
11724 
11725 bool
11726 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
11727                                                  CombineLevel Level) const {
11728   if (Level == BeforeLegalizeTypes)
11729     return true;
11730 
11731   if (N->getOpcode() != ISD::SHL)
11732     return true;
11733 
11734   if (Subtarget->isThumb1Only()) {
11735     // Avoid making expensive immediates by commuting shifts. (This logic
11736     // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
11737     // for free.)
11738     if (N->getOpcode() != ISD::SHL)
11739       return true;
11740     SDValue N1 = N->getOperand(0);
11741     if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
11742         N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
11743       return true;
11744     if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
11745       if (Const->getAPIntValue().ult(256))
11746         return false;
11747       if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
11748           Const->getAPIntValue().sgt(-256))
11749         return false;
11750     }
11751     return true;
11752   }
11753 
11754   // Turn off commute-with-shift transform after legalization, so it doesn't
11755   // conflict with PerformSHLSimplify.  (We could try to detect when
11756   // PerformSHLSimplify would trigger more precisely, but it isn't
11757   // really necessary.)
11758   return false;
11759 }
11760 
11761 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
11762     const SDNode *N, CombineLevel Level) const {
11763   if (!Subtarget->isThumb1Only())
11764     return true;
11765 
11766   if (Level == BeforeLegalizeTypes)
11767     return true;
11768 
11769   return false;
11770 }
11771 
11772 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
11773   if (!Subtarget->hasNEON()) {
11774     if (Subtarget->isThumb1Only())
11775       return VT.getScalarSizeInBits() <= 32;
11776     return true;
11777   }
11778   return VT.isScalarInteger();
11779 }
11780 
11781 static SDValue PerformSHLSimplify(SDNode *N,
11782                                 TargetLowering::DAGCombinerInfo &DCI,
11783                                 const ARMSubtarget *ST) {
11784   // Allow the generic combiner to identify potential bswaps.
11785   if (DCI.isBeforeLegalize())
11786     return SDValue();
11787 
11788   // DAG combiner will fold:
11789   // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
11790   // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
11791   // Other code patterns that can also be modified have the following form:
11792   // b + ((a << 1) | 510)
11793   // b + ((a << 1) & 510)
11794   // b + ((a << 1) ^ 510)
11795   // b + ((a << 1) + 510)
11796 
11797   // Many instructions can perform the shift for free, but that requires both
11798   // operands to be registers. If c1 << c2 is too large, a mov immediate
11799   // instruction will be needed. So, unfold back to the original pattern if:
11800   // - c1 and c2 are small enough that they don't require mov imms, and
11801   // - the user(s) of the node can perform a shl themselves.
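  // For example (illustrative): for (add (shl x, 1), 510) we have
  // c1 << c2 = 510 and c2 = 1, so shifting gives c1 = 255; both 255 and 1
  // fit in a rotated 8-bit immediate, so we rebuild (shl (add x, 255), 1).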
11802 
11803   // No shifted operands for 16-bit instructions.
11804   if (ST->isThumb() && ST->isThumb1Only())
11805     return SDValue();
11806 
11807   // Check that all the users could perform the shl themselves.
11808   for (auto U : N->uses()) {
11809     switch(U->getOpcode()) {
11810     default:
11811       return SDValue();
11812     case ISD::SUB:
11813     case ISD::ADD:
11814     case ISD::AND:
11815     case ISD::OR:
11816     case ISD::XOR:
11817     case ISD::SETCC:
11818     case ARMISD::CMP:
11819       // Check that the user isn't already using a constant because there
11820       // aren't any instructions that support an immediate operand and a
11821       // shifted operand.
11822       if (isa<ConstantSDNode>(U->getOperand(0)) ||
11823           isa<ConstantSDNode>(U->getOperand(1)))
11824         return SDValue();
11825 
11826       // Check that it's not already using a shift.
11827       if (U->getOperand(0).getOpcode() == ISD::SHL ||
11828           U->getOperand(1).getOpcode() == ISD::SHL)
11829         return SDValue();
11830       break;
11831     }
11832   }
11833 
11834   if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
11835       N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
11836     return SDValue();
11837 
11838   if (N->getOperand(0).getOpcode() != ISD::SHL)
11839     return SDValue();
11840 
11841   SDValue SHL = N->getOperand(0);
11842 
11843   auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
11844   auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
11845   if (!C1ShlC2 || !C2)
11846     return SDValue();
11847 
11848   APInt C2Int = C2->getAPIntValue();
11849   APInt C1Int = C1ShlC2->getAPIntValue();
11850 
11851   // Check that performing a lshr will not lose any information.
11852   APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
11853                                      C2Int.getBitWidth() - C2->getZExtValue());
11854   if ((C1Int & Mask) != C1Int)
11855     return SDValue();
11856 
11857   // Shift the first constant.
11858   C1Int.lshrInPlace(C2Int);
11859 
11860   // The immediates are encoded as an 8-bit value that can be rotated.
11861   auto LargeImm = [](const APInt &Imm) {
11862     unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
11863     return Imm.getBitWidth() - Zeros > 8;
11864   };
11865 
11866   if (LargeImm(C1Int) || LargeImm(C2Int))
11867     return SDValue();
11868 
11869   SelectionDAG &DAG = DCI.DAG;
11870   SDLoc dl(N);
11871   SDValue X = SHL.getOperand(0);
11872   SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
11873                               DAG.getConstant(C1Int, dl, MVT::i32));
11874   // Shift left to compensate for the lshr of C1Int.
11875   SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));
11876 
11877   LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
11878              SHL.dump(); N->dump());
11879   LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
11880   return Res;
11881 }
11882 
11883 
11884 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
11885 ///
11886 static SDValue PerformADDCombine(SDNode *N,
11887                                  TargetLowering::DAGCombinerInfo &DCI,
11888                                  const ARMSubtarget *Subtarget) {
11889   SDValue N0 = N->getOperand(0);
11890   SDValue N1 = N->getOperand(1);
11891 
11892   // Only works one way, because it needs an immediate operand.
11893   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
11894     return Result;
11895 
11896   // First try with the default operand order.
11897   if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
11898     return Result;
11899 
11900   // If that didn't work, try again with the operands commuted.
11901   return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
11902 }
11903 
11904 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
11905 ///
11906 static SDValue PerformSUBCombine(SDNode *N,
11907                                  TargetLowering::DAGCombinerInfo &DCI,
11908                                  const ARMSubtarget *Subtarget) {
11909   SDValue N0 = N->getOperand(0);
11910   SDValue N1 = N->getOperand(1);
11911 
11912   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
11913   if (N1.getNode()->hasOneUse())
11914     if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
11915       return Result;
11916 
11917   if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
11918     return SDValue();
11919 
11920   // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
11921   // so that we can readily pattern match more MVE instructions, which can
11922   // use a scalar operand.
11923   SDValue VDup = N->getOperand(1);
11924   if (VDup->getOpcode() != ARMISD::VDUP)
11925     return SDValue();
11926 
11927   SDValue VMov = N->getOperand(0);
11928   if (VMov->getOpcode() == ISD::BITCAST)
11929     VMov = VMov->getOperand(0);
11930 
11931   if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
11932     return SDValue();
11933 
11934   SDLoc dl(N);
11935   SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
11936                                    DCI.DAG.getConstant(0, dl, MVT::i32),
11937                                    VDup->getOperand(0));
11938   return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
11939 }
11940 
11941 /// PerformVMULCombine
11942 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
11943 /// special multiplier accumulator forwarding.
11944 ///   vmul d3, d0, d2
11945 ///   vmla d3, d1, d2
11946 /// is faster than
11947 ///   vadd d3, d0, d1
11948 ///   vmul d3, d3, d2
11949 //  However, for (A + B) * (A + B),
11950 //    vadd d2, d0, d1
11951 //    vmul d3, d0, d2
11952 //    vmla d3, d1, d2
11953 //  is slower than
11954 //    vadd d2, d0, d1
11955 //    vmul d3, d2, d2
11956 static SDValue PerformVMULCombine(SDNode *N,
11957                                   TargetLowering::DAGCombinerInfo &DCI,
11958                                   const ARMSubtarget *Subtarget) {
11959   if (!Subtarget->hasVMLxForwarding())
11960     return SDValue();
11961 
11962   SelectionDAG &DAG = DCI.DAG;
11963   SDValue N0 = N->getOperand(0);
11964   SDValue N1 = N->getOperand(1);
11965   unsigned Opcode = N0.getOpcode();
11966   if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
11967       Opcode != ISD::FADD && Opcode != ISD::FSUB) {
11968     Opcode = N1.getOpcode();
11969     if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
11970         Opcode != ISD::FADD && Opcode != ISD::FSUB)
11971       return SDValue();
11972     std::swap(N0, N1);
11973   }
11974 
11975   if (N0 == N1)
11976     return SDValue();
11977 
11978   EVT VT = N->getValueType(0);
11979   SDLoc DL(N);
11980   SDValue N00 = N0->getOperand(0);
11981   SDValue N01 = N0->getOperand(1);
11982   return DAG.getNode(Opcode, DL, VT,
11983                      DAG.getNode(ISD::MUL, DL, VT, N00, N1),
11984                      DAG.getNode(ISD::MUL, DL, VT, N01, N1));
11985 }
11986 
11987 static SDValue PerformMULCombine(SDNode *N,
11988                                  TargetLowering::DAGCombinerInfo &DCI,
11989                                  const ARMSubtarget *Subtarget) {
11990   SelectionDAG &DAG = DCI.DAG;
11991 
11992   if (Subtarget->isThumb1Only())
11993     return SDValue();
11994 
11995   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
11996     return SDValue();
11997 
11998   EVT VT = N->getValueType(0);
11999   if (VT.is64BitVector() || VT.is128BitVector())
12000     return PerformVMULCombine(N, DCI, Subtarget);
12001   if (VT != MVT::i32)
12002     return SDValue();
12003 
12004   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12005   if (!C)
12006     return SDValue();
12007 
12008   int64_t MulAmt = C->getSExtValue();
12009   unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
12010 
12011   ShiftAmt = ShiftAmt & (32 - 1);
12012   SDValue V = N->getOperand(0);
12013   SDLoc DL(N);
12014 
12015   SDValue Res;
12016   MulAmt >>= ShiftAmt;
12017 
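  // For example (illustrative): MulAmt = 10 gives ShiftAmt = 1 and a reduced
  // MulAmt of 5 = 2^2 + 1, so (mul x, 10) becomes
  // (shl (add (shl x, 2), x), 1).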
12018   if (MulAmt >= 0) {
12019     if (isPowerOf2_32(MulAmt - 1)) {
12020       // (mul x, 2^N + 1) => (add (shl x, N), x)
12021       Res = DAG.getNode(ISD::ADD, DL, VT,
12022                         V,
12023                         DAG.getNode(ISD::SHL, DL, VT,
12024                                     V,
12025                                     DAG.getConstant(Log2_32(MulAmt - 1), DL,
12026                                                     MVT::i32)));
12027     } else if (isPowerOf2_32(MulAmt + 1)) {
12028       // (mul x, 2^N - 1) => (sub (shl x, N), x)
12029       Res = DAG.getNode(ISD::SUB, DL, VT,
12030                         DAG.getNode(ISD::SHL, DL, VT,
12031                                     V,
12032                                     DAG.getConstant(Log2_32(MulAmt + 1), DL,
12033                                                     MVT::i32)),
12034                         V);
12035     } else
12036       return SDValue();
12037   } else {
12038     uint64_t MulAmtAbs = -MulAmt;
12039     if (isPowerOf2_32(MulAmtAbs + 1)) {
12040       // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
12041       Res = DAG.getNode(ISD::SUB, DL, VT,
12042                         V,
12043                         DAG.getNode(ISD::SHL, DL, VT,
12044                                     V,
12045                                     DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
12046                                                     MVT::i32)));
12047     } else if (isPowerOf2_32(MulAmtAbs - 1)) {
12048       // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
12049       Res = DAG.getNode(ISD::ADD, DL, VT,
12050                         V,
12051                         DAG.getNode(ISD::SHL, DL, VT,
12052                                     V,
12053                                     DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
12054                                                     MVT::i32)));
12055       Res = DAG.getNode(ISD::SUB, DL, VT,
12056                         DAG.getConstant(0, DL, MVT::i32), Res);
12057     } else
12058       return SDValue();
12059   }
12060 
12061   if (ShiftAmt != 0)
12062     Res = DAG.getNode(ISD::SHL, DL, VT,
12063                       Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
12064 
12065   // Do not add new nodes to DAG combiner worklist.
12066   DCI.CombineTo(N, Res, false);
12067   return SDValue();
12068 }
12069 
12070 static SDValue CombineANDShift(SDNode *N,
12071                                TargetLowering::DAGCombinerInfo &DCI,
12072                                const ARMSubtarget *Subtarget) {
12073   // Allow DAGCombine to pattern-match before we touch the canonical form.
12074   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12075     return SDValue();
12076 
12077   if (N->getValueType(0) != MVT::i32)
12078     return SDValue();
12079 
12080   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12081   if (!N1C)
12082     return SDValue();
12083 
12084   uint32_t C1 = (uint32_t)N1C->getZExtValue();
12085   // Don't transform uxtb/uxth.
12086   if (C1 == 255 || C1 == 65535)
12087     return SDValue();
12088 
12089   SDNode *N0 = N->getOperand(0).getNode();
12090   if (!N0->hasOneUse())
12091     return SDValue();
12092 
12093   if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
12094     return SDValue();
12095 
12096   bool LeftShift = N0->getOpcode() == ISD::SHL;
12097 
12098   ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
12099   if (!N01C)
12100     return SDValue();
12101 
12102   uint32_t C2 = (uint32_t)N01C->getZExtValue();
12103   if (!C2 || C2 >= 32)
12104     return SDValue();
12105 
12106   // Clear irrelevant bits in the mask.
12107   if (LeftShift)
12108     C1 &= (-1U << C2);
12109   else
12110     C1 &= (-1U >> C2);
12111 
12112   SelectionDAG &DAG = DCI.DAG;
12113   SDLoc DL(N);
12114 
12115   // We have a pattern of the form "(and (shl x, c2) c1)" or
12116   // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
12117   // transform to a pair of shifts, to save materializing c1.
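  // For example (illustrative): (and (srl x, 1), 0x7f) has C2 = 1 and a mask
  // C1 = 0x7f with C3 = 25 leading zeros, so it becomes
  // (srl (shl x, 24), 25) and no mask constant has to be materialized.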
12118 
12119   // First pattern: right shift, then mask off leading bits.
12120   // FIXME: Use demanded bits?
12121   if (!LeftShift && isMask_32(C1)) {
12122     uint32_t C3 = countLeadingZeros(C1);
12123     if (C2 < C3) {
12124       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12125                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
12126       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12127                          DAG.getConstant(C3, DL, MVT::i32));
12128     }
12129   }
12130 
12131   // First pattern, reversed: left shift, then mask off trailing bits.
12132   if (LeftShift && isMask_32(~C1)) {
12133     uint32_t C3 = countTrailingZeros(C1);
12134     if (C2 < C3) {
12135       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12136                                 DAG.getConstant(C3 - C2, DL, MVT::i32));
12137       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12138                          DAG.getConstant(C3, DL, MVT::i32));
12139     }
12140   }
12141 
12142   // Second pattern: left shift, then mask off leading bits.
12143   // FIXME: Use demanded bits?
12144   if (LeftShift && isShiftedMask_32(C1)) {
12145     uint32_t Trailing = countTrailingZeros(C1);
12146     uint32_t C3 = countLeadingZeros(C1);
12147     if (Trailing == C2 && C2 + C3 < 32) {
12148       SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12149                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
12150       return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12151                          DAG.getConstant(C3, DL, MVT::i32));
12152     }
12153   }
12154 
12155   // Second pattern, reversed: right shift, then mask off trailing bits.
12156   // FIXME: Handle other patterns of known/demanded bits.
12157   if (!LeftShift && isShiftedMask_32(C1)) {
12158     uint32_t Leading = countLeadingZeros(C1);
12159     uint32_t C3 = countTrailingZeros(C1);
12160     if (Leading == C2 && C2 + C3 < 32) {
12161       SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12162                                 DAG.getConstant(C2 + C3, DL, MVT::i32));
12163       return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12164                          DAG.getConstant(C3, DL, MVT::i32));
12165     }
12166   }
12167 
12168   // FIXME: Transform "(and (shl x, c2) c1)" ->
12169   // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
12170   // c1.
12171   return SDValue();
12172 }
12173 
12174 static SDValue PerformANDCombine(SDNode *N,
12175                                  TargetLowering::DAGCombinerInfo &DCI,
12176                                  const ARMSubtarget *Subtarget) {
12177   // Attempt to use immediate-form VBIC
12178   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
12179   SDLoc dl(N);
12180   EVT VT = N->getValueType(0);
12181   SelectionDAG &DAG = DCI.DAG;
12182 
12183   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
12184     return SDValue();
12185 
12186   APInt SplatBits, SplatUndef;
12187   unsigned SplatBitSize;
12188   bool HasAnyUndefs;
12189   if (BVN && Subtarget->hasNEON() &&
12190       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12191     if (SplatBitSize <= 64) {
12192       EVT VbicVT;
12193       SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
12194                                       SplatUndef.getZExtValue(), SplatBitSize,
12195                                       DAG, dl, VbicVT, VT.is128BitVector(),
12196                                       OtherModImm);
12197       if (Val.getNode()) {
12198         SDValue Input =
12199           DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
12200         SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
12201         return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
12202       }
12203     }
12204   }
12205 
12206   if (!Subtarget->isThumb1Only()) {
12207     // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
12208     if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
12209       return Result;
12210 
12211     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12212       return Result;
12213   }
12214 
12215   if (Subtarget->isThumb1Only())
12216     if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
12217       return Result;
12218 
12219   return SDValue();
12220 }
12221 
12222 // Try combining OR nodes to SMULWB, SMULWT.
12223 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
12224                                          TargetLowering::DAGCombinerInfo &DCI,
12225                                          const ARMSubtarget *Subtarget) {
12226   if (!Subtarget->hasV6Ops() ||
12227       (Subtarget->isThumb() &&
12228        (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
12229     return SDValue();
12230 
12231   SDValue SRL = OR->getOperand(0);
12232   SDValue SHL = OR->getOperand(1);
12233 
12234   if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
12235     SRL = OR->getOperand(1);
12236     SHL = OR->getOperand(0);
12237   }
12238   if (!isSRL16(SRL) || !isSHL16(SHL))
12239     return SDValue();
12240 
12241   // The first operands to the shifts need to be the two results from the
12242   // same smul_lohi node.
12243   if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
12244        SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
12245     return SDValue();
12246 
12247   SDNode *SMULLOHI = SRL.getOperand(0).getNode();
12248   if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
12249       SHL.getOperand(0) != SDValue(SMULLOHI, 1))
12250     return SDValue();
12251 
12252   // Now we have:
12253   // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
12254   // For SMULW[B|T] smul_lohi will take a 32-bit and a 16-bit argument.
12255   // For SMULWB the 16-bit value will be sign extended somehow.
12256   // For SMULWT only the SRA is required.
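  // For example (illustrative), smulwb rd, rn, rm computes
  //   rd = (rn * sext(rm[15:0])) >> 16,
  // i.e. bits [47:16] of the 48-bit product, which is exactly what the
  // or/srl/shl pattern above reassembles from the 64-bit product halves.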
12257   // Check both sides of SMUL_LOHI
12258   SDValue OpS16 = SMULLOHI->getOperand(0);
12259   SDValue OpS32 = SMULLOHI->getOperand(1);
12260 
12261   SelectionDAG &DAG = DCI.DAG;
12262   if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
12263     OpS16 = OpS32;
12264     OpS32 = SMULLOHI->getOperand(0);
12265   }
12266 
12267   SDLoc dl(OR);
12268   unsigned Opcode = 0;
12269   if (isS16(OpS16, DAG))
12270     Opcode = ARMISD::SMULWB;
12271   else if (isSRA16(OpS16)) {
12272     Opcode = ARMISD::SMULWT;
12273     OpS16 = OpS16->getOperand(0);
12274   }
12275   else
12276     return SDValue();
12277 
12278   SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
12279   DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
12280   return SDValue(OR, 0);
12281 }
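      // Illustrative example (hand-derived): with a 32-bit x and a
      // sign-extended 16-bit y,
      //   (or (srl (smul_lohi x, y):0, 16), (shl (smul_lohi x, y):1, 16))
      // reassembles bits [47:16] of the 64-bit product, which is exactly what
      // "smulwb rd, x, y" computes, so the whole OR collapses to one SMULWB.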
12282 
12283 static SDValue PerformORCombineToBFI(SDNode *N,
12284                                      TargetLowering::DAGCombinerInfo &DCI,
12285                                      const ARMSubtarget *Subtarget) {
12286   // BFI is only available on V6T2+
12287   if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
12288     return SDValue();
12289 
12290   EVT VT = N->getValueType(0);
12291   SDValue N0 = N->getOperand(0);
12292   SDValue N1 = N->getOperand(1);
12293   SelectionDAG &DAG = DCI.DAG;
12294   SDLoc DL(N);
12295   // 1) or (and A, mask), val => ARMbfi A, val, mask
12296   //      iff (val & mask) == val
12297   //
12298   // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12299   //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
12300   //          && mask == ~mask2
12301   //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
12302   //          && ~mask == mask2
12303   //  (i.e., copy a bitfield value into another bitfield of the same width)
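        //
        // Illustrative worked instance of case (1), with values chosen for
        // exposition: (or (and A, 0xffff00ff), 0x4400) satisfies
        // (val & ~mask) == val, so val is shifted right by
        // countTrailingZeros(~mask) == 8 and the node becomes
        // (ARMbfi A, 0x44, 0xffff00ff).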
12304 
12305   if (VT != MVT::i32)
12306     return SDValue();
12307 
12308   SDValue N00 = N0.getOperand(0);
12309 
12310   // The value and the mask need to be constants so we can verify this is
12311   // actually a bitfield set. If the mask is 0xffff, we can do better
12312   // via a movt instruction, so don't use BFI in that case.
12313   SDValue MaskOp = N0.getOperand(1);
12314   ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
12315   if (!MaskC)
12316     return SDValue();
12317   unsigned Mask = MaskC->getZExtValue();
12318   if (Mask == 0xffff)
12319     return SDValue();
12320   SDValue Res;
12321   // Case (1): or (and A, mask), val => ARMbfi A, val, mask
12322   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
12323   if (N1C) {
12324     unsigned Val = N1C->getZExtValue();
12325     if ((Val & ~Mask) != Val)
12326       return SDValue();
12327 
12328     if (ARM::isBitFieldInvertedMask(Mask)) {
12329       Val >>= countTrailingZeros(~Mask);
12330 
12331       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
12332                         DAG.getConstant(Val, DL, MVT::i32),
12333                         DAG.getConstant(Mask, DL, MVT::i32));
12334 
12335       DCI.CombineTo(N, Res, false);
12336       // Return value from the original node to inform the combiner that N is
12337       // now dead.
12338       return SDValue(N, 0);
12339     }
12340   } else if (N1.getOpcode() == ISD::AND) {
12341     // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12342     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
12343     if (!N11C)
12344       return SDValue();
12345     unsigned Mask2 = N11C->getZExtValue();
12346 
12347     // Mask and ~Mask2 (or the reverse) must be equivalent for the BFI
12348     // pattern to match as is.
12349     if (ARM::isBitFieldInvertedMask(Mask) &&
12350         (Mask == ~Mask2)) {
12351       // The pack halfword instruction works better for masks that fit it,
12352       // so use that when it's available.
12353       if (Subtarget->hasDSP() &&
12354           (Mask == 0xffff || Mask == 0xffff0000))
12355         return SDValue();
12356       // 2a
12357       unsigned amt = countTrailingZeros(Mask2);
12358       Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
12359                         DAG.getConstant(amt, DL, MVT::i32));
12360       Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
12361                         DAG.getConstant(Mask, DL, MVT::i32));
12362       DCI.CombineTo(N, Res, false);
12363       // Return value from the original node to inform the combiner that N is
12364       // now dead.
12365       return SDValue(N, 0);
12366     } else if (ARM::isBitFieldInvertedMask(~Mask) &&
12367                (~Mask == Mask2)) {
12368       // The pack halfword instruction works better for masks that fit it,
12369       // so use that when it's available.
12370       if (Subtarget->hasDSP() &&
12371           (Mask2 == 0xffff || Mask2 == 0xffff0000))
12372         return SDValue();
12373       // 2b
12374       unsigned lsb = countTrailingZeros(Mask);
12375       Res = DAG.getNode(ISD::SRL, DL, VT, N00,
12376                         DAG.getConstant(lsb, DL, MVT::i32));
12377       Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
12378                         DAG.getConstant(Mask2, DL, MVT::i32));
12379       DCI.CombineTo(N, Res, false);
12380       // Return value from the original node to inform the combiner that N is
12381       // now dead.
12382       return SDValue(N, 0);
12383     }
12384   }
12385 
12386   if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
12387       N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
12388       ARM::isBitFieldInvertedMask(~Mask)) {
12389     // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
12390     // where lsb(mask) == #shamt and masked bits of B are known zero.
12391     SDValue ShAmt = N00.getOperand(1);
12392     unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
12393     unsigned LSB = countTrailingZeros(Mask);
12394     if (ShAmtC != LSB)
12395       return SDValue();
12396 
12397     Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
12398                       DAG.getConstant(~Mask, DL, MVT::i32));
12399 
12400     DCI.CombineTo(N, Res, false);
12401     // Return value from the original node to inform the combiner that N is
12402     // now dead.
12403     return SDValue(N, 0);
12404   }
12405 
12406   return SDValue();
12407 }
12408 
12409 static bool isValidMVECond(unsigned CC, bool IsFloat) {
12410   switch (CC) {
12411   case ARMCC::EQ:
12412   case ARMCC::NE:
12413   case ARMCC::LE:
12414   case ARMCC::GT:
12415   case ARMCC::GE:
12416   case ARMCC::LT:
12417     return true;
12418   case ARMCC::HS:
12419   case ARMCC::HI:
12420     return !IsFloat;
12421   default:
12422     return false;
12423   }
12424 }
12425 
12426 static SDValue PerformORCombine_i1(SDNode *N,
12427                                    TargetLowering::DAGCombinerInfo &DCI,
12428                                    const ARMSubtarget *Subtarget) {
12429   // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
12430   // together with predicates
12431   EVT VT = N->getValueType(0);
12432   SDValue N0 = N->getOperand(0);
12433   SDValue N1 = N->getOperand(1);
12434 
12435   ARMCC::CondCodes CondCode0 = ARMCC::AL;
12436   ARMCC::CondCodes CondCode1 = ARMCC::AL;
12437   if (N0->getOpcode() == ARMISD::VCMP)
12438     CondCode0 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N0->getOperand(2))
12439                     ->getZExtValue();
12440   else if (N0->getOpcode() == ARMISD::VCMPZ)
12441     CondCode0 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N0->getOperand(1))
12442                     ->getZExtValue();
12443   if (N1->getOpcode() == ARMISD::VCMP)
12444     CondCode1 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N1->getOperand(2))
12445                     ->getZExtValue();
12446   else if (N1->getOpcode() == ARMISD::VCMPZ)
12447     CondCode1 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N1->getOperand(1))
12448                     ->getZExtValue();
12449 
12450   if (CondCode0 == ARMCC::AL || CondCode1 == ARMCC::AL)
12451     return SDValue();
12452 
12453   unsigned Opposite0 = ARMCC::getOppositeCondition(CondCode0);
12454   unsigned Opposite1 = ARMCC::getOppositeCondition(CondCode1);
12455 
12456   if (!isValidMVECond(Opposite0,
12457                       N0->getOperand(0)->getValueType(0).isFloatingPoint()) ||
12458       !isValidMVECond(Opposite1,
12459                       N1->getOperand(0)->getValueType(0).isFloatingPoint()))
12460     return SDValue();
12461 
12462   SmallVector<SDValue, 4> Ops0;
12463   Ops0.push_back(N0->getOperand(0));
12464   if (N0->getOpcode() == ARMISD::VCMP)
12465     Ops0.push_back(N0->getOperand(1));
12466   Ops0.push_back(DCI.DAG.getConstant(Opposite0, SDLoc(N0), MVT::i32));
12467   SmallVector<SDValue, 4> Ops1;
12468   Ops1.push_back(N1->getOperand(0));
12469   if (N1->getOpcode() == ARMISD::VCMP)
12470     Ops1.push_back(N1->getOperand(1));
12471   Ops1.push_back(DCI.DAG.getConstant(Opposite1, SDLoc(N1), MVT::i32));
12472 
12473   SDValue NewN0 = DCI.DAG.getNode(N0->getOpcode(), SDLoc(N0), VT, Ops0);
12474   SDValue NewN1 = DCI.DAG.getNode(N1->getOpcode(), SDLoc(N1), VT, Ops1);
12475   SDValue And = DCI.DAG.getNode(ISD::AND, SDLoc(N), VT, NewN0, NewN1);
12476   return DCI.DAG.getNode(ISD::XOR, SDLoc(N), VT, And,
12477                          DCI.DAG.getAllOnesConstant(SDLoc(N), VT));
12478 }
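      // Illustrative example (hand-derived): (or (vcmp a, b, eq),
      // (vcmpz c, lt)) becomes (xor (and (vcmp a, b, ne), (vcmpz c, ge)),
      // all-ones), so the two predicates are combined with a single AND
      // before the final inversion.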
12479 
12480 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
12481 static SDValue PerformORCombine(SDNode *N,
12482                                 TargetLowering::DAGCombinerInfo &DCI,
12483                                 const ARMSubtarget *Subtarget) {
12484   // Attempt to use immediate-form VORR
12485   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
12486   SDLoc dl(N);
12487   EVT VT = N->getValueType(0);
12488   SelectionDAG &DAG = DCI.DAG;
12489 
12490   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
12491     return SDValue();
12492 
12493   APInt SplatBits, SplatUndef;
12494   unsigned SplatBitSize;
12495   bool HasAnyUndefs;
12496   if (BVN && Subtarget->hasNEON() &&
12497       BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12498     if (SplatBitSize <= 64) {
12499       EVT VorrVT;
12500       SDValue Val = isVMOVModifiedImm(SplatBits.getZExtValue(),
12501                                       SplatUndef.getZExtValue(), SplatBitSize,
12502                                       DAG, dl, VorrVT, VT.is128BitVector(),
12503                                       OtherModImm);
12504       if (Val.getNode()) {
12505         SDValue Input =
12506           DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
12507         SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
12508         return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
12509       }
12510     }
12511   }
12512 
12513   if (!Subtarget->isThumb1Only()) {
12514     // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
12515     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
12516       return Result;
12517     if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
12518       return Result;
12519   }
12520 
12521   SDValue N0 = N->getOperand(0);
12522   SDValue N1 = N->getOperand(1);
12523 
12524   // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
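        // Illustrative example (values chosen for exposition): with
        // A = splat 0x0000ffff,
        //   (or (and B, 0x0000ffff), (and C, 0xffff0000))
        // takes the low halfword of each lane from B and the high halfword
        // from C, which is exactly (VBSL 0x0000ffff, B, C).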
12525   if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
12526       DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
12527 
12528     // The code below optimizes (or (and X, Y), Z).
12529     // The AND operand needs to have a single user to make these optimizations
12530     // profitable.
12531     if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
12532       return SDValue();
12533 
12534     APInt SplatUndef;
12535     unsigned SplatBitSize;
12536     bool HasAnyUndefs;
12537 
12538     APInt SplatBits0, SplatBits1;
12539     BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
12540     BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
12541     // Ensure that the second operand of each AND is a constant.
12542     if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
12543                                       HasAnyUndefs) && !HasAnyUndefs) {
12544         if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
12545                                           HasAnyUndefs) && !HasAnyUndefs) {
12546             // Ensure that the bit widths of the constants are the same and
12547             // that the splat arguments are logical inverses as per the
12548             // pattern we are trying to simplify.
12549             if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
12550                 SplatBits0 == ~SplatBits1) {
12551                 // Canonicalize the vector type to make instruction selection
12552                 // simpler.
12553                 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
12554                 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
12555                                              N0->getOperand(1),
12556                                              N0->getOperand(0),
12557                                              N1->getOperand(0));
12558                 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
12559             }
12560         }
12561     }
12562   }
12563 
12564   if (Subtarget->hasMVEIntegerOps() &&
12565       (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
12566     return PerformORCombine_i1(N, DCI, Subtarget);
12567 
12568   // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
12569   // reasonable.
12570   if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
12571     if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
12572       return Res;
12573   }
12574 
12575   if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12576     return Result;
12577 
12578   return SDValue();
12579 }
12580 
12581 static SDValue PerformXORCombine(SDNode *N,
12582                                  TargetLowering::DAGCombinerInfo &DCI,
12583                                  const ARMSubtarget *Subtarget) {
12584   EVT VT = N->getValueType(0);
12585   SelectionDAG &DAG = DCI.DAG;
12586 
12587   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
12588     return SDValue();
12589 
12590   if (!Subtarget->isThumb1Only()) {
12591     // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
12592     if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
12593       return Result;
12594 
12595     if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12596       return Result;
12597   }
12598 
12599   return SDValue();
12600 }
12601 
12602 // ParseBFI - Given a BFI instruction in N, extract the "from" value (Rn)
12603 // and return it, and fill in FromMask and ToMask with the (consecutive)
12604 // bits in "from" to be extracted and their position in "to" (Rd).
12605 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
12606   assert(N->getOpcode() == ARMISD::BFI);
12607 
12608   SDValue From = N->getOperand(1);
12609   ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
12610   FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
12611 
12612   // If the Base came from a SHR #C, we can deduce that it is really testing bit
12613   // #C in the base of the SHR.
12614   if (From->getOpcode() == ISD::SRL &&
12615       isa<ConstantSDNode>(From->getOperand(1))) {
12616     APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
12617     assert(Shift.getLimitedValue() < 32 && "Shift too large!");
12618     FromMask <<= Shift.getLimitedValue(31);
12619     From = From->getOperand(0);
12620   }
12621 
12622   return From;
12623 }
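      // Illustrative worked example (values chosen for exposition): for
      // (ARMISD::BFI A, (srl X, 8), 0xffff00ff), ToMask becomes
      // ~0xffff00ff == 0x0000ff00 and FromMask starts as 0xff; after folding
      // the SRL by 8, FromMask is 0xff00 and X is returned, i.e. bits [15:8]
      // of X are inserted into bits [15:8] of A.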
12624 
12625 // If A and B contain one contiguous set of bits, does A | B == A . B?
12626 //
12627 // Neither A nor B may be zero.
12628 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
12629   unsigned LastActiveBitInA =  A.countTrailingZeros();
12630   unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
12631   return LastActiveBitInA - 1 == FirstActiveBitInB;
12632 }
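      // Illustrative example: A = 0b11000 and B = 0b00111 concatenate
      // properly: A's lowest set bit is at index 3, B's highest set bit is at
      // index 2, and 3 - 1 == 2, so A | B == 0b11111 is still one contiguous
      // run of bits.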
12633 
12634 static SDValue FindBFIToCombineWith(SDNode *N) {
12635   // We have a BFI in N. Follow a possible chain of BFIs and find a BFI
12636   // it can combine with, if one exists.
12637   APInt ToMask, FromMask;
12638   SDValue From = ParseBFI(N, ToMask, FromMask);
12639   SDValue To = N->getOperand(0);
12640 
12641   // Now check for a compatible BFI to merge with. We can pass through BFIs that
12642   // aren't compatible, but not if they set the same bit in their destination as
12643   // we do (or that of any BFI we're going to combine with).
12644   SDValue V = To;
12645   APInt CombinedToMask = ToMask;
12646   while (V.getOpcode() == ARMISD::BFI) {
12647     APInt NewToMask, NewFromMask;
12648     SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
12649     if (NewFrom != From) {
12650       // This BFI has a different base. Keep going.
12651       CombinedToMask |= NewToMask;
12652       V = V.getOperand(0);
12653       continue;
12654     }
12655 
12656     // Do the written bits conflict with any we've seen so far?
12657     if ((NewToMask & CombinedToMask).getBoolValue())
12658       // Conflicting bits - bail out because going further is unsafe.
12659       return SDValue();
12660 
12661     // Are the new bits contiguous when combined with the old bits?
12662     if (BitsProperlyConcatenate(ToMask, NewToMask) &&
12663         BitsProperlyConcatenate(FromMask, NewFromMask))
12664       return V;
12665     if (BitsProperlyConcatenate(NewToMask, ToMask) &&
12666         BitsProperlyConcatenate(NewFromMask, FromMask))
12667       return V;
12668 
12669     // We've seen a write to some bits, so track it.
12670     CombinedToMask |= NewToMask;
12671     // Keep going...
12672     V = V.getOperand(0);
12673   }
12674 
12675   return SDValue();
12676 }
12677 
12678 static SDValue PerformBFICombine(SDNode *N,
12679                                  TargetLowering::DAGCombinerInfo &DCI) {
12680   SDValue N1 = N->getOperand(1);
12681   if (N1.getOpcode() == ISD::AND) {
12682     // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
12683     // the bits being cleared by the AND are not demanded by the BFI.
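          // Illustrative example (values chosen for exposition): in
          // (BFI A, (and B, 0xff), 0xffffff00) the BFI only demands the low
          // 8 bits of the inserted value, so the AND with 0xff is redundant
          // and the node folds to (BFI A, B, 0xffffff00).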
12684     ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
12685     if (!N11C)
12686       return SDValue();
12687     unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
12688     unsigned LSB = countTrailingZeros(~InvMask);
12689     unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
12690     assert(Width <
12691                static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
12692            "undefined behavior");
12693     unsigned Mask = (1u << Width) - 1;
12694     unsigned Mask2 = N11C->getZExtValue();
12695     if ((Mask & (~Mask2)) == 0)
12696       return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
12697                              N->getOperand(0), N1.getOperand(0),
12698                              N->getOperand(2));
12699   } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
12700     // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
12701     // Keep track of any consecutive bits set that all come from the same base
12702     // value. We can combine these together into a single BFI.
12703     SDValue CombineBFI = FindBFIToCombineWith(N);
12704     if (CombineBFI == SDValue())
12705       return SDValue();
12706 
12707     // We've found a BFI.
12708     APInt ToMask1, FromMask1;
12709     SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
12710 
12711     APInt ToMask2, FromMask2;
12712     SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
12713     assert(From1 == From2);
12714     (void)From2;
12715 
12716     // First, unlink CombineBFI.
12717     DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
12718     // Then create a new BFI, combining the two together.
12719     APInt NewFromMask = FromMask1 | FromMask2;
12720     APInt NewToMask = ToMask1 | ToMask2;
12721 
12722     EVT VT = N->getValueType(0);
12723     SDLoc dl(N);
12724 
12725     if (NewFromMask[0] == 0)
12726       From1 = DCI.DAG.getNode(
12727         ISD::SRL, dl, VT, From1,
12728         DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
12729     return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
12730                            DCI.DAG.getConstant(~NewToMask, dl, VT));
12731   }
12732   return SDValue();
12733 }
12734 
12735 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
12736 /// ARMISD::VMOVRRD.
12737 static SDValue PerformVMOVRRDCombine(SDNode *N,
12738                                      TargetLowering::DAGCombinerInfo &DCI,
12739                                      const ARMSubtarget *Subtarget) {
12740   // vmovrrd(vmovdrr x, y) -> x,y
12741   SDValue InDouble = N->getOperand(0);
12742   if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
12743     return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
12744 
12745   // vmovrrd(load f64) -> (load i32), (load i32)
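        //
        // Illustrative lowering (assuming a little-endian target): instead of
        //   vldr d0, [sp, #8]   +   vmov r0, r1, d0
        // this emits
        //   ldr r0, [sp, #8]    +   ldr r1, [sp, #12]
        // (the two results are swapped for big-endian targets, as below).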
12746   SDNode *InNode = InDouble.getNode();
12747   if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
12748       InNode->getValueType(0) == MVT::f64 &&
12749       InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
12750       !cast<LoadSDNode>(InNode)->isVolatile()) {
12751     // TODO: Should this be done for non-FrameIndex operands?
12752     LoadSDNode *LD = cast<LoadSDNode>(InNode);
12753 
12754     SelectionDAG &DAG = DCI.DAG;
12755     SDLoc DL(LD);
12756     SDValue BasePtr = LD->getBasePtr();
12757     SDValue NewLD1 =
12758         DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
12759                     LD->getAlignment(), LD->getMemOperand()->getFlags());
12760 
12761     SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
12762                                     DAG.getConstant(4, DL, MVT::i32));
12763 
12764     SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
12765                                  LD->getPointerInfo().getWithOffset(4),
12766                                  std::min(4U, LD->getAlignment()),
12767                                  LD->getMemOperand()->getFlags());
12768 
12769     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
12770     if (DCI.DAG.getDataLayout().isBigEndian())
12771       std::swap(NewLD1, NewLD2);
12772     SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
12773     return Result;
12774   }
12775 
12776   return SDValue();
12777 }
12778 
12779 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
12780 /// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
12781 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
12782   // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
12783   SDValue Op0 = N->getOperand(0);
12784   SDValue Op1 = N->getOperand(1);
12785   if (Op0.getOpcode() == ISD::BITCAST)
12786     Op0 = Op0.getOperand(0);
12787   if (Op1.getOpcode() == ISD::BITCAST)
12788     Op1 = Op1.getOperand(0);
12789   if (Op0.getOpcode() == ARMISD::VMOVRRD &&
12790       Op0.getNode() == Op1.getNode() &&
12791       Op0.getResNo() == 0 && Op1.getResNo() == 1)
12792     return DAG.getNode(ISD::BITCAST, SDLoc(N),
12793                        N->getValueType(0), Op0.getOperand(0));
12794   return SDValue();
12795 }
12796 
12797 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
12798 /// are normal, non-volatile loads.  If so, it is profitable to bitcast an
12799 /// i64 vector to have f64 elements, since the value can then be loaded
12800 /// directly into a VFP register.
12801 static bool hasNormalLoadOperand(SDNode *N) {
12802   unsigned NumElts = N->getValueType(0).getVectorNumElements();
12803   for (unsigned i = 0; i < NumElts; ++i) {
12804     SDNode *Elt = N->getOperand(i).getNode();
12805     if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
12806       return true;
12807   }
12808   return false;
12809 }
12810 
12811 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
12812 /// ISD::BUILD_VECTOR.
12813 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
12814                                           TargetLowering::DAGCombinerInfo &DCI,
12815                                           const ARMSubtarget *Subtarget) {
12816   // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
12817   // VMOVRRD is introduced when legalizing i64 types.  It forces the i64 value
12818   // into a pair of GPRs, which is fine when the value is used as a scalar,
12819   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
12820   SelectionDAG &DAG = DCI.DAG;
12821   if (N->getNumOperands() == 2)
12822     if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
12823       return RV;
12824 
12825   // Load i64 elements as f64 values so that type legalization does not split
12826   // them up into i32 values.
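        // Illustrative example: build_vector (load i64 %p), (load i64 %q) is
        // rebuilt as a v2f64 build_vector of f64-bitcast operands (the
        // bitcasts are expected to fold into the loads) and bitcast back to
        // v2i64, so each element can be loaded straight into a D register.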
12827   EVT VT = N->getValueType(0);
12828   if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
12829     return SDValue();
12830   SDLoc dl(N);
12831   SmallVector<SDValue, 8> Ops;
12832   unsigned NumElts = VT.getVectorNumElements();
12833   for (unsigned i = 0; i < NumElts; ++i) {
12834     SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
12835     Ops.push_back(V);
12836     // Make the DAGCombiner fold the bitcast.
12837     DCI.AddToWorklist(V.getNode());
12838   }
12839   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
12840   SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
12841   return DAG.getNode(ISD::BITCAST, dl, VT, BV);
12842 }
12843 
12844 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
12845 static SDValue
12846 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
12847   // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
12848   // At that time, we may have inserted bitcasts from integer to float.
12849   // If these bitcasts have survived DAGCombine, change the lowering of this
12850   // BUILD_VECTOR in something more vector friendly, i.e., that does not
12851   // force to use floating point types.
12852 
12853   // Make sure we can change the type of the vector.
12854   // This is possible iff:
12855   //    1. The vector is only used in a bitcast to an integer type. I.e.,
12856   //    1.1. Vector is used only once.
12857   //    1.2. Use is a bit convert to an integer type.
12858   // 2. The size of its operands is 32 bits (64 bits is not legal).
12859   EVT VT = N->getValueType(0);
12860   EVT EltVT = VT.getVectorElementType();
12861 
12862   // Check 1.1. and 2.
12863   if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
12864     return SDValue();
12865 
12866   // By construction, the input type must be float.
12867   assert(EltVT == MVT::f32 && "Unexpected type!");
12868 
12869   // Check 1.2.
12870   SDNode *Use = *N->use_begin();
12871   if (Use->getOpcode() != ISD::BITCAST ||
12872       Use->getValueType(0).isFloatingPoint())
12873     return SDValue();
12874 
12875   // Check profitability.
12876   // The model is: if more than half of the relevant operands are bitcast
12877   // from i32, turn the build_vector into a sequence of insert_vector_elt.
12878   // Relevant operands are everything that is not statically
12879   // (i.e., at compile time) bitcasted.
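        // Illustrative count (hypothetical values): in a 4-element
        // build_vector with one constant operand, NumOfRelevantElts is 3, so
        // at least 2 of the remaining operands must be bitcasts from i32 for
        // the rewrite below to be considered profitable.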
12880   unsigned NumOfBitCastedElts = 0;
12881   unsigned NumElts = VT.getVectorNumElements();
12882   unsigned NumOfRelevantElts = NumElts;
12883   for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
12884     SDValue Elt = N->getOperand(Idx);
12885     if (Elt->getOpcode() == ISD::BITCAST) {
12886       // Assume only bit cast to i32 will go away.
12887       if (Elt->getOperand(0).getValueType() == MVT::i32)
12888         ++NumOfBitCastedElts;
12889     } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
12890       // Constants are statically cast, thus do not count them as
12891       // relevant operands.
12892       --NumOfRelevantElts;
12893   }
12894 
12895   // Check if more than half of the elements require a non-free bitcast.
12896   if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
12897     return SDValue();
12898 
12899   SelectionDAG &DAG = DCI.DAG;
12900   // Create the new vector type.
12901   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
12902   // Check if the type is legal.
12903   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12904   if (!TLI.isTypeLegal(VecVT))
12905     return SDValue();
12906 
12907   // Combine:
12908   // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
12909   // => BITCAST INSERT_VECTOR_ELT
12910   //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
12911   //                      (BITCAST EN), N.
12912   SDValue Vec = DAG.getUNDEF(VecVT);
12913   SDLoc dl(N);
12914   for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
12915     SDValue V = N->getOperand(Idx);
12916     if (V.isUndef())
12917       continue;
12918     if (V.getOpcode() == ISD::BITCAST &&
12919         V->getOperand(0).getValueType() == MVT::i32)
12920       // Fold obvious case.
12921       V = V.getOperand(0);
12922     else {
12923       V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
12924       // Make the DAGCombiner fold the bitcasts.
12925       DCI.AddToWorklist(V.getNode());
12926     }
12927     SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
12928     Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
12929   }
12930   Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
12931   // Make the DAGCombiner fold the bitcasts.
12932   DCI.AddToWorklist(Vec.getNode());
12933   return Vec;
12934 }
12935 
12936 static SDValue
12937 PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
12938   EVT VT = N->getValueType(0);
12939   SDValue Op = N->getOperand(0);
12940   SDLoc dl(N);
12941 
12942   // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
12943   if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
12944     // If the valuetypes are the same, we can remove the cast entirely.
12945     if (Op->getOperand(0).getValueType() == VT)
12946       return Op->getOperand(0);
12947     return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl,
12948                            Op->getOperand(0).getValueType(), Op->getOperand(0));
12949   }
12950 
12951   return SDValue();
12952 }
12953 
12954 static SDValue PerformVCMPCombine(SDNode *N,
12955                                   TargetLowering::DAGCombinerInfo &DCI,
12956                                   const ARMSubtarget *Subtarget) {
12957   if (!Subtarget->hasMVEIntegerOps())
12958     return SDValue();
12959 
12960   EVT VT = N->getValueType(0);
12961   SDValue Op0 = N->getOperand(0);
12962   SDValue Op1 = N->getOperand(1);
12963   ARMCC::CondCodes Cond =
12964       (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
12965   SDLoc dl(N);
12966 
12967   // vcmp X, 0, cc -> vcmpz X, cc
12968   if (isZeroVector(Op1))
12969     return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0,
12970                            N->getOperand(2));
12971 
12972   unsigned SwappedCond = getSwappedCondition(Cond);
12973   if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
12974     // vcmp 0, X, cc -> vcmpz X, reversed(cc)
12975     if (isZeroVector(Op0))
12976       return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
12977                              DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
12978     // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
12979     if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
12980       return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
12981                              DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
12982   }
12983 
12984   return SDValue();
12985 }
12986 
12987 /// PerformInsertEltCombine - Target-specific dag combine xforms for
12988 /// ISD::INSERT_VECTOR_ELT.
12989 static SDValue PerformInsertEltCombine(SDNode *N,
12990                                        TargetLowering::DAGCombinerInfo &DCI) {
12991   // Bitcast an i64 load inserted into a vector to f64.
12992   // Otherwise, the i64 value will be legalized to a pair of i32 values.
12993   EVT VT = N->getValueType(0);
12994   SDNode *Elt = N->getOperand(1).getNode();
12995   if (VT.getVectorElementType() != MVT::i64 ||
12996       !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
12997     return SDValue();
12998 
12999   SelectionDAG &DAG = DCI.DAG;
13000   SDLoc dl(N);
13001   EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
13002                                  VT.getVectorNumElements());
13003   SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
13004   SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
13005   // Make the DAGCombiner fold the bitcasts.
13006   DCI.AddToWorklist(Vec.getNode());
13007   DCI.AddToWorklist(V.getNode());
13008   SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
13009                                Vec, V, N->getOperand(2));
13010   return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
13011 }
13012 
13013 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
13014 /// ISD::VECTOR_SHUFFLE.
13015 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
13016   // The LLVM shufflevector instruction does not require the shuffle mask
13017   // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
13018   // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
13019   // operands do not match the mask length, they are extended by concatenating
13020   // them with undef vectors.  That is probably the right thing for other
13021   // targets, but for NEON it is better to concatenate two double-register
13022   // size vector operands into a single quad-register size vector.  Do that
13023   // transformation here:
13024   //   shuffle(concat(v1, undef), concat(v2, undef)) ->
13025   //   shuffle(concat(v1, v2), undef)
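        //
        // Illustrative example (mask chosen for exposition): for a v8i16
        // shuffle of concat(v1, undef) and concat(v2, undef) with the
        // interleaving mask <0,8,1,9,2,10,3,11>, the result is a shuffle of
        // concat(v1, v2) with mask <0,4,1,5,2,6,3,7>.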
13026   SDValue Op0 = N->getOperand(0);
13027   SDValue Op1 = N->getOperand(1);
13028   if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
13029       Op1.getOpcode() != ISD::CONCAT_VECTORS ||
13030       Op0.getNumOperands() != 2 ||
13031       Op1.getNumOperands() != 2)
13032     return SDValue();
13033   SDValue Concat0Op1 = Op0.getOperand(1);
13034   SDValue Concat1Op1 = Op1.getOperand(1);
13035   if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
13036     return SDValue();
13037   // Skip the transformation if any of the types are illegal.
13038   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13039   EVT VT = N->getValueType(0);
13040   if (!TLI.isTypeLegal(VT) ||
13041       !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
13042       !TLI.isTypeLegal(Concat1Op1.getValueType()))
13043     return SDValue();
13044 
13045   SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
13046                                   Op0.getOperand(0), Op1.getOperand(0));
13047   // Translate the shuffle mask.
13048   SmallVector<int, 16> NewMask;
13049   unsigned NumElts = VT.getVectorNumElements();
13050   unsigned HalfElts = NumElts/2;
13051   ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
13052   for (unsigned n = 0; n < NumElts; ++n) {
13053     int MaskElt = SVN->getMaskElt(n);
13054     int NewElt = -1;
13055     if (MaskElt < (int)HalfElts)
13056       NewElt = MaskElt;
13057     else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
13058       NewElt = HalfElts + MaskElt - NumElts;
13059     NewMask.push_back(NewElt);
13060   }
13061   return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
13062                               DAG.getUNDEF(VT), NewMask);
13063 }
13064 
13065 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
13066 /// NEON load/store intrinsics, and generic vector load/stores, to merge
13067 /// base address updates.
13068 /// For generic load/stores, the memory type is assumed to be a vector.
13069 /// The caller is assumed to have checked legality.
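      ///
      /// Illustrative example (hand-derived): a (vld1 [r0]) of a 64-bit
      /// vector whose address is also consumed by (add r0, #8) can be merged
      /// into the post-incrementing form "vld1.32 {d0}, [r0]!", which updates
      /// r0 as a side effect of the load.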
13070 static SDValue CombineBaseUpdate(SDNode *N,
13071                                  TargetLowering::DAGCombinerInfo &DCI) {
13072   SelectionDAG &DAG = DCI.DAG;
13073   const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
13074                             N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
13075   const bool isStore = N->getOpcode() == ISD::STORE;
13076   const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
13077   SDValue Addr = N->getOperand(AddrOpIdx);
13078   MemSDNode *MemN = cast<MemSDNode>(N);
13079   SDLoc dl(N);
13080 
13081   // Search for a use of the address operand that is an increment.
13082   for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
13083          UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
13084     SDNode *User = *UI;
13085     if (User->getOpcode() != ISD::ADD ||
13086         UI.getUse().getResNo() != Addr.getResNo())
13087       continue;
13088 
13089     // Check that the add is independent of the load/store.  Otherwise, folding
13090     // it would create a cycle. We can avoid searching through Addr as it's a
13091     // predecessor to both.
13092     SmallPtrSet<const SDNode *, 32> Visited;
13093     SmallVector<const SDNode *, 16> Worklist;
13094     Visited.insert(Addr.getNode());
13095     Worklist.push_back(N);
13096     Worklist.push_back(User);
13097     if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
13098         SDNode::hasPredecessorHelper(User, Visited, Worklist))
13099       continue;
13100 
13101     // Find the new opcode for the updating load/store.
13102     bool isLoadOp = true;
13103     bool isLaneOp = false;
13104     unsigned NewOpc = 0;
13105     unsigned NumVecs = 0;
13106     if (isIntrinsic) {
13107       unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
13108       switch (IntNo) {
13109       default: llvm_unreachable("unexpected intrinsic for Neon base update");
13110       case Intrinsic::arm_neon_vld1:     NewOpc = ARMISD::VLD1_UPD;
13111         NumVecs = 1; break;
13112       case Intrinsic::arm_neon_vld2:     NewOpc = ARMISD::VLD2_UPD;
13113         NumVecs = 2; break;
13114       case Intrinsic::arm_neon_vld3:     NewOpc = ARMISD::VLD3_UPD;
13115         NumVecs = 3; break;
13116       case Intrinsic::arm_neon_vld4:     NewOpc = ARMISD::VLD4_UPD;
13117         NumVecs = 4; break;
13118       case Intrinsic::arm_neon_vld2dup:
13119       case Intrinsic::arm_neon_vld3dup:
13120       case Intrinsic::arm_neon_vld4dup:
13121         // TODO: Support updating VLDxDUP nodes. For now, we just skip
13122         // combining base updates for such intrinsics.
13123         continue;
13124       case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
13125         NumVecs = 2; isLaneOp = true; break;
13126       case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
13127         NumVecs = 3; isLaneOp = true; break;
13128       case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
13129         NumVecs = 4; isLaneOp = true; break;
13130       case Intrinsic::arm_neon_vst1:     NewOpc = ARMISD::VST1_UPD;
13131         NumVecs = 1; isLoadOp = false; break;
13132       case Intrinsic::arm_neon_vst2:     NewOpc = ARMISD::VST2_UPD;
13133         NumVecs = 2; isLoadOp = false; break;
13134       case Intrinsic::arm_neon_vst3:     NewOpc = ARMISD::VST3_UPD;
13135         NumVecs = 3; isLoadOp = false; break;
13136       case Intrinsic::arm_neon_vst4:     NewOpc = ARMISD::VST4_UPD;
13137         NumVecs = 4; isLoadOp = false; break;
13138       case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
13139         NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
13140       case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
13141         NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
13142       case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
13143         NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
13144       }
13145     } else {
13146       isLaneOp = true;
13147       switch (N->getOpcode()) {
13148       default: llvm_unreachable("unexpected opcode for Neon base update");
13149       case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
13150       case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
13151       case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
13152       case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
13153       case ISD::LOAD:       NewOpc = ARMISD::VLD1_UPD;
13154         NumVecs = 1; isLaneOp = false; break;
13155       case ISD::STORE:      NewOpc = ARMISD::VST1_UPD;
13156         NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
13157       }
13158     }
13159 
13160     // Find the size of memory referenced by the load/store.
13161     EVT VecTy;
13162     if (isLoadOp) {
13163       VecTy = N->getValueType(0);
13164     } else if (isIntrinsic) {
13165       VecTy = N->getOperand(AddrOpIdx+1).getValueType();
13166     } else {
13167       assert(isStore && "Node has to be a load, a store, or an intrinsic!");
13168       VecTy = N->getOperand(1).getValueType();
13169     }
13170 
13171     unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
13172     if (isLaneOp)
13173       NumBytes /= VecTy.getVectorNumElements();
13174 
13175     // If the increment is a constant, it must match the memory ref size.
13176     SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
13177     ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
13178     if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
13179       // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
13180       // separate instructions that make it harder to use a non-constant update.
13181       continue;
13182     }
13183 
13184     // OK, we found an ADD we can fold into the base update.
13185     // Now, create a _UPD node, taking care of not breaking alignment.
13186 
13187     EVT AlignedVecTy = VecTy;
13188     unsigned Alignment = MemN->getAlignment();
13189 
13190     // If this is a less-than-standard-aligned load/store, change the type to
13191     // match the standard alignment.
13192     // The alignment is overlooked when selecting _UPD variants; and it's
13193     // easier to introduce bitcasts here than fix that.
13194     // There are 3 ways to get to this base-update combine:
13195     // - intrinsics: they are assumed to be properly aligned (to the standard
13196     //   alignment of the memory type), so we don't need to do anything.
13197     // - ARMISD::VLDx nodes: they are only generated from the aforementioned
13198     //   intrinsics, so, likewise, there's nothing to do.
13199     // - generic load/store instructions: the alignment is specified as an
13200     //   explicit operand, rather than implicitly as the standard alignment
13201     //   of the memory type (like the intrinsics).  We need to change the
13202     //   memory type to match the explicit alignment.  That way, we don't
13203     //   generate non-standard-aligned ARMISD::VLDx nodes.
13204     if (isa<LSBaseSDNode>(N)) {
13205       if (Alignment == 0)
13206         Alignment = 1;
13207       if (Alignment < VecTy.getScalarSizeInBits() / 8) {
13208         MVT EltTy = MVT::getIntegerVT(Alignment * 8);
13209         assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
13210         assert(!isLaneOp && "Unexpected generic load/store lane.");
13211         unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
13212         AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
13213       }
13214       // Don't set an explicit alignment on regular load/stores that we want
13215       // to transform to VLD/VST 1_UPD nodes.
13216       // This matches the behavior of regular load/stores, which only get an
13217       // explicit alignment if the MMO alignment is larger than the standard
13218       // alignment of the memory type.
13219       // Intrinsics, however, always get an explicit alignment, set to the
13220       // alignment of the MMO.
13221       Alignment = 1;
13222     }
13223 
13224     // Create the new updating load/store node.
13225     // First, create an SDVTList for the new updating node's results.
13226     EVT Tys[6];
13227     unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
13228     unsigned n;
13229     for (n = 0; n < NumResultVecs; ++n)
13230       Tys[n] = AlignedVecTy;
13231     Tys[n++] = MVT::i32;
13232     Tys[n] = MVT::Other;
13233     SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
13234 
13235     // Then, gather the new node's operands.
13236     SmallVector<SDValue, 8> Ops;
13237     Ops.push_back(N->getOperand(0)); // incoming chain
13238     Ops.push_back(N->getOperand(AddrOpIdx));
13239     Ops.push_back(Inc);
13240 
13241     if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
13242       // Try to match the intrinsic's signature
13243       Ops.push_back(StN->getValue());
13244     } else {
13245       // Loads (and of course intrinsics) match the intrinsics' signature,
13246       // so just add all but the alignment operand.
13247       for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
13248         Ops.push_back(N->getOperand(i));
13249     }
13250 
13251     // For all node types, the alignment operand is always the last one.
13252     Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
13253 
13254     // If this is a non-standard-aligned STORE, the penultimate operand is the
13255     // stored value.  Bitcast it to the aligned type.
13256     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
13257       SDValue &StVal = Ops[Ops.size()-2];
13258       StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
13259     }
13260 
13261     EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
13262     SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
13263                                            MemN->getMemOperand());
13264 
13265     // Update the uses.
13266     SmallVector<SDValue, 5> NewResults;
13267     for (unsigned i = 0; i < NumResultVecs; ++i)
13268       NewResults.push_back(SDValue(UpdN.getNode(), i));
13269 
13270     // If this is a non-standard-aligned LOAD, the first result is the loaded
13271     // value.  Bitcast it to the expected result type.
13272     if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
13273       SDValue &LdVal = NewResults[0];
13274       LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
13275     }
13276 
13277     NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
13278     DCI.CombineTo(N, NewResults);
13279     DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
13280 
13281     break;
13282   }
13283   return SDValue();
13284 }
13285 
13286 static SDValue PerformVLDCombine(SDNode *N,
13287                                  TargetLowering::DAGCombinerInfo &DCI) {
13288   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
13289     return SDValue();
13290 
13291   return CombineBaseUpdate(N, DCI);
13292 }
13293 
13294 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
13295 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
13296 /// are also VDUPLANEs.  If so, combine them to a vldN-dup operation and
13297 /// return true.
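      ///
      /// Illustrative example: if both results of (vld2lane ..., lane 0) are
      /// used only by VDUPLANE(..., 0) nodes, the group is rewritten as a
      /// single VLD2DUP (e.g. "vld2.16 {d0[], d1[]}, [r0]"), which replicates
      /// the loaded values across all lanes directly.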
13298 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13299   SelectionDAG &DAG = DCI.DAG;
13300   EVT VT = N->getValueType(0);
13301   // vldN-dup instructions only support 64-bit vectors for N > 1.
13302   if (!VT.is64BitVector())
13303     return false;
13304 
13305   // Check if the VDUPLANE operand is a vldN-dup intrinsic.
13306   SDNode *VLD = N->getOperand(0).getNode();
13307   if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
13308     return false;
13309   unsigned NumVecs = 0;
13310   unsigned NewOpc = 0;
13311   unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
13312   if (IntNo == Intrinsic::arm_neon_vld2lane) {
13313     NumVecs = 2;
13314     NewOpc = ARMISD::VLD2DUP;
13315   } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
13316     NumVecs = 3;
13317     NewOpc = ARMISD::VLD3DUP;
13318   } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
13319     NumVecs = 4;
13320     NewOpc = ARMISD::VLD4DUP;
13321   } else {
13322     return false;
13323   }
13324 
13325   // First check that all the vldN-lane uses are VDUPLANEs and that the lane
13326   // numbers match the load.
13327   unsigned VLDLaneNo =
13328     cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
13329   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
13330        UI != UE; ++UI) {
13331     // Ignore uses of the chain result.
13332     if (UI.getUse().getResNo() == NumVecs)
13333       continue;
13334     SDNode *User = *UI;
13335     if (User->getOpcode() != ARMISD::VDUPLANE ||
13336         VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
13337       return false;
13338   }
13339 
13340   // Create the vldN-dup node.
13341   EVT Tys[5];
13342   unsigned n;
13343   for (n = 0; n < NumVecs; ++n)
13344     Tys[n] = VT;
13345   Tys[n] = MVT::Other;
13346   SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
13347   SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
13348   MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
13349   SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
13350                                            Ops, VLDMemInt->getMemoryVT(),
13351                                            VLDMemInt->getMemOperand());
13352 
13353   // Update the uses.
13354   for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
13355        UI != UE; ++UI) {
13356     unsigned ResNo = UI.getUse().getResNo();
13357     // Ignore uses of the chain result.
13358     if (ResNo == NumVecs)
13359       continue;
13360     SDNode *User = *UI;
13361     DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
13362   }
13363 
13364   // Now the vldN-lane intrinsic is dead except for its chain result.
13365   // Update uses of the chain.
13366   std::vector<SDValue> VLDDupResults;
13367   for (unsigned n = 0; n < NumVecs; ++n)
13368     VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
13369   VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
13370   DCI.CombineTo(VLD, VLDDupResults);
13371 
13372   return true;
13373 }
13374 
13375 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
13376 /// ARMISD::VDUPLANE.
13377 static SDValue PerformVDUPLANECombine(SDNode *N,
13378                                       TargetLowering::DAGCombinerInfo &DCI) {
13379   SDValue Op = N->getOperand(0);
13380 
13381   // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
13382   // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
13383   if (CombineVLDDUP(N, DCI))
13384     return SDValue(N, 0);
13385 
13386   // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
13387   // redundant.  Ignore bit_converts for now; element sizes are checked below.
13388   while (Op.getOpcode() == ISD::BITCAST)
13389     Op = Op.getOperand(0);
13390   if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
13391     return SDValue();
13392 
13393   // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
13394   unsigned EltSize = Op.getScalarValueSizeInBits();
13395   // The canonical VMOV for a zero vector uses a 32-bit element size.
13396   unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
13397   unsigned EltBits;
13398   if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
13399     EltSize = 8;
13400   EVT VT = N->getValueType(0);
13401   if (EltSize > VT.getScalarSizeInBits())
13402     return SDValue();
13403 
13404   return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
13405 }
13406 
13407 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
13408 static SDValue PerformVDUPCombine(SDNode *N,
13409                                   TargetLowering::DAGCombinerInfo &DCI,
13410                                   const ARMSubtarget *Subtarget) {
13411   SelectionDAG &DAG = DCI.DAG;
13412   SDValue Op = N->getOperand(0);
13413 
13414   if (!Subtarget->hasNEON())
13415     return SDValue();
13416 
13417   // Match VDUP(LOAD) -> VLD1DUP.
13418   // We match this pattern here rather than waiting for isel because the
13419   // transform is only legal for unindexed loads.
13420   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
13421   if (LD && Op.hasOneUse() && LD->isUnindexed() &&
13422       LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
13423     SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
13424                       DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
13425     SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
13426     SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
13427                                              Ops, LD->getMemoryVT(),
13428                                              LD->getMemOperand());
13429     DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
13430     return VLDDup;
13431   }
13432 
13433   return SDValue();
13434 }
13435 
13436 static SDValue PerformLOADCombine(SDNode *N,
13437                                   TargetLowering::DAGCombinerInfo &DCI) {
13438   EVT VT = N->getValueType(0);
13439 
13440   // If this is a legal vector load, try to combine it into a VLD1_UPD.
13441   if (ISD::isNormalLoad(N) && VT.isVector() &&
13442       DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
13443     return CombineBaseUpdate(N, DCI);
13444 
13445   return SDValue();
13446 }
13447 
13448 // Optimize trunc store (of multiple scalars) to shuffle and store.  First,
13449 // pack all of the elements in one place.  Next, store to memory in fewer
13450 // chunks.
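      //
      // Illustrative example (hand-derived): a truncating store of v4i32 as
      // v4i8 shuffles the four low bytes of the v16i8-bitcast source to the
      // bottom of the register and then emits a single i32 store, instead of
      // four separate byte-sized stores.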
13451 static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
13452                                              SelectionDAG &DAG) {
13453   SDValue StVal = St->getValue();
13454   EVT VT = StVal.getValueType();
13455   if (!St->isTruncatingStore() || !VT.isVector())
13456     return SDValue();
13457   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13458   EVT StVT = St->getMemoryVT();
13459   unsigned NumElems = VT.getVectorNumElements();
13460   assert(StVT != VT && "Cannot truncate to the same type");
13461   unsigned FromEltSz = VT.getScalarSizeInBits();
13462   unsigned ToEltSz = StVT.getScalarSizeInBits();
13463 
13464   // From, To sizes and ElemCount must be pow of two
13465   if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
13466     return SDValue();
13467 
13468   // We are going to use the original vector elt for storing.
13469   // Accumulated smaller vector elements must be a multiple of the store size.
13470   if (0 != (NumElems * FromEltSz) % ToEltSz)
13471     return SDValue();
13472 
13473   unsigned SizeRatio = FromEltSz / ToEltSz;
13474   assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
13475 
13476   // Create a type on which we perform the shuffle.
13477   EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
13478                                    NumElems * SizeRatio);
13479   assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
13480 
13481   SDLoc DL(St);
13482   SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
13483   SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
13484   for (unsigned i = 0; i < NumElems; ++i)
13485     ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
13486                                                       : i * SizeRatio;
13487 
13488   // Can't shuffle using an illegal type.
13489   if (!TLI.isTypeLegal(WideVecVT))
13490     return SDValue();
13491 
13492   SDValue Shuff = DAG.getVectorShuffle(
13493       WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
13494   // At this point all of the data is stored at the bottom of the
13495   // register. We now need to save it to mem.
13496 
13497   // Find the largest store unit
13498   MVT StoreType = MVT::i8;
13499   for (MVT Tp : MVT::integer_valuetypes()) {
13500     if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
13501       StoreType = Tp;
13502   }
13503   // Didn't find a legal store type.
13504   if (!TLI.isTypeLegal(StoreType))
13505     return SDValue();
13506 
13507   // Bitcast the original vector into a vector of store-size units
13508   EVT StoreVecVT =
13509       EVT::getVectorVT(*DAG.getContext(), StoreType,
13510                        VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
13511   assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
13512   SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
13513   SmallVector<SDValue, 8> Chains;
13514   SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
13515                                       TLI.getPointerTy(DAG.getDataLayout()));
13516   SDValue BasePtr = St->getBasePtr();
13517 
13518   // Perform one or more big stores into memory.
13519   unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
13520   for (unsigned I = 0; I < E; I++) {
13521     SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
13522                                  ShuffWide, DAG.getIntPtrConstant(I, DL));
13523     SDValue Ch =
13524         DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
13525                      St->getAlignment(), St->getMemOperand()->getFlags());
13526     BasePtr =
13527         DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
13528     Chains.push_back(Ch);
13529   }
13530   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
13531 }
13532 
// Try taking a single vector store from a truncate (which would otherwise turn
// into an expensive buildvector) and splitting it into a series of narrowing
// stores.
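// For example (an illustrative sketch):
//   store (v8i16 (trunc v8i32 %x)), addr
//     -> truncstore (v4i32 extract_subvector(%x, 0)) to v4i16 at addr
//        truncstore (v4i32 extract_subvector(%x, 4)) to v4i16 at addr+8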
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
                                                 SelectionDAG &DAG) {
  if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
    return SDValue();
  SDValue Trunc = St->getValue();
  if (Trunc->getOpcode() != ISD::TRUNCATE)
    return SDValue();
  EVT FromVT = Trunc->getOperand(0).getValueType();
  EVT ToVT = Trunc.getValueType();
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  unsigned NumElements = 0;
  if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
    NumElements = 4;
  if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
    NumElements = 8;
  if (NumElements == 0 || FromVT.getVectorNumElements() == NumElements ||
      FromVT.getVectorNumElements() % NumElements != 0)
    return SDValue();

  SDLoc DL(St);
  // Details about the old store.
  SDValue Ch = St->getChain();
  SDValue BasePtr = St->getBasePtr();
  unsigned Alignment = St->getOriginalAlignment();
  MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
  AAMDNodes AAInfo = St->getAAInfo();

  EVT NewFromVT = EVT::getVectorVT(*DAG.getContext(), FromEltVT, NumElements);
  EVT NewToVT = EVT::getVectorVT(*DAG.getContext(), ToEltVT, NumElements);

  SmallVector<SDValue, 4> Stores;
  for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
    unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
    SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);

    SDValue Extract =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
                    DAG.getConstant(i * NumElements, DL, MVT::i32));
    SDValue Store = DAG.getTruncStore(
        Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
        NewToVT, Alignment, MMOFlags, AAInfo);
    Stores.push_back(Store);
  }
  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
}

/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  if (St->isVolatile())
    return SDValue();
  SDValue StVal = St->getValue();
  EVT VT = StVal.getValueType();

  if (Subtarget->hasNEON())
    if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
      return Store;

  if (Subtarget->hasMVEIntegerOps())
    if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
      return NewToken;

  if (!ISD::isNormalStore(St))
    return SDValue();

  // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON
  // and ARM stores of arguments in the same cache line.
  if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
      StVal.getNode()->hasOneUse()) {
    SelectionDAG &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
    SDValue BasePtr = St->getBasePtr();
    SDValue NewST1 = DAG.getStore(
        St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
        BasePtr, St->getPointerInfo(), St->getAlignment(),
        St->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));
    return DAG.getStore(NewST1.getValue(0), DL,
                        StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
                        OffsetPtr, St->getPointerInfo(),
                        std::min(4U, St->getAlignment() / 2),
                        St->getMemOperand()->getFlags());
  }

  if (StVal.getValueType() == MVT::i64 &&
      StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

    // Bitcast an i64 store extracted from a vector to f64.
    // Otherwise, the i64 value will be legalized to a pair of i32 values.
    SelectionDAG &DAG = DCI.DAG;
    SDLoc dl(StVal);
    SDValue IntVec = StVal.getOperand(0);
    EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                   IntVec.getValueType().getVectorNumElements());
    SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
    SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                 Vec, StVal.getOperand(1));
    dl = SDLoc(N);
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
    // Make the DAGCombiner fold the bitcasts.
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(ExtElt.getNode());
    DCI.AddToWorklist(V.getNode());
    return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags(), St->getAAInfo());
  }

  // If this is a legal vector store, try to combine it into a VST1_UPD.
  if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}

/// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
/// can replace combinations of VMUL and VCVT (floating-point to integer)
/// when the VMUL has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vmul.f32        d16, d17, d16
///  vcvt.s32.f32    d16, d16
/// becomes:
///  vcvt.s32.f32    d16, d16, #3
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
      Op.getOpcode() != ISD::FMUL)
    return SDValue();

  SDValue ConstVec = Op->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from f32 to i32. We can handle
    // smaller integers by generating an extra truncate, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
    Intrinsic::arm_neon_vcvtfp2fxu;
  SDValue FixConv = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
      DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
      DAG.getConstant(C, dl, MVT::i32));

  if (IntBits < FloatBits)
    FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);

  return FixConv;
}

/// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
/// can replace combinations of VCVT (integer to floating-point) and VDIV
/// when the VDIV has a constant operand that is a power of 2.
///
/// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
///  vcvt.f32.s32    d16, d16
///  vdiv.f32        d16, d17, d16
/// becomes:
///  vcvt.f32.s32    d16, d16, #3
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();

  SDValue Op = N->getOperand(0);
  unsigned OpOpcode = Op.getNode()->getOpcode();
  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
      (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
    return SDValue();

  SDValue ConstVec = N->getOperand(1);
  if (!isa<BuildVectorSDNode>(ConstVec))
    return SDValue();

  MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
  uint32_t FloatBits = FloatTy.getSizeInBits();
  MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
  uint32_t IntBits = IntTy.getSizeInBits();
  unsigned NumLanes = Op.getValueType().getVectorNumElements();
  if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
    // These instructions only exist converting from i32 to f32. We can handle
    // smaller integers by generating an extra extend, but larger ones would
    // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }

  BitVector UndefElements;
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
  int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
  bool isSigned = OpOpcode == ISD::SINT_TO_FP;
  SDValue ConvInput = Op.getOperand(0);
  if (IntBits < FloatBits)
    ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                            dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
                            ConvInput);

  unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
    Intrinsic::arm_neon_vcvtfxu2fp;
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
                     Op.getValueType(),
                     DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
                     ConvInput, DAG.getConstant(C, dl, MVT::i32));
}

/// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;

  // Vector shifts: check for immediate versions and lower them.
  // Note: This is done during DAG combining instead of DAG legalizing because
  // the build_vectors for 64-bit vector element shift counts are generally
  // not legal, and it is hard to see their values after they get legalized to
  // loads from a constant pool.
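  // For example (a sketch):
  //   (int_arm_neon_vshifts %x, (build_vector <3, 3, 3, 3>))
  //     -> (ARMISD::VSHLIMM %x, 3)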
  case Intrinsic::arm_neon_vshifts:
  case Intrinsic::arm_neon_vshiftu:
  case Intrinsic::arm_neon_vrshifts:
  case Intrinsic::arm_neon_vrshiftu:
  case Intrinsic::arm_neon_vrshiftn:
  case Intrinsic::arm_neon_vqshifts:
  case Intrinsic::arm_neon_vqshiftu:
  case Intrinsic::arm_neon_vqshiftsu:
  case Intrinsic::arm_neon_vqshiftns:
  case Intrinsic::arm_neon_vqshiftnu:
  case Intrinsic::arm_neon_vqshiftnsu:
  case Intrinsic::arm_neon_vqrshiftns:
  case Intrinsic::arm_neon_vqrshiftnu:
  case Intrinsic::arm_neon_vqrshiftnsu: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
        VShiftOpc = ARMISD::VSHLIMM;
        break;
      }
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
        VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
                                                          : ARMISD::VSHRuIMM);
        break;
      }
      return SDValue();

    case Intrinsic::arm_neon_vrshifts:
    case Intrinsic::arm_neon_vrshiftu:
      if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshifts:
    case Intrinsic::arm_neon_vqshiftu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      return SDValue();

    case Intrinsic::arm_neon_vqshiftsu:
      if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
        break;
      llvm_unreachable("invalid shift count for vqshlu intrinsic");

    case Intrinsic::arm_neon_vrshiftn:
    case Intrinsic::arm_neon_vqshiftns:
    case Intrinsic::arm_neon_vqshiftnu:
    case Intrinsic::arm_neon_vqshiftnsu:
    case Intrinsic::arm_neon_vqrshiftns:
    case Intrinsic::arm_neon_vqrshiftnu:
    case Intrinsic::arm_neon_vqrshiftnsu:
      // Narrowing shifts require an immediate right shift.
      if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
        break;
      llvm_unreachable("invalid shift count for narrowing vector shift "
                       "intrinsic");

    default:
      llvm_unreachable("unhandled vector shift");
    }

    switch (IntNo) {
    case Intrinsic::arm_neon_vshifts:
    case Intrinsic::arm_neon_vshiftu:
      // Opcode already set above.
      break;
    case Intrinsic::arm_neon_vrshifts:
      VShiftOpc = ARMISD::VRSHRsIMM;
      break;
    case Intrinsic::arm_neon_vrshiftu:
      VShiftOpc = ARMISD::VRSHRuIMM;
      break;
    case Intrinsic::arm_neon_vrshiftn:
      VShiftOpc = ARMISD::VRSHRNIMM;
      break;
    case Intrinsic::arm_neon_vqshifts:
      VShiftOpc = ARMISD::VQSHLsIMM;
      break;
    case Intrinsic::arm_neon_vqshiftu:
      VShiftOpc = ARMISD::VQSHLuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftsu:
      VShiftOpc = ARMISD::VQSHLsuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftns:
      VShiftOpc = ARMISD::VQSHRNsIMM;
      break;
    case Intrinsic::arm_neon_vqshiftnu:
      VShiftOpc = ARMISD::VQSHRNuIMM;
      break;
    case Intrinsic::arm_neon_vqshiftnsu:
      VShiftOpc = ARMISD::VQSHRNsuIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftns:
      VShiftOpc = ARMISD::VQRSHRNsIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftnu:
      VShiftOpc = ARMISD::VQRSHRNuIMM;
      break;
    case Intrinsic::arm_neon_vqrshiftnsu:
      VShiftOpc = ARMISD::VQRSHRNsuIMM;
      break;
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vshiftins: {
    EVT VT = N->getOperand(1).getValueType();
    int64_t Cnt;
    unsigned VShiftOpc = 0;

    if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
      VShiftOpc = ARMISD::VSLIIMM;
    else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
      VShiftOpc = ARMISD::VSRIIMM;
    else {
      llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
    }

    SDLoc dl(N);
    return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
                       N->getOperand(1), N->getOperand(2),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  case Intrinsic::arm_neon_vqrshifts:
  case Intrinsic::arm_neon_vqrshiftu:
    // No immediate versions of these to check for.
    break;
  }

  return SDValue();
}

/// PerformShiftCombine - Checks for immediate versions of vector shifts and
/// lowers them.  As with the vector shift intrinsics, this is done during DAG
/// combining instead of DAG legalizing because the build_vectors for 64-bit
/// vector element shift counts are generally not legal, and it is hard to see
/// their values after they get legalized to loads from a constant pool.
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
    // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
    // 16 bits of x are zero. This optimizes rev + lsr 16 to rev16.
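    // For example (illustrative), with the top half of r0 known zero:
    //   rev r0, r0
    //   lsr r0, r0, #16
    // becomes:
    //   rev16 r0, r0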
    SDValue N1 = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
      SDValue N0 = N->getOperand(0);
      if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
          DAG.MaskedValueIsZero(N0.getOperand(0),
                                APInt::getHighBitsSet(32, 16)))
        return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
    }
  }

  if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
      N->getOperand(0)->getOpcode() == ISD::AND &&
      N->getOperand(0)->hasOneUse()) {
    if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
      return SDValue();
    // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
    // usually show up because instcombine prefers to canonicalize it to
    // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
    // out of GEP lowering in some cases.
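    // For example (a sketch), with AndMask = 0x3ffff and ShiftAmt = 2:
    //   (shl (and x, 0x3ffff), 2) -> (srl (shl x, 14), 12)
    // which avoids materializing the mask in a register on Thumb1.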
    SDValue N0 = N->getOperand(0);
    ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!ShiftAmtNode)
      return SDValue();
    uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
    ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    if (!AndMaskNode)
      return SDValue();
    uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
    // Don't transform uxtb/uxth.
    if (AndMask == 255 || AndMask == 65535)
      return SDValue();
    if (isMask_32(AndMask)) {
      uint32_t MaskedBits = countLeadingZeros(AndMask);
      if (MaskedBits > ShiftAmt) {
        SDLoc DL(N);
        SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                  DAG.getConstant(MaskedBits, DL, MVT::i32));
        return DAG.getNode(
            ISD::SRL, DL, MVT::i32, SHL,
            DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
      }
    }
  }

  // Nothing to be done for scalar shifts.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!VT.isVector() || !TLI.isTypeLegal(VT))
    return SDValue();
  if (ST->hasMVEIntegerOps() && VT == MVT::v2i64)
    return SDValue();

  int64_t Cnt;

  switch (N->getOpcode()) {
  default: llvm_unreachable("unexpected shift opcode");

  case ISD::SHL:
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
      SDLoc dl(N);
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
    break;

  case ISD::SRA:
  case ISD::SRL:
    if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
      unsigned VShiftOpc =
          (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
      SDLoc dl(N);
      return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    }
  }
  return SDValue();
}

// Look for a sign/zero extend of a larger than legal load. This can be split
// into two extending loads, which are simpler to deal with than an arbitrary
// sign extend.
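// For example (an illustrative sketch, with MVE):
//   (v16i16 (zext (v16i8 (load addr))))
//     -> (concat_vectors (v8i16 (zextload addr,   v8i8)),
//                        (v8i16 (zextload addr+8, v8i8)))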
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  if (N0.getOpcode() != ISD::LOAD)
    return SDValue();
  LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
  if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return SDValue();
  EVT FromVT = LD->getValueType(0);
  EVT ToVT = N->getValueType(0);
  if (!ToVT.isVector())
    return SDValue();
  assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
  EVT ToEltVT = ToVT.getVectorElementType();
  EVT FromEltVT = FromVT.getVectorElementType();

  unsigned NumElements = 0;
  if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
    NumElements = 4;
  if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
    NumElements = 8;
  if (NumElements == 0 ||
      FromVT.getVectorNumElements() == NumElements ||
      FromVT.getVectorNumElements() % NumElements != 0 ||
      !isPowerOf2_32(NumElements))
    return SDValue();

  SDLoc DL(LD);
  // Details about the old load.
  SDValue Ch = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  unsigned Alignment = LD->getOriginalAlignment();
  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
  AAMDNodes AAInfo = LD->getAAInfo();

  ISD::LoadExtType NewExtType =
      N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
  SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
  EVT NewFromVT = FromVT.getHalfNumVectorElementsVT(*DAG.getContext());
  EVT NewToVT = ToVT.getHalfNumVectorElementsVT(*DAG.getContext());
  unsigned NewOffset = NewFromVT.getSizeInBits() / 8;
  SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);

  // Split the load in half, each side of which is extended separately. This
  // is good enough, as legalisation will take it from there. They are either
  // already legal or they will be split further into something that is
  // legal.
  SDValue NewLoad1 =
      DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, BasePtr, Offset,
                  LD->getPointerInfo(), NewFromVT, Alignment, MMOFlags, AAInfo);
  SDValue NewLoad2 =
      DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
                  LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
                  Alignment, MMOFlags, AAInfo);

  SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                 SDValue(NewLoad1.getNode(), 1),
                                 SDValue(NewLoad2.getNode(), 1));
  DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, NewLoad1, NewLoad2);
}

/// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
/// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {
  SDValue N0 = N->getOperand(0);

  // Check for sign- and zero-extensions of vector extract operations of 8- and
  // 16-bit vector elements. NEON and MVE support these directly. They are
  // handled during DAG combining because type legalization will promote them
  // to 32-bit types and it is messy to recognize the operations after that.
  if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
      N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue Vec = N0.getOperand(0);
    SDValue Lane = N0.getOperand(1);
    EVT VT = N->getValueType(0);
    EVT EltVT = N0.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();

    if (VT == MVT::i32 &&
        (EltVT == MVT::i8 || EltVT == MVT::i16) &&
        TLI.isTypeLegal(Vec.getValueType()) &&
        isa<ConstantSDNode>(Lane)) {

      unsigned Opc = 0;
      switch (N->getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ISD::SIGN_EXTEND:
        Opc = ARMISD::VGETLANEs;
        break;
      case ISD::ZERO_EXTEND:
      case ISD::ANY_EXTEND:
        Opc = ARMISD::VGETLANEu;
        break;
      }
      return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
    }
  }

  if (ST->hasMVEIntegerOps())
    if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
      return NewLoad;

  return SDValue();
}

static const APInt *isPowerOf2Constant(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  if (!C)
    return nullptr;
  const APInt *CV = &C->getAPIntValue();
  return CV->isPowerOf2() ? CV : nullptr;
}

SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV,
                                                   SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  //
  // And:
  //   * CN is a single bit;
  //   * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).
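  //
  // For example (a sketch, with CN = 4, CM = 0x300, and bits 8-9 of y known
  // zero):
  //   (cmov y, (or y, 0x300), ne, CPSR, (cmpz (and x, 4), 0))
  // becomes a shift of x right by 2 followed by two BFIs that copy the
  // tested bit into bits 8 and 9 of y.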

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
  if (!AndC)
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are zero in Y?
  KnownBits Known = DAG.computeKnownBits(Y);
  if ((OrCI & Known.Zero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}

// Given N, the value controlling the conditional branch, search for the loop
// intrinsic, returning it, along with how the value is used. We need to handle
// patterns such as the following:
// (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
// (brcond (setcc (loop.decrement), 0, eq), exit)
// (brcond (setcc (loop.decrement), 0, ne), header)
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::XOR: {
    if (!isa<ConstantSDNode>(N.getOperand(1)))
      return SDValue();
    if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
      return SDValue();
    Negate = !Negate;
    return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
  }
  case ISD::SETCC: {
    auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!Const)
      return SDValue();
    if (Const->isNullValue())
      Imm = 0;
    else if (Const->isOne())
      Imm = 1;
    else
      return SDValue();
    CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
    return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
    if (IntOp != Intrinsic::test_set_loop_iterations &&
        IntOp != Intrinsic::loop_decrement_reg)
      return SDValue();
    return N;
  }
  }
  return SDValue();
}

static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {

  // The hwloop intrinsics that we're interested in are used for control flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the following branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the following branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond uses the result of each of the
  // intrinsics to ensure that we're branching to the right place at the
  // right time.
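  //
  // For example (a sketch):
  //   (brcond (setcc (int_test_set_loop_iterations %n), 0, eq), %exit)
  // becomes (ARMISD::WLS chain, %n, %exit), which branches to %exit when
  // the iteration count %n is zero.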

  ISD::CondCode CC;
  SDValue Cond;
  int Imm = 1;
  bool Negate = false;
  SDValue Chain = N->getOperand(0);
  SDValue Dest;

  if (N->getOpcode() == ISD::BRCOND) {
    CC = ISD::SETEQ;
    Cond = N->getOperand(1);
    Dest = N->getOperand(2);
  } else {
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
    CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    Cond = N->getOperand(2);
    Dest = N->getOperand(4);
    if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
      if (!Const->isOne() && !Const->isNullValue())
        return SDValue();
      Imm = Const->getZExtValue();
    } else
      return SDValue();
  }

  SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
  if (!Int)
    return SDValue();

  if (Negate)
    CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);

  auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 0) ||
           (CC == ISD::SETNE && Imm == 1) ||
           (CC == ISD::SETLT && Imm == 1) ||
           (CC == ISD::SETULT && Imm == 1);
  };

  auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 1) ||
           (CC == ISD::SETNE && Imm == 0) ||
           (CC == ISD::SETGT && Imm == 0) ||
           (CC == ISD::SETUGT && Imm == 0) ||
           (CC == ISD::SETGE && Imm == 1) ||
           (CC == ISD::SETUGE && Imm == 1);
  };

  assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
         "unsupported condition");

  SDLoc dl(Int);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Elements = Int.getOperand(2);
  unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
          && "expected single br user");
  SDNode *Br = *N->use_begin();
  SDValue OtherTarget = Br->getOperand(1);

  // Update the unconditional branch to branch to the given Dest.
  auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
    SDValue NewBrOps[] = { Br->getOperand(0), Dest };
    SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
  };

  if (IntOp == Intrinsic::test_set_loop_iterations) {
    SDValue Res;
    // We expect this 'instruction' to branch when the counter is zero.
    if (IsTrueIfZero(CC, Imm)) {
      SDValue Ops[] = { Chain, Elements, Dest };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    } else {
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the following br.
      UpdateUncondBr(Br, Dest, DAG);

      SDValue Ops[] = { Chain, Elements, OtherTarget };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    }
    DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
    return Res;
  } else {
    SDValue Size = DAG.getTargetConstant(
      cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
    SDValue Args[] = { Int.getOperand(0), Elements, Size };
    SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
                                  DAG.getVTList(MVT::i32, MVT::Other), Args);
    DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());

    // We expect this instruction to branch when the count is not zero.
    SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;

    // Update the unconditional branch to target the loop preheader if we've
    // found the condition has been reversed.
    if (Target == OtherTarget)
      UpdateUncondBr(Br, Dest, DAG);

    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        SDValue(LoopDec.getNode(), 1), Chain);

    SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
    return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
  }
  return SDValue();
}

/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}

/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, y
  //   moveq   r0, x
  // to
  //   cmp     r0, x
  //   movne   r0, y
  //
  //   mov     r1, r0
  //   cmp     r1, x
  //   mov     r0, x
  //   movne   r0, y
  // to
  //   cmp     r0, x
  //   movne   r0, y
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (!VT.isInteger())
    return SDValue();

  // Materialize a boolean comparison for integers so we can avoid branching.
  if (isNullConstant(FalseVal)) {
    if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
      if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
        // right 5 bits will make that 32 be 1, otherwise it will be 0.
        // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
                          DAG.getConstant(5, dl, MVT::i32));
      } else {
        // CMOV 0, 1, ==, (CMPZ x, y) ->
        //     (ADDCARRY (SUB x, y), t:0, t:1)
        // where t = (SUBCARRY 0, (SUB x, y), 0)
        //
        // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
        // x != y. In other words, a carry C == 1 when x == y, C == 0
        // otherwise.
        // The final ADDCARRY computes
        //     x - y + (0 - (x - y)) + C == C
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        SDVTList VTs = DAG.getVTList(VT, MVT::i32);
        SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // ISD::USUBO returns a borrow, but we actually want the carry here.
        SDValue Carry =
            DAG.getNode(ISD::SUB, dl, MVT::i32,
                        DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
        Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
      }
    } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
               (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
      // This seems pointless but will allow us to combine it further below.
      // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  } else if (isNullConstant(TrueVal)) {
    if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
        (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below.
      // Note that we change == for != as this is the dual for the case above.
      // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
                        DAG.getConstant(ARMCC::NE, dl, MVT::i32),
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  }

  // On Thumb1, the DAG above may be further combined if z is a power of 2
  // (z == 2 ^ K).
  // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
  // t1 = (USUBO (SUB x, y), 1)
  // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  //
  // This also handles the special case of comparing against zero; it's
  // essentially the same pattern, except there's no SUBS:
  // CMOV x, z, !=, (CMPZ x, 0) ->
  // t1 = (USUBO x, 1)
  // t2 = (SUBCARRY x, t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  const APInt *TrueConst;
  if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
      ((FalseVal.getOpcode() == ARMISD::SUBS &&
        FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
       (FalseVal == LHS && isNullConstant(RHS))) &&
      (TrueConst = isPowerOf2Constant(TrueVal))) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    unsigned ShiftAmount = TrueConst->logBase2();
    if (ShiftAmount)
      TrueVal = DAG.getConstant(1, dl, VT);
    SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
    Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));

    if (ShiftAmount)
      Res = DAG.getNode(ISD::SHL, dl, VT, Res,
                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  if (Res.getNode()) {
    KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
    // Capture demanded bits information that would be otherwise lost.
    if (Known.Zero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (Known.Zero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (Known.Zero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}
14614 
PerformDAGCombine(SDNode * N,DAGCombinerInfo & DCI) const14615 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
14616                                              DAGCombinerInfo &DCI) const {
14617   switch (N->getOpcode()) {
14618   default: break;
14619   case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
14620   case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
14621   case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
14622   case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
14623   case ISD::SUB:        return PerformSUBCombine(N, DCI, Subtarget);
14624   case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
14625   case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
14626   case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
14627   case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
14628   case ISD::BRCOND:
14629   case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
14630   case ARMISD::ADDC:
14631   case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
14632   case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
14633   case ARMISD::BFI:     return PerformBFICombine(N, DCI);
14634   case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
14635   case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
14636   case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
14637   case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
14638   case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
14639   case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
14640   case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
14641   case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
14642   case ISD::FP_TO_SINT:
14643   case ISD::FP_TO_UINT:
14644     return PerformVCVTCombine(N, DCI.DAG, Subtarget);
14645   case ISD::FDIV:
14646     return PerformVDIVCombine(N, DCI.DAG, Subtarget);
14647   case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
14648   case ISD::SHL:
14649   case ISD::SRA:
14650   case ISD::SRL:
14651     return PerformShiftCombine(N, DCI, Subtarget);
14652   case ISD::SIGN_EXTEND:
14653   case ISD::ZERO_EXTEND:
14654   case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
14655   case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
14656   case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
14657   case ISD::LOAD:       return PerformLOADCombine(N, DCI);
14658   case ARMISD::VLD1DUP:
14659   case ARMISD::VLD2DUP:
14660   case ARMISD::VLD3DUP:
14661   case ARMISD::VLD4DUP:
14662     return PerformVLDCombine(N, DCI);
14663   case ARMISD::BUILD_VECTOR:
14664     return PerformARMBUILD_VECTORCombine(N, DCI);
14665   case ARMISD::PREDICATE_CAST:
14666     return PerformPREDICATE_CASTCombine(N, DCI);
14667   case ARMISD::VCMP:
14668     return PerformVCMPCombine(N, DCI, Subtarget);
14669   case ARMISD::SMULWB: {
14670     unsigned BitWidth = N->getValueType(0).getSizeInBits();
14671     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
14672     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
14673       return SDValue();
14674     break;
14675   }
14676   case ARMISD::SMULWT: {
14677     unsigned BitWidth = N->getValueType(0).getSizeInBits();
14678     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
14679     if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
14680       return SDValue();
14681     break;
14682   }
14683   case ARMISD::SMLALBB:
14684   case ARMISD::QADD16b:
14685   case ARMISD::QSUB16b: {
14686     unsigned BitWidth = N->getValueType(0).getSizeInBits();
14687     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
14688     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
14689         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
14690       return SDValue();
14691     break;
14692   }
14693   case ARMISD::SMLALBT: {
14694     unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
14695     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
14696     unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
14697     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
14698     if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
14699         (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
14700       return SDValue();
14701     break;
14702   }
14703   case ARMISD::SMLALTB: {
14704     unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
14705     APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
14706     unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
14707     APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
14708     if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
14709         (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
14710       return SDValue();
14711     break;
14712   }
14713   case ARMISD::SMLALTT: {
14714     unsigned BitWidth = N->getValueType(0).getSizeInBits();
14715     APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
14716     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
14717         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
14718       return SDValue();
14719     break;
14720   }
14721   case ARMISD::QADD8b:
14722   case ARMISD::QSUB8b: {
14723     unsigned BitWidth = N->getValueType(0).getSizeInBits();
14724     APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
14725     if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
14726         (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
14727       return SDValue();
14728     break;
14729   }
14730   case ISD::INTRINSIC_VOID:
14731   case ISD::INTRINSIC_W_CHAIN:
14732     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14733     case Intrinsic::arm_neon_vld1:
14734     case Intrinsic::arm_neon_vld1x2:
14735     case Intrinsic::arm_neon_vld1x3:
14736     case Intrinsic::arm_neon_vld1x4:
14737     case Intrinsic::arm_neon_vld2:
14738     case Intrinsic::arm_neon_vld3:
14739     case Intrinsic::arm_neon_vld4:
14740     case Intrinsic::arm_neon_vld2lane:
14741     case Intrinsic::arm_neon_vld3lane:
14742     case Intrinsic::arm_neon_vld4lane:
14743     case Intrinsic::arm_neon_vld2dup:
14744     case Intrinsic::arm_neon_vld3dup:
14745     case Intrinsic::arm_neon_vld4dup:
14746     case Intrinsic::arm_neon_vst1:
14747     case Intrinsic::arm_neon_vst1x2:
14748     case Intrinsic::arm_neon_vst1x3:
14749     case Intrinsic::arm_neon_vst1x4:
14750     case Intrinsic::arm_neon_vst2:
14751     case Intrinsic::arm_neon_vst3:
14752     case Intrinsic::arm_neon_vst4:
14753     case Intrinsic::arm_neon_vst2lane:
14754     case Intrinsic::arm_neon_vst3lane:
14755     case Intrinsic::arm_neon_vst4lane:
14756       return PerformVLDCombine(N, DCI);
14757     default: break;
14758     }
14759     break;
14760   }
14761   return SDValue();
14762 }
14763 
14764 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
14765                                                           EVT VT) const {
14766   return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
14767 }
14768 
14769 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
14770                                                        unsigned Alignment,
14771                                                        MachineMemOperand::Flags,
14772                                                        bool *Fast) const {
14773   // Depends on what it gets converted into if the type is weird.
14774   if (!VT.isSimple())
14775     return false;
14776 
14777   // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
14778   bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
14779   auto Ty = VT.getSimpleVT().SimpleTy;
14780 
14781   if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
14782     // Unaligned access can use (for example) LDRB, LDRH, LDR
14783     if (AllowsUnaligned) {
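      // Only report the access as fast on ARMv7 and above.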
14784       if (Fast)
14785         *Fast = Subtarget->hasV7Ops();
14786       return true;
14787     }
14788   }
14789 
14790   if (Ty == MVT::f64 || Ty == MVT::v2f64) {
14791     // For any little-endian targets with NEON, we can support unaligned ld/st
14792     // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
14793     // A big-endian target may also explicitly support unaligned accesses.
14794     if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
14795       if (Fast)
14796         *Fast = true;
14797       return true;
14798     }
14799   }
14800 
14801   if (!Subtarget->hasMVEIntegerOps())
14802     return false;
14803 
14804   // These are for predicates
14805   if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
14806     if (Fast)
14807       *Fast = true;
14808     return true;
14809   }
14810 
14811   // These are for truncated stores/narrowing loads. They are fine so long as
14812   // the alignment is at least the size of the item being loaded.
14813   if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
14814       Alignment >= VT.getScalarSizeInBits() / 8) {
14815     if (Fast)
14816       *Fast = true;
14817     return true;
14818   }
14819 
14820   // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
14821   // VSTRW.U32 all store the vector register in exactly the same format, and
14822   // differ only in the range of their immediate offset field and the required
14823   // alignment. So there is always a store that can be used, regardless of
14824   // actual type.
14825   //
14826   // For big endian, that is not the case. But we can still emit a (VSTRB.U8;
14827   // VREV64.8) pair and get the same effect. This will likely be better than
14828   // aligning the vector through the stack.
14829   if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
14830       Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
14831       Ty == MVT::v2f64) {
14832     if (Fast)
14833       *Fast = true;
14834     return true;
14835   }
14836 
14837   return false;
14838 }
14839 
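/// Return true if both the source and destination alignments are either
/// unknown (zero) or a multiple of AlignCheck bytes.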
14840 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
14841                        unsigned AlignCheck) {
14842   return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
14843           (DstAlign == 0 || DstAlign % AlignCheck == 0));
14844 }
14845 
14846 EVT ARMTargetLowering::getOptimalMemOpType(
14847     uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
14848     bool ZeroMemset, bool MemcpyStrSrc,
14849     const AttributeList &FuncAttributes) const {
14850   // See if we can use NEON instructions for this...
14851   if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
14852       !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
14853     bool Fast;
14854     if (Size >= 16 &&
14855         (memOpAlign(SrcAlign, DstAlign, 16) ||
14856          (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
14857                                          MachineMemOperand::MONone, &Fast) &&
14858           Fast))) {
14859       return MVT::v2f64;
14860     } else if (Size >= 8 &&
14861                (memOpAlign(SrcAlign, DstAlign, 8) ||
14862                 (allowsMisalignedMemoryAccesses(
14863                      MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
14864                  Fast))) {
14865       return MVT::f64;
14866     }
14867   }
14868 
14869   // Let the target-independent logic figure it out.
14870   return MVT::Other;
14871 }
14872 
14873 // 64-bit integers are split into their high and low parts and held in two
14874 // different registers, so the trunc is free since the low register can just
14875 // be used.
14876 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
14877   if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
14878     return false;
14879   unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
14880   unsigned DestBits = DstTy->getPrimitiveSizeInBits();
14881   return (SrcBits == 64 && DestBits == 32);
14882 }
14883 
14884 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
14885   if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
14886       !DstVT.isInteger())
14887     return false;
14888   unsigned SrcBits = SrcVT.getSizeInBits();
14889   unsigned DestBits = DstVT.getSizeInBits();
14890   return (SrcBits == 64 && DestBits == 32);
14891 }
14892 
14893 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
14894   if (Val.getOpcode() != ISD::LOAD)
14895     return false;
14896 
14897   EVT VT1 = Val.getValueType();
14898   if (!VT1.isSimple() || !VT1.isInteger() ||
14899       !VT2.isSimple() || !VT2.isInteger())
14900     return false;
14901 
14902   switch (VT1.getSimpleVT().SimpleTy) {
14903   default: break;
14904   case MVT::i1:
14905   case MVT::i8:
14906   case MVT::i16:
14907     // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
14908     return true;
14909   }
14910 
14911   return false;
14912 }
14913 
14914 bool ARMTargetLowering::isFNegFree(EVT VT) const {
14915   if (!VT.isSimple())
14916     return false;
14917 
14918   // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
14919   // negate values directly (fneg is free), so we don't want to let the DAG
14920   // combiner rewrite fneg into xors and some other instructions. For f16 and
14921   // FullFP16 argument passing, some bitcast nodes may be introduced that
14922   // would trigger this DAG combine rewrite; returning true here avoids that.
14923   switch (VT.getSimpleVT().SimpleTy) {
14924   default: break;
14925   case MVT::f16:
14926     return Subtarget->hasFullFP16();
14927   }
14928 
14929   return false;
14930 }
14931 
14932 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
14933 /// of the vector elements.
14934 static bool areExtractExts(Value *Ext1, Value *Ext2) {
14935   auto areExtDoubled = [](Instruction *Ext) {
14936     return Ext->getType()->getScalarSizeInBits() ==
14937            2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
14938   };
14939 
14940   if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
14941       !match(Ext2, m_ZExtOrSExt(m_Value())) ||
14942       !areExtDoubled(cast<Instruction>(Ext1)) ||
14943       !areExtDoubled(cast<Instruction>(Ext2)))
14944     return false;
14945 
14946   return true;
14947 }
14948 
14949 /// Check if sinking \p I's operands to I's basic block is profitable, because
14950 /// the operands can be folded into a target instruction, e.g.
14951 /// sext/zext can be folded into vsubl.
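/// For example, with NEON, a sub of two sign-extends such as (illustrative IR):
///   %xe = sext <8 x i8> %x to <8 x i16>
///   %ye = sext <8 x i8> %y to <8 x i16>
///   %s  = sub <8 x i16> %xe, %ye
/// can select as a single vsubl.s8 once both extends are sunk into its block.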
14952 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
14953                                            SmallVectorImpl<Use *> &Ops) const {
14954   if (!I->getType()->isVectorTy())
14955     return false;
14956 
14957   if (Subtarget->hasNEON()) {
14958     switch (I->getOpcode()) {
14959     case Instruction::Sub:
14960     case Instruction::Add: {
14961       if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
14962         return false;
14963       Ops.push_back(&I->getOperandUse(0));
14964       Ops.push_back(&I->getOperandUse(1));
14965       return true;
14966     }
14967     default:
14968       return false;
14969     }
14970   }
14971 
14972   if (!Subtarget->hasMVEIntegerOps())
14973     return false;
14974 
14975   auto IsSinker = [](Instruction *I, int Operand) {
14976     switch (I->getOpcode()) {
14977     case Instruction::Add:
14978     case Instruction::Mul:
14979     case Instruction::ICmp:
14980       return true;
14981     case Instruction::Sub:
14982     case Instruction::Shl:
14983     case Instruction::LShr:
14984     case Instruction::AShr:
14985       return Operand == 1;
14986     default:
14987       return false;
14988     }
14989   };
14990 
14991   int Op = 0;
14992   if (!isa<ShuffleVectorInst>(I->getOperand(Op)))
14993     Op = 1;
14994   if (!IsSinker(I, Op))
14995     return false;
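  // Match a splat: a shufflevector with an all-zeroes mask whose input is an
  // insertelement of a scalar into lane 0 of an undef vector.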
14996   if (!match(I->getOperand(Op),
14997              m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
14998                              m_Undef(), m_Zero()))) {
14999     return false;
15000   }
15001   Instruction *Shuffle = cast<Instruction>(I->getOperand(Op));
15002   // All uses of the shuffle should be sunk to avoid duplicating it across GPR
15003   // and vector registers.
15004   for (Use &U : Shuffle->uses()) {
15005     Instruction *Insn = cast<Instruction>(U.getUser());
15006     if (!IsSinker(Insn, U.getOperandNo()))
15007       return false;
15008   }
15009   Ops.push_back(&Shuffle->getOperandUse(0));
15010   Ops.push_back(&I->getOperandUse(Op));
15011   return true;
15012 }
15013 
15014 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
15015   EVT VT = ExtVal.getValueType();
15016 
15017   if (!isTypeLegal(VT))
15018     return false;
15019 
15020   if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) {
15021     if (Ld->isExpandingLoad())
15022       return false;
15023   }
15024 
15025   // Don't create a loadext if we can fold the extension into a wide/long
15026   // instruction.
15027   // If there's more than one user instruction, the loadext is desirable no
15028   // matter what.  There can be two uses by the same instruction.
15029   if (ExtVal->use_empty() ||
15030       !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
15031     return true;
15032 
15033   SDNode *U = *ExtVal->use_begin();
15034   if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
15035        U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
15036     return false;
15037 
15038   return true;
15039 }
15040 
15041 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
15042   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15043     return false;
15044 
15045   if (!isTypeLegal(EVT::getEVT(Ty1)))
15046     return false;
15047 
15048   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
15049 
15050   // Assuming the caller doesn't have a zeroext or signext return parameter,
15051   // truncation all the way down to i1 is valid.
15052   return true;
15053 }
15054 
15055 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
15056                                                 const AddrMode &AM, Type *Ty,
15057                                                 unsigned AS) const {
15058   if (isLegalAddressingMode(DL, AM, Ty, AS)) {
15059     if (Subtarget->hasFPAO())
15060       return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
15061     return 0;
15062   }
15063   return -1;
15064 }
15065 
15066 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
15067 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
15068 /// expanded to FMAs when this method returns true, otherwise fmuladd is
15069 /// expanded to fmul + fadd.
15070 ///
15071 /// ARM supports both fused and unfused multiply-add operations; we already
15072 /// lower a pair of fmul and fadd to the latter so it's not clear that there
15073 /// would be a gain or that the gain would be worthwhile enough to risk
15074 /// correctness bugs.
15075 ///
15076 /// For MVE, we set this to true as it helps simplify the need for some
15077 /// patterns (and we don't have the non-fused floating point instruction).
15078 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15079                                                    EVT VT) const {
15080   if (!VT.isSimple())
15081     return false;
15082 
15083   switch (VT.getSimpleVT().SimpleTy) {
15084   case MVT::v4f32:
15085   case MVT::v8f16:
15086     return Subtarget->hasMVEFloatOps();
15087   case MVT::f16:
15088     return Subtarget->useFPVFMx16();
15089   case MVT::f32:
15090     return Subtarget->useFPVFMx();
15091   case MVT::f64:
15092     return Subtarget->useFPVFMx64();
15093   default:
15094     break;
15095   }
15096 
15097   return false;
15098 }
15099 
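/// Thumb1 load/store offsets are unsigned 5-bit immediates scaled by the
/// access size (1, 2 or 4 bytes).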
15100 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
15101   if (V < 0)
15102     return false;
15103 
15104   unsigned Scale = 1;
15105   switch (VT.getSimpleVT().SimpleTy) {
15106   case MVT::i1:
15107   case MVT::i8:
15108     // Scale == 1;
15109     break;
15110   case MVT::i16:
15111     // Scale == 2;
15112     Scale = 2;
15113     break;
15114   default:
15115     // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
15116     // Scale == 4;
15117     Scale = 4;
15118     break;
15119   }
15120 
15121   if ((V & (Scale - 1)) != 0)
15122     return false;
15123   return isUInt<5>(V / Scale);
15124 }
15125 
15126 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
15127                                       const ARMSubtarget *Subtarget) {
15128   if (!VT.isInteger() && !VT.isFloatingPoint())
15129     return false;
15130   if (VT.isVector() && Subtarget->hasNEON())
15131     return false;
15132   if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
15133       !Subtarget->hasMVEFloatOps())
15134     return false;
15135 
15136   bool IsNeg = false;
15137   if (V < 0) {
15138     IsNeg = true;
15139     V = -V;
15140   }
15141 
15142   unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);
15143 
15144   // MVE: size * imm7
15145   if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
15146     switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
15147     case MVT::i32:
15148     case MVT::f32:
15149       return isShiftedUInt<7,2>(V);
15150     case MVT::i16:
15151     case MVT::f16:
15152       return isShiftedUInt<7,1>(V);
15153     case MVT::i8:
15154       return isUInt<7>(V);
15155     default:
15156       return false;
15157     }
15158   }
15159 
15160   // half VLDR: 2 * imm8
15161   if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
15162     return isShiftedUInt<8, 1>(V);
15163   // VLDR and LDRD: 4 * imm8
15164   if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
15165     return isShiftedUInt<8, 2>(V);
15166 
15167   if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
15168     // + imm12 or - imm8
15169     if (IsNeg)
15170       return isUInt<8>(V);
15171     return isUInt<12>(V);
15172   }
15173 
15174   return false;
15175 }
15176 
15177 /// isLegalAddressImmediate - Return true if the integer value can be used
15178 /// as the offset of the target addressing mode for load / store of the
15179 /// given type.
15180 static bool isLegalAddressImmediate(int64_t V, EVT VT,
15181                                     const ARMSubtarget *Subtarget) {
15182   if (V == 0)
15183     return true;
15184 
15185   if (!VT.isSimple())
15186     return false;
15187 
15188   if (Subtarget->isThumb1Only())
15189     return isLegalT1AddressImmediate(V, VT);
15190   else if (Subtarget->isThumb2())
15191     return isLegalT2AddressImmediate(V, VT, Subtarget);
15192 
15193   // ARM mode.
15194   if (V < 0)
15195     V = -V;
15196   switch (VT.getSimpleVT().SimpleTy) {
15197   default: return false;
15198   case MVT::i1:
15199   case MVT::i8:
15200   case MVT::i32:
15201     // +- imm12
15202     return isUInt<12>(V);
15203   case MVT::i16:
15204     // +- imm8
15205     return isUInt<8>(V);
15206   case MVT::f32:
15207   case MVT::f64:
15208     if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
15209       return false;
15210     return isShiftedUInt<8, 2>(V);
15211   }
15212 }
15213 
15214 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
15215                                                       EVT VT) const {
15216   int Scale = AM.Scale;
15217   if (Scale < 0)
15218     return false;
15219 
15220   switch (VT.getSimpleVT().SimpleTy) {
15221   default: return false;
15222   case MVT::i1:
15223   case MVT::i8:
15224   case MVT::i16:
15225   case MVT::i32:
15226     if (Scale == 1)
15227       return true;
15228     // r + r << imm
15229     Scale = Scale & ~1;
15230     return Scale == 2 || Scale == 4 || Scale == 8;
15231   case MVT::i64:
15232     // FIXME: What are we trying to model here? ldrd doesn't have an r + r
15233     // version in Thumb mode.
15234     // r + r
15235     if (Scale == 1)
15236       return true;
15237     // r * 2 (this can be lowered to r + r).
15238     if (!AM.HasBaseReg && Scale == 2)
15239       return true;
15240     return false;
15241   case MVT::isVoid:
15242     // Note, we allow "void" uses (basically, uses that aren't loads or
15243     // stores), because arm allows folding a scale into many arithmetic
15244     // operations.  This should be made more precise and revisited later.
15245 
15246     // Allow r << imm, but the imm has to be a multiple of two.
15247     if (Scale & 1) return false;
15248     return isPowerOf2_32(Scale);
15249   }
15250 }
15251 
15252 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
15253                                                       EVT VT) const {
15254   const int Scale = AM.Scale;
15255 
15256   // Negative scales are not supported in Thumb1.
15257   if (Scale < 0)
15258     return false;
15259 
15260   // Thumb1 addressing modes do not support register scaling excepting the
15261   // following cases:
15262   // 1. Scale == 1 means no scaling.
15263   // 2. Scale == 2 this can be lowered to r + r if there is no base register.
15264   return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
15265 }
15266 
15267 /// isLegalAddressingMode - Return true if the addressing mode represented
15268 /// by AM is legal for this target, for a load/store of the specified type.
15269 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15270                                               const AddrMode &AM, Type *Ty,
15271                                               unsigned AS, Instruction *I) const {
15272   EVT VT = getValueType(DL, Ty, true);
15273   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
15274     return false;
15275 
15276   // Can never fold addr of global into load/store.
15277   if (AM.BaseGV)
15278     return false;
15279 
15280   switch (AM.Scale) {
15281   case 0:  // no scale reg, must be "r+i" or "r", or "i".
15282     break;
15283   default:
15284     // ARM doesn't support any R+R*scale+imm addr modes.
15285     if (AM.BaseOffs)
15286       return false;
15287 
15288     if (!VT.isSimple())
15289       return false;
15290 
15291     if (Subtarget->isThumb1Only())
15292       return isLegalT1ScaledAddressingMode(AM, VT);
15293 
15294     if (Subtarget->isThumb2())
15295       return isLegalT2ScaledAddressingMode(AM, VT);
15296 
15297     int Scale = AM.Scale;
15298     switch (VT.getSimpleVT().SimpleTy) {
15299     default: return false;
15300     case MVT::i1:
15301     case MVT::i8:
15302     case MVT::i32:
15303       if (Scale < 0) Scale = -Scale;
15304       if (Scale == 1)
15305         return true;
15306       // r + r << imm
15307       return isPowerOf2_32(Scale & ~1);
15308     case MVT::i16:
15309     case MVT::i64:
15310       // r +/- r
15311       if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
15312         return true;
15313       // r * 2 (this can be lowered to r + r).
15314       if (!AM.HasBaseReg && Scale == 2)
15315         return true;
15316       return false;
15317 
15318     case MVT::isVoid:
15319       // Note, we allow "void" uses (basically, uses that aren't loads or
15320       // stores), because arm allows folding a scale into many arithmetic
15321       // operations.  This should be made more precise and revisited later.
15322 
15323       // Allow r << imm, but the imm has to be a multiple of two.
15324       if (Scale & 1) return false;
15325       return isPowerOf2_32(Scale);
15326     }
15327   }
15328   return true;
15329 }
15330 
15331 /// isLegalICmpImmediate - Return true if the specified immediate is legal
15332 /// icmp immediate, that is the target has icmp instructions which can compare
15333 /// a register against the immediate without having to materialize the
15334 /// immediate into a register.
15335 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15336   // Thumb2 and ARM modes can use cmn for negative immediates.
15337   if (!Subtarget->isThumb())
15338     return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
15339            ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
15340   if (Subtarget->isThumb2())
15341     return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
15342            ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
15343   // Thumb1 doesn't have cmn, and has only 8-bit immediates.
15344   return Imm >= 0 && Imm <= 255;
15345 }
15346 
15347 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
15348 /// *or sub* immediate, that is the target has add or sub instructions which can
15349 /// add a register with the immediate without having to materialize the
15350 /// immediate into a register.
15351 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15352   // Same encoding for add/sub, just flip the sign.
15353   int64_t AbsImm = std::abs(Imm);
15354   if (!Subtarget->isThumb())
15355     return ARM_AM::getSOImmVal(AbsImm) != -1;
15356   if (Subtarget->isThumb2())
15357     return ARM_AM::getT2SOImmVal(AbsImm) != -1;
15358   // Thumb1 only has 8-bit unsigned immediates.
15359   return AbsImm <= 255;
15360 }
15361 
15362 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
15363                                       bool isSEXTLoad, SDValue &Base,
15364                                       SDValue &Offset, bool &isInc,
15365                                       SelectionDAG &DAG) {
15366   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
15367     return false;
15368 
15369   if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
15370     // AddressingMode 3
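    // (LDRH/LDRSB/LDRSH and friends: base plus/minus a register or an 8-bit
    // immediate offset.)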
15371     Base = Ptr->getOperand(0);
15372     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
15373       int RHSC = (int)RHS->getZExtValue();
15374       if (RHSC < 0 && RHSC > -256) {
15375         assert(Ptr->getOpcode() == ISD::ADD);
15376         isInc = false;
15377         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
15378         return true;
15379       }
15380     }
15381     isInc = (Ptr->getOpcode() == ISD::ADD);
15382     Offset = Ptr->getOperand(1);
15383     return true;
15384   } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
15385     // AddressingMode 2
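    // (LDR/LDRB/STR/STRB: base plus/minus a 12-bit immediate or a shifted
    // register offset.)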
15386     if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
15387       int RHSC = (int)RHS->getZExtValue();
15388       if (RHSC < 0 && RHSC > -0x1000) {
15389         assert(Ptr->getOpcode() == ISD::ADD);
15390         isInc = false;
15391         Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
15392         Base = Ptr->getOperand(0);
15393         return true;
15394       }
15395     }
15396 
15397     if (Ptr->getOpcode() == ISD::ADD) {
15398       isInc = true;
15399       ARM_AM::ShiftOpc ShOpcVal =
15400         ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
15401       if (ShOpcVal != ARM_AM::no_shift) {
15402         Base = Ptr->getOperand(1);
15403         Offset = Ptr->getOperand(0);
15404       } else {
15405         Base = Ptr->getOperand(0);
15406         Offset = Ptr->getOperand(1);
15407       }
15408       return true;
15409     }
15410 
15411     isInc = (Ptr->getOpcode() == ISD::ADD);
15412     Base = Ptr->getOperand(0);
15413     Offset = Ptr->getOperand(1);
15414     return true;
15415   }
15416 
15417   // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
15418   return false;
15419 }
15420 
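/// Thumb2 pre/post-indexed loads and stores encode their offset as an 8-bit
/// immediate, applied as either an increment or a decrement; only constant
/// offsets in that range can be matched here.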
15421 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
15422                                      bool isSEXTLoad, SDValue &Base,
15423                                      SDValue &Offset, bool &isInc,
15424                                      SelectionDAG &DAG) {
15425   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
15426     return false;
15427 
15428   Base = Ptr->getOperand(0);
15429   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
15430     int RHSC = (int)RHS->getZExtValue();
15431     if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
15432       assert(Ptr->getOpcode() == ISD::ADD);
15433       isInc = false;
15434       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
15435       return true;
15436     } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
15437       isInc = Ptr->getOpcode() == ISD::ADD;
15438       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
15439       return true;
15440     }
15441   }
15442 
15443   return false;
15444 }
15445 
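/// MVE pre/post-indexed accesses encode their offset as a 7-bit immediate
/// scaled by the access size (1, 2 or 4 bytes), which is what the IsInRange
/// checks below model.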
15446 static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align,
15447                                       bool isSEXTLoad, bool IsMasked, bool isLE,
15448                                       SDValue &Base, SDValue &Offset,
15449                                       bool &isInc, SelectionDAG &DAG) {
15450   if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
15451     return false;
15452   if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
15453     return false;
15454 
15455   // We allow LE non-masked loads and stores to change the type (for example,
15456   // use a vldrb.8 as opposed to a vldrw.32). This can allow extra addressing
15457   // modes or alignments for what is otherwise an equivalent instruction.
15458   bool CanChangeType = isLE && !IsMasked;
15459 
15460   ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
15461   int RHSC = (int)RHS->getZExtValue();
15462 
15463   auto IsInRange = [&](int RHSC, int Limit, int Scale) {
15464     if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
15465       assert(Ptr->getOpcode() == ISD::ADD);
15466       isInc = false;
15467       Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
15468       return true;
15469     } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
15470       isInc = Ptr->getOpcode() == ISD::ADD;
15471       Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
15472       return true;
15473     }
15474     return false;
15475   };
15476 
15477   // Try to find a matching instruction based on s/zext, alignment, offset and
15478   // (for BE or masked accesses) the exact type.
15479   Base = Ptr->getOperand(0);
15480   if (VT == MVT::v4i16) {
15481     if (Align >= 2 && IsInRange(RHSC, 0x80, 2))
15482       return true;
15483   } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
15484     if (IsInRange(RHSC, 0x80, 1))
15485       return true;
15486   } else if (Align >= 4 &&
15487              (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
15488              IsInRange(RHSC, 0x80, 4))
15489     return true;
15490   else if (Align >= 2 &&
15491            (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
15492            IsInRange(RHSC, 0x80, 2))
15493     return true;
15494   else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
15495     return true;
15496   return false;
15497 }
15498 
15499 /// getPreIndexedAddressParts - returns true by value, base pointer and
15500 /// offset pointer and addressing mode by reference if the node's address
15501 /// can be legally represented as pre-indexed load / store address.
15502 bool
15503 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
15504                                              SDValue &Offset,
15505                                              ISD::MemIndexedMode &AM,
15506                                              SelectionDAG &DAG) const {
15507   if (Subtarget->isThumb1Only())
15508     return false;
15509 
15510   EVT VT;
15511   SDValue Ptr;
15512   unsigned Align;
15513   bool isSEXTLoad = false;
15514   bool IsMasked = false;
15515   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
15516     Ptr = LD->getBasePtr();
15517     VT = LD->getMemoryVT();
15518     Align = LD->getAlignment();
15519     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
15520   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
15521     Ptr = ST->getBasePtr();
15522     VT = ST->getMemoryVT();
15523     Align = ST->getAlignment();
15524   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
15525     Ptr = LD->getBasePtr();
15526     VT = LD->getMemoryVT();
15527     Align = LD->getAlignment();
15528     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
15529     IsMasked = true;
15530   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
15531     Ptr = ST->getBasePtr();
15532     VT = ST->getMemoryVT();
15533     Align = ST->getAlignment();
15534     IsMasked = true;
15535   } else
15536     return false;
15537 
15538   bool isInc;
15539   bool isLegal = false;
15540   if (VT.isVector())
15541     isLegal = Subtarget->hasMVEIntegerOps() &&
15542               getMVEIndexedAddressParts(Ptr.getNode(), VT, Align, isSEXTLoad,
15543                                         IsMasked, Subtarget->isLittle(), Base,
15544                                         Offset, isInc, DAG);
15545   else {
15546     if (Subtarget->isThumb2())
15547       isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
15548                                          Offset, isInc, DAG);
15549     else
15550       isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
15551                                           Offset, isInc, DAG);
15552   }
15553   if (!isLegal)
15554     return false;
15555 
15556   AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
15557   return true;
15558 }
15559 
15560 /// getPostIndexedAddressParts - returns true by value, base pointer and
15561 /// offset pointer and addressing mode by reference if this node can be
15562 /// combined with a load / store to form a post-indexed load / store.
15563 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
15564                                                    SDValue &Base,
15565                                                    SDValue &Offset,
15566                                                    ISD::MemIndexedMode &AM,
15567                                                    SelectionDAG &DAG) const {
15568   EVT VT;
15569   SDValue Ptr;
15570   unsigned Align;
15571   bool isSEXTLoad = false, isNonExt;
15572   bool IsMasked = false;
15573   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
15574     VT = LD->getMemoryVT();
15575     Ptr = LD->getBasePtr();
15576     Align = LD->getAlignment();
15577     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
15578     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
15579   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
15580     VT = ST->getMemoryVT();
15581     Ptr = ST->getBasePtr();
15582     Align = ST->getAlignment();
15583     isNonExt = !ST->isTruncatingStore();
15584   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
15585     VT = LD->getMemoryVT();
15586     Ptr = LD->getBasePtr();
15587     Align = LD->getAlignment();
15588     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
15589     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
15590     IsMasked = true;
15591   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
15592     VT = ST->getMemoryVT();
15593     Ptr = ST->getBasePtr();
15594     Align = ST->getAlignment();
15595     isNonExt = !ST->isTruncatingStore();
15596     IsMasked = true;
15597   } else
15598     return false;
15599 
15600   if (Subtarget->isThumb1Only()) {
15601     // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
15602     // must be a non-extending load or non-truncating store, i32, with an offset of 4.
15603     assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
15604     if (Op->getOpcode() != ISD::ADD || !isNonExt)
15605       return false;
15606     auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
15607     if (!RHS || RHS->getZExtValue() != 4)
15608       return false;
15609 
15610     Offset = Op->getOperand(1);
15611     Base = Op->getOperand(0);
15612     AM = ISD::POST_INC;
15613     return true;
15614   }
15615 
15616   bool isInc;
15617   bool isLegal = false;
15618   if (VT.isVector())
15619     isLegal = Subtarget->hasMVEIntegerOps() &&
15620               getMVEIndexedAddressParts(Op, VT, Align, isSEXTLoad, IsMasked,
15621                                         Subtarget->isLittle(), Base, Offset,
15622                                         isInc, DAG);
15623   else {
15624     if (Subtarget->isThumb2())
15625       isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
15626                                          isInc, DAG);
15627     else
15628       isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
15629                                           isInc, DAG);
15630   }
15631   if (!isLegal)
15632     return false;
15633 
15634   if (Ptr != Base) {
15635     // Swap base ptr and offset to catch more post-index load / store when
15636     // it's legal. In Thumb2 mode, offset must be an immediate.
15637     if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
15638         !Subtarget->isThumb2())
15639       std::swap(Base, Offset);
15640 
15641     // Post-indexed load / store update the base pointer.
15642     if (Ptr != Base)
15643       return false;
15644   }
15645 
15646   AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
15647   return true;
15648 }
15649 
15650 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15651                                                       KnownBits &Known,
15652                                                       const APInt &DemandedElts,
15653                                                       const SelectionDAG &DAG,
15654                                                       unsigned Depth) const {
15655   unsigned BitWidth = Known.getBitWidth();
15656   Known.resetAll();
15657   switch (Op.getOpcode()) {
15658   default: break;
15659   case ARMISD::ADDC:
15660   case ARMISD::ADDE:
15661   case ARMISD::SUBC:
15662   case ARMISD::SUBE:
15663     // Special cases when we convert a carry to a boolean.
15664     if (Op.getResNo() == 0) {
15665       SDValue LHS = Op.getOperand(0);
15666       SDValue RHS = Op.getOperand(1);
15667       // (ADDE 0, 0, C) will give us a single bit.
15668       if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
15669           isNullConstant(RHS)) {
15670         Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
15671         return;
15672       }
15673     }
15674     break;
15675   case ARMISD::CMOV: {
15676     // Bits are known zero/one if known on the LHS and RHS.
15677     Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
15678     if (Known.isUnknown())
15679       return;
15680 
15681     KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
15682     Known.Zero &= KnownRHS.Zero;
15683     Known.One  &= KnownRHS.One;
15684     return;
15685   }
15686   case ISD::INTRINSIC_W_CHAIN: {
15687     ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
15688     Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
15689     switch (IntID) {
15690     default: return;
15691     case Intrinsic::arm_ldaex:
15692     case Intrinsic::arm_ldrex: {
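      // Sub-word ldrex/ldaex zero-extend the loaded value, so every bit above
      // the memory width is known to be zero.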
15693       EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
15694       unsigned MemBits = VT.getScalarSizeInBits();
15695       Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
15696       return;
15697     }
15698     }
15699   }
15700   case ARMISD::BFI: {
15701     // Conservatively, we can recurse down the first operand
15702     // and just mask out all affected bits.
15703     Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
15704 
15705     // The operand to BFI is already a mask suitable for removing the bits it
15706     // sets.
15707     ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
15708     const APInt &Mask = CI->getAPIntValue();
15709     Known.Zero &= Mask;
15710     Known.One &= Mask;
15711     return;
15712   }
15713   case ARMISD::VGETLANEs:
15714   case ARMISD::VGETLANEu: {
15715     const SDValue &SrcSV = Op.getOperand(0);
15716     EVT VecVT = SrcSV.getValueType();
15717     assert(VecVT.isVector() && "VGETLANE expected a vector type");
15718     const unsigned NumSrcElts = VecVT.getVectorNumElements();
15719     ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
15720     assert(Pos->getAPIntValue().ult(NumSrcElts) &&
15721            "VGETLANE index out of bounds");
15722     unsigned Idx = Pos->getZExtValue();
15723     APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
15724     Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);
15725 
15726     EVT VT = Op.getValueType();
15727     const unsigned DstSz = VT.getScalarSizeInBits();
15728     const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
15729     (void)SrcSz;
15730     assert(SrcSz == Known.getBitWidth());
15731     assert(DstSz > SrcSz);
15732     if (Op.getOpcode() == ARMISD::VGETLANEs)
15733       Known = Known.sext(DstSz);
15734     else {
15735       Known = Known.zext(DstSz, true /* extended bits are known zero */);
15736     }
15737     assert(DstSz == Known.getBitWidth());
15738     break;
15739   }
15740   }
15741 }
15742 
15743 bool
15744 ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op,
15745                                                 const APInt &DemandedAPInt,
15746                                                 TargetLoweringOpt &TLO) const {
15747   // Delay this optimization so we don't have to deal with illegal types, or
15748   // block other optimizations.
15749   if (!TLO.LegalOps)
15750     return false;
15751 
15752   // Only optimize AND for now.
15753   if (Op.getOpcode() != ISD::AND)
15754     return false;
15755 
15756   EVT VT = Op.getValueType();
15757 
15758   // Ignore vectors.
15759   if (VT.isVector())
15760     return false;
15761 
15762   assert(VT == MVT::i32 && "Unexpected integer type");
15763 
15764   // Make sure the RHS really is a constant.
15765   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
15766   if (!C)
15767     return false;
15768 
15769   unsigned Mask = C->getZExtValue();
15770 
15771   unsigned Demanded = DemandedAPInt.getZExtValue();
15772   unsigned ShrunkMask = Mask & Demanded;
15773   unsigned ExpandedMask = Mask | ~Demanded;
15774 
15775   // If the mask is all zeros, let the target-independent code replace the
15776   // result with zero.
15777   if (ShrunkMask == 0)
15778     return false;
15779 
15780   // If the mask is all ones, erase the AND. (Currently, the target-independent
15781   // code won't do this, so we have to do it explicitly to avoid an infinite
15782   // loop in obscure cases.)
15783   if (ExpandedMask == ~0U)
15784     return TLO.CombineTo(Op, Op.getOperand(0));
15785 
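  // A replacement mask is usable if it keeps every demanded bit the original
  // mask kept (it covers ShrunkMask) and clears every demanded bit the
  // original mask cleared (it sets nothing outside ExpandedMask).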
15786   auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
15787     return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
15788   };
15789   auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
15790     if (NewMask == Mask)
15791       return true;
15792     SDLoc DL(Op);
15793     SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
15794     SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
15795     return TLO.CombineTo(Op, NewOp);
15796   };
15797 
15798   // Prefer uxtb mask.
15799   if (IsLegalMask(0xFF))
15800     return UseMask(0xFF);
15801 
15802   // Prefer uxth mask.
15803   if (IsLegalMask(0xFFFF))
15804     return UseMask(0xFFFF);
15805 
15806   // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
15807   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
15808   if (ShrunkMask < 256)
15809     return UseMask(ShrunkMask);
15810 
15811   // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
15812   // FIXME: Prefer a contiguous sequence of bits for other optimizations.
15813   if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
15814     return UseMask(ExpandedMask);
15815 
15816   // Potential improvements:
15817   //
15818   // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
15819   // We could try to prefer Thumb1 immediates which can be lowered to a
15820   // two-instruction sequence.
15821   // We could try to recognize more legal ARM/Thumb2 immediates here.
15822 
15823   return false;
15824 }
15825 
15826 
15827 //===----------------------------------------------------------------------===//
15828 //                           ARM Inline Assembly Support
15829 //===----------------------------------------------------------------------===//
15830 
15831 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
15832   // Looking for "rev" which is V6+.
15833   if (!Subtarget->hasV6Ops())
15834     return false;
15835 
15836   InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
15837   std::string AsmStr = IA->getAsmString();
15838   SmallVector<StringRef, 4> AsmPieces;
15839   SplitString(AsmStr, AsmPieces, ";\n");
15840 
15841   switch (AsmPieces.size()) {
15842   default: return false;
15843   case 1:
15844     AsmStr = AsmPieces[0];
15845     AsmPieces.clear();
15846     SplitString(AsmStr, AsmPieces, " \t,");
15847 
15848     // rev $0, $1
15849     if (AsmPieces.size() == 3 &&
15850         AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
15851         IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
15852       IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
15853       if (Ty && Ty->getBitWidth() == 32)
15854         return IntrinsicLowering::LowerToByteSwap(CI);
15855     }
15856     break;
15857   }
15858 
15859   return false;
15860 }
15861 
15862 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
15863   // At this point, we have to lower this constraint to something else, so we
15864   // lower it to an "r" or "w". However, by doing this we will force the result
15865   // to be in a register, while the X constraint is much more permissive.
15866   //
15867   // Although we are correct (we are free to emit anything, without
15868   // constraints), we might break use cases that would expect us to be more
15869   // efficient and emit something else.
15870   if (!Subtarget->hasVFP2Base())
15871     return "r";
15872   if (ConstraintVT.isFloatingPoint())
15873     return "w";
15874   if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
15875      (ConstraintVT.getSizeInBits() == 64 ||
15876       ConstraintVT.getSizeInBits() == 128))
15877     return "w";
15878 
15879   return "r";
15880 }
15881 
15882 /// getConstraintType - Given a constraint letter, return the type of
15883 /// constraint it is for this target.
15884 ARMTargetLowering::ConstraintType
15885 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
15886   unsigned S = Constraint.size();
15887   if (S == 1) {
15888     switch (Constraint[0]) {
15889     default:  break;
15890     case 'l': return C_RegisterClass;
15891     case 'w': return C_RegisterClass;
15892     case 'h': return C_RegisterClass;
15893     case 'x': return C_RegisterClass;
15894     case 't': return C_RegisterClass;
15895     case 'j': return C_Immediate; // Constant for movw.
15896     // An address with a single base register. Due to the way we
15897     // currently handle addresses it is the same as an 'r' memory constraint.
15898     case 'Q': return C_Memory;
15899     }
15900   } else if (S == 2) {
15901     switch (Constraint[0]) {
15902     default: break;
15903     case 'T': return C_RegisterClass;
15904     // All 'U+' constraints are addresses.
15905     case 'U': return C_Memory;
15906     }
15907   }
15908   return TargetLowering::getConstraintType(Constraint);
15909 }
15910 
15911 /// Examine constraint type and operand type and determine a weight value.
15912 /// This object must already have been set up with the operand type
15913 /// and the current alternative constraint selected.
15914 TargetLowering::ConstraintWeight
15915 ARMTargetLowering::getSingleConstraintMatchWeight(
15916     AsmOperandInfo &info, const char *constraint) const {
15917   ConstraintWeight weight = CW_Invalid;
15918   Value *CallOperandVal = info.CallOperandVal;
15919     // If we don't have a value, we can't do a match,
15920     // but allow it at the lowest weight.
15921   if (!CallOperandVal)
15922     return CW_Default;
15923   Type *type = CallOperandVal->getType();
15924   // Look at the constraint type.
15925   switch (*constraint) {
15926   default:
15927     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15928     break;
15929   case 'l':
15930     if (type->isIntegerTy()) {
15931       if (Subtarget->isThumb())
15932         weight = CW_SpecificReg;
15933       else
15934         weight = CW_Register;
15935     }
15936     break;
15937   case 'w':
15938     if (type->isFloatingPointTy())
15939       weight = CW_Register;
15940     break;
15941   }
15942   return weight;
15943 }
15944 
15945 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
15946 
15947 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
15948     const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
15949   switch (Constraint.size()) {
15950   case 1:
15951     // GCC ARM Constraint Letters
15952     switch (Constraint[0]) {
15953     case 'l': // Low regs or general regs.
15954       if (Subtarget->isThumb())
15955         return RCPair(0U, &ARM::tGPRRegClass);
15956       return RCPair(0U, &ARM::GPRRegClass);
15957     case 'h': // High regs or no regs.
15958       if (Subtarget->isThumb())
15959         return RCPair(0U, &ARM::hGPRRegClass);
15960       break;
15961     case 'r':
15962       if (Subtarget->isThumb1Only())
15963         return RCPair(0U, &ARM::tGPRRegClass);
15964       return RCPair(0U, &ARM::GPRRegClass);
15965     case 'w':
15966       if (VT == MVT::Other)
15967         break;
15968       if (VT == MVT::f32)
15969         return RCPair(0U, &ARM::SPRRegClass);
15970       if (VT.getSizeInBits() == 64)
15971         return RCPair(0U, &ARM::DPRRegClass);
15972       if (VT.getSizeInBits() == 128)
15973         return RCPair(0U, &ARM::QPRRegClass);
15974       break;
15975     case 'x':
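      // 'x' is like 'w', but restricted to the lower VFP registers
      // (s0-s15, d0-d7, q0-q3).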
15976       if (VT == MVT::Other)
15977         break;
15978       if (VT == MVT::f32)
15979         return RCPair(0U, &ARM::SPR_8RegClass);
15980       if (VT.getSizeInBits() == 64)
15981         return RCPair(0U, &ARM::DPR_8RegClass);
15982       if (VT.getSizeInBits() == 128)
15983         return RCPair(0U, &ARM::QPR_8RegClass);
15984       break;
15985     case 't':
15986       if (VT == MVT::Other)
15987         break;
15988       if (VT == MVT::f32 || VT == MVT::i32)
15989         return RCPair(0U, &ARM::SPRRegClass);
15990       if (VT.getSizeInBits() == 64)
15991         return RCPair(0U, &ARM::DPR_VFP2RegClass);
15992       if (VT.getSizeInBits() == 128)
15993         return RCPair(0U, &ARM::QPR_VFP2RegClass);
15994       break;
15995     }
15996     break;
15997 
15998   case 2:
15999     if (Constraint[0] == 'T') {
16000       switch (Constraint[1]) {
16001       default:
16002         break;
16003       case 'e':
16004         return RCPair(0U, &ARM::tGPREvenRegClass);
16005       case 'o':
16006         return RCPair(0U, &ARM::tGPROddRegClass);
16007       }
16008     }
16009     break;
16010 
16011   default:
16012     break;
16013   }
16014 
16015   if (StringRef("{cc}").equals_lower(Constraint))
16016     return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
16017 
16018   return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
16019 }
16020 
16021 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
16022 /// vector.  If it is invalid, don't add anything to Ops.
16023 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
16024                                                      std::string &Constraint,
16025                                                      std::vector<SDValue>&Ops,
16026                                                      SelectionDAG &DAG) const {
16027   SDValue Result;
16028 
16029   // Currently only support length 1 constraints.
16030   if (Constraint.length() != 1) return;
16031 
16032   char ConstraintLetter = Constraint[0];
16033   switch (ConstraintLetter) {
16034   default: break;
16035   case 'j':
16036   case 'I': case 'J': case 'K': case 'L':
16037   case 'M': case 'N': case 'O':
16038     ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
16039     if (!C)
16040       return;
16041 
16042     int64_t CVal64 = C->getSExtValue();
16043     int CVal = (int) CVal64;
16044     // None of these constraints allow values larger than 32 bits.  Check
16045     // that the value fits in an int.
16046     if (CVal != CVal64)
16047       return;
16048 
16049     switch (ConstraintLetter) {
16050       case 'j':
16051         // Constant suitable for movw, must be between 0 and
16052         // 65535.
16053         if (Subtarget->hasV6T2Ops() || Subtarget->hasV8MBaselineOps())
16054           if (CVal >= 0 && CVal <= 65535)
16055             break;
16056         return;
16057       case 'I':
16058         if (Subtarget->isThumb1Only()) {
16059           // This must be a constant between 0 and 255, for ADD
16060           // immediates.
16061           if (CVal >= 0 && CVal <= 255)
16062             break;
16063         } else if (Subtarget->isThumb2()) {
16064           // A constant that can be used as an immediate value in a
16065           // data-processing instruction.
16066           if (ARM_AM::getT2SOImmVal(CVal) != -1)
16067             break;
16068         } else {
16069           // A constant that can be used as an immediate value in a
16070           // data-processing instruction.
16071           if (ARM_AM::getSOImmVal(CVal) != -1)
16072             break;
16073         }
16074         return;
16075 
16076       case 'J':
16077         if (Subtarget->isThumb1Only()) {
16078           // This must be a constant between -255 and -1, for negated ADD
16079           // immediates. This can be used in GCC with an "n" modifier that
16080           // prints the negated value, for use with SUB instructions. It is
16081           // not useful otherwise but is implemented for compatibility.
16082           if (CVal >= -255 && CVal <= -1)
16083             break;
16084         } else {
16085           // This must be a constant between -4095 and 4095. It is not clear
16086           // what this constraint is intended for. Implemented for
16087           // compatibility with GCC.
16088           if (CVal >= -4095 && CVal <= 4095)
16089             break;
16090         }
16091         return;
16092 
16093       case 'K':
16094         if (Subtarget->isThumb1Only()) {
16095           // A 32-bit value where only one byte has a nonzero value. Exclude
16096           // zero to match GCC. This constraint is used by GCC internally for
16097           // constants that can be loaded with a move/shift combination.
16098           // It is not useful otherwise but is implemented for compatibility.
16099           if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
16100             break;
16101         } else if (Subtarget->isThumb2()) {
16102           // A constant whose bitwise inverse can be used as an immediate
16103           // value in a data-processing instruction. This can be used in GCC
16104           // with a "B" modifier that prints the inverted value, for use with
16105           // BIC and MVN instructions. It is not useful otherwise but is
16106           // implemented for compatibility.
16107           if (ARM_AM::getT2SOImmVal(~CVal) != -1)
16108             break;
16109         } else {
16110           // A constant whose bitwise inverse can be used as an immediate
16111           // value in a data-processing instruction. This can be used in GCC
16112           // with a "B" modifier that prints the inverted value, for use with
16113           // BIC and MVN instructions. It is not useful otherwise but is
16114           // implemented for compatibility.
16115           if (ARM_AM::getSOImmVal(~CVal) != -1)
16116             break;
16117         }
16118         return;
16119 
16120       case 'L':
16121         if (Subtarget->isThumb1Only()) {
16122           // This must be a constant between -7 and 7,
16123           // for 3-operand ADD/SUB immediate instructions.
16124           if (CVal >= -7 && CVal <= 7)
16125             break;
16126         } else if (Subtarget->isThumb2()) {
16127           // A constant whose negation can be used as an immediate value in a
16128           // data-processing instruction. This can be used in GCC with an "n"
16129           // modifier that prints the negated value, for use with SUB
16130           // instructions. It is not useful otherwise but is implemented for
16131           // compatibility.
16132           if (ARM_AM::getT2SOImmVal(-CVal) != -1)
16133             break;
16134         } else {
16135           // A constant whose negation can be used as an immediate value in a
16136           // data-processing instruction. This can be used in GCC with an "n"
16137           // modifier that prints the negated value, for use with SUB
16138           // instructions. It is not useful otherwise but is implemented for
16139           // compatibility.
16140           if (ARM_AM::getSOImmVal(-CVal) != -1)
16141             break;
16142         }
16143         return;
16144 
16145       case 'M':
16146         if (Subtarget->isThumb1Only()) {
16147           // This must be a multiple of 4 between 0 and 1020, for
16148           // ADD sp + immediate.
16149           if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
16150             break;
16151         } else {
16152           // A power of two or a constant between 0 and 32.  This is used in
16153           // GCC for the shift amount on shifted register operands, but it is
16154           // useful in general for any shift amounts.
16155           if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
16156             break;
16157         }
16158         return;
16159 
16160       case 'N':
16161         if (Subtarget->isThumb1Only()) {
16162           // This must be a constant between 0 and 31, for shift amounts.
16163           if (CVal >= 0 && CVal <= 31)
16164             break;
16165         }
16166         return;
16167 
16168       case 'O':
16169         if (Subtarget->isThumb1Only()) {
16170           // This must be a multiple of 4 between -508 and 508, for
16171           // ADD/SUB sp = sp + immediate.
16172           if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
16173             break;
16174         }
16175         return;
16176     }
16177     Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
16178     break;
16179   }
16180 
16181   if (Result.getNode()) {
16182     Ops.push_back(Result);
16183     return;
16184   }
16185   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
16186 }
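
// For illustration, the constraints handled above arrive from C inline
// assembly; a hypothetical snippet such as
//
//   int add200(int x) {
//     __asm__("adds %0, %0, %1" : "+r"(x) : "I"(200));
//     return x;
//   }
//
// passes the 'I' check on Thumb1 (0..255), while "I"(300) would fail the
// check and nothing would be added to Ops.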
16187 
16188 static RTLIB::Libcall getDivRemLibcall(
16189     const SDNode *N, MVT::SimpleValueType SVT) {
16190   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
16191           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
16192          "Unhandled Opcode in getDivRemLibcall");
16193   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
16194                   N->getOpcode() == ISD::SREM;
16195   RTLIB::Libcall LC;
16196   switch (SVT) {
16197   default: llvm_unreachable("Unexpected request for libcall!");
16198   case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
16199   case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
16200   case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
16201   case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
16202   }
16203   return LC;
16204 }
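
// On AEABI targets the RTLIB entries returned above are remapped to the
// RTABI helpers earlier in this file, so, e.g., SDIVREM_I32 is expected to
// resolve to __aeabi_idivmod and UDIVREM_I32 to __aeabi_uidivmod.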
16205 
16206 static TargetLowering::ArgListTy getDivRemArgList(
16207     const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
16208   assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
16209           N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
16210          "Unhandled Opcode in getDivRemArgList");
16211   bool isSigned = N->getOpcode() == ISD::SDIVREM ||
16212                   N->getOpcode() == ISD::SREM;
16213   TargetLowering::ArgListTy Args;
16214   TargetLowering::ArgListEntry Entry;
16215   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
16216     EVT ArgVT = N->getOperand(i).getValueType();
16217     Type *ArgTy = ArgVT.getTypeForEVT(*Context);
16218     Entry.Node = N->getOperand(i);
16219     Entry.Ty = ArgTy;
16220     Entry.IsSExt = isSigned;
16221     Entry.IsZExt = !isSigned;
16222     Args.push_back(Entry);
16223   }
16224   if (Subtarget->isTargetWindows() && Args.size() >= 2)
16225     std::swap(Args[0], Args[1]);
16226   return Args;
16227 }
16228 
16229 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
16230   assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
16231           Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
16232           Subtarget->isTargetWindows()) &&
16233          "Register-based DivRem lowering only");
16234   unsigned Opcode = Op->getOpcode();
16235   assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
16236          "Invalid opcode for Div/Rem lowering");
16237   bool isSigned = (Opcode == ISD::SDIVREM);
16238   EVT VT = Op->getValueType(0);
16239   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
16240   SDLoc dl(Op);
16241 
16242   // If the target has hardware divide, use divide + multiply + subtract:
16243   //     div = a / b
16244   //     rem = a - b * div
16245   //     return {div, rem}
16246   // This should be lowered into UDIV/SDIV + MLS later on.
16247   bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
16248                                         : Subtarget->hasDivideInARMMode();
16249   if (hasDivide && Op->getValueType(0).isSimple() &&
16250       Op->getSimpleValueType(0) == MVT::i32) {
16251     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
16252     const SDValue Dividend = Op->getOperand(0);
16253     const SDValue Divisor = Op->getOperand(1);
16254     SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
16255     SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
16256     SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
16257 
16258     SDValue Values[2] = {Div, Rem};
16259     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
16260   }
16261 
16262   RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
16263                                        VT.getSimpleVT().SimpleTy);
16264   SDValue InChain = DAG.getEntryNode();
16265 
16266   TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
16267                                                     DAG.getContext(),
16268                                                     Subtarget);
16269 
16270   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
16271                                          getPointerTy(DAG.getDataLayout()));
16272 
16273   Type *RetTy = StructType::get(Ty, Ty);
16274 
16275   if (Subtarget->isTargetWindows())
16276     InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
16277 
16278   TargetLowering::CallLoweringInfo CLI(DAG);
16279   CLI.setDebugLoc(dl).setChain(InChain)
16280     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
16281     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
16282 
16283   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
16284   return CallInfo.first;
16285 }
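
// As a rough sketch of the hardware-divide path above, an i32 sdivrem is
// expected to select to a divide plus a multiply-subtract:
//
//   sdiv r2, r0, r1        @ div = a / b
//   mls  r3, r2, r1, r0    @ rem = a - (div * b)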
16286 
16287 // Lowers REM using divmod helpers
16288 // see RTABI section 4.2/4.3
16289 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
16290   // Build return types (div and rem)
16291   std::vector<Type*> RetTyParams;
16292   Type *RetTyElement;
16293 
16294   switch (N->getValueType(0).getSimpleVT().SimpleTy) {
16295   default: llvm_unreachable("Unexpected request for libcall!");
16296   case MVT::i8:   RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
16297   case MVT::i16:  RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
16298   case MVT::i32:  RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
16299   case MVT::i64:  RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
16300   }
16301 
16302   RetTyParams.push_back(RetTyElement);
16303   RetTyParams.push_back(RetTyElement);
16304   ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
16305   Type *RetTy = StructType::get(*DAG.getContext(), ret);
16306 
16307   RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
16308                                                              SimpleTy);
16309   SDValue InChain = DAG.getEntryNode();
16310   TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
16311                                                     Subtarget);
16312   bool isSigned = N->getOpcode() == ISD::SREM;
16313   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
16314                                          getPointerTy(DAG.getDataLayout()));
16315 
16316   if (Subtarget->isTargetWindows())
16317     InChain = WinDBZCheckDenominator(DAG, N, InChain);
16318 
16319   // Lower call
16320   CallLoweringInfo CLI(DAG);
16321   CLI.setChain(InChain)
16322      .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
16323      .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
16324   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
16325 
16326   // Return second (rem) result operand (first contains div)
16327   SDNode *ResNode = CallResult.first.getNode();
16328   assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
16329   return ResNode->getOperand(1);
16330 }
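
// The RTABI divmod helpers return the quotient in r0 and the remainder in
// r1, which is why the call above is modelled as returning a {div, rem}
// struct and only the second element is extracted.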
16331 
16332 SDValue
16333 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
16334   assert(Subtarget->isTargetWindows() && "unsupported target platform");
16335   SDLoc DL(Op);
16336 
16337   // Get the inputs.
16338   SDValue Chain = Op.getOperand(0);
16339   SDValue Size  = Op.getOperand(1);
16340 
16341   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
16342           "no-stack-arg-probe")) {
16343     unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16344     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
16345     Chain = SP.getValue(1);
16346     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
16347     if (Align)
16348       SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
16349                        DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
16350     Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
16351     SDValue Ops[2] = { SP, Chain };
16352     return DAG.getMergeValues(Ops, DL);
16353   }
16354 
16355   SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
16356                               DAG.getConstant(2, DL, MVT::i32));
16357 
16358   SDValue Flag;
16359   Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
16360   Flag = Chain.getValue(1);
16361 
16362   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16363   Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
16364 
16365   SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
16366   Chain = NewSP.getValue(1);
16367 
16368   SDValue Ops[2] = { NewSP, Chain };
16369   return DAG.getMergeValues(Ops, DL);
16370 }
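
// A sketch of the probed path above, assuming the usual Windows ARM __chkstk
// protocol (requested size passed as a word count in r4, with the actual
// stack-pointer adjustment emitted when the WIN__CHKSTK pseudo is expanded):
//
//   lsrs r4, rSize, #2     @ bytes -> words
//   bl   __chkstk          @ probe the new stack pages
//   sub  sp, sp, r4        @ part of the pseudo expansion
//   mov  rResult, sp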
16371 
16372 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
16373   bool IsStrict = Op->isStrictFPOpcode();
16374   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
16375   const unsigned DstSz = Op.getValueType().getSizeInBits();
16376   const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
16377   assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
16378          "Unexpected type for custom-lowering FP_EXTEND");
16379 
16380   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
16381          "With both FP DP and 16, any FP conversion is legal!");
16382 
16383   assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
16384          "With FP16, 16 to 32 conversion is legal!");
16385 
16386   // Converting from 32 -> 64 is valid if we have FP64.
16387   if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
16388     // FIXME: Remove this when we have strict fp instruction selection patterns
16389     if (IsStrict) {
16390       SDLoc Loc(Op);
16391       SDValue Result = DAG.getNode(ISD::FP_EXTEND,
16392                                    Loc, Op.getValueType(), SrcVal);
16393       return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
16394     }
16395     return Op;
16396   }
16397 
16398   // Either we are converting from 16 -> 64 without FP16 and/or without
16399   // double-precision FP (or without ARMv8-FP), so we must do it in two
16400   // steps.
16401   // Or we are converting from 32 -> 64 without double-precision FP, or from
16402   // 16 -> 32 without FP16, so we must make a libcall.
16403   SDLoc Loc(Op);
16404   RTLIB::Libcall LC;
16405   MakeLibCallOptions CallOptions;
16406   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
16407   for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
16408     bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
16409     MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
16410     MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
16411     if (Supported) {
16412       if (IsStrict) {
16413         SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
16414                              {DstVT, MVT::Other}, {Chain, SrcVal});
16415         Chain = SrcVal.getValue(1);
16416       } else {
16417         SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
16418       }
16419     } else {
16420       LC = RTLIB::getFPEXT(SrcVT, DstVT);
16421       assert(LC != RTLIB::UNKNOWN_LIBCALL &&
16422              "Unexpected type for custom-lowering FP_EXTEND");
16423       std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
16424                                             Loc, Chain);
16425     }
16426   }
16427 
16428   return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
16429 }
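
// For example, in the lowering above, extending f16 -> f64 with FP16 but
// without double precision is expected to become one vcvtb.f32.f16
// instruction followed by a 32 -> 64 libcall (__aeabi_f2d on AEABI targets);
// with neither feature available it becomes two libcalls.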
16430 
16431 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
16432   bool IsStrict = Op->isStrictFPOpcode();
16433 
16434   SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
16435   EVT SrcVT = SrcVal.getValueType();
16436   EVT DstVT = Op.getValueType();
16437   const unsigned DstSz = Op.getValueType().getSizeInBits();
16438   const unsigned SrcSz = SrcVT.getSizeInBits();
16439   (void)DstSz;
16440   assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
16441          "Unexpected type for custom-lowering FP_ROUND");
16442 
16443   assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
16444          "With both FP DP and 16, any FP conversion is legal!");
16445 
16446   SDLoc Loc(Op);
16447 
16448   // A 32 -> 16 round is a single instruction if we have FP16.
16449   if (SrcSz == 32 && Subtarget->hasFP16())
16450     return Op;
16451 
16452   // Otherwise use a libcall: 32 -> 16, or 64 -> [32, 16].
16453   RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
16454   assert(LC != RTLIB::UNKNOWN_LIBCALL &&
16455          "Unexpected type for custom-lowering FP_ROUND");
16456   MakeLibCallOptions CallOptions;
16457   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
16458   SDValue Result;
16459   std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
16460                                         Loc, Chain);
16461   return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
16462 }
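
// E.g. rounding f64 -> f16 always takes the libcall path above, which on
// AEABI targets is expected to resolve to __aeabi_d2h.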
16463 
16464 void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
16465                                  SelectionDAG &DAG) const {
16466   assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
16467   MVT HalfT = MVT::i32;
16468   SDLoc dl(N);
16469   SDValue Hi, Lo, Tmp;
16470 
16471   if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
16472       !isOperationLegalOrCustom(ISD::UADDO, HalfT))
16473     return;
16474 
16475   unsigned OpTypeBits = HalfT.getScalarSizeInBits();
16476   SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
16477 
16478   Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
16479                    DAG.getConstant(0, dl, HalfT));
16480   Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
16481                    DAG.getConstant(1, dl, HalfT));
16482 
16483   Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
16484                     DAG.getConstant(OpTypeBits - 1, dl,
16485                     getShiftAmountTy(HalfT, DAG.getDataLayout())));
16486   Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
16487   Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
16488                    SDValue(Lo.getNode(), 1));
16489   Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
16490   Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
16491 
16492   Results.push_back(Lo);
16493   Results.push_back(Hi);
16494 }
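
// The expansion above is the branchless identity
//
//   abs(x) == (x + s) ^ s,  where s = x >> 63 (arithmetic shift),
//
// carried out in i32 halves: SRA of the high half produces s, UADDO/ADDCARRY
// perform the 64-bit add with a carry between the halves, and the final XORs
// undo the conditional negation.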
16495 
16496 bool
16497 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
16498   // The ARM target isn't yet aware of offsets.
16499   return false;
16500 }
16501 
16502 bool ARM::isBitFieldInvertedMask(unsigned v) {
16503   if (v == 0xffffffff)
16504     return false;
16505 
16506   // There can be 1's on either or both "outsides"; all the "inside"
16507   // bits must be 0's.
16508   return isShiftedMask_32(~v);
16509 }
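
// For example, v = 0xf000000f qualifies: ~v == 0x0ffffff0 is a contiguous
// (shifted) mask, so the zero "inside" bits can be cleared with a single
// BFC/BFI. v = 0xf0f0f0f0 does not, since its zero bits are not contiguous.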
16510 
16511 /// isFPImmLegal - Returns true if the target can instruction select the
16512 /// specified FP immediate natively. If false, the legalizer will
16513 /// materialize the FP immediate as a load from a constant pool.
16514 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16515                                      bool ForCodeSize) const {
16516   if (!Subtarget->hasVFP3Base())
16517     return false;
16518   if (VT == MVT::f16 && Subtarget->hasFullFP16())
16519     return ARM_AM::getFP16Imm(Imm) != -1;
16520   if (VT == MVT::f32)
16521     return ARM_AM::getFP32Imm(Imm) != -1;
16522   if (VT == MVT::f64 && Subtarget->hasFP64())
16523     return ARM_AM::getFP64Imm(Imm) != -1;
16524   return false;
16525 }
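
// For instance, 1.0f and -0.5f fit the 8-bit VFP immediate encodings checked
// above and can be materialized with a single VMOV, while 0.1f cannot be
// encoded exactly and would be loaded from the constant pool instead.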
16526 
16527 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
16528 /// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
16529 /// specified in the intrinsic calls.
16530 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
16531                                            const CallInst &I,
16532                                            MachineFunction &MF,
16533                                            unsigned Intrinsic) const {
16534   switch (Intrinsic) {
16535   case Intrinsic::arm_neon_vld1:
16536   case Intrinsic::arm_neon_vld2:
16537   case Intrinsic::arm_neon_vld3:
16538   case Intrinsic::arm_neon_vld4:
16539   case Intrinsic::arm_neon_vld2lane:
16540   case Intrinsic::arm_neon_vld3lane:
16541   case Intrinsic::arm_neon_vld4lane:
16542   case Intrinsic::arm_neon_vld2dup:
16543   case Intrinsic::arm_neon_vld3dup:
16544   case Intrinsic::arm_neon_vld4dup: {
16545     Info.opc = ISD::INTRINSIC_W_CHAIN;
16546     // Conservatively set memVT to the entire set of vectors loaded.
16547     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16548     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
16549     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
16550     Info.ptrVal = I.getArgOperand(0);
16551     Info.offset = 0;
16552     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
16553     Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
16554     // volatile loads with NEON intrinsics not supported
16555     Info.flags = MachineMemOperand::MOLoad;
16556     return true;
16557   }
16558   case Intrinsic::arm_neon_vld1x2:
16559   case Intrinsic::arm_neon_vld1x3:
16560   case Intrinsic::arm_neon_vld1x4: {
16561     Info.opc = ISD::INTRINSIC_W_CHAIN;
16562     // Conservatively set memVT to the entire set of vectors loaded.
16563     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16564     uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
16565     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
16566     Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
16567     Info.offset = 0;
16568     Info.align.reset();
16569     // volatile loads with NEON intrinsics not supported
16570     Info.flags = MachineMemOperand::MOLoad;
16571     return true;
16572   }
16573   case Intrinsic::arm_neon_vst1:
16574   case Intrinsic::arm_neon_vst2:
16575   case Intrinsic::arm_neon_vst3:
16576   case Intrinsic::arm_neon_vst4:
16577   case Intrinsic::arm_neon_vst2lane:
16578   case Intrinsic::arm_neon_vst3lane:
16579   case Intrinsic::arm_neon_vst4lane: {
16580     Info.opc = ISD::INTRINSIC_VOID;
16581     // Conservatively set memVT to the entire set of vectors stored.
16582     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16583     unsigned NumElts = 0;
16584     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
16585       Type *ArgTy = I.getArgOperand(ArgI)->getType();
16586       if (!ArgTy->isVectorTy())
16587         break;
16588       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
16589     }
16590     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
16591     Info.ptrVal = I.getArgOperand(0);
16592     Info.offset = 0;
16593     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
16594     Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
16595     // volatile stores with NEON intrinsics not supported
16596     Info.flags = MachineMemOperand::MOStore;
16597     return true;
16598   }
16599   case Intrinsic::arm_neon_vst1x2:
16600   case Intrinsic::arm_neon_vst1x3:
16601   case Intrinsic::arm_neon_vst1x4: {
16602     Info.opc = ISD::INTRINSIC_VOID;
16603     // Conservatively set memVT to the entire set of vectors stored.
16604     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16605     unsigned NumElts = 0;
16606     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
16607       Type *ArgTy = I.getArgOperand(ArgI)->getType();
16608       if (!ArgTy->isVectorTy())
16609         break;
16610       NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
16611     }
16612     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
16613     Info.ptrVal = I.getArgOperand(0);
16614     Info.offset = 0;
16615     Info.align.reset();
16616     // volatile stores with NEON intrinsics not supported
16617     Info.flags = MachineMemOperand::MOStore;
16618     return true;
16619   }
16620   case Intrinsic::arm_ldaex:
16621   case Intrinsic::arm_ldrex: {
16622     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16623     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
16624     Info.opc = ISD::INTRINSIC_W_CHAIN;
16625     Info.memVT = MVT::getVT(PtrTy->getElementType());
16626     Info.ptrVal = I.getArgOperand(0);
16627     Info.offset = 0;
16628     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
16629     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
16630     return true;
16631   }
16632   case Intrinsic::arm_stlex:
16633   case Intrinsic::arm_strex: {
16634     auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
16635     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
16636     Info.opc = ISD::INTRINSIC_W_CHAIN;
16637     Info.memVT = MVT::getVT(PtrTy->getElementType());
16638     Info.ptrVal = I.getArgOperand(1);
16639     Info.offset = 0;
16640     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
16641     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
16642     return true;
16643   }
16644   case Intrinsic::arm_stlexd:
16645   case Intrinsic::arm_strexd:
16646     Info.opc = ISD::INTRINSIC_W_CHAIN;
16647     Info.memVT = MVT::i64;
16648     Info.ptrVal = I.getArgOperand(2);
16649     Info.offset = 0;
16650     Info.align = Align(8);
16651     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
16652     return true;
16653 
16654   case Intrinsic::arm_ldaexd:
16655   case Intrinsic::arm_ldrexd:
16656     Info.opc = ISD::INTRINSIC_W_CHAIN;
16657     Info.memVT = MVT::i64;
16658     Info.ptrVal = I.getArgOperand(0);
16659     Info.offset = 0;
16660     Info.align = Align(8);
16661     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
16662     return true;
16663 
16664   default:
16665     break;
16666   }
16667 
16668   return false;
16669 }
16670 
16671 /// Returns true if it is beneficial to convert a load of a constant
16672 /// to just the constant itself.
16673 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16674                                                           Type *Ty) const {
16675   assert(Ty->isIntegerTy());
16676 
16677   unsigned Bits = Ty->getPrimitiveSizeInBits();
16678   if (Bits == 0 || Bits > 32)
16679     return false;
16680   return true;
16681 }
16682 
16683 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
16684                                                 unsigned Index) const {
16685   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
16686     return false;
16687 
16688   return (Index == 0 || Index == ResVT.getVectorNumElements());
16689 }
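
// E.g. extracting either <2 x i32> half of a <4 x i32> is just a D-register
// view of the containing Q register, so the check above treats it as cheap;
// an extract starting in the middle of the vector is not.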
16690 
16691 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
16692                                         ARM_MB::MemBOpt Domain) const {
16693   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
16694 
16695   // First, if the target has no DMB, see what fallback we can use.
16696   if (!Subtarget->hasDataBarrier()) {
16697     // Some ARMv6 cpus can support data barriers with an mcr instruction.
16698     // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
16699     // here.
16700     if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
16701       Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
16702       Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
16703                         Builder.getInt32(0), Builder.getInt32(7),
16704                         Builder.getInt32(10), Builder.getInt32(5)};
16705       return Builder.CreateCall(MCR, args);
16706     } else {
16707       // Instead of using barriers, atomic accesses on these subtargets use
16708       // libcalls.
16709       llvm_unreachable("makeDMB on a target so old that it has no barriers");
16710     }
16711   } else {
16712     Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
16713     // Only a full system barrier exists in the M-class architectures.
16714     Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
16715     Constant *CDomain = Builder.getInt32(Domain);
16716     return Builder.CreateCall(DMB, CDomain);
16717   }
16718 }
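
// The six operands passed to the mcr intrinsic above encode the legacy ARMv6
// CP15 data memory barrier, roughly:
//
//   mcr p15, #0, rX, c7, c10, #5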
16719 
16720 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
16721 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
16722                                                  Instruction *Inst,
16723                                                  AtomicOrdering Ord) const {
16724   switch (Ord) {
16725   case AtomicOrdering::NotAtomic:
16726   case AtomicOrdering::Unordered:
16727     llvm_unreachable("Invalid fence: unordered/non-atomic");
16728   case AtomicOrdering::Monotonic:
16729   case AtomicOrdering::Acquire:
16730     return nullptr; // Nothing to do
16731   case AtomicOrdering::SequentiallyConsistent:
16732     if (!Inst->hasAtomicStore())
16733       return nullptr; // Nothing to do
16734     LLVM_FALLTHROUGH;
16735   case AtomicOrdering::Release:
16736   case AtomicOrdering::AcquireRelease:
16737     if (Subtarget->preferISHSTBarriers())
16738       return makeDMB(Builder, ARM_MB::ISHST);
16739     // FIXME: add a comment with a link to documentation justifying this.
16740     else
16741       return makeDMB(Builder, ARM_MB::ISH);
16742   }
16743   llvm_unreachable("Unknown fence ordering in emitLeadingFence");
16744 }
16745 
16746 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
16747                                                   Instruction *Inst,
16748                                                   AtomicOrdering Ord) const {
16749   switch (Ord) {
16750   case AtomicOrdering::NotAtomic:
16751   case AtomicOrdering::Unordered:
16752     llvm_unreachable("Invalid fence: unordered/not-atomic");
16753   case AtomicOrdering::Monotonic:
16754   case AtomicOrdering::Release:
16755     return nullptr; // Nothing to do
16756   case AtomicOrdering::Acquire:
16757   case AtomicOrdering::AcquireRelease:
16758   case AtomicOrdering::SequentiallyConsistent:
16759     return makeDMB(Builder, ARM_MB::ISH);
16760   }
16761   llvm_unreachable("Unknown fence ordering in emitTrailingFence");
16762 }
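
// Together with emitLeadingFence this yields the usual mapping when fences
// are inserted around atomics: e.g. a seq_cst store becomes
// "dmb ish; str; dmb ish" and a seq_cst load becomes "ldr; dmb ish".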
16763 
16764 // Loads and stores less than 64 bits wide are already atomic; ones above that
16765 // are doomed anyway, so defer to the default libcall and blame the OS when
16766 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
16767 // anything for those.
16768 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
16769   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
16770   return (Size == 64) && !Subtarget->isMClass();
16771 }
16772 
16773 // Loads and stores less than 64 bits wide are already atomic; ones above that
16774 // are doomed anyway, so defer to the default libcall and blame the OS when
16775 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
16776 // anything for those.
16777 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
16778 // guarantee, see DDI0406C ARM architecture reference manual,
16779 // sections A8.8.72-74 LDRD)
16780 TargetLowering::AtomicExpansionKind
16781 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
16782   unsigned Size = LI->getType()->getPrimitiveSizeInBits();
16783   return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
16784                                                   : AtomicExpansionKind::None;
16785 }
16786 
16787 // For the real atomic operations, we have ldrex/strex up to 32 bits,
16788 // and up to 64 bits on the non-M profiles
16789 TargetLowering::AtomicExpansionKind
16790 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
16791   if (AI->isFloatingPointOperation())
16792     return AtomicExpansionKind::CmpXChg;
16793 
16794   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
16795   bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
16796   return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
16797              ? AtomicExpansionKind::LLSC
16798              : AtomicExpansionKind::None;
16799 }
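
// Under the LLSC expansion chosen above, an i32 "atomicrmw add", for
// example, is rewritten into a load-exclusive/store-exclusive retry loop
// along the lines of:
//
//   1: ldrex  r1, [r0]
//      add    r2, r1, rVal
//      strex  r3, r2, [r0]
//      cmp    r3, #0
//      bne    1b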
16800 
16801 TargetLowering::AtomicExpansionKind
16802 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
16803   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
16804   // implement cmpxchg without spilling. If the address being exchanged is also
16805   // on the stack and close enough to the spill slot, this can lead to a
16806   // situation where the monitor always gets cleared and the atomic operation
16807   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
16808   bool HasAtomicCmpXchg =
16809       !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
16810   if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
16811     return AtomicExpansionKind::LLSC;
16812   return AtomicExpansionKind::None;
16813 }
16814 
16815 bool ARMTargetLowering::shouldInsertFencesForAtomic(
16816     const Instruction *I) const {
16817   return InsertFencesForAtomic;
16818 }
16819 
16820 // This has so far only been implemented for MachO.
16821 bool ARMTargetLowering::useLoadStackGuardNode() const {
16822   return Subtarget->isTargetMachO();
16823 }
16824 
16825 void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
16826   if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
16827     return TargetLowering::insertSSPDeclarations(M);
16828 
16829   // MSVC CRT has a global variable holding security cookie.
16830   M.getOrInsertGlobal("__security_cookie",
16831                       Type::getInt8PtrTy(M.getContext()));
16832 
16833   // MSVC CRT has a function to validate security cookie.
16834   FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
16835       "__security_check_cookie", Type::getVoidTy(M.getContext()),
16836       Type::getInt8PtrTy(M.getContext()));
16837   if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
16838     F->addAttribute(1, Attribute::AttrKind::InReg);
16839 }
16840 
16841 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
16842   // MSVC CRT has a global variable holding security cookie.
16843   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
16844     return M.getGlobalVariable("__security_cookie");
16845   return TargetLowering::getSDagStackGuard(M);
16846 }
16847 
16848 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
16849   // MSVC CRT has a function to validate security cookie.
16850   if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
16851     return M.getFunction("__security_check_cookie");
16852   return TargetLowering::getSSPStackGuardCheck(M);
16853 }
16854 
16855 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
16856                                                   unsigned &Cost) const {
16857   // If we do not have NEON, vector types are not natively supported.
16858   if (!Subtarget->hasNEON())
16859     return false;
16860 
16861   // Floating point values and vector values map to the same register file.
16862   // Therefore, although we could do a store+extract on a vector type, it is
16863   // better to leave these as floats, since we have more freedom in the
16864   // addressing mode for them.
16865   if (VectorTy->isFPOrFPVectorTy())
16866     return false;
16867 
16868   // If the index is unknown at compile time, this is very expensive to lower
16869   // and it is not possible to combine the store with the extract.
16870   if (!isa<ConstantInt>(Idx))
16871     return false;
16872 
16873   assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
16874   unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
16875   // We can do a store + vector extract on any vector that fits perfectly in a D
16876   // or Q register.
16877   if (BitWidth == 64 || BitWidth == 128) {
16878     Cost = 0;
16879     return true;
16880   }
16881   return false;
16882 }
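
// E.g. storing one lane of a <4 x i32> can then be selected as a single lane
// store such as "vst1.32 {d0[1]}, [r0]" rather than moving the element to a
// core register first.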
16883 
16884 bool ARMTargetLowering::isCheapToSpeculateCttz() const {
16885   return Subtarget->hasV6T2Ops();
16886 }
16887 
16888 bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
16889   return Subtarget->hasV6T2Ops();
16890 }
16891 
16892 bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
16893   return !Subtarget->hasMinSize() || Subtarget->isTargetWindows();
16894 }
16895 
16896 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
16897                                          AtomicOrdering Ord) const {
16898   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
16899   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
16900   bool IsAcquire = isAcquireOrStronger(Ord);
16901 
16902   // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
16903   // intrinsic must return {i32, i32} and we have to recombine them into a
16904   // single i64 here.
16905   if (ValTy->getPrimitiveSizeInBits() == 64) {
16906     Intrinsic::ID Int =
16907         IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
16908     Function *Ldrex = Intrinsic::getDeclaration(M, Int);
16909 
16910     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
16911     Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
16912 
16913     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
16914     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
16915     if (!Subtarget->isLittle())
16916       std::swap(Lo, Hi);
16917     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
16918     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
16919     return Builder.CreateOr(
16920         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
16921   }
16922 
16923   Type *Tys[] = { Addr->getType() };
16924   Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
16925   Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
16926 
16927   return Builder.CreateTruncOrBitCast(
16928       Builder.CreateCall(Ldrex, Addr),
16929       cast<PointerType>(Addr->getType())->getElementType());
16930 }
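
// For the 64-bit case the path above builds IR along the lines of (sketch):
//
//   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
//   ; extract both halves, zero-extend to i64, recombine as lo | (hi << 32)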
16931 
16932 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
16933     IRBuilder<> &Builder) const {
16934   if (!Subtarget->hasV7Ops())
16935     return;
16936   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
16937   Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
16938 }
16939 
16940 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
16941                                                Value *Addr,
16942                                                AtomicOrdering Ord) const {
16943   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
16944   bool IsRelease = isReleaseOrStronger(Ord);
16945 
16946   // Since the intrinsics must have legal type, the i64 intrinsics take two
16947   // parameters: "i32, i32". We must marshal Val into the appropriate form
16948   // before the call.
16949   if (Val->getType()->getPrimitiveSizeInBits() == 64) {
16950     Intrinsic::ID Int =
16951         IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
16952     Function *Strex = Intrinsic::getDeclaration(M, Int);
16953     Type *Int32Ty = Type::getInt32Ty(M->getContext());
16954 
16955     Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
16956     Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
16957     if (!Subtarget->isLittle())
16958       std::swap(Lo, Hi);
16959     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
16960     return Builder.CreateCall(Strex, {Lo, Hi, Addr});
16961   }
16962 
16963   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
16964   Type *Tys[] = { Addr->getType() };
16965   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
16966 
16967   return Builder.CreateCall(
16968       Strex, {Builder.CreateZExtOrBitCast(
16969                   Val, Strex->getFunctionType()->getParamType(0)),
16970               Addr});
16971 }
16972 
16973 
16974 bool ARMTargetLowering::alignLoopsWithOptSize() const {
16975   return Subtarget->isMClass();
16976 }
16977 
16978 /// A helper function for determining the number of interleaved accesses we
16979 /// will generate when lowering accesses of the given type.
16980 unsigned
16981 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
16982                                              const DataLayout &DL) const {
16983   return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
16984 }
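
// E.g. a <16 x i32> access is 512 bits wide, giving (512 + 127) / 128 = 4
// interleaved accesses of up to 128 bits each.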
16985 
16986 bool ARMTargetLowering::isLegalInterleavedAccessType(
16987     unsigned Factor, VectorType *VecTy, const DataLayout &DL) const {
16988 
16989   unsigned VecSize = DL.getTypeSizeInBits(VecTy);
16990   unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
16991 
16992   if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
16993     return false;
16994 
16995   // Ensure the vector doesn't have f16 elements. Even though we could do an
16996   // i16 vldN, we can't hold the f16 vectors and will end up converting via
16997   // f32.
16998   if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
16999     return false;
17000   if (Subtarget->hasMVEIntegerOps() && Factor == 3)
17001     return false;
17002 
17003   // Ensure the number of vector elements is greater than 1.
17004   if (VecTy->getNumElements() < 2)
17005     return false;
17006 
17007   // Ensure the element type is legal.
17008   if (ElSize != 8 && ElSize != 16 && ElSize != 32)
17009     return false;
17010 
17011   // Ensure the total vector size is 64 or a multiple of 128. Types larger than
17012   // 128 will be split into multiple interleaved accesses.
17013   if (Subtarget->hasNEON() && VecSize == 64)
17014     return true;
17015   return VecSize % 128 == 0;
17016 }
17017 
17018 unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
17019   if (Subtarget->hasNEON())
17020     return 4;
17021   if (Subtarget->hasMVEIntegerOps())
17022     return MVEMaxSupportedInterleaveFactor;
17023   return TargetLoweringBase::getMaxSupportedInterleaveFactor();
17024 }
17025 
17026 /// Lower an interleaved load into a vldN intrinsic.
17027 ///
17028 /// E.g. Lower an interleaved load (Factor = 2):
17029 ///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
17030 ///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
17031 ///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
17032 ///
17033 ///      Into:
17034 ///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
17035 ///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
17036 ///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
17037 bool ARMTargetLowering::lowerInterleavedLoad(
17038     LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
17039     ArrayRef<unsigned> Indices, unsigned Factor) const {
17040   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
17041          "Invalid interleave factor");
17042   assert(!Shuffles.empty() && "Empty shufflevector input");
17043   assert(Shuffles.size() == Indices.size() &&
17044          "Unmatched number of shufflevectors and indices");
17045 
17046   VectorType *VecTy = Shuffles[0]->getType();
17047   Type *EltTy = VecTy->getVectorElementType();
17048 
17049   const DataLayout &DL = LI->getModule()->getDataLayout();
17050 
17051   // Skip if we do not have NEON and skip illegal vector types. We can
17052   // "legalize" wide vector types into multiple interleaved accesses as long as
17053   // the vector types are divisible by 128.
17054   if (!isLegalInterleavedAccessType(Factor, VecTy, DL))
17055     return false;
17056 
17057   unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
17058 
17059   // A pointer vector cannot be the return type of the ldN intrinsics. Need to
17060   // load integer vectors first and then convert to pointer vectors.
17061   if (EltTy->isPointerTy())
17062     VecTy =
17063         VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
17064 
17065   IRBuilder<> Builder(LI);
17066 
17067   // The base address of the load.
17068   Value *BaseAddr = LI->getPointerOperand();
17069 
17070   if (NumLoads > 1) {
17071     // If we're going to generate more than one load, reset the sub-vector type
17072     // to something legal.
17073     VecTy = VectorType::get(VecTy->getVectorElementType(),
17074                             VecTy->getVectorNumElements() / NumLoads);
17075 
17076     // We will compute the pointer operand of each load from the original base
17077     // address using GEPs. Cast the base address to a pointer to the scalar
17078     // element type.
17079     BaseAddr = Builder.CreateBitCast(
17080         BaseAddr, VecTy->getVectorElementType()->getPointerTo(
17081                       LI->getPointerAddressSpace()));
17082   }
17083 
17084   assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
17085 
17086   auto createLoadIntrinsic = [&](Value *BaseAddr) {
17087     if (Subtarget->hasNEON()) {
17088       Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
17089       Type *Tys[] = {VecTy, Int8Ptr};
17090       static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
17091                                                 Intrinsic::arm_neon_vld3,
17092                                                 Intrinsic::arm_neon_vld4};
17093       Function *VldnFunc =
17094           Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
17095 
17096       SmallVector<Value *, 2> Ops;
17097       Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
17098       Ops.push_back(Builder.getInt32(LI->getAlignment()));
17099 
17100       return Builder.CreateCall(VldnFunc, Ops, "vldN");
17101     } else {
17102       assert((Factor == 2 || Factor == 4) &&
17103              "expected interleave factor of 2 or 4 for MVE");
17104       Intrinsic::ID LoadInts =
17105           Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
17106       Type *VecEltTy = VecTy->getVectorElementType()->getPointerTo(
17107           LI->getPointerAddressSpace());
17108       Type *Tys[] = {VecTy, VecEltTy};
17109       Function *VldnFunc =
17110           Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
17111 
17112       SmallVector<Value *, 2> Ops;
17113       Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
17114       return Builder.CreateCall(VldnFunc, Ops, "vldN");
17115     }
17116   };
17117 
17118   // Holds sub-vectors extracted from the load intrinsic return values. The
17119   // sub-vectors are associated with the shufflevector instructions they will
17120   // replace.
17121   DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
17122 
17123   for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
17124     // If we're generating more than one load, compute the base address of
17125     // subsequent loads as an offset from the previous.
17126     if (LoadCount > 0)
17127       BaseAddr =
17128           Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
17129                                      VecTy->getVectorNumElements() * Factor);
17130 
17131     CallInst *VldN = createLoadIntrinsic(BaseAddr);
17132 
17133     // Replace uses of each shufflevector with the corresponding vector loaded
17134     // by ldN.
17135     for (unsigned i = 0; i < Shuffles.size(); i++) {
17136       ShuffleVectorInst *SV = Shuffles[i];
17137       unsigned Index = Indices[i];
17138 
17139       Value *SubVec = Builder.CreateExtractValue(VldN, Index);
17140 
17141       // Convert the integer vector to pointer vector if the element is pointer.
17142       if (EltTy->isPointerTy())
17143         SubVec = Builder.CreateIntToPtr(
17144             SubVec, VectorType::get(SV->getType()->getVectorElementType(),
17145                                     VecTy->getVectorNumElements()));
17146 
17147       SubVecs[SV].push_back(SubVec);
17148     }
17149   }
17150 
17151   // Replace uses of the shufflevector instructions with the sub-vectors
17152   // returned by the load intrinsic. If a shufflevector instruction is
17153   // associated with more than one sub-vector, those sub-vectors will be
17154   // concatenated into a single wide vector.
17155   for (ShuffleVectorInst *SVI : Shuffles) {
17156     auto &SubVec = SubVecs[SVI];
17157     auto *WideVec =
17158         SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
17159     SVI->replaceAllUsesWith(WideVec);
17160   }
17161 
17162   return true;
17163 }
17164 
17165 /// Lower an interleaved store into a vstN intrinsic.
17166 ///
17167 /// E.g. Lower an interleaved store (Factor = 3):
17168 ///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
17169 ///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
17170 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
17171 ///
17172 ///      Into:
17173 ///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
17174 ///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
17175 ///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
17176 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
17177 ///
17178 /// Note that the new shufflevectors will be removed and we'll only generate one
17179 /// vst3 instruction in CodeGen.
17180 ///
17181 /// Example for a more general valid mask (Factor 3). Lower:
17182 ///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
17183 ///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
17184 ///        store <12 x i32> %i.vec, <12 x i32>* %ptr
17185 ///
17186 ///      Into:
17187 ///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
17188 ///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
17189 ///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
17190 ///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
17191 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
17192                                               ShuffleVectorInst *SVI,
17193                                               unsigned Factor) const {
17194   assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
17195          "Invalid interleave factor");
17196 
17197   VectorType *VecTy = SVI->getType();
17198   assert(VecTy->getVectorNumElements() % Factor == 0 &&
17199          "Invalid interleaved store");
17200 
17201   unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
17202   Type *EltTy = VecTy->getVectorElementType();
17203   VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
17204 
17205   const DataLayout &DL = SI->getModule()->getDataLayout();
17206 
17207   // Skip if we do not have NEON and skip illegal vector types. We can
17208   // "legalize" wide vector types into multiple interleaved accesses as long as
17209   // the vector types are divisible by 128.
17210   if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL))
17211     return false;
17212 
17213   unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
17214 
17215   Value *Op0 = SVI->getOperand(0);
17216   Value *Op1 = SVI->getOperand(1);
17217   IRBuilder<> Builder(SI);
17218 
17219   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
17220   // vectors to integer vectors.
17221   if (EltTy->isPointerTy()) {
17222     Type *IntTy = DL.getIntPtrType(EltTy);
17223 
17224     // Convert to the corresponding integer vector.
17225     Type *IntVecTy =
17226         VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
17227     Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
17228     Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
17229 
17230     SubVecTy = VectorType::get(IntTy, LaneLen);
17231   }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
                      SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

  auto Mask = SVI->getShuffleMask();

  auto createStoreIntrinsic = [&](Value *BaseAddr,
                                  SmallVectorImpl<Value *> &Shuffles) {
    if (Subtarget->hasNEON()) {
      static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                                 Intrinsic::arm_neon_vst3,
                                                 Intrinsic::arm_neon_vst4};
      Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
      Type *Tys[] = {Int8Ptr, SubVecTy};

      Function *VstNFunc = Intrinsic::getDeclaration(
          SI->getModule(), StoreInts[Factor - 2], Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
      for (auto S : Shuffles)
        Ops.push_back(S);
      Ops.push_back(Builder.getInt32(SI->getAlignment()));
      Builder.CreateCall(VstNFunc, Ops);
    } else {
      assert((Factor == 2 || Factor == 4) &&
             "expected interleave factor of 2 or 4 for MVE");
      Intrinsic::ID StoreInts =
          Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
      Type *EltPtrTy = SubVecTy->getVectorElementType()->getPointerTo(
          SI->getPointerAddressSpace());
      Type *Tys[] = {EltPtrTy, SubVecTy};
      Function *VstNFunc =
          Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);

      SmallVector<Value *, 6> Ops;
      Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
      for (auto S : Shuffles)
        Ops.push_back(S);
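      // The MVE vst2q/vst4q intrinsics take a stage index as their final
      // operand rather than storing all lanes in one call, so emit one call
      // per stage with the same address and sub-vector operands.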
      for (unsigned F = 0; F < Factor; F++) {
        Ops.push_back(Builder.getInt32(F));
        Builder.CreateCall(VstNFunc, Ops);
        Ops.pop_back();
      }
    }
  };

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we're generating more than one store, we compute the base address of
    // subsequent stores as an offset from the previous one.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 4> Shuffles;

    // Split the shufflevector operands into sub-vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: if all elements in a chunk are undef, StartMask stays 0.
        // Filling undef gaps with arbitrary elements is fine, since those
        // elements were going to be written anyway (as undef); in the
        // all-undef case we simply default to elements starting at 0.
        // StartMask cannot be negative; that is checked in
        // isReInterleaveMask.
        Shuffles.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
      }
    }

    createStoreIntrinsic(BaseAddr, Shuffles);
  }
  return true;
}

enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

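/// Recursively check whether Ty is an AAPCS-VFP homogeneous aggregate: at
/// most four members of one base type (float, double, or a 64- or 128-bit
/// vector), possibly nested in structs and arrays. For example,
/// { float, float, float } is an HA with Base == HA_FLOAT and Members == 3.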
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      switch (VT->getBitWidth()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// Return the correct alignment for the current calling convention.
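/// For example, a vector argument with 16-byte ABI alignment is capped to the
/// stack alignment (8 bytes on typical AAPCS targets), so it is never
/// over-aligned on the stack.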
Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                       DataLayout DL) const {
  const Align ABITypeAlign(DL.getABITypeAlignment(ArgTy));
  if (!ArgTy->isVectorTy())
    return ABITypeAlign;

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(ABITypeAlign, DL.getStackAlignment());
}

/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
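/// For example, [4 x i32] qualifies via the integer-array rule and
/// { double, double } as a homogeneous aggregate, while { float, i32 } is
/// neither.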
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

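// Copy each callee-saved register handled via the split-CSR scheme into a
// fresh virtual register at function entry, and copy it back to the physical
// register before every return. This lets conventions such as CXX_FAST_TLS
// preserve registers without prologue/epilogue spills.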
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create a copy from the CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions; it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}

void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);
}