1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "HexagonISelLowering.h"
16 #include "HexagonMachineFunctionInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "HexagonTargetMachine.h"
19 #include "HexagonTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39
40 using namespace llvm;
41
42 #define DEBUG_TYPE "hexagon-lowering"
43
44 static cl::opt<bool>
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46 cl::desc("Control jump table emission on Hexagon target"));
47
48 namespace {
49 class HexagonCCState : public CCState {
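  // Number of named (fixed) parameters in the callee's prototype. Arguments
  // whose index is at or beyond this value are treated as variadic by
  // CC_Hexagon_VarArg below.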
50 int NumNamedVarArgParams;
51
52 public:
53   HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
54 const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
55 LLVMContext &C, int NumNamedVarArgParams)
56 : CCState(CC, isVarArg, MF, TM, locs, C),
57 NumNamedVarArgParams(NumNamedVarArgParams) {}
58
59   int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
60 };
61 }
62
63 // Implement calling convention for Hexagon.
64 static bool
65 CC_Hexagon(unsigned ValNo, MVT ValVT,
66 MVT LocVT, CCValAssign::LocInfo LocInfo,
67 ISD::ArgFlagsTy ArgFlags, CCState &State);
68
69 static bool
70 CC_Hexagon32(unsigned ValNo, MVT ValVT,
71 MVT LocVT, CCValAssign::LocInfo LocInfo,
72 ISD::ArgFlagsTy ArgFlags, CCState &State);
73
74 static bool
75 CC_Hexagon64(unsigned ValNo, MVT ValVT,
76 MVT LocVT, CCValAssign::LocInfo LocInfo,
77 ISD::ArgFlagsTy ArgFlags, CCState &State);
78
79 static bool
80 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
81 MVT LocVT, CCValAssign::LocInfo LocInfo,
82 ISD::ArgFlagsTy ArgFlags, CCState &State);
83
84 static bool
85 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
86 MVT LocVT, CCValAssign::LocInfo LocInfo,
87 ISD::ArgFlagsTy ArgFlags, CCState &State);
88
89 static bool
90 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
91 MVT LocVT, CCValAssign::LocInfo LocInfo,
92 ISD::ArgFlagsTy ArgFlags, CCState &State);
93
94 static bool
95 CC_Hexagon_VarArg(unsigned ValNo, MVT ValVT,
96 MVT LocVT, CCValAssign::LocInfo LocInfo,
97 ISD::ArgFlagsTy ArgFlags, CCState &State) {
98 HexagonCCState &HState = static_cast<HexagonCCState &>(State);
99
100 // NumNamedVarArgParams cannot be zero for a vararg function.
101 assert((HState.getNumNamedVarArgParams() > 0) &&
102 "NumNamedVarArgParams must be greater than zero.");
103
104 if ((int)ValNo < HState.getNumNamedVarArgParams()) {
105 // Deal with named arguments.
106 return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
107 }
108
109 // Deal with unnamed arguments.
110 unsigned ofst;
111 if (ArgFlags.isByVal()) {
112 // If pass-by-value, the size allocated on stack is decided
113 // by ArgFlags.getByValSize(), not by the size of LocVT.
114 assert ((ArgFlags.getByValSize() > 8) &&
115 "ByValSize must be bigger than 8 bytes");
116 ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
117 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
118 return false;
119 }
120 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
121 LocVT = MVT::i32;
122 ValVT = MVT::i32;
123 if (ArgFlags.isSExt())
124 LocInfo = CCValAssign::SExt;
125 else if (ArgFlags.isZExt())
126 LocInfo = CCValAssign::ZExt;
127 else
128 LocInfo = CCValAssign::AExt;
129 }
130 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
131 ofst = State.AllocateStack(4, 4);
132 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
133 return false;
134 }
135 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
136 ofst = State.AllocateStack(8, 8);
137 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
138 return false;
139 }
140 llvm_unreachable(nullptr);
141 }
142
143
144 static bool
145 CC_Hexagon(unsigned ValNo, MVT ValVT,
146 MVT LocVT, CCValAssign::LocInfo LocInfo,
147 ISD::ArgFlagsTy ArgFlags, CCState &State) {
148
149 if (ArgFlags.isByVal()) {
150 // Passed on stack.
151 assert ((ArgFlags.getByValSize() > 8) &&
152 "ByValSize must be bigger than 8 bytes");
153 unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155 return false;
156 }
157
158 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
159 LocVT = MVT::i32;
160 ValVT = MVT::i32;
161 if (ArgFlags.isSExt())
162 LocInfo = CCValAssign::SExt;
163 else if (ArgFlags.isZExt())
164 LocInfo = CCValAssign::ZExt;
165 else
166 LocInfo = CCValAssign::AExt;
167 }
168
169 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
170 if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
171 return false;
172 }
173
174 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
175 if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
176 return false;
177 }
178
179 return true; // CC didn't match.
180 }
181
182
183 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
184 MVT LocVT, CCValAssign::LocInfo LocInfo,
185 ISD::ArgFlagsTy ArgFlags, CCState &State) {
186
187 static const MCPhysReg RegList[] = {
188 Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
189 Hexagon::R5
190 };
191 if (unsigned Reg = State.AllocateReg(RegList, 6)) {
192 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
193 return false;
194 }
195
196 unsigned Offset = State.AllocateStack(4, 4);
197 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
198 return false;
199 }
200
201 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
202 MVT LocVT, CCValAssign::LocInfo LocInfo,
203 ISD::ArgFlagsTy ArgFlags, CCState &State) {
204
205 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
206 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
207 return false;
208 }
209
210 static const MCPhysReg RegList1[] = {
211 Hexagon::D1, Hexagon::D2
212 };
213 static const MCPhysReg RegList2[] = {
214 Hexagon::R1, Hexagon::R3
215 };
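  // Allocating a register from RegList1 also marks the register at the same
  // index in RegList2 as used - the 32-bit register that is skipped so the
  // 64-bit value lands in an aligned register pair.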
216 if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
217 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
218 return false;
219 }
220
221 unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
222 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
223 return false;
224 }
225
226 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
227 MVT LocVT, CCValAssign::LocInfo LocInfo,
228 ISD::ArgFlagsTy ArgFlags, CCState &State) {
229
230
231 if (LocVT == MVT::i1 ||
232 LocVT == MVT::i8 ||
233 LocVT == MVT::i16) {
234 LocVT = MVT::i32;
235 ValVT = MVT::i32;
236 if (ArgFlags.isSExt())
237 LocInfo = CCValAssign::SExt;
238 else if (ArgFlags.isZExt())
239 LocInfo = CCValAssign::ZExt;
240 else
241 LocInfo = CCValAssign::AExt;
242 }
243
244 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
245 if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
246 return false;
247 }
248
249 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
250 if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
251 return false;
252 }
253
254 return true; // CC didn't match.
255 }
256
257 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
258 MVT LocVT, CCValAssign::LocInfo LocInfo,
259 ISD::ArgFlagsTy ArgFlags, CCState &State) {
260
261 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
262 if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
263 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
264 return false;
265 }
266 }
267
268 unsigned Offset = State.AllocateStack(4, 4);
269 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
270 return false;
271 }
272
273 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
274 MVT LocVT, CCValAssign::LocInfo LocInfo,
275 ISD::ArgFlagsTy ArgFlags, CCState &State) {
276 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
277 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
278 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
279 return false;
280 }
281 }
282
283 unsigned Offset = State.AllocateStack(8, 8);
284 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
285 return false;
286 }
287
288 SDValue
289 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
290 const {
291 return SDValue();
292 }
293
294 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
295 /// by "Src" to address "Dst" of size "Size". Alignment information is
296 /// specified by the specific parameter attribute. The copy will be passed as
297 /// a byval function parameter. Sometimes what we are copying is the end of a
298 /// larger object, the part that does not fit in registers.
299 static SDValue
300 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
301 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
302 SDLoc dl) {
303
304 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
305 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
306 /*isVolatile=*/false, /*AlwaysInline=*/false,
307 MachinePointerInfo(), MachinePointerInfo());
308 }
309
310
311 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
312 // passed by value, the function prototype is modified to return void and
313 // the value is stored in memory pointed to by a pointer passed by the caller.
314 SDValue
315 HexagonTargetLowering::LowerReturn(SDValue Chain,
316 CallingConv::ID CallConv, bool isVarArg,
317 const SmallVectorImpl<ISD::OutputArg> &Outs,
318 const SmallVectorImpl<SDValue> &OutVals,
319 SDLoc dl, SelectionDAG &DAG) const {
320
321 // CCValAssign - represent the assignment of the return value to locations.
322 SmallVector<CCValAssign, 16> RVLocs;
323
324 // CCState - Info about the registers and stack slot.
325 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
326 getTargetMachine(), RVLocs, *DAG.getContext());
327
328 // Analyze return values of ISD::RET
329 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
330
331 SDValue Flag;
332 SmallVector<SDValue, 4> RetOps(1, Chain);
333
334 // Copy the result values into the output registers.
335 for (unsigned i = 0; i != RVLocs.size(); ++i) {
336 CCValAssign &VA = RVLocs[i];
337
338 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
339
340 // Guarantee that all emitted copies are stuck together with flags.
341 Flag = Chain.getValue(1);
342 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
343 }
344
345 RetOps[0] = Chain; // Update chain.
346
347 // Add the flag if we have it.
348 if (Flag.getNode())
349 RetOps.push_back(Flag);
350
351 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
352 }
353
354
355
356
357 /// LowerCallResult - Lower the result values of an ISD::CALL into the
358 /// appropriate copies out of appropriate physical registers. This assumes that
359 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
360 /// being lowered. Returns an SDNode with the same number of values as the
361 /// ISD::CALL.
362 SDValue
363 HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
364 CallingConv::ID CallConv, bool isVarArg,
365 const
366 SmallVectorImpl<ISD::InputArg> &Ins,
367 SDLoc dl, SelectionDAG &DAG,
368 SmallVectorImpl<SDValue> &InVals,
369 const SmallVectorImpl<SDValue> &OutVals,
370 SDValue Callee) const {
371
372 // Assign locations to each value returned by this call.
373 SmallVector<CCValAssign, 16> RVLocs;
374
375 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
376 getTargetMachine(), RVLocs, *DAG.getContext());
377
378 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
379
380 // Copy all of the result registers out of their specified physreg.
381 for (unsigned i = 0; i != RVLocs.size(); ++i) {
382 Chain = DAG.getCopyFromReg(Chain, dl,
383 RVLocs[i].getLocReg(),
384 RVLocs[i].getValVT(), InFlag).getValue(1);
385 InFlag = Chain.getValue(2);
386 InVals.push_back(Chain.getValue(0));
387 }
388
389 return Chain;
390 }
391
392 /// LowerCall - Function arguments are copied from virtual registers to
393 /// (physical registers)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
394 SDValue
395 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
396 SmallVectorImpl<SDValue> &InVals) const {
397 SelectionDAG &DAG = CLI.DAG;
398 SDLoc &dl = CLI.DL;
399 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
400 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
401 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
402 SDValue Chain = CLI.Chain;
403 SDValue Callee = CLI.Callee;
404 bool &isTailCall = CLI.IsTailCall;
405 CallingConv::ID CallConv = CLI.CallConv;
406 bool isVarArg = CLI.IsVarArg;
407
408 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
409
410 // Check for varargs.
411 int NumNamedVarArgParams = -1;
412 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
413 {
414 const Function* CalleeFn = nullptr;
415 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
416 if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
417 {
418 // If a function has zero args and is a vararg function, that's
419 // disallowed so it must be an undeclared function. Do not assume
420 // varargs if the callee is undefined.
421 if (CalleeFn->isVarArg() &&
422 CalleeFn->getFunctionType()->getNumParams() != 0) {
423 NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
424 }
425 }
426 }
427
428 // Analyze operands of the call, assigning locations to each operand.
429 SmallVector<CCValAssign, 16> ArgLocs;
430 HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
431 getTargetMachine(), ArgLocs, *DAG.getContext(),
432 NumNamedVarArgParams);
433
434 if (NumNamedVarArgParams > 0)
435 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
436 else
437 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
438
439
440 if (isTailCall) {
441 bool StructAttrFlag =
442 DAG.getMachineFunction().getFunction()->hasStructRetAttr();
443 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
444 isVarArg, IsStructRet,
445 StructAttrFlag,
446 Outs, OutVals, Ins, DAG);
447 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
448 CCValAssign &VA = ArgLocs[i];
449 if (VA.isMemLoc()) {
450 isTailCall = false;
451 break;
452 }
453 }
454 if (isTailCall) {
455 DEBUG(dbgs() << "Eligible for Tail Call\n");
456 } else {
457 DEBUG(dbgs() <<
458 "Argument must be passed on stack. Not eligible for Tail Call\n");
459 }
460 }
461 // Get a count of how many bytes are to be pushed on the stack.
462 unsigned NumBytes = CCInfo.getNextStackOffset();
463 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
464 SmallVector<SDValue, 8> MemOpChains;
465
466 const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
467 DAG.getTarget().getRegisterInfo());
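  // Read the current stack pointer once; stores of outgoing memory arguments
  // below are addressed as StackPtr + LocMemOffset.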
468 SDValue StackPtr =
469 DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());
470
471 // Walk the register/memloc assignments, inserting copies/loads.
472 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
473 CCValAssign &VA = ArgLocs[i];
474 SDValue Arg = OutVals[i];
475 ISD::ArgFlagsTy Flags = Outs[i].Flags;
476
477 // Promote the value if needed.
478 switch (VA.getLocInfo()) {
479 default:
480 // Loc info must be one of Full, SExt, ZExt, or AExt.
481 llvm_unreachable("Unknown loc info!");
482 case CCValAssign::Full:
483 break;
484 case CCValAssign::SExt:
485 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
486 break;
487 case CCValAssign::ZExt:
488 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
489 break;
490 case CCValAssign::AExt:
491 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
492 break;
493 }
494
495 if (VA.isMemLoc()) {
496 unsigned LocMemOffset = VA.getLocMemOffset();
497 SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
498 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
499
500 if (Flags.isByVal()) {
501 // The argument is a struct passed by value. According to LLVM, "Arg"
502 // is a pointer.
503 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
504 Flags, DAG, dl));
505 } else {
506 // The argument is not passed by value. "Arg" is a built-in type. It is
507 // not a pointer.
508 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
509 MachinePointerInfo(),false, false,
510 0));
511 }
512 continue;
513 }
514
515 // Arguments that can be passed in a register must be kept in the RegsToPass
516 // vector.
517 if (VA.isRegLoc()) {
518 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
519 }
520 }
521
522 // Transform all store nodes into one single node because all store
523 // nodes are independent of each other.
524 if (!MemOpChains.empty()) {
525 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
526 }
527
528 if (!isTailCall)
529 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
530 getPointerTy(), true),
531 dl);
532
533 // Build a sequence of copy-to-reg nodes chained together with token
534 // chain and flag operands which copy the outgoing args into registers.
535 // The InFlag is necessary since all emitted instructions must be
536 // stuck together.
537 SDValue InFlag;
538 if (!isTailCall) {
539 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
540 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
541 RegsToPass[i].second, InFlag);
542 InFlag = Chain.getValue(1);
543 }
544 }
545
546 // For tail calls lower the arguments to the 'real' stack slot.
547 if (isTailCall) {
548 // Force all the incoming stack arguments to be loaded from the stack
549 // before any new outgoing arguments are stored to the stack, because the
550 // outgoing stack slots may alias the incoming argument stack slots, and
551 // the alias isn't otherwise explicit. This is slightly more conservative
552 // than necessary, because it means that each store effectively depends
553 // on every argument instead of just those arguments it would clobber.
554 //
555 // Do not flag preceding copytoreg stuff together with the following stuff.
556 InFlag = SDValue();
557 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
558 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
559 RegsToPass[i].second, InFlag);
560 InFlag = Chain.getValue(1);
561 }
562 InFlag = SDValue();
563 }
564
565 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
566 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
567 // node so that legalize doesn't hack it.
568 if (flag_aligned_memcpy) {
569 const char *MemcpyName =
570 "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
571 Callee =
572 DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
573 flag_aligned_memcpy = false;
574 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
575 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
576 } else if (ExternalSymbolSDNode *S =
577 dyn_cast<ExternalSymbolSDNode>(Callee)) {
578 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
579 }
580
581 // Returns a chain & a flag for retval copy to use.
582 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
583 SmallVector<SDValue, 8> Ops;
584 Ops.push_back(Chain);
585 Ops.push_back(Callee);
586
587 // Add argument registers to the end of the list so that they are
588 // known live into the call.
589 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
590 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
591 RegsToPass[i].second.getValueType()));
592 }
593
594 if (InFlag.getNode()) {
595 Ops.push_back(InFlag);
596 }
597
598 if (isTailCall)
599 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
600
601 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
602 InFlag = Chain.getValue(1);
603
604 // Create the CALLSEQ_END node.
605 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
606 DAG.getIntPtrConstant(0, true), InFlag, dl);
607 InFlag = Chain.getValue(1);
608
609 // Handle result values, copying them out of physregs into vregs that we
610 // return.
611 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
612 InVals, OutVals, Callee);
613 }
614
615 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
616 bool isSEXTLoad, SDValue &Base,
617 SDValue &Offset, bool &isInc,
618 SelectionDAG &DAG) {
619 if (Ptr->getOpcode() != ISD::ADD)
620 return false;
621
622 if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
623 isInc = (Ptr->getOpcode() == ISD::ADD);
624 Base = Ptr->getOperand(0);
625 Offset = Ptr->getOperand(1);
626 // Ensure that Offset is a constant.
627 return (isa<ConstantSDNode>(Offset));
628 }
629
630 return false;
631 }
632
633 // TODO: Put this function along with the other isS* functions in
634 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
635 // functions defined in HexagonOperands.td.
636 static bool Is_PostInc_S4_Offset(SDNode *S, int ShiftAmount) {
637 ConstantSDNode *N = cast<ConstantSDNode>(S);
638
639 // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
640 // field.
641 int64_t v = (int64_t)N->getSExtValue();
642 int64_t m = 0;
643 if (ShiftAmount > 0) {
644 m = v % ShiftAmount;
645 v = v >> ShiftAmount;
646 }
647 return (v <= 7) && (v >= -8) && (m == 0);
648 }
649
650 /// getPostIndexedAddressParts - Returns true and sets the base pointer,
651 /// offset pointer, and addressing mode by reference if this node can be
652 /// combined with a load / store to form a post-indexed load / store.
653 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
654 SDValue &Base,
655 SDValue &Offset,
656 ISD::MemIndexedMode &AM,
657 SelectionDAG &DAG) const
658 {
659 EVT VT;
660 SDValue Ptr;
661 bool isSEXTLoad = false;
662
663 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
664 VT = LD->getMemoryVT();
665 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
666 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
667 VT = ST->getMemoryVT();
668 if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
669 return false;
670 }
671 } else {
672 return false;
673 }
674
675 bool isInc = false;
676 bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
677 isInc, DAG);
678 // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
679 int ShiftAmount = VT.getSizeInBits() / 16;
680 if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
681 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
682 return true;
683 }
684
685 return false;
686 }
687
688 SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
689 SelectionDAG &DAG) const {
690 SDNode *Node = Op.getNode();
691 MachineFunction &MF = DAG.getMachineFunction();
692 HexagonMachineFunctionInfo *FuncInfo =
693 MF.getInfo<HexagonMachineFunctionInfo>();
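  // Walk the INLINEASM operand bundles looking for an early-clobber definition
  // of the return-address register (LR). If one is found, record it in the
  // function info so later passes know LR is clobbered by inline assembly.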
694 switch (Node->getOpcode()) {
695 case ISD::INLINEASM: {
696 unsigned NumOps = Node->getNumOperands();
697 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
698 --NumOps; // Ignore the flag operand.
699
700 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
701 if (FuncInfo->hasClobberLR())
702 break;
703 unsigned Flags =
704 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
705 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
706 ++i; // Skip the ID value.
707
708 switch (InlineAsm::getKind(Flags)) {
709 default: llvm_unreachable("Bad flags!");
710 case InlineAsm::Kind_RegDef:
711 case InlineAsm::Kind_RegUse:
712 case InlineAsm::Kind_Imm:
713 case InlineAsm::Kind_Clobber:
714 case InlineAsm::Kind_Mem: {
715 for (; NumVals; --NumVals, ++i) {}
716 break;
717 }
718 case InlineAsm::Kind_RegDefEarlyClobber: {
719 for (; NumVals; --NumVals, ++i) {
720 unsigned Reg =
721 cast<RegisterSDNode>(Node->getOperand(i))->getReg();
722
723 // Check whether the register is LR (the return-address register).
724 const HexagonRegisterInfo *QRI =
725 static_cast<const HexagonRegisterInfo *>(
726 DAG.getTarget().getRegisterInfo());
727 if (Reg == QRI->getRARegister()) {
728 FuncInfo->setHasClobberLR(true);
729 break;
730 }
731 }
732 break;
733 }
734 }
735 }
736 }
737 } // Node->getOpcode
738 return Op;
739 }
740
741
742 //
743 // Taken from the XCore backend.
744 //
745 SDValue HexagonTargetLowering::
746 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
747 {
748 SDValue Chain = Op.getOperand(0);
749 SDValue Table = Op.getOperand(1);
750 SDValue Index = Op.getOperand(2);
751 SDLoc dl(Op);
752 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
753 unsigned JTI = JT->getIndex();
754 MachineFunction &MF = DAG.getMachineFunction();
755 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
756 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
757
758 // Mark all jump table targets as address taken.
759 const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
760 const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
761 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
762 MachineBasicBlock *MBB = JTBBs[i];
763 MBB->setHasAddressTaken();
764 // This line is needed to set the hasAddressTaken flag on the BasicBlock
765 // object.
766 BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
767 }
768
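  // Address of the branch target: jump table base + (index * 4). Load the
  // 32-bit destination from the table and branch to it.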
769 SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
770 getPointerTy(), TargetJT);
771 SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
772 DAG.getConstant(2, MVT::i32));
773 SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
774 ShiftIndex);
775 SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
776 MachinePointerInfo(), false, false, false,
777 0);
778 return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
779 }
780
781
782 SDValue
783 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
784 SelectionDAG &DAG) const {
785 SDValue Chain = Op.getOperand(0);
786 SDValue Size = Op.getOperand(1);
787 SDLoc dl(Op);
788
789 unsigned SPReg = getStackPointerRegisterToSaveRestore();
790
791 // Get a reference to the stack pointer.
792 SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
793
794 // Subtract the dynamic size from the actual stack size to
795 // obtain the new stack size.
796 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
797
798 //
799 // For Hexagon, the outgoing memory arguments area should be on top of the
800 // alloca area on the stack i.e., the outgoing memory arguments should be
801 // at a lower address than the alloca area. Move the alloca area down the
802 // stack by adding back the space reserved for outgoing arguments to SP
803 // here.
804 //
805 // We do not know what the size of the outgoing args is at this point.
806 // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
807 // stack pointer. We patch this instruction with the correct, known
808 // offset in emitPrologue().
809 //
810 // Use a placeholder immediate (zero) for now. This will be patched up
811 // by emitPrologue().
812 SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
813 MVT::i32,
814 Sub,
815 DAG.getConstant(0, MVT::i32));
816
817 // The Sub result contains the new stack start address, so it
818 // must be placed in the stack pointer register.
819 const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
820 DAG.getTarget().getRegisterInfo());
821 SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);
822
823 SDValue Ops[2] = { ArgAdjust, CopyChain };
824 return DAG.getMergeValues(Ops, dl);
825 }
826
827 SDValue
828 HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
829 CallingConv::ID CallConv,
830 bool isVarArg,
831 const
832 SmallVectorImpl<ISD::InputArg> &Ins,
833 SDLoc dl, SelectionDAG &DAG,
834 SmallVectorImpl<SDValue> &InVals)
835 const {
836
837 MachineFunction &MF = DAG.getMachineFunction();
838 MachineFrameInfo *MFI = MF.getFrameInfo();
839 MachineRegisterInfo &RegInfo = MF.getRegInfo();
840 HexagonMachineFunctionInfo *FuncInfo =
841 MF.getInfo<HexagonMachineFunctionInfo>();
842
843
844 // Assign locations to all of the incoming arguments.
845 SmallVector<CCValAssign, 16> ArgLocs;
846 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
847 getTargetMachine(), ArgLocs, *DAG.getContext());
848
849 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
850
851 // For LLVM, in the case of returning a struct by value (> 8 bytes),
852 // the first argument is a pointer that points to the location on the caller's
853 // stack where the return value will be stored. For Hexagon, the location on
854 // the caller's stack is passed only when the struct size is less than (or
855 // equal to) 8 bytes. If not, no address will be passed into the callee and
856 // the callee returns the result directly through R0/R1.
857
858 SmallVector<SDValue, 4> MemOps;
859
860 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
861 CCValAssign &VA = ArgLocs[i];
862 ISD::ArgFlagsTy Flags = Ins[i].Flags;
863 unsigned ObjSize;
864 unsigned StackLocation;
865 int FI;
866
867 if ( (VA.isRegLoc() && !Flags.isByVal())
868 || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
869 // Arguments passed in registers:
870 // 1. int, long long, ptr args that get allocated in a register.
871 // 2. Large structs that get a register to hold their address.
872 EVT RegVT = VA.getLocVT();
873 if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
874 RegVT == MVT::i32 || RegVT == MVT::f32) {
875 unsigned VReg =
876 RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
877 RegInfo.addLiveIn(VA.getLocReg(), VReg);
878 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
879 } else if (RegVT == MVT::i64) {
880 unsigned VReg =
881 RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
882 RegInfo.addLiveIn(VA.getLocReg(), VReg);
883 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
884 } else {
885 assert (0);
886 }
887 } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
888 assert (0 && "ByValSize must be bigger than 8 bytes");
889 } else {
890 // Sanity check.
891 assert(VA.isMemLoc());
892
893 if (Flags.isByVal()) {
894 // If it's a byval parameter, then we need to compute the
895 // "real" size, not the size of the pointer.
896 ObjSize = Flags.getByValSize();
897 } else {
898 ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
899 }
900
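      // Incoming stack arguments start above the saved LR/FP pair, hence the
      // HEXAGON_LRFP_SIZE adjustment to the fixed-object offset.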
901 StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
902 // Create the frame index object for this incoming parameter...
903 FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
904
905 // Create the SelectionDAG nodes corresponding to a load
906 // from this parameter.
907 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
908
909 if (Flags.isByVal()) {
910 // If it's a pass-by-value aggregate, then do not dereference the stack
911 // location. Instead, we should generate a reference to the stack
912 // location.
913 InVals.push_back(FIN);
914 } else {
915 InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
916 MachinePointerInfo(), false, false,
917 false, 0));
918 }
919 }
920 }
921
922 if (!MemOps.empty())
923 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
924
925 if (isVarArg) {
926 // This will point to the next argument passed via stack.
927 int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
928 HEXAGON_LRFP_SIZE +
929 CCInfo.getNextStackOffset(),
930 true);
931 FuncInfo->setVarArgsFrameIndex(FrameIndex);
932 }
933
934 return Chain;
935 }
936
937 SDValue
938 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
939 // VASTART stores the address of the VarArgsFrameIndex slot into the
940 // memory location argument.
941 MachineFunction &MF = DAG.getMachineFunction();
942 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
943 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
944 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
945 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
946 Op.getOperand(1), MachinePointerInfo(SV), false,
947 false, 0);
948 }
949
950 SDValue
951 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
952 EVT ValTy = Op.getValueType();
953 SDLoc dl(Op);
954 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
955 SDValue Res;
956 if (CP->isMachineConstantPoolEntry())
957 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
958 CP->getAlignment());
959 else
960 Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
961 CP->getAlignment());
962 return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
963 }
964
965 SDValue
966 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
967 const TargetRegisterInfo *TRI = DAG.getTarget().getRegisterInfo();
968 MachineFunction &MF = DAG.getMachineFunction();
969 MachineFrameInfo *MFI = MF.getFrameInfo();
970 MFI->setReturnAddressIsTaken(true);
971
972 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
973 return SDValue();
974
975 EVT VT = Op.getValueType();
976 SDLoc dl(Op);
977 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
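  // For a non-zero depth, walk up the frame chain (via LowerFRAMEADDR) and
  // load the return address saved 4 bytes above that frame's frame pointer.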
978 if (Depth) {
979 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
980 SDValue Offset = DAG.getConstant(4, MVT::i32);
981 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
982 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
983 MachinePointerInfo(), false, false, false, 0);
984 }
985
986 // Return LR, which contains the return address. Mark it an implicit live-in.
987 unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
988 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
989 }
990
991 SDValue
992 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
993 const HexagonRegisterInfo *TRI =
994 static_cast<const HexagonRegisterInfo *>(DAG.getTarget().getRegisterInfo());
995 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
996 MFI->setFrameAddressIsTaken(true);
997
998 EVT VT = Op.getValueType();
999 SDLoc dl(Op);
1000 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1001 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1002 TRI->getFrameRegister(), VT);
1003 while (Depth--)
1004 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1005 MachinePointerInfo(),
1006 false, false, false, 0);
1007 return FrameAddr;
1008 }
1009
1010 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
1011 SelectionDAG& DAG) const {
1012 SDLoc dl(Op);
1013 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1014 }
1015
1016
1017 SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
1018 SelectionDAG &DAG) const {
1019 SDValue Result;
1020 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1021 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1022 SDLoc dl(Op);
1023 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
1024
1025 const HexagonTargetObjectFile &TLOF =
1026 static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
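  // Globals placed in the small-data section are materialized with CONST32_GP
  // (GP-relative); all other globals use an absolute CONST32.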
1027 if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1028 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
1029 }
1030
1031 return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
1032 }
1033
1034 SDValue
1035 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1036 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1037 SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
1038 SDLoc dl(Op);
1039 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
1040 }
1041
1042 //===----------------------------------------------------------------------===//
1043 // TargetLowering Implementation
1044 //===----------------------------------------------------------------------===//
1045
1046 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
1047 : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
1048 TM(targetmachine) {
1049
1050 const HexagonSubtarget &Subtarget = TM.getSubtarget<HexagonSubtarget>();
1051
1052 // Set up the register classes.
1053 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1054 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1055
1056 if (Subtarget.hasV5TOps()) {
1057 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1058 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1059 }
1060
1061 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1062
1063 computeRegisterProperties();
1064
1065 // Align loop entry
1066 setPrefLoopAlignment(4);
1067
1068 // Limits for inline expansion of memcpy/memmove
1069 MaxStoresPerMemcpy = 6;
1070 MaxStoresPerMemmove = 6;
1071
1072 //
1073 // Library calls for unsupported operations
1074 //
1075
1076 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1077 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1078
1079 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1080 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1081
1082 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1083 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1084
1085 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1086 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1087 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1088 setOperationAction(ISD::SREM, MVT::i32, Expand);
1089
1090 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1091 setOperationAction(ISD::SDIV, MVT::i64, Expand);
1092 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1093 setOperationAction(ISD::SREM, MVT::i64, Expand);
1094
1095 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1096 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1097
1098 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1099 setOperationAction(ISD::UDIV, MVT::i64, Expand);
1100
1101 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1102 setOperationAction(ISD::UREM, MVT::i32, Expand);
1103
1104 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1105 setOperationAction(ISD::UREM, MVT::i64, Expand);
1106
1107 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1108 setOperationAction(ISD::FDIV, MVT::f32, Expand);
1109
1110 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1111 setOperationAction(ISD::FDIV, MVT::f64, Expand);
1112
1113 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
1114 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
1115 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1116 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1117
1118 if (Subtarget.hasV5TOps()) {
1119 // Hexagon V5 Support.
1120 setOperationAction(ISD::FADD, MVT::f32, Legal);
1121 setOperationAction(ISD::FADD, MVT::f64, Legal);
1122 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
1123 setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
1124 setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
1125 setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
1126 setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
1127
1128 setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
1129 setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
1130 setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
1131 setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
1132
1133 setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
1134 setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
1135 setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
1136 setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
1137
1138 setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
1139 setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
1140 setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
1141 setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
1142
1143 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1144 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1145
1146 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1147 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1148 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1149 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1150
1151 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1152 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1153 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1154 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1155
1156 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1157 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1158 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1159 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1160
1161 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1162 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1163 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1164 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1165
1166 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1167 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1168 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1169 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1170
1171 setOperationAction(ISD::FABS, MVT::f32, Legal);
1172 setOperationAction(ISD::FABS, MVT::f64, Expand);
1173
1174 setOperationAction(ISD::FNEG, MVT::f32, Legal);
1175 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1176 } else {
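    // No V5 hardware floating point: mark FP arithmetic, conversions, and
    // comparisons for expansion and point them at the Hexagon soft-float
    // runtime routines named below.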
1177
1178 // Expand fp<->uint.
1179 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
1180 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1181
1182 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
1183 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1184
1185 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1186 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1187
1188 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1189 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1190
1191 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1192 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1193
1194 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1195 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1196
1197 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1198 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1199
1200 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1201 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1202
1203 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1204 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1205
1206 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1207 setOperationAction(ISD::FADD, MVT::f64, Expand);
1208
1209 setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1210 setOperationAction(ISD::FADD, MVT::f32, Expand);
1211
1212 setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1213 setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
1214
1215 setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1216 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
1217
1218 setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1219 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
1220
1221 setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1222 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
1223
1224 setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1225 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
1226
1227 setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1228 setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
1229
1230 setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1231 setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
1232
1233 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1234 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
1235
1236 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1237 setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
1238
1239 setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1240 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
1241
1242 setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1243 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
1244
1245 setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1246 setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
1247
1248 setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1249 setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
1250
1251 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1252 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1253
1254 setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1255 setOperationAction(ISD::FMUL, MVT::f32, Expand);
1256
1257 setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1258 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
1259
1260 setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1261
1262 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1263 setOperationAction(ISD::FSUB, MVT::f64, Expand);
1264
1265 setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1266 setOperationAction(ISD::FSUB, MVT::f32, Expand);
1267
1268 setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1269 setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
1270
1271 setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1272 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
1273
1274 setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1275 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
1276
1277 setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1278 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
1279
1280 setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1281 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
1282
1283 setOperationAction(ISD::FABS, MVT::f32, Expand);
1284 setOperationAction(ISD::FABS, MVT::f64, Expand);
1285 setOperationAction(ISD::FNEG, MVT::f32, Expand);
1286 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1287 }
1288
1289 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1290 setOperationAction(ISD::SREM, MVT::i32, Expand);
1291
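  // Hexagon supports post-increment addressing for loads and stores of all
  // scalar integer sizes.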
1292 setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
1293 setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
1294 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
1295 setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
1296
1297 setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
1298 setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
1299 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
1300 setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
1301
1302 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1303
1304 // Turn FP extload into load/fextend.
1305 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
1306 // Hexagon has an i1 sign-extending load.
1307 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
1308 // Turn FP truncstore into trunc + store.
1309 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1310
1311 // Custom legalize GlobalAddress nodes into CONST32.
1312 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1313 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1314 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1315 // Truncate action?
1316 setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
1317
1318 // Hexagon doesn't have sext_inreg, replace it with shl/sra.
1319 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1320
1321 // Hexagon has no REM or DIVREM operations.
1322 setOperationAction(ISD::UREM, MVT::i32, Expand);
1323 setOperationAction(ISD::SREM, MVT::i32, Expand);
1324 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1325 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1326 setOperationAction(ISD::SREM, MVT::i64, Expand);
1327 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1328 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1329
1330 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1331
1332 // Lower SELECT_CC to SETCC and SELECT.
1333 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
1334 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
1335 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
1336
1337 if (Subtarget.hasV5TOps()) {
1338
1339 // We need to make the operation action of the SELECT node Custom,
1340 // so that we don't fall into the infinite
1341 // select -> setcc -> select_cc -> select legalization loop.
1342 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1343 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1344
1345 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
1346 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
1347
1348 } else {
1349
1350 // Hexagon has no select or setcc: expand to SELECT_CC.
1351 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1352 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1353 }
1354
1355 if (EmitJumpTables) {
1356 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1357 } else {
1358 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1359 }
1360 // Increase jump table cutover to 5; it was 4.
1361 setMinimumJumpTableEntries(5);
1362
1363 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
1364 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
1365 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1366 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
1367 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
1368
1369 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1370
1371 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1372 setOperationAction(ISD::FCOS, MVT::f64, Expand);
1373 setOperationAction(ISD::FREM, MVT::f64, Expand);
1374 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1375 setOperationAction(ISD::FCOS, MVT::f32, Expand);
1376 setOperationAction(ISD::FREM, MVT::f32, Expand);
1377 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1378 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1379
1380 // In V4, we have double-word add/sub with carry. The problem with
1381 // modelling these instructions is that they produce 2 results - Rdd and Px.
1382 // To model the update of Px, we would have to use Defs[p0..p3], which would
1383 // cause any predicate live range to spill. So, we pretend we don't
1384 // have these instructions.
1385 setOperationAction(ISD::ADDE, MVT::i8, Expand);
1386 setOperationAction(ISD::ADDE, MVT::i16, Expand);
1387 setOperationAction(ISD::ADDE, MVT::i32, Expand);
1388 setOperationAction(ISD::ADDE, MVT::i64, Expand);
1389 setOperationAction(ISD::SUBE, MVT::i8, Expand);
1390 setOperationAction(ISD::SUBE, MVT::i16, Expand);
1391 setOperationAction(ISD::SUBE, MVT::i32, Expand);
1392 setOperationAction(ISD::SUBE, MVT::i64, Expand);
1393 setOperationAction(ISD::ADDC, MVT::i8, Expand);
1394 setOperationAction(ISD::ADDC, MVT::i16, Expand);
1395 setOperationAction(ISD::ADDC, MVT::i32, Expand);
1396 setOperationAction(ISD::ADDC, MVT::i64, Expand);
1397 setOperationAction(ISD::SUBC, MVT::i8, Expand);
1398 setOperationAction(ISD::SUBC, MVT::i16, Expand);
1399 setOperationAction(ISD::SUBC, MVT::i32, Expand);
1400 setOperationAction(ISD::SUBC, MVT::i64, Expand);
1401
1402 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1403 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
1404 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
1405 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
1406 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1407 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
1408 setOperationAction(ISD::CTLZ, MVT::i32, Expand);
1409 setOperationAction(ISD::CTLZ, MVT::i64, Expand);
1410 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1411 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
1412 setOperationAction(ISD::ROTL, MVT::i32, Expand);
1413 setOperationAction(ISD::ROTR, MVT::i32, Expand);
1414 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1415 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1416 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1417 setOperationAction(ISD::FPOW, MVT::f64, Expand);
1418 setOperationAction(ISD::FPOW, MVT::f32, Expand);
1419
1420 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1421 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1422 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1423
1424 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1425 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1426
1427 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1428 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1429
1430 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1431
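  // The registers used for the exception pointer and selector differ between
  // the V2 subtarget and later architecture versions.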
1432 if (Subtarget.isSubtargetV2()) {
1433 setExceptionPointerRegister(Hexagon::R20);
1434 setExceptionSelectorRegister(Hexagon::R21);
1435 } else {
1436 setExceptionPointerRegister(Hexagon::R0);
1437 setExceptionSelectorRegister(Hexagon::R1);
1438 }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

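  // The minimum function alignment is given as a log2 value, so 2 requests
  // 4-byte function alignment.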
  setMinFunctionAlignment(2);

  // Needed for DYNAMIC_STACKALLOC expansion.
  const HexagonRegisterInfo *QRI =
      static_cast<const HexagonRegisterInfo *>(TM.getRegisterInfo());
  setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
  setSchedulingPreference(Sched::VLIW);
}

const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case HexagonISD::CONST32: return "HexagonISD::CONST32";
  case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
  case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
  case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
  case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
  case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
  case HexagonISD::BRICC: return "HexagonISD::BRICC";
  case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
  case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
  case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
  case HexagonISD::Hi: return "HexagonISD::Hi";
  case HexagonISD::Lo: return "HexagonISD::Lo";
  case HexagonISD::FTOI: return "HexagonISD::FTOI";
  case HexagonISD::ITOF: return "HexagonISD::ITOF";
  case HexagonISD::CALL: return "HexagonISD::CALL";
  case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
  case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
  case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
  case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
  }
}

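// Truncating i64 to i32 is free on Hexagon: a 64-bit value lives in a register
// pair, and the truncated result is simply the low 32-bit register of the pair.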
bool
HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  EVT MTy1 = EVT::getEVT(Ty1);
  EVT MTy2 = EVT::getEVT(Ty2);
  if (!MTy1.isSimple() || !MTy2.isSimple()) {
    return false;
  }
  return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isSimple() || !VT2.isSimple()) {
    return false;
  }
  return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
}

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  // Assuming the caller does not have either a signext or zeroext modifier, and
  // only one value is accepted, any reasonable truncation is allowed.
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  // FIXME: in principle up to 64-bit could be made safe, but it would be very
  // fragile at the moment: any support for multiple value returns would be
  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
  return Ty1->getPrimitiveSizeInBits() <= 32;
}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Mark function as containing a call to EH_RETURN.
  HexagonMachineFunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
  FuncInfo->setHasEHReturn();

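  // The return-address offset is passed to the EH_RETURN pseudo in R28, and
  // the handler address is stored at FP+4 (R30 is the frame pointer on
  // Hexagon).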
  unsigned OffsetReg = Hexagon::R28;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                  DAG.getRegister(Hexagon::R30, getPointerTy()),
                                  DAG.getIntPtrConstant(4));
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);

  // Not needed; we already use it as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);

  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
}

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
    default: llvm_unreachable("Should not custom lower this!");
    case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
    case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
      // Frame & Return address. Currently unimplemented.
    case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
    case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
    case ISD::GlobalTLSAddress:
      llvm_unreachable("TLS not implemented for Hexagon.");
    case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
    case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
    case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
    case ISD::VASTART: return LowerVASTART(Op, DAG);
    case ISD::BR_JT: return LowerBR_JT(Op, DAG);

    case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
    case ISD::SELECT: return Op;
    case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
    case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
// Hexagon Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *BB)
    const {
  switch (MI->getOpcode()) {
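    // ADJDYNALLOC pseudos are only recorded here; the dynamic stack adjustment
    // itself is expected to be fixed up later (e.g. by frame lowering) once
    // the final frame layout is known.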
    case Hexagon::ADJDYNALLOC: {
      MachineFunction *MF = BB->getParent();
      HexagonMachineFunctionInfo *FuncInfo =
          MF->getInfo<HexagonMachineFunctionInfo>();
      FuncInfo->addAllocaAdjustInst(MI);
      return BB;
    }
    default: llvm_unreachable("Unexpected instr type to insert");
  } // switch
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

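// For example (illustrative, not from the original source), inline asm such as
//   asm volatile("%0 = add(%1, %2)" : "=r"(Dst) : "r"(A), "r"(B));
// reaches this hook with Constraint == "r" and VT == MVT::i32, and is mapped
// to the 32-bit IntRegs register class below.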
std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                    MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r': // R0-R31
      switch (VT.SimpleTy) {
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
      case MVT::i32:
      case MVT::i16:
      case MVT::i8:
      case MVT::f32:
        return std::make_pair(0U, &Hexagon::IntRegsRegClass);
      case MVT::i64:
      case MVT::f64:
        return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
      }
    default:
      llvm_unreachable("Unknown asm register class");
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  return TM.getSubtarget<HexagonSubtarget>().hasV5TOps();
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
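/// For example, a mode like "r + 8" (no scaled index register, small immediate
/// offset) is accepted, while "r + 4*r" or any global-variable base is
/// rejected by the checks below.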
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Allow a sign-extended immediate base offset; the check below accepts
  // offsets in the open interval (-8192, 8191).
  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
    return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV) {
    return false;
  }

  int Scale = AM.Scale;
  if (Scale < 0) Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
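/// The accepted range below (-512..511) is exactly a signed 10-bit immediate.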
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee,
    CallingConv::ID CalleeCC,
    bool isVarArg,
    bool isCalleeStructRet,
    bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins,
    SelectionDAG &DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!isa<GlobalAddressSDNode>(Callee) &&
      !isa<ExternalSymbolSDNode>(Callee)) {
    return false;
  }

  // Do not optimize if the calling conventions do not match.
  if (!CCMatch)
    return false;

  // Do not tail call optimize vararg calls.
  if (isVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // In addition to the cases above, we would also have to disable tail call
  // optimization if the calling convention requires any outgoing argument to
  // be passed on the stack. We cannot check that here because at this point
  // that information is not available.
  return true;
}