//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <utility>

namespace llvm {

namespace HexagonISD {

enum NodeType : unsigned {
  OP_BEGIN = ISD::BUILTIN_OP_END,

  CONST32 = OP_BEGIN,
  CONST32_GP,  // For marking data present in GP.
  ADDC,        // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
  SUBC,        // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
  ALLOCA,

  AT_GOT,      // Index in GOT.
  AT_PCREL,    // Offset relative to PC.

  CALL,        // Function call.
  CALLnr,      // Function call that does not return.
  CALLR,

  RET_FLAG,    // Return with a flag operand.
  BARRIER,     // Memory barrier.
  JT,          // Jump table.
  CP,          // Constant pool.

  COMBINE,
  VSPLAT,      // Generic splat, selection depends on argument/return
               // types.
  VASL,
  VASR,
  VLSR,

  TSTBIT,
  INSERT,
  EXTRACTU,
  VEXTRACTW,
  VINSERTW0,
  VROR,
  TC_RETURN,
  EH_RETURN,
  DCFETCH,
  READCYCLE,
  D2P,         // Convert 8-byte value to 8-bit predicate register. [*]
  P2D,         // Convert 8-bit predicate register to 8-byte value. [*]
  V2Q,         // Convert HVX vector to a vector predicate reg. [*]
  Q2V,         // Convert vector predicate to an HVX vector. [*]
               // [*] The equivalence is defined as "Q <=> (V != 0)",
               //     where the != operation compares bytes.
               // Note: V != 0 is implemented as V >u 0.
  QCAT,
  QTRUE,
  QFALSE,
  VZERO,
  VSPLATW,     // HVX splat of a 32-bit word with an arbitrary result type.
  TYPECAST,    // No-op that's used to convert between different legal
               // types in a register.
  VALIGN,      // Align two vectors (in Op0, Op1) to one that would have
               // been loaded from address in Op2.
  VALIGNADDR,  // Align vector address: Op0 & -Op1, except when it is
               // an address in a vector load, then it's a no-op.
  OP_END
};

} // end namespace HexagonISD
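
// Minimal usage sketch (illustration only, not part of this interface): the
// ADDC/SUBC convention above is what a lowering routine would follow when
// building one of these nodes, assuming i32 operands X, Y and an i1 carry Cin:
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i1);
//   SDValue Add = DAG.getNode(HexagonISD::ADDC, dl, VTs, {X, Y, Cin});
//   // Add.getValue(0) is X+Y, Add.getValue(1) is the carry out.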

class HexagonSubtarget;

class HexagonTargetLowering : public TargetLowering {
  int VarArgsFrameOffset;   // Frame offset to start of varargs area.
  const HexagonTargetMachine &HTM;
  const HexagonSubtarget &Subtarget;

  bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
      const;
  void promoteLdStType(MVT VT, MVT PromotedLdStVT);

public:
  explicit HexagonTargetLowering(const TargetMachine &TM,
                                 const HexagonSubtarget &ST);

  bool isHVXVectorType(MVT Ty) const;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
      CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
      bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isCheapToSpeculateCttz() const override { return true; }
  bool isCheapToSpeculateCtlz() const override { return true; }
  bool isCtlzFast() const override { return true; }

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// Return true if an FMA operation is faster than a pair of mul and add
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true (and FMAs are legal), otherwise fmuladd is
  /// expanded to mul + add.
  bool isFMAFasterThanFMulAndFAdd(EVT) const override;

  // Should we expand the build vector with shuffles?
  bool shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const override;

  bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
      const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const;
  SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const;
  SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const;
  SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
      unsigned ReturnReg, unsigned char OperandFlags) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
      SmallVectorImpl<SDValue> &InVals) const override;
  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
      CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals,
      const SmallVectorImpl<SDValue> &OutVals,
      SDValue Callee) const;

  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    return Hexagon::R1;
  }

  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

  EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                         EVT VT) const override {
    if (!VT.isVector())
      return MVT::i1;
    else
      return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
  }
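  // For example, a setcc on MVT::i32 yields an i1 result, while a setcc on
  // MVT::v8i16 yields MVT::v8i1 (one predicate bit per vector element).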

  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                  SDValue &Base, SDValue &Offset,
                                  ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  // Intrinsics
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type.
  /// TODO: Handle pre/postinc as well.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;
  /// Return true if folding a constant offset with the given GlobalAddress
  /// is legal. It is frequently not legal in PIC relocation models.
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is legal
  /// icmp immediate, that is the target has icmp instructions which can
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
      unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
      MachineFunction &MF) const override;

  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
      unsigned Align, bool *Fast) const override;

  /// Returns relocation base for the given PIC jumptable.
  SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
      const override;

  // Handling of atomic RMW instructions.
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
      AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
      Value *Addr, AtomicOrdering Ord) const override;
  AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
    return AtomicExpansionKind::LLSC;
  }
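  // With LLSC selected, the generic AtomicExpand pass (not this class)
  // rewrites an atomicrmw into a load-locked/store-conditional retry loop
  // built from the two emit* hooks above; conceptually:
  //
  //   retry:
  //     Old = emitLoadLinked(Addr, Ord)            // locked load
  //     New = Old <op> Val
  //     Ok  = emitStoreConditional(New, Addr, Ord) // succeeds if still locked
  //     if (!Ok) goto retry
  //
  // (Illustrative pseudocode only; the exact IR is produced by AtomicExpand.)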

private:
  void initializeHVXLowering();
  std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;

  bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                               SelectionDAG &DAG,
                               MutableArrayRef<ConstantInt*> Consts) const;
  SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                        SelectionDAG &DAG) const;
  SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                        MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                       const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
  SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                          SelectionDAG &DAG) const;
  SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                            SelectionDAG &DAG) const;
  SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;

  bool isUndef(SDValue Op) const {
    if (Op.isMachineOpcode())
      return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
    return Op.getOpcode() == ISD::UNDEF;
  }
  SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                   ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
    SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
    return SDValue(N, 0);
  }
  SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

  using VectorPair = std::pair<SDValue, SDValue>;
  using TypePair = std::pair<MVT, MVT>;

  SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                 const SDLoc &dl, SelectionDAG &DAG) const;

  MVT ty(SDValue Op) const {
    return Op.getValueType().getSimpleVT();
  }
  TypePair ty(const VectorPair &Ops) const {
    return { Ops.first.getValueType().getSimpleVT(),
             Ops.second.getValueType().getSimpleVT() };
  }
  MVT tyScalar(MVT Ty) const {
    if (!Ty.isVector())
      return Ty;
    return MVT::getIntegerVT(Ty.getSizeInBits());
  }
  MVT tyVector(MVT Ty, MVT ElemTy) const {
    if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
      return Ty;
    unsigned TyWidth = Ty.getSizeInBits();
    unsigned ElemWidth = ElemTy.getSizeInBits();
    assert((TyWidth % ElemWidth) == 0);
    return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
  }
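  // For example, tyScalar(MVT::v8i8) is MVT::i64 (the integer type covering
  // the whole vector), and tyVector(MVT::v2i32, MVT::i16) is MVT::v4i16
  // (same total width, reinterpreted with i16 elements).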

  MVT typeJoin(const TypePair &Tys) const;
  TypePair typeSplit(MVT Ty) const;
  MVT typeExtElem(MVT VecTy, unsigned Factor) const;
  MVT typeTruncElem(MVT VecTy, unsigned Factor) const;
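
  // opSplit/opJoin are the value-level counterparts of typeSplit/typeJoin:
  // they are used (primarily for HVX vector pairs) to break a wide vector
  // into its low and high halves and to reassemble the two halves again.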
  SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                 SelectionDAG &DAG) const;
  VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

  bool isHvxSingleTy(MVT Ty) const;
  bool isHvxPairTy(MVT Ty) const;
  SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                             SelectionDAG &DAG) const;
  SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
  SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                         ArrayRef<int> Mask, SelectionDAG &DAG) const;

  SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                            MVT VecTy, SelectionDAG &DAG) const;
  SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                             MVT VecTy, SelectionDAG &DAG) const;
  SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                              unsigned BitBytes, bool ZeroFill,
                              SelectionDAG &DAG) const;
  SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                               MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                              const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                               const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                 MVT ResTy, SelectionDAG &DAG) const;
  SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
  SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                              bool ZeroExt, SelectionDAG &DAG) const;

  SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;

  SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;

  std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
      const override;

  bool isHvxOperation(SDValue Op) const;
  SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H