//===- subzero/src/IceTargetLoweringMIPS32.h - MIPS32 lowering ---*- C++-*-===//
//
// The Subzero Code Generator
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// \brief Declares the TargetLoweringMIPS32 class, which implements the
/// TargetLowering interface for the MIPS 32-bit architecture.
///
//===----------------------------------------------------------------------===//

#ifndef SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H
#define SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H

#include "IceAssemblerMIPS32.h"
#include "IceDefs.h"
#include "IceInstMIPS32.h"
#include "IceRegistersMIPS32.h"
#include "IceTargetLowering.h"

namespace Ice {
namespace MIPS32 {

class TargetMIPS32 : public TargetLowering {
  TargetMIPS32() = delete;
  TargetMIPS32(const TargetMIPS32 &) = delete;
  TargetMIPS32 &operator=(const TargetMIPS32 &) = delete;

public:
  ~TargetMIPS32() override = default;

  static void staticInit(GlobalContext *Ctx);

  static bool shouldBePooled(const Constant *C) {
    if (auto *ConstDouble = llvm::dyn_cast<ConstantDouble>(C)) {
      return !Utils::isPositiveZero(ConstDouble->getValue());
    }
    if (auto *ConstFloat = llvm::dyn_cast<ConstantFloat>(C)) {
      return !Utils::isPositiveZero(ConstFloat->getValue());
    }
    return false;
  }

  static ::Ice::Type getPointerType() { return ::Ice::IceType_i32; }

  static std::unique_ptr<::Ice::TargetLowering> create(Cfg *Func) {
    return makeUnique<TargetMIPS32>(Func);
  }

  std::unique_ptr<::Ice::Assembler> createAssembler() const override {
    return makeUnique<MIPS32::AssemblerMIPS32>();
  }

  void initNodeForLowering(CfgNode *Node) override {
    Computations.forgetProducers();
    Computations.recordProducers(Node);
    Computations.dump(Func);
  }

  void translateOm1() override;
  void translateO2() override;
  bool doBranchOpt(Inst *Instr, const CfgNode *NextNode) override;
  void setImplicitRet(Variable *Ret) { ImplicitRet = Ret; }
  Variable *getImplicitRet() const { return ImplicitRet; }
  SizeT getNumRegisters() const override { return RegMIPS32::Reg_NUM; }
  Variable *getPhysicalRegister(RegNumT RegNum,
                                Type Ty = IceType_void) override;
  const char *getRegName(RegNumT RegNum, Type Ty) const override;
  SmallBitVector getRegisterSet(RegSetMask Include,
                                RegSetMask Exclude) const override;
  const SmallBitVector &
  getRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(RC < RC_Target);
    return TypeToRegisterSet[RC];
  }
  const SmallBitVector &
  getAllRegistersForVariable(const Variable *Var) const override {
    RegClass RC = Var->getRegClass();
    assert(RC < RC_Target);
    return TypeToRegisterSetUnfiltered[RC];
  }
  const SmallBitVector &getAliasesForRegister(RegNumT Reg) const override {
    return RegisterAliases[Reg];
  }
  bool hasFramePointer() const override { return UsesFramePointer; }
  void setHasFramePointer() override { UsesFramePointer = true; }
  RegNumT getStackReg() const override { return RegMIPS32::Reg_SP; }
  RegNumT getFrameReg() const override { return RegMIPS32::Reg_FP; }
  RegNumT getFrameOrStackReg() const override {
    return UsesFramePointer ? getFrameReg() : getStackReg();
  }
  RegNumT getReservedTmpReg() const { return RegMIPS32::Reg_AT; }
  size_t typeWidthInBytesOnStack(Type Ty) const override {
    // Round up to the next multiple of 4 bytes. In particular, i1, i8, and i16
    // are rounded up to 4 bytes.
    return (typeWidthInBytes(Ty) + 3) & ~3;
  }
  uint32_t getStackAlignment() const override;
  void reserveFixedAllocaArea(size_t Size, size_t Align) override {
    FixedAllocaSizeBytes = Size;
    assert(llvm::isPowerOf2_32(Align));
    FixedAllocaAlignBytes = Align;
    PrologEmitsFixedAllocas = true;
  }
  int32_t getFrameFixedAllocaOffset() const override {
    int32_t FixedAllocaOffset =
        Utils::applyAlignment(CurrentAllocaOffset, FixedAllocaAlignBytes);
    return FixedAllocaOffset - MaxOutArgsSizeBytes;
  }

  uint32_t maxOutArgsSizeBytes() const override { return MaxOutArgsSizeBytes; }

  uint32_t getFramePointerOffset(uint32_t CurrentOffset,
                                 uint32_t Size) const override {
    (void)Size;
    return CurrentOffset + MaxOutArgsSizeBytes;
  }

  bool shouldSplitToVariable64On32(Type Ty) const override {
    return Ty == IceType_i64;
  }

  bool shouldSplitToVariableVecOn32(Type Ty) const override {
    return isVectorType(Ty);
  }

  // TODO(ascull): what is the best minimum jump table size for MIPS?
  SizeT getMinJumpTableSize() const override { return 3; }
  void emitJumpTable(const Cfg *Func,
                     const InstJumpTable *JumpTable) const override;

  void emitVariable(const Variable *Var) const override;

  void emit(const ConstantInteger32 *C) const final {
    if (!BuildDefs::dump())
      return;
    Ostream &Str = Ctx->getStrEmit();
    Str << C->getValue();
  }
  void emit(const ConstantInteger64 *C) const final {
    (void)C;
    llvm::report_fatal_error("Not yet implemented");
  }
  void emit(const ConstantFloat *C) const final {
    (void)C;
    llvm::report_fatal_error("Not yet implemented");
  }
  void emit(const ConstantDouble *C) const final {
    (void)C;
    llvm::report_fatal_error("Not yet implemented");
  }
  void emit(const ConstantUndef *C) const final {
    (void)C;
    llvm::report_fatal_error("Not yet implemented");
  }
  void emit(const ConstantRelocatable *C) const final {
    (void)C;
    llvm::report_fatal_error("Not yet implemented");
  }

  // The following are helpers that insert lowered MIPS32 instructions with
  // minimal syntactic overhead, so that the lowering code can look as close to
  // assembly as practical.
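  //
  // For example (an illustrative sketch only, not code taken from a lowering
  // routine in this file), an i32 add could be emitted with these helpers so
  // that it reads almost like the resulting assembly; Src0R and Src1R are
  // hypothetical names for operands already legalized into registers:
  //
  //   Variable *T = makeReg(IceType_i32);
  //   _addu(T, Src0R, Src1R); // addu T, Src0R, Src1R
  //   _mov(Dest, T);          // move Dest, T
  //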
  void _add(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Add>(Dest, Src0, Src1);
  }

  void _addu(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Addu>(Dest, Src0, Src1);
  }

  void _and(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32And>(Dest, Src0, Src1);
  }

  void _andi(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Andi>(Dest, Src, Imm);
  }

  void _br(CfgNode *Target) { Context.insert<InstMIPS32Br>(Target); }

  void _br(CfgNode *Target, const InstMIPS32Label *Label) {
    Context.insert<InstMIPS32Br>(Target, Label);
  }

  void _br(CfgNode *TargetTrue, CfgNode *TargetFalse, Operand *Src0,
           Operand *Src1, CondMIPS32::Cond Condition) {
    Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1,
                                 Condition);
  }

  void _br(CfgNode *TargetTrue, CfgNode *TargetFalse, Operand *Src0,
           CondMIPS32::Cond Condition) {
    Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Condition);
  }

  void _br(CfgNode *TargetTrue, CfgNode *TargetFalse, Operand *Src0,
           Operand *Src1, const InstMIPS32Label *Label,
           CondMIPS32::Cond Condition) {
    Context.insert<InstMIPS32Br>(TargetTrue, TargetFalse, Src0, Src1, Label,
                                 Condition);
  }

  void _ret(Variable *RA, Variable *Src0 = nullptr) {
    Context.insert<InstMIPS32Ret>(RA, Src0);
  }

  void _abs_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Abs_d>(Dest, Src);
  }

  void _abs_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Abs_s>(Dest, Src);
  }

  void _addi(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Addi>(Dest, Src, Imm);
  }

  void _add_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Add_d>(Dest, Src0, Src1);
  }

  void _add_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Add_s>(Dest, Src0, Src1);
  }

  void _addiu(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Addiu>(Dest, Src, Imm);
  }

  void _addiu(Variable *Dest, Variable *Src0, Operand *Src1, RelocOp Reloc) {
    Context.insert<InstMIPS32Addiu>(Dest, Src0, Src1, Reloc);
  }

  void _c_eq_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_eq_d>(Src0, Src1);
  }
  void _c_eq_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_eq_s>(Src0, Src1);
  }

  void _c_ole_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ole_d>(Src0, Src1);
  }

  void _c_ole_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ole_s>(Src0, Src1);
  }

  void _c_olt_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_olt_d>(Src0, Src1);
  }

  void _c_olt_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_olt_s>(Src0, Src1);
  }

  void _c_ueq_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ueq_d>(Src0, Src1);
  }

  void _c_ueq_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ueq_s>(Src0, Src1);
  }

  void _c_ule_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ule_d>(Src0, Src1);
  }

  void _c_ule_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ule_s>(Src0, Src1);
  }

  void _c_ult_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ult_d>(Src0, Src1);
  }

  void _c_ult_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_ult_s>(Src0, Src1);
  }

  void _c_un_d(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_un_d>(Src0, Src1);
  }

  void _c_un_s(Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32C_un_s>(Src0, Src1);
  }

  void _clz(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Clz>(Dest, Src);
  }

  void _cvt_d_l(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_d_l>(Dest, Src);
  }

  void _cvt_d_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_d_s>(Dest, Src);
  }

  void _cvt_d_w(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_d_w>(Dest, Src);
  }

  void _cvt_s_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_s_d>(Dest, Src);
  }

  void _cvt_s_l(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_s_l>(Dest, Src);
  }

  void _cvt_s_w(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Cvt_s_w>(Dest, Src);
  }

  void _div(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Div>(Dest, Src0, Src1);
  }

  void _div_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Div_d>(Dest, Src0, Src1);
  }

  void _div_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Div_s>(Dest, Src0, Src1);
  }
  void _divu(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Divu>(Dest, Src0, Src1);
  }

  void _ldc1(Variable *Value, OperandMIPS32Mem *Mem, RelocOp Reloc = RO_No) {
    Context.insert<InstMIPS32Ldc1>(Value, Mem, Reloc);
  }

  void _ll(Variable *Value, OperandMIPS32Mem *Mem) {
    Context.insert<InstMIPS32Ll>(Value, Mem);
  }

  void _lw(Variable *Value, OperandMIPS32Mem *Mem) {
    Context.insert<InstMIPS32Lw>(Value, Mem);
  }

  void _lwc1(Variable *Value, OperandMIPS32Mem *Mem, RelocOp Reloc = RO_No) {
    Context.insert<InstMIPS32Lwc1>(Value, Mem, Reloc);
  }

  void _lui(Variable *Dest, Operand *Src, RelocOp Reloc = RO_No) {
    Context.insert<InstMIPS32Lui>(Dest, Src, Reloc);
  }

  void _mfc1(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Mfc1>(Dest, Src);
  }

  void _mfhi(Variable *Dest, Operand *Src) {
    Context.insert<InstMIPS32Mfhi>(Dest, Src);
  }

  void _mflo(Variable *Dest, Operand *Src) {
    Context.insert<InstMIPS32Mflo>(Dest, Src);
  }

  void _mov(Variable *Dest, Operand *Src0, Operand *Src1 = nullptr) {
    assert(Dest != nullptr);
    if (llvm::isa<ConstantRelocatable>(Src0)) {
      Context.insert<InstMIPS32La>(Dest, Src0);
    } else {
      auto *Instr = Context.insert<InstMIPS32Mov>(Dest, Src0, Src1);
      if (Instr->getDestHi() != nullptr) {
        // If DestHi is available, then Dest must be a Variable64On32. We add a
        // fake-def for Instr.DestHi here.
        assert(llvm::isa<Variable64On32>(Dest));
        Context.insert<InstFakeDef>(Instr->getDestHi());
      }
    }
  }

  void _mov_redefined(Variable *Dest, Operand *Src0, Operand *Src1 = nullptr) {
    if (llvm::isa<ConstantRelocatable>(Src0)) {
      Context.insert<InstMIPS32La>(Dest, Src0);
    } else {
      auto *Instr = Context.insert<InstMIPS32Mov>(Dest, Src0, Src1);
      Instr->setDestRedefined();
      if (Instr->getDestHi() != nullptr) {
        // If Instr is multi-dest, then Dest must be a Variable64On32. We add a
        // fake-def for Instr.DestHi here.
        assert(llvm::isa<Variable64On32>(Dest));
        Context.insert<InstFakeDef>(Instr->getDestHi());
      }
    }
  }

  void _mov_fp64_to_i64(Variable *Dest, Operand *Src, Int64Part Int64HiLo) {
    assert(Dest != nullptr);
    Context.insert<InstMIPS32MovFP64ToI64>(Dest, Src, Int64HiLo);
  }

  void _mov_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Mov_d>(Dest, Src);
  }

  void _mov_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Mov_s>(Dest, Src);
  }

  void _movf(Variable *Dest, Variable *Src0, Operand *FCC) {
    Context.insert<InstMIPS32Movf>(Dest, Src0, FCC)->setDestRedefined();
  }

  void _movn(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movn>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _movn_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movn_d>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _movn_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movn_s>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _movt(Variable *Dest, Variable *Src0, Operand *FCC) {
    Context.insert<InstMIPS32Movt>(Dest, Src0, FCC)->setDestRedefined();
  }

  void _movz(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movz>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _movz_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movz_d>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _movz_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Movz_s>(Dest, Src0, Src1)->setDestRedefined();
  }

  void _mtc1(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Mtc1>(Dest, Src);
  }

  void _mthi(Variable *Dest, Operand *Src) {
    Context.insert<InstMIPS32Mthi>(Dest, Src);
  }

  void _mtlo(Variable *Dest, Operand *Src) {
    Context.insert<InstMIPS32Mtlo>(Dest, Src);
  }

  void _mul(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Mul>(Dest, Src0, Src1);
  }

  void _mul_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Mul_d>(Dest, Src0, Src1);
  }

  void _mul_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Mul_s>(Dest, Src0, Src1);
  }

  void _mult(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Mult>(Dest, Src0, Src1);
  }
  void _multu(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Multu>(Dest, Src0, Src1);
  }

  // The canonical MIPS nop is encoded as "sll $zero, $zero, 0".
  void _nop() { Context.insert<InstMIPS32Sll>(getZero(), getZero(), 0); }

  void _nor(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Nor>(Dest, Src0, Src1);
  }

  void _not(Variable *Dest, Variable *Src0) {
    Context.insert<InstMIPS32Nor>(Dest, Src0, getZero());
  }

  void _or(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Or>(Dest, Src0, Src1);
  }

  void _ori(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Ori>(Dest, Src, Imm);
  }

  InstMIPS32Sc *_sc(Variable *Value, OperandMIPS32Mem *Mem) {
    return Context.insert<InstMIPS32Sc>(Value, Mem);
  }

  void _sdc1(Variable *Value, OperandMIPS32Mem *Mem) {
    Context.insert<InstMIPS32Sdc1>(Value, Mem);
  }

  void _sll(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Sll>(Dest, Src, Imm);
  }

  void _sllv(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Sllv>(Dest, Src0, Src1);
  }

  void _slt(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Slt>(Dest, Src0, Src1);
  }

  void _slti(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Slti>(Dest, Src, Imm);
  }

  void _sltiu(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Sltiu>(Dest, Src, Imm);
  }

  void _sltu(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Sltu>(Dest, Src0, Src1);
  }

  void _sqrt_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Sqrt_d>(Dest, Src);
  }

  void _sqrt_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Sqrt_s>(Dest, Src);
  }

  void _sra(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Sra>(Dest, Src, Imm);
  }

  void _srav(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Srav>(Dest, Src0, Src1);
  }

  void _srl(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Srl>(Dest, Src, Imm);
  }

  void _srlv(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Srlv>(Dest, Src0, Src1);
  }

  void _sub(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Sub>(Dest, Src0, Src1);
  }
  void _sub_d(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Sub_d>(Dest, Src0, Src1);
  }

  void _sub_s(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Sub_s>(Dest, Src0, Src1);
  }

  void _subu(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Subu>(Dest, Src0, Src1);
  }

  void _sw(Variable *Value, OperandMIPS32Mem *Mem) {
    Context.insert<InstMIPS32Sw>(Value, Mem);
  }

  void _swc1(Variable *Value, OperandMIPS32Mem *Mem) {
    Context.insert<InstMIPS32Swc1>(Value, Mem);
  }

  void _sync() { Context.insert<InstMIPS32Sync>(); }

  void _teq(Variable *Src0, Variable *Src1, uint32_t TrapCode) {
    Context.insert<InstMIPS32Teq>(Src0, Src1, TrapCode);
  }

  void _trunc_l_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Trunc_l_d>(Dest, Src);
  }

  void _trunc_l_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Trunc_l_s>(Dest, Src);
  }

  void _trunc_w_d(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Trunc_w_d>(Dest, Src);
  }

  void _trunc_w_s(Variable *Dest, Variable *Src) {
    Context.insert<InstMIPS32Trunc_w_s>(Dest, Src);
  }

  void _xor(Variable *Dest, Variable *Src0, Variable *Src1) {
    Context.insert<InstMIPS32Xor>(Dest, Src0, Src1);
  }

  void _xori(Variable *Dest, Variable *Src, uint32_t Imm) {
    Context.insert<InstMIPS32Xori>(Dest, Src, Imm);
  }

  void lowerArguments() override;

  class Sandboxer {
    Sandboxer() = delete;
    Sandboxer(const Sandboxer &) = delete;
    Sandboxer &operator=(const Sandboxer &) = delete;

  public:
    explicit Sandboxer(
        TargetMIPS32 *Target,
        InstBundleLock::Option BundleOption = InstBundleLock::Opt_None);
    ~Sandboxer();

    void addiu_sp(uint32_t StackOffset);
    void lw(Variable *Dest, OperandMIPS32Mem *Mem);
    void sw(Variable *Dest, OperandMIPS32Mem *Mem);
    void ll(Variable *Dest, OperandMIPS32Mem *Mem);
    void sc(Variable *Dest, OperandMIPS32Mem *Mem);
    void lwc1(Variable *Dest, OperandMIPS32Mem *Mem, RelocOp Reloc = RO_No);
    void ldc1(Variable *Dest, OperandMIPS32Mem *Mem, RelocOp Reloc = RO_No);
    void ret(Variable *RetAddr, Variable *RetValue);
    void reset_sp(Variable *Src);
    InstMIPS32Call *jal(Variable *ReturnReg, Operand *CallTarget);

  private:
    TargetMIPS32 *const Target;
    const InstBundleLock::Option BundleOption;
    std::unique_ptr<AutoBundle> Bundler;

    void createAutoBundle();
  };

  const bool NeedSandboxing;

  /// Make a pass through the SortedSpilledVariables and actually assign stack
  /// slots. SpillAreaPaddingBytes takes into account stack alignment padding.
  /// The SpillArea starts after that amount of padding. This matches the
  /// scheme in getVarStackSlotParams, where there may be a separate multi-block
  /// global var spill area and a local var spill area.
  void assignVarStackSlots(VarList &SortedSpilledVariables,
                           size_t SpillAreaPaddingBytes,
                           size_t SpillAreaSizeBytes,
                           size_t GlobalsAndSubsequentPaddingSize);

  /// Operand legalization helpers. To deal with address mode constraints,
  /// the helpers will create a new Operand and emit instructions that
  /// guarantee that the Operand kind is one of those indicated by the
  /// LegalMask (a bitmask of allowed kinds). If the input Operand is known
  /// to already meet the constraints, it may be simply returned as the result,
  /// without creating any new instructions or operands.
  enum OperandLegalization {
    Legal_None = 0,
    Legal_Reg = 1 << 0, // physical register, not stack location
    Legal_Imm = 1 << 1,
    Legal_Mem = 1 << 2,
    Legal_Rematerializable = 1 << 3,
    Legal_Default = ~Legal_None
  };
  typedef uint32_t LegalMask;
  Operand *legalize(Operand *From, LegalMask Allowed = Legal_Default,
                    RegNumT RegNum = RegNumT());

  Variable *legalizeToVar(Operand *From, RegNumT RegNum = RegNumT());

  Variable *legalizeToReg(Operand *From, RegNumT RegNum = RegNumT());

  Variable *makeReg(Type Ty, RegNumT RegNum = RegNumT());

  Variable *getZero() {
    auto *Zero = makeReg(IceType_i32, RegMIPS32::Reg_ZERO);
    Context.insert<InstFakeDef>(Zero);
    return Zero;
  }

  Variable *I32Reg(RegNumT RegNum = RegNumT()) {
    return makeReg(IceType_i32, RegNum);
  }

  Variable *F32Reg(RegNumT RegNum = RegNumT()) {
    return makeReg(IceType_f32, RegNum);
  }

  Variable *F64Reg(RegNumT RegNum = RegNumT()) {
    return makeReg(IceType_f64, RegNum);
  }

  static Type stackSlotType();
  Variable *copyToReg(Operand *Src, RegNumT RegNum = RegNumT());

  void unsetIfNonLeafFunc();

  // Iterates over the CFG and determines the maximum number of bytes needed
  // for outgoing stack arguments. This information is later used during
  // addProlog() to pre-allocate the outargs area.
  void findMaxStackOutArgsSize();

  void postLowerLegalization();

  void addProlog(CfgNode *Node) override;
  void addEpilog(CfgNode *Node) override;

  // Ensure that a 64-bit Variable has been split into two 32-bit Variables,
  // creating them if necessary. This is needed for all I64 operations.
  void split64(Variable *Var);
  Operand *loOperand(Operand *Operand);
  Operand *hiOperand(Operand *Operand);
  Operand *getOperandAtIndex(Operand *Operand, Type BaseType, uint32_t Index);

  void finishArgumentLowering(Variable *Arg, bool PartialOnStack,
                              Variable *FramePtr, size_t BasicFrameOffset,
                              size_t *InArgsSizeBytes);

  Operand *legalizeUndef(Operand *From, RegNumT RegNum = RegNumT());

  /// Helper class that understands the calling convention and register
  /// assignments as per the MIPS O32 ABI.
  class CallingConv {
    CallingConv(const CallingConv &) = delete;
    CallingConv &operator=(const CallingConv &) = delete;

  public:
    CallingConv();
    ~CallingConv() = default;

    /// argInReg returns true if there is a register available for the
    /// requested type, and false otherwise. If it returns true, Reg is set to
    /// the appropriate register number. Note that, when Ty == IceType_i64,
    /// Reg will be an I64 register pair.
    bool argInReg(Type Ty, uint32_t ArgNo, RegNumT *Reg);
    void discardReg(RegNumT Reg) { GPRegsUsed |= RegisterAliases[Reg]; }

  private:
    // argInGPR is used to find whether any GPR register is available for an
    // argument of type Ty.
    bool argInGPR(Type Ty, RegNumT *Reg);
    /// argInVFP is to floating-point/vector types what argInGPR is for integer
    /// types.
    bool argInVFP(Type Ty, RegNumT *Reg);
    inline void discardNextGPRAndItsAliases(CfgVector<RegNumT> *Regs);
    inline void alignGPR(CfgVector<RegNumT> *Regs);
    void discardUnavailableGPRsAndTheirAliases(CfgVector<RegNumT> *Regs);
    SmallBitVector GPRegsUsed;
    CfgVector<RegNumT> GPRArgs;
    CfgVector<RegNumT> I64Args;

    void discardUnavailableVFPRegsAndTheirAliases(CfgVector<RegNumT> *Regs);
    SmallBitVector VFPRegsUsed;
    CfgVector<RegNumT> FP32Args;
    CfgVector<RegNumT> FP64Args;
    // UseFPRegs is a flag indicating whether FP registers can be used.
    bool UseFPRegs = false;
  };

protected:
  explicit TargetMIPS32(Cfg *Func);

  void postLower() override;

  void lowerAlloca(const InstAlloca *Instr) override;
  void lowerArithmetic(const InstArithmetic *Instr) override;
  void lowerInt64Arithmetic(const InstArithmetic *Instr, Variable *Dest,
                            Operand *Src0, Operand *Src1);
  void lowerAssign(const InstAssign *Instr) override;
  void lowerBr(const InstBr *Instr) override;
  void lowerBreakpoint(const InstBreakpoint *Instr) override;
  void lowerCall(const InstCall *Instr) override;
  void lowerCast(const InstCast *Instr) override;
  void lowerExtractElement(const InstExtractElement *Instr) override;
  void lowerFcmp(const InstFcmp *Instr) override;
  void lowerIcmp(const InstIcmp *Instr) override;
  void lower64Icmp(const InstIcmp *Instr);
  void createArithInst(Intrinsics::AtomicRMWOperation Operation,
                       Variable *Dest, Variable *Src0, Variable *Src1);
  void lowerIntrinsic(const InstIntrinsic *Instr) override;
  void lowerInsertElement(const InstInsertElement *Instr) override;
  void lowerLoad(const InstLoad *Instr) override;
  void lowerPhi(const InstPhi *Instr) override;
  void lowerRet(const InstRet *Instr) override;
  void lowerSelect(const InstSelect *Instr) override;
  void lowerShuffleVector(const InstShuffleVector *Instr) override;
  void lowerStore(const InstStore *Instr) override;
  void lowerSwitch(const InstSwitch *Instr) override;
  void lowerUnreachable(const InstUnreachable *Instr) override;
  void lowerOther(const Inst *Instr) override;
  void prelowerPhis() override;
  uint32_t getCallStackArgumentsSizeBytes(const InstCall *Instr) override;
  void genTargetHelperCallFor(Inst *Instr) override;
  void doAddressOptLoad() override;
  void doAddressOptStore() override;

  OperandMIPS32Mem *formMemoryOperand(Operand *Ptr, Type Ty);

  class PostLoweringLegalizer {
    PostLoweringLegalizer() = delete;
    PostLoweringLegalizer(const PostLoweringLegalizer &) = delete;
    PostLoweringLegalizer &operator=(const PostLoweringLegalizer &) = delete;

  public:
    explicit PostLoweringLegalizer(TargetMIPS32 *Target)
        : Target(Target), StackOrFrameReg(Target->getPhysicalRegister(
                              Target->getFrameOrStackReg())) {}

    /// Legalizes Mem. If Mem.Base is a rematerializable variable,
    /// Mem.Offset is fixed up.
    OperandMIPS32Mem *legalizeMemOperand(OperandMIPS32Mem *Mem);

    /// Legalizes Imm if its value does not fit in a 16-bit immediate.
    Variable *legalizeImmediate(int32_t Imm);

    /// Legalizes Mov if its Source (or Destination) is a spilled Variable, or
    /// if its Source is a Rematerializable variable (this form is used in lieu
    /// of lea, which is not available in MIPS).
    ///
    /// Moves to memory become store instructions, and moves from memory,
    /// loads.
    void legalizeMov(InstMIPS32Mov *Mov);
    void legalizeMovFp(InstMIPS32MovFP64ToI64 *MovInstr);

  private:
    /// Creates a new Base register centered around [Base, +/- Offset].
    Variable *newBaseRegister(Variable *Base, int32_t Offset,
                              RegNumT ScratchRegNum);

    TargetMIPS32 *const Target;
    Variable *const StackOrFrameReg;
  };

  bool UsesFramePointer = false;
  bool NeedsStackAlignment = false;
  bool MaybeLeafFunc = true;
  bool PrologEmitsFixedAllocas = false;
  bool VariableAllocaUsed = false;
  uint32_t MaxOutArgsSizeBytes = 0;
  uint32_t TotalStackSizeBytes = 0;
  uint32_t CurrentAllocaOffset = 0;
  uint32_t VariableAllocaAlignBytes = 0;
  static SmallBitVector TypeToRegisterSet[RCMIPS32_NUM];
  static SmallBitVector TypeToRegisterSetUnfiltered[RCMIPS32_NUM];
  static SmallBitVector RegisterAliases[RegMIPS32::Reg_NUM];
  SmallBitVector RegsUsed;
  VarList PhysicalRegisters[IceType_NUM];
  VarList PreservedGPRs;
  static constexpr uint32_t CHAR_BITS = 8;
  static constexpr uint32_t INT32_BITS = 32;
  size_t SpillAreaSizeBytes = 0;
  size_t FixedAllocaSizeBytes = 0;
  size_t FixedAllocaAlignBytes = 0;
  size_t PreservedRegsSizeBytes = 0;
  Variable *ImplicitRet = nullptr; /// Implicit return

private:
  ENABLE_MAKE_UNIQUE;

  OperandMIPS32Mem *formAddressingMode(Type Ty, Cfg *Func, const Inst *LdSt,
                                       Operand *Base);

  class ComputationTracker {
  public:
    ComputationTracker() = default;
    ~ComputationTracker() = default;

    void forgetProducers() { KnownComputations.clear(); }
    void recordProducers(CfgNode *Node);

    const Inst *getProducerOf(const Operand *Opnd) const {
      auto *Var = llvm::dyn_cast<Variable>(Opnd);
      if (Var == nullptr) {
        return nullptr;
      }

      auto Iter = KnownComputations.find(Var->getIndex());
      if (Iter == KnownComputations.end()) {
        return nullptr;
      }

      return Iter->second.Instr;
    }

    void dump(const Cfg *Func) const {
      if (!BuildDefs::dump() || !Func->isVerbose(IceV_Folding))
        return;
      OstreamLocker L(Func->getContext());
      Ostream &Str = Func->getContext()->getStrDump();
      Str << "foldable producer:\n";
      for (const auto &Computation : KnownComputations) {
        Str << " ";
        Computation.second.Instr->dump(Func);
        Str << "\n";
      }
      Str << "\n";
    }

  private:
    class ComputationEntry {
    public:
      ComputationEntry(Inst *I, Type Ty) : Instr(I), ComputationType(Ty) {}
      Inst *const Instr;
      // Boolean folding is disabled for variables whose live range is
      // multi-block. We conservatively initialize IsLiveOut to true, and set
      // it to false once we find the end of the live range for the variable
      // defined by this instruction. If liveness analysis is not performed
      // (e.g., in Om1 mode), IsLiveOut will never be set to false, and
      // folding will be disabled.
      bool IsLiveOut = true;
      int32_t NumUses = 0;
      Type ComputationType;
    };

    // ComputationMap maps a Variable number to a payload identifying which
    // instruction defined it.
    using ComputationMap = CfgUnorderedMap<SizeT, ComputationEntry>;
    ComputationMap KnownComputations;
  };

  ComputationTracker Computations;
};

class TargetDataMIPS32 final : public TargetDataLowering {
  TargetDataMIPS32() = delete;
  TargetDataMIPS32(const TargetDataMIPS32 &) = delete;
  TargetDataMIPS32 &operator=(const TargetDataMIPS32 &) = delete;

public:
  static std::unique_ptr<TargetDataLowering> create(GlobalContext *Ctx) {
    return std::unique_ptr<TargetDataLowering>(new TargetDataMIPS32(Ctx));
  }

  void lowerGlobals(const VariableDeclarationList &Vars,
                    const std::string &SectionSuffix) override;
  void lowerConstants() override;
  void lowerJumpTables() override;
  void emitTargetRODataSections() override;

protected:
  explicit TargetDataMIPS32(GlobalContext *Ctx);

private:
  ~TargetDataMIPS32() override = default;
};

class TargetHeaderMIPS32 final : public TargetHeaderLowering {
  TargetHeaderMIPS32() = delete;
  TargetHeaderMIPS32(const TargetHeaderMIPS32 &) = delete;
  TargetHeaderMIPS32 &operator=(const TargetHeaderMIPS32 &) = delete;

public:
  static std::unique_ptr<TargetHeaderLowering> create(GlobalContext *Ctx) {
    return std::unique_ptr<TargetHeaderLowering>(new TargetHeaderMIPS32(Ctx));
  }

  void lower() override;

protected:
  explicit TargetHeaderMIPS32(GlobalContext *Ctx);

private:
  ~TargetHeaderMIPS32() = default;
};

// This structure (with some minor modifications) is copied from the LLVM file
// llvm/lib/Target/Mips/MCTargetDesc/MipsABIFlagsSection.h.
struct MipsABIFlagsSection {

  // Version of the MIPS.abiflags section.
  enum AFL_VERSION {
    AFL_VERSION_V0 = 0 // Version 0
  };

  // The level of the ISA: 1-5, 32, 64.
  enum AFL_ISA_LEVEL {
    AFL_ISA_LEVEL_NONE = 0,
    AFL_ISA_LEVEL_MIPS32 = 32, // MIPS32
  };

  // The revision of the ISA: 0 for MIPS V and below, 1-n otherwise.
  enum AFL_ISA_REV {
    AFL_ISA_REV_NONE = 0,
    AFL_ISA_REV_R1 = 1, // R1
  };

  // Values for the xxx_size bytes of an ABI flags structure.
  enum AFL_REG {
    AFL_REG_NONE = 0x00, // No registers.
    AFL_REG_32 = 0x01, // 32-bit registers.
    AFL_REG_64 = 0x02, // 64-bit registers.
    AFL_REG_128 = 0x03 // 128-bit registers.
  };

  // Values for the fp_abi word of an ABI flags structure.
  enum AFL_FP_ABI {
    AFL_FP_ANY = 0,
    AFL_FP_DOUBLE = 1,
    AFL_FP_XX = 5,
    AFL_FP_64 = 6,
    AFL_FP_64A = 7
  };

  // Values for the isa_ext word of an ABI flags structure.
  enum AFL_EXT {
    AFL_EXT_NONE = 0,
    AFL_EXT_XLR = 1, // RMI Xlr instruction.
    AFL_EXT_OCTEON2 = 2, // Cavium Networks Octeon2.
    AFL_EXT_OCTEONP = 3, // Cavium Networks OcteonP.
    AFL_EXT_LOONGSON_3A = 4, // Loongson 3A.
    AFL_EXT_OCTEON = 5, // Cavium Networks Octeon.
    AFL_EXT_5900 = 6, // MIPS R5900 instruction.
    AFL_EXT_4650 = 7, // MIPS R4650 instruction.
    AFL_EXT_4010 = 8, // LSI R4010 instruction.
    AFL_EXT_4100 = 9, // NEC VR4100 instruction.
    AFL_EXT_3900 = 10, // Toshiba R3900 instruction.
    AFL_EXT_10000 = 11, // MIPS R10000 instruction.
    AFL_EXT_SB1 = 12, // Broadcom SB-1 instruction.
    AFL_EXT_4111 = 13, // NEC VR4111/VR4181 instruction.
    AFL_EXT_4120 = 14, // NEC VR4120 instruction.
    AFL_EXT_5400 = 15, // NEC VR5400 instruction.
    AFL_EXT_5500 = 16, // NEC VR5500 instruction.
    AFL_EXT_LOONGSON_2E = 17, // ST Microelectronics Loongson 2E.
    AFL_EXT_LOONGSON_2F = 18 // ST Microelectronics Loongson 2F.
  };

  // Masks for the ases word of an ABI flags structure.
  enum AFL_ASE {
    AFL_ASE_NONE = 0x00000000,
    AFL_ASE_DSP = 0x00000001, // DSP ASE.
    AFL_ASE_DSPR2 = 0x00000002, // DSP R2 ASE.
    AFL_ASE_EVA = 0x00000004, // Enhanced VA Scheme.
    AFL_ASE_MCU = 0x00000008, // MCU (MicroController) ASE.
    AFL_ASE_MDMX = 0x00000010, // MDMX ASE.
    AFL_ASE_MIPS3D = 0x00000020, // MIPS-3D ASE.
    AFL_ASE_MT = 0x00000040, // MT ASE.
    AFL_ASE_SMARTMIPS = 0x00000080, // SmartMIPS ASE.
    AFL_ASE_VIRT = 0x00000100, // VZ ASE.
    AFL_ASE_MSA = 0x00000200, // MSA ASE.
    AFL_ASE_MIPS16 = 0x00000400, // MIPS16 ASE.
    AFL_ASE_MICROMIPS = 0x00000800, // MICROMIPS ASE.
    AFL_ASE_XPA = 0x00001000 // XPA ASE.
  };

  enum AFL_FLAGS1 { AFL_FLAGS1_NONE = 0, AFL_FLAGS1_ODDSPREG = 1 };

  enum AFL_FLAGS2 { AFL_FLAGS2_NONE = 0 };

  uint16_t Version = AFL_VERSION_V0;
  uint8_t ISALevel = AFL_ISA_LEVEL_MIPS32;
  uint8_t ISARevision = AFL_ISA_REV_R1;
  uint8_t GPRSize = AFL_REG_32;
  uint8_t CPR1Size = AFL_REG_32;
  uint8_t CPR2Size = AFL_REG_NONE;
  uint8_t FPABI = AFL_FP_DOUBLE;
  uint32_t Extension = AFL_EXT_NONE;
  uint32_t ASE = AFL_ASE_NONE;
  uint32_t Flags1 = AFL_FLAGS1_ODDSPREG;
  uint32_t Flags2 = AFL_FLAGS2_NONE;

  MipsABIFlagsSection() = default;
};

} // end of namespace MIPS32
} // end of namespace Ice

#endif // SUBZERO_SRC_ICETARGETLOWERINGMIPS32_H