// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
//
// This is forked from Dart revision df52deea9f25690eb8b66c5995da92b70f7ac1fe
// Please update the (git) revision if we merge changes from Dart.
// https://code.google.com/p/dart/wiki/GettingTheSource

#ifndef VM_ASSEMBLER_ARM_H_
#define VM_ASSEMBLER_ARM_H_

#ifndef VM_ASSEMBLER_H_
#error Do not include assembler_arm.h directly; use assembler.h instead.
#endif

#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/constants_arm.h"
#include "vm/cpu.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/simulator.h"

namespace dart {

// Forward declarations.
class RuntimeEntry;
class StubEntry;

#if 0
// Moved to ARM32::AssemblerARM32 as needed
// Instruction encoding bits.
enum {
  H = 1 << 5,   // halfword (or byte)
  L = 1 << 20,  // load (or store)
  S = 1 << 20,  // set condition code (or leave unchanged)
  W = 1 << 21,  // writeback base register (or leave unchanged)
  A = 1 << 21,  // accumulate in multiply instruction (or not)
  B = 1 << 22,  // unsigned byte (or word)
  D = 1 << 22,  // high/lo bit of start of s/d register range
  N = 1 << 22,  // long (or short)
  U = 1 << 23,  // positive (or negative) offset/index
  P = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I = 1 << 25,  // immediate shifter operand (or not)

  B0 = 1,
  B1 = 1 << 1,
  B2 = 1 << 2,
  B3 = 1 << 3,
  B4 = 1 << 4,
  B5 = 1 << 5,
  B6 = 1 << 6,
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B11 = 1 << 11,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,
};
#endif

class Label : public ValueObject {
 public:
  Label() : position_(0) { }

  ~Label() {
    // Assert if label is being destroyed with unresolved branches pending.
    ASSERT(!IsLinked());
  }

  // Returns the position for bound and linked labels. Cannot be used
  // for unused labels.
  intptr_t Position() const {
    ASSERT(!IsUnused());
    return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
  }

  bool IsBound() const { return position_ < 0; }
  bool IsUnused() const { return position_ == 0; }
  bool IsLinked() const { return position_ > 0; }

 private:
  intptr_t position_;

  void Reinitialize() {
    position_ = 0;
  }

  void BindTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = -position - kWordSize;
    ASSERT(IsBound());
  }

  void LinkTo(intptr_t position) {
    ASSERT(!IsBound());
    position_ = position + kWordSize;
    ASSERT(IsLinked());
  }

  friend class Assembler;
  DISALLOW_COPY_AND_ASSIGN(Label);
};
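
// A worked example of the position encoding above (assuming the 32-bit
// kWordSize of 4): a fresh label has position_ == 0 (unused). Linking it at
// buffer offset 12 stores 12 + 4 == 16 (positive => linked), and Position()
// recovers 16 - 4 == 12. Binding it at offset 12 stores -12 - 4 == -16
// (negative => bound), and Position() recovers -(-16) - 4 == 12. The
// kWordSize bias keeps offset 0 distinguishable from the unused state.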

// Encodes Addressing Mode 1 - Data-processing operands.
class Operand : public ValueObject {
 public:
  // Data-processing operands - Uninitialized.
  Operand() : type_(-1), encoding_(-1) { }

  // Data-processing operands - Copy constructor.
  Operand(const Operand& other)
      : ValueObject(), type_(other.type_), encoding_(other.encoding_) { }

  // Data-processing operands - Assignment operator.
  Operand& operator=(const Operand& other) {
    type_ = other.type_;
    encoding_ = other.encoding_;
    return *this;
  }

#if 0
  // Moved to encodeRotatedImm8() in IceAssemblerARM32.cpp
  // Data-processing operands - Immediate.
  explicit Operand(uint32_t immediate) {
    ASSERT(immediate < (1 << kImmed8Bits));
    type_ = 1;
    encoding_ = immediate;
  }

  // Moved to decodeOperand() and encodeRotatedImm8() in IceAssemblerARM32.cpp
  // Data-processing operands - Rotated immediate.
  Operand(uint32_t rotate, uint32_t immed8) {
    ASSERT((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
    type_ = 1;
    encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
  }

  // Moved to decodeOperand() in IceAssemblerARM32.cpp
  // Data-processing operands - Register.
  explicit Operand(Register rm) {
    type_ = 0;
    encoding_ = static_cast<uint32_t>(rm);
  }

  // Moved to encodeShiftRotateImm5() in IceAssemblerARM32.cpp
  // Data-processing operands - Logical shift/rotate by immediate.
  Operand(Register rm, Shift shift, uint32_t shift_imm) {
    ASSERT(shift_imm < (1 << kShiftImmBits));
    type_ = 0;
    encoding_ = shift_imm << kShiftImmShift |
                static_cast<uint32_t>(shift) << kShiftShift |
                static_cast<uint32_t>(rm);
  }

  // Moved to encodeShiftRotateReg() in IceAssemblerARM32.cpp
  // Data-processing operands - Logical shift/rotate by register.
  Operand(Register rm, Shift shift, Register rs) {
    type_ = 0;
    encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
                static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
                static_cast<uint32_t>(rm);
  }

  // Already defined as ARM32::OperandARM32FlexImm::canHoldImm().
  static bool CanHold(uint32_t immediate, Operand* o) {
    // Avoid the more expensive test for frequent small immediate values.
    if (immediate < (1 << kImmed8Bits)) {
      o->type_ = 1;
      o->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
      return true;
    }
    // Note that immediate must be unsigned for the test to work correctly.
    for (int rot = 0; rot < 16; rot++) {
      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
      if (imm8 < (1 << kImmed8Bits)) {
        o->type_ = 1;
        o->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
        return true;
      }
    }
    return false;
  }
#endif

 private:
  bool is_valid() const { return (type_ == 0) || (type_ == 1); }

  uint32_t type() const {
    ASSERT(is_valid());
    return type_;
  }

  uint32_t encoding() const {
    ASSERT(is_valid());
    return encoding_;
  }

  uint32_t type_;  // Encodes the type field (bits 27-25) in the instruction.
  uint32_t encoding_;

  friend class Assembler;
  friend class Address;
};
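
// A worked example of ARM's rotated-immediate form tested by CanHold() above:
// an encodable value is an 8-bit constant rotated right by an even amount,
// i.e. imm32 == ROR(imm8, 2 * rotate). For instance, 0xFF000000 is encodable
// as imm8 == 0xFF with rotate == 4 (0xFF rotated right by 8 bits), whereas a
// value such as 0x12345678 has no such form and must be synthesized another
// way (e.g., with the Assembler's LoadImmediate further below).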

enum OperandSize {
  kByte,
  kUnsignedByte,
  kHalfword,
  kUnsignedHalfword,
  kWord,
  kUnsignedWord,
  kWordPair,
  kSWord,
  kDWord,
  kRegList,
};


// Load/store multiple addressing mode.
enum BlockAddressMode {
  // bit encoding P U W
  DA   = (0|0|0) << 21,  // decrement after
  IA   = (0|4|0) << 21,  // increment after
  DB   = (8|0|0) << 21,  // decrement before
  IB   = (8|4|0) << 21,  // increment before
  DA_W = (0|0|1) << 21,  // decrement after with writeback to base
  IA_W = (0|4|1) << 21,  // increment after with writeback to base
  DB_W = (8|0|1) << 21,  // decrement before with writeback to base
  IB_W = (8|4|1) << 21   // increment before with writeback to base
};
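
// In the (P|U|W) << 21 encoding above, W (bit 21) requests writeback, U
// (bit 23) selects an upward (incrementing) transfer, and P (bit 24) selects
// pre-indexing. For example, IA_W == (0|4|1) << 21 sets U and W: increment
// after each transfer and write the final address back to the base register,
// the classic pop sequence. The same P/U/W bits underlie the single-transfer
// Address::Mode values below.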

class Address : public ValueObject {
 public:
  enum OffsetKind {
    Immediate,
    IndexRegister,
    ScaledIndexRegister,
  };

  // Memory operand addressing mode
  enum Mode {
    kModeMask = (8|4|1) << 21,
    // bit encoding P U W
    Offset       = (8|4|0) << 21,  // offset (w/o writeback to base)
    PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
    PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
    NegOffset    = (8|0|0) << 21,  // negative offset (w/o writeback to base)
    NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
    NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
  };

  Address(const Address& other)
      : ValueObject(), encoding_(other.encoding_), kind_(other.kind_) {
  }

  Address& operator=(const Address& other) {
    encoding_ = other.encoding_;
    kind_ = other.kind_;
    return *this;
  }

  bool Equals(const Address& other) const {
    return (encoding_ == other.encoding_) && (kind_ == other.kind_);
  }

#if 0
  // Moved to decodeImmRegOffset() in IceAssemblerARM32.cpp.
  // Used to model stack offsets.
  explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
    ASSERT(Utils::IsAbsoluteUint(12, offset));
    kind_ = Immediate;
    if (offset < 0) {
      encoding_ = (am ^ (1 << kUShift)) | -offset;  // Flip U to adjust sign.
    } else {
      encoding_ = am | offset;
    }
    encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
  }
#endif

  // There is no register offset mode unless Mode is Offset, in which case the
  // shifted register case below should be used.
  Address(Register rn, Register r, Mode am);

  Address(Register rn, Register rm,
          Shift shift = LSL, uint32_t shift_imm = 0, Mode am = Offset) {
    Operand o(rm, shift, shift_imm);

    if ((shift == LSL) && (shift_imm == 0)) {
      kind_ = IndexRegister;
    } else {
      kind_ = ScaledIndexRegister;
    }
    encoding_ = o.encoding() | am | (static_cast<uint32_t>(rn) << kRnShift);
  }

  // There is no shifted register mode with a register shift.
  Address(Register rn, Register rm, Shift shift, Register r, Mode am = Offset);

  static OperandSize OperandSizeFor(intptr_t cid);

  static bool CanHoldLoadOffset(OperandSize size,
                                int32_t offset,
                                int32_t* offset_mask);
  static bool CanHoldStoreOffset(OperandSize size,
                                 int32_t offset,
                                 int32_t* offset_mask);
  static bool CanHoldImmediateOffset(bool is_load,
                                     intptr_t cid,
                                     int64_t offset);

 private:
  Register rn() const {
    return Instr::At(reinterpret_cast<uword>(&encoding_))->RnField();
  }

  Register rm() const {
    return ((kind() == IndexRegister) || (kind() == ScaledIndexRegister)) ?
        Instr::At(reinterpret_cast<uword>(&encoding_))->RmField() :
        kNoRegister;
  }

  Mode mode() const { return static_cast<Mode>(encoding() & kModeMask); }

  uint32_t encoding() const { return encoding_; }

#if 0
  // Moved to encodeImmRegOffsetEnc3 in IceAssemblerARM32.cpp
  // Encoding for addressing mode 3.
  uint32_t encoding3() const;
#endif

  // Encoding for vfp load/store addressing.
  uint32_t vencoding() const;

  OffsetKind kind() const { return kind_; }

  uint32_t encoding_;

  OffsetKind kind_;

  friend class Assembler;
};


class FieldAddress : public Address {
 public:
  FieldAddress(Register base, int32_t disp)
      : Address(base, disp - kHeapObjectTag) { }

  // This addressing mode does not exist.
  FieldAddress(Register base, Register r);

  FieldAddress(const FieldAddress& other) : Address(other) { }

  FieldAddress& operator=(const FieldAddress& other) {
    Address::operator=(other);
    return *this;
  }
};
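
// Dart represents heap objects as tagged pointers: a pointer to a heap
// object is the real address plus kHeapObjectTag (1), so FieldAddress above
// compensates by subtracting the tag. For example, FieldAddress(R0, 8)
// denotes the memory operand [R0, #7], reaching field offset 8 of the object
// whose tagged pointer is in R0.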

class Assembler : public ValueObject {
 public:
  explicit Assembler(bool use_far_branches = false)
      : buffer_(),
        prologue_offset_(-1),
        use_far_branches_(use_far_branches),
        comments_(),
        constant_pool_allowed_(false) { }

  ~Assembler() { }

  void PopRegister(Register r) { Pop(r); }

  void Bind(Label* label);
  void Jump(Label* label) { b(label); }

  // Misc. functionality
  intptr_t CodeSize() const { return buffer_.Size(); }
  intptr_t prologue_offset() const { return prologue_offset_; }

  // Count the fixups that produce a pointer offset, without processing
  // the fixups. On ARM there are no pointers in code.
  intptr_t CountPointerOffsets() const { return 0; }

  const ZoneGrowableArray<intptr_t>& GetPointerOffsets() const {
    ASSERT(buffer_.pointer_offsets().length() == 0);  // No pointers in code.
    return buffer_.pointer_offsets();
  }

  ObjectPoolWrapper& object_pool_wrapper() { return object_pool_wrapper_; }

  RawObjectPool* MakeObjectPool() {
    return object_pool_wrapper_.MakeObjectPool();
  }

  bool use_far_branches() const {
    return FLAG_use_far_branches || use_far_branches_;
  }

#if defined(TESTING) || defined(DEBUG)
  // Used in unit tests and to ensure predictable verification code size in
  // FlowGraphCompiler::EmitEdgeCounter.
  void set_use_far_branches(bool b) {
    use_far_branches_ = b;
  }
#endif  // TESTING || DEBUG

  void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // Debugging and bringup support.
  void Stop(const char* message);
  void Unimplemented(const char* message);
  void Untested(const char* message);
  void Unreachable(const char* message);

  static void InitializeMemoryWithBreakpoints(uword data, intptr_t length);

  void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
  static bool EmittingComments();

  const Code::Comments& GetCodeComments() const;

  static const char* RegisterName(Register reg);

  static const char* FpuRegisterName(FpuRegister reg);

#if 0
  // Moved to ARM32::AssemblerARM32::and_()
  // Data-processing instructions.
  void and_(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::eor()
  void eor(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sub()
  void sub(Register rd, Register rn, Operand o, Condition cond = AL);
  void subs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::rsb()
  void rsb(Register rd, Register rn, Operand o, Condition cond = AL);
  void rsbs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::add()
  void add(Register rd, Register rn, Operand o, Condition cond = AL);

  void adds(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::adc()
  void adc(Register rd, Register rn, Operand o, Condition cond = AL);

  void adcs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sbc()
  void sbc(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::sbc()
  void sbcs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::rsc()
  void rsc(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::tst();
  void tst(Register rn, Operand o, Condition cond = AL);
#endif

  void teq(Register rn, Operand o, Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::cmp()
  void cmp(Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::cmn()
  void cmn(Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::orr().
  void orr(Register rd, Register rn, Operand o, Condition cond = AL);
  void orrs(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mov()
  void mov(Register rd, Operand o, Condition cond = AL);
  void movs(Register rd, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::bic()
  void bic(Register rd, Register rn, Operand o, Condition cond = AL);
  void bics(Register rd, Register rn, Operand o, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mvn()
  void mvn(Register rd, Operand o, Condition cond = AL);
  void mvns(Register rd, Operand o, Condition cond = AL);

  // Miscellaneous data-processing instructions.
  // Moved to ARM32::AssemblerARM32::clz()
  void clz(Register rd, Register rm, Condition cond = AL);

  // Multiply instructions.

  // Moved to ARM32::AssemblerARM32::mul()
  void mul(Register rd, Register rn, Register rm, Condition cond = AL);
  void muls(Register rd, Register rn, Register rm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::mla()
  void mla(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::mls()
  void mls(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL);
#endif

  void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::umull();
  void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);
#endif
  void smlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);
  void umlal(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL);

  // Emulation of this instruction uses IP and the condition codes. Therefore,
  // none of the registers can be IP, and the instruction can only be used
  // unconditionally.
  void umaal(Register rd_lo, Register rd_hi, Register rn, Register rm);

  // Division instructions.
#if 0
  // Moved to ARM32::AssemblerARM32::sdiv()
  void sdiv(Register rd, Register rn, Register rm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::udiv()
  void udiv(Register rd, Register rn, Register rm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  // Load/store instructions.
  void ldr(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void str(Register rd, Address ad, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  void ldrb(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void strb(Register rd, Address ad, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldr()
  void ldrh(Register rd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::str()
  void strh(Register rd, Address ad, Condition cond = AL);
#endif

  void ldrsb(Register rd, Address ad, Condition cond = AL);
  void ldrsh(Register rd, Address ad, Condition cond = AL);

  // ldrd and strd actually support the full range of addressing modes, but
  // we don't use them, and we need to split them up into two instructions for
  // ARMv5TE, so we only support the base + offset mode.
  void ldrd(Register rd, Register rn, int32_t offset, Condition cond = AL);
  void strd(Register rd, Register rn, int32_t offset, Condition cond = AL);

#if 0
  // Folded into ARM32::AssemblerARM32::popList(), since it is its only use
  // (and doesn't implement ARM LDM instructions).
  void ldm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL);

  // Folded into ARM32::AssemblerARM32::pushList(), since it is its only use
  // (and doesn't implement ARM STM instruction).
  void stm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::ldrex();
  void ldrex(Register rd, Register rn, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::strex();
  void strex(Register rd, Register rt, Register rn, Condition cond = AL);
#endif

  // Miscellaneous instructions.
  void clrex();
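
  // ldrex/strex (above) build atomic read-modify-write sequences: ldrex marks
  // the address for exclusive access, and strex writes rt back only if that
  // reservation still holds, setting rd to 0 on success. A sketch of the
  // usual retry loop (plain ARM assembly, not this assembler's API):
  //
  //   retry:  ldrex r1, [r0]        ; load current value
  //           add   r1, r1, #1      ; modify it
  //           strex r2, r1, [r0]    ; attempt the store; r2 = 0 on success
  //           cmp   r2, #0
  //           bne   retry           ; reservation lost - try again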

#if 0
  // Moved to ARM32::AssemblerARM32::nop().
  void nop(Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::bkpt()
  // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
  void bkpt(uint16_t imm16);

  static int32_t BkptEncoding(uint16_t imm16) {
    // bkpt requires that the cond field is AL.
    return (AL << kConditionShift) | B24 | B21 |
           ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
  }

  // Not ported. PNaCl doesn't allow breakpoint instructions.
  static uword GetBreakInstructionFiller() {
    return BkptEncoding(0);
  }
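
  // To illustrate the encoding above: BkptEncoding(0) assembles the fields
  // cond=AL (0xE in bits 31-28), B24|B21 (the bkpt opcode bits), a zero imm16
  // split into its high 12 and low 4 bits, and B6|B5|B4, yielding 0xE1200070,
  // the machine code for "bkpt #0".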

  // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).

  // Moved to ARM32::AssemblerARM32::vmovsr().
  void vmovsr(SRegister sn, Register rt, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovrs().
  void vmovrs(Register rt, SRegister sn, Condition cond = AL);
#endif
  void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
  void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vmovdrr().
  void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovrrd().
  void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovqir().
  void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovss().
  void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovdd().
  void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
#endif
  void vmovq(QRegister qd, QRegister qm);

#if 0
  // Returns false if the immediate cannot be encoded.
  // Moved to ARM32::AssemblerARM32::vmovs();
  bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmovd();
  bool vmovd(DRegister dd, double d_imm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vldrs()
  void vldrs(SRegister sd, Address ad, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vstrs()
  void vstrs(SRegister sd, Address ad, Condition cond = AL);
#endif
  // Moved to ARM32::AssemblerARM32::vldrd()
  void vldrd(DRegister dd, Address ad, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vstrd()
  void vstrd(DRegister dd, Address ad, Condition cond = AL);
#endif

  void vldms(BlockAddressMode am, Register base,
             SRegister first, SRegister last, Condition cond = AL);
  void vstms(BlockAddressMode am, Register base,
             SRegister first, SRegister last, Condition cond = AL);

  void vldmd(BlockAddressMode am, Register base,
             DRegister first, intptr_t count, Condition cond = AL);
  void vstmd(BlockAddressMode am, Register base,
             DRegister first, intptr_t count, Condition cond = AL);

#if 0
  // Moved to ARM32::AssemblerARM32::vadds()
  void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vaddd()
  void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vaddqi().
  void vaddqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vaddqf().
  void vaddqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsubs()
  void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsubd()
  void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsubqi().
  void vsubqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsubqf().
  void vsubqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmuls()
  void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmuld()
  void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmulqi().
  void vmulqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmulqf().
  void vmulqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vshlqi().
  void vshlqi(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
  // Moved to ARM32::AssemblerARM32::vshlqu().
  void vshlqu(OperandSize sz, QRegister qd, QRegister qm, QRegister qn);
  // Moved to ARM32::AssemblerARM32::vmlas()
  void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlad()
  void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlss()
  void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vmlsd()
  void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vdivs()
  void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vdivd()
  void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
#endif
  void vminqs(QRegister qd, QRegister qn, QRegister qm);
  void vmaxqs(QRegister qd, QRegister qn, QRegister qm);
  void vrecpeqs(QRegister qd, QRegister qm);
  void vrecpsqs(QRegister qd, QRegister qn, QRegister qm);
  void vrsqrteqs(QRegister qd, QRegister qm);
  void vrsqrtsqs(QRegister qd, QRegister qn, QRegister qm);
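
  // vrecpeqs/vrsqrteqs (above) produce only low-precision estimates of 1/x
  // and 1/sqrt(x); vrecpsqs/vrsqrtsqs supply the corresponding Newton-Raphson
  // step (VRECPS computes 2 - a*b). One refinement of a reciprocal estimate
  // looks like x' = x * (2 - d*x): seed with vrecpeqs, then vmulqs by the
  // vrecpsqs result. The Vreciprocalqs/VreciprocalSqrtqs macros further below
  // appear to package exactly these sequences.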

#if 0
  // Moved to ARM32::AssemblerARM32::veorq()
  void veorq(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vorrq()
  void vorrq(QRegister qd, QRegister qn, QRegister qm);
#endif
  void vornq(QRegister qd, QRegister qn, QRegister qm);
#if 0
  // Moved to ARM32::AssemblerARM32::vandq().
  void vandq(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vmvnq().
  void vmvnq(QRegister qd, QRegister qm);

  // Moved to ARM32::AssemblerARM32::vceqqi().
  void vceqqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vceqqs().
  void vceqqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgeqi().
  void vcgeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcugeqi().
  void vcugeqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgeqs().
  void vcgeqs(QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgtqi().
  void vcgtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcugtqi().
  void vcugtqi(OperandSize sz, QRegister qd, QRegister qn, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vcgtqs().
  void vcgtqs(QRegister qd, QRegister qn, QRegister qm);

  // Moved to ARM32::AssemblerARM32::vabss().
  void vabss(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vabsd().
  void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vabsq().
  void vabsqs(QRegister qd, QRegister qm);
#endif
  void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
  void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
#if 0
  // Moved to ARM32::AssemblerARM32::vnegqs().
  void vnegqs(QRegister qd, QRegister qm);
  // Moved to ARM32::AssemblerARM32::vsqrts().
  void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vsqrtd().
  void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vcvtsd().
  void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtds().
  void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtis()
  void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtid()
  void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtsi()
  void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtdi()
  void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtus().
  void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtud().
  void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtsu()
  void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcvtdu()
  void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::vcmps().
  void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpd().
  void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpsz().
  void vcmpsz(SRegister sd, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::vcmpdz().
  void vcmpdz(DRegister dd, Condition cond = AL);

  // APSR_nzcv version moved to ARM32::AssemblerARM32::vmrsAPSR_nzcv()
  void vmrs(Register rd, Condition cond = AL);
#endif
  void vmstat(Condition cond = AL);

  // Duplicates the operand of size sz at index idx from dm to all elements of
  // qd. This is a special case of vtbl.
  void vdup(OperandSize sz, QRegister qd, DRegister dm, int idx);

  // Each byte of dm is an index into the table of bytes formed by
  // concatenating a list of 'length' registers starting with dn. The result
  // is placed in dd.
  void vtbl(DRegister dd, DRegister dn, int length, DRegister dm);

  // The words of qd and qm are interleaved with the low words of the result
  // in qd and the high words in qm.
  void vzipqw(QRegister qd, QRegister qm);
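
  // For example, with qd = {a0, a1, a2, a3} and qm = {b0, b1, b2, b3},
  // vzipqw leaves qd = {a0, b0, a1, b1} and qm = {a2, b2, a3, b3}.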

  // Branch instructions.
#if 0
  // Moved to ARM32::AssemblerARM32::b();
  void b(Label* label, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::bl()
  void bl(Label* label, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::bx()
  void bx(Register rm, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::blx()
  void blx(Register rm, Condition cond = AL);
#endif

  void Branch(const StubEntry& stub_entry,
              Patchability patchable = kNotPatchable,
              Register pp = PP,
              Condition cond = AL);

  void BranchLink(const StubEntry& stub_entry,
                  Patchability patchable = kNotPatchable);
  void BranchLink(const Code& code, Patchability patchable);

  // Branch and link to an entry address. Call sequence can be patched.
  void BranchLinkPatchable(const StubEntry& stub_entry);
  void BranchLinkPatchable(const Code& code);

  // Branch and link to [base + offset]. Call sequence is never patched.
  void BranchLinkOffset(Register base, int32_t offset);

  // Add signed immediate value to rd. May clobber IP.
  void AddImmediate(Register rd, int32_t value, Condition cond = AL);
  void AddImmediate(Register rd, Register rn, int32_t value,
                    Condition cond = AL);
  void AddImmediateSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL);
  void SubImmediateSetFlags(Register rd, Register rn, int32_t value,
                            Condition cond = AL);
  void AndImmediate(Register rd, Register rs, int32_t imm, Condition cond = AL);

  // Test rn and immediate. May clobber IP.
  void TestImmediate(Register rn, int32_t imm, Condition cond = AL);

  // Compare rn with signed immediate value. May clobber IP.
  void CompareImmediate(Register rn, int32_t value, Condition cond = AL);
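
  // These macros may clobber IP because ARM data-processing instructions can
  // only encode a rotated 8-bit immediate (see Operand::CanHold() above). A
  // sketch of the likely expansion when the value does not fit: for
  // AddImmediate(R0, 0x12345678), first materialize the constant with
  // LoadImmediate(IP, 0x12345678), then add(R0, R0, Operand(IP)).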

  // Signed integer division of left by right. Checks to see if integer
  // division is supported. If not, uses the FPU for division with
  // temporary registers tmpl and tmpr. tmpl and tmpr must be different
  // registers.
  void IntegerDivide(Register result, Register left, Register right,
                     DRegister tmpl, DRegister tmpr);

  // Load and Store.
  // These three do not clobber IP.
  void LoadPatchableImmediate(Register rd, int32_t value, Condition cond = AL);
  void LoadDecodableImmediate(Register rd, int32_t value, Condition cond = AL);
  void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
  // These two may clobber IP.
  void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
  void LoadDImmediate(DRegister dd, double value,
                      Register scratch, Condition cond = AL);

  void MarkExceptionHandler(Label* label);

  void Drop(intptr_t stack_elements);

  void RestoreCodePointer();
  void LoadPoolPointer(Register reg = PP);

  void LoadIsolate(Register rd);

  void LoadObject(Register rd, const Object& object, Condition cond = AL);
  void LoadUniqueObject(Register rd, const Object& object, Condition cond = AL);
  void LoadFunctionFromCalleePool(Register dst,
                                  const Function& function,
                                  Register new_pp);
  void LoadNativeEntry(Register dst,
                       const ExternalLabel* label,
                       Patchability patchable,
                       Condition cond = AL);
  void PushObject(const Object& object);
  void CompareObject(Register rn, const Object& object);

  // When storing into a heap object field, knowledge of the previous content
  // is expressed through these constants.
  enum FieldContent {
    kEmptyOrSmiOrNull,  // Empty = garbage/zapped in release/debug mode.
    kHeapObjectOrSmi,
    kOnlySmi,
  };

  void StoreIntoObject(Register object,      // Object we are storing into.
                       const Address& dest,  // Where we are storing into.
                       Register value,       // Value we are storing.
                       bool can_value_be_smi = true);
  void StoreIntoObjectOffset(Register object,
                             int32_t offset,
                             Register value,
                             bool can_value_be_smi = true);

  void StoreIntoObjectNoBarrier(Register object,
                                const Address& dest,
                                Register value,
                                FieldContent old_content = kHeapObjectOrSmi);
  void InitializeFieldNoBarrier(Register object,
                                const Address& dest,
                                Register value) {
    StoreIntoObjectNoBarrier(object, dest, value, kEmptyOrSmiOrNull);
  }
  void StoreIntoObjectNoBarrierOffset(
      Register object,
      int32_t offset,
      Register value,
      FieldContent old_content = kHeapObjectOrSmi);
  void StoreIntoObjectNoBarrier(Register object,
                                const Address& dest,
                                const Object& value,
                                FieldContent old_content = kHeapObjectOrSmi);
  void StoreIntoObjectNoBarrierOffset(
      Register object,
      int32_t offset,
      const Object& value,
      FieldContent old_content = kHeapObjectOrSmi);

  // Store value_even, value_odd, value_even, ... into the words in the
  // address range [begin, end), assumed to be uninitialized fields in object
  // (tagged). The stores must not need a generational store barrier (e.g.,
  // smi/null), and (value_even, value_odd) must be a valid register pair.
  // Destroys register 'begin'.
  void InitializeFieldsNoBarrier(Register object,
                                 Register begin,
                                 Register end,
                                 Register value_even,
                                 Register value_odd);
  // Like above, for the range [base+begin_offset, base+end_offset), unrolled.
  void InitializeFieldsNoBarrierUnrolled(Register object,
                                         Register base,
                                         intptr_t begin_offset,
                                         intptr_t end_offset,
                                         Register value_even,
                                         Register value_odd);

  // Stores a Smi value into a heap object field that always contains a Smi.
  void StoreIntoSmiField(const Address& dest, Register value);

  void LoadClassId(Register result, Register object, Condition cond = AL);
  void LoadClassById(Register result, Register class_id);
  void LoadClass(Register result, Register object, Register scratch);
  void CompareClassId(Register object, intptr_t class_id, Register scratch);
  void LoadClassIdMayBeSmi(Register result, Register object);
  void LoadTaggedClassIdMayBeSmi(Register result, Register object);

  void ComputeRange(Register result,
                    Register value,
                    Register scratch,
                    Label* miss);

  void UpdateRangeFeedback(Register value,
                           intptr_t idx,
                           Register ic_data,
                           Register scratch1,
                           Register scratch2,
                           Label* miss);

  intptr_t FindImmediate(int32_t imm);
  bool CanLoadFromObjectPool(const Object& object) const;
  void LoadFromOffset(OperandSize type,
                      Register reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL);
  void LoadFieldFromOffset(OperandSize type,
                           Register reg,
                           Register base,
                           int32_t offset,
                           Condition cond = AL) {
    LoadFromOffset(type, reg, base, offset - kHeapObjectTag, cond);
  }
  void StoreToOffset(OperandSize type,
                     Register reg,
                     Register base,
                     int32_t offset,
                     Condition cond = AL);
  void LoadSFromOffset(SRegister reg,
                       Register base,
                       int32_t offset,
                       Condition cond = AL);
  void StoreSToOffset(SRegister reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL);
  void LoadDFromOffset(DRegister reg,
                       Register base,
                       int32_t offset,
                       Condition cond = AL);
  void StoreDToOffset(DRegister reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL);

  void LoadMultipleDFromOffset(DRegister first,
                               intptr_t count,
                               Register base,
                               int32_t offset);
  void StoreMultipleDToOffset(DRegister first,
                              intptr_t count,
                              Register base,
                              int32_t offset);

  void CopyDoubleField(Register dst, Register src,
                       Register tmp1, Register tmp2, DRegister dtmp);
  void CopyFloat32x4Field(Register dst, Register src,
                          Register tmp1, Register tmp2, DRegister dtmp);
  void CopyFloat64x2Field(Register dst, Register src,
                          Register tmp1, Register tmp2, DRegister dtmp);

#if 0
  // Moved to ARM32::AssemblerARM32::push().
  void Push(Register rd, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::pop().
  void Pop(Register rd, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::pushList().
  void PushList(RegList regs, Condition cond = AL);

  // Moved to ARM32::AssemblerARM32::popList().
  void PopList(RegList regs, Condition cond = AL);
#endif
  void MoveRegister(Register rd, Register rm, Condition cond = AL);

  // Convenience shift instructions. Use mov instruction with shifter operand
  // for variants setting the status flags.
#if 0
  // Moved to ARM32::AssemblerARM32::lsl()
  void Lsl(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsl()
  void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsr()
  void Lsr(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::lsr()
  void Lsr(Register rd, Register rm, Register rs, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::asr()
  void Asr(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::asr()
  void Asr(Register rd, Register rm, Register rs, Condition cond = AL);
#endif
  void Asrs(Register rd, Register rm, const Operand& shift_imm,
            Condition cond = AL);
  void Ror(Register rd, Register rm, const Operand& shift_imm,
           Condition cond = AL);
  void Ror(Register rd, Register rm, Register rs, Condition cond = AL);
  void Rrx(Register rd, Register rm, Condition cond = AL);

  // Fill rd with the sign of rm.
  void SignFill(Register rd, Register rm, Condition cond = AL);

  void Vreciprocalqs(QRegister qd, QRegister qm);
  void VreciprocalSqrtqs(QRegister qd, QRegister qm);
  // If qm must be preserved, then provide a (non-QTMP) temporary.
  void Vsqrtqs(QRegister qd, QRegister qm, QRegister temp);
  void Vdivqs(QRegister qd, QRegister qn, QRegister qm);

  void SmiTag(Register reg, Condition cond = AL) {
    Lsl(reg, reg, Operand(kSmiTagSize), cond);
  }

  void SmiTag(Register dst, Register src, Condition cond = AL) {
    Lsl(dst, src, Operand(kSmiTagSize), cond);
  }

  void SmiUntag(Register reg, Condition cond = AL) {
    Asr(reg, reg, Operand(kSmiTagSize), cond);
  }

  void SmiUntag(Register dst, Register src, Condition cond = AL) {
    Asr(dst, src, Operand(kSmiTagSize), cond);
  }

  // Untag the value in the register, assuming it is a smi.
  // Untagging shifts the tag bit into the carry flag: if the carry is clear,
  // the assumption was correct, and we jump to the is_smi label. Otherwise
  // we fall through.
  void SmiUntag(Register dst, Register src, Label* is_smi) {
    ASSERT(kSmiTagSize == 1);
    Asrs(dst, src, Operand(kSmiTagSize));
    b(is_smi, CC);
  }
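
  // A worked example of the smi tagging scheme above (assuming the one-bit
  // kSmiTagSize): smis carry a 0 in the low bit, so SmiTag turns the integer
  // 3 into the tagged word 6, and SmiUntag shifts it back. In the checking
  // variant, Asrs on an odd word (a tagged heap pointer) shifts a 1 into the
  // carry flag, so the CC (carry clear) branch is taken only for real smis.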

  void CheckCodePointer();

  // Function frame setup and tear down.
  void EnterFrame(RegList regs, intptr_t frame_space);
  void LeaveFrame(RegList regs);
  void Ret();
  void ReserveAlignedFrameSpace(intptr_t frame_space);

  // Create a frame for calling into runtime that preserves all volatile
  // registers. Frame's SP is guaranteed to be correctly aligned and
  // frame_space bytes are reserved under it.
  void EnterCallRuntimeFrame(intptr_t frame_space);
  void LeaveCallRuntimeFrame();

  void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);

  // Set up a Dart frame on entry with a frame pointer and PC information to
  // enable easy access to the RawInstruction object of code corresponding
  // to this frame.
  void EnterDartFrame(intptr_t frame_size);
  void LeaveDartFrame(RestorePP restore_pp = kRestoreCallerPP);

  // Set up a Dart frame for a function compiled for on-stack replacement.
  // The frame layout is a normal Dart frame, but the frame is partially set
  // up on entry (it is the frame of the unoptimized code).
  void EnterOsrFrame(intptr_t extra_size);

  // Set up a stub frame so that the stack traversal code can easily identify
  // a stub frame.
  void EnterStubFrame();
  void LeaveStubFrame();

  // The register into which the allocation stats table is loaded with
  // LoadAllocationStatsAddress should be passed to
  // IncrementAllocationStats(WithSize) as stats_addr_reg to update the
  // allocation stats. These are separate assembler macros so we can
  // avoid a dependent load too nearby the load of the table address.
  void LoadAllocationStatsAddress(Register dest,
                                  intptr_t cid,
                                  bool inline_isolate = true);
  void IncrementAllocationStats(Register stats_addr,
                                intptr_t cid,
                                Heap::Space space);
  void IncrementAllocationStatsWithSize(Register stats_addr_reg,
                                        Register size_reg,
                                        Heap::Space space);

  Address ElementAddressForIntIndex(bool is_load,
                                    bool is_external,
                                    intptr_t cid,
                                    intptr_t index_scale,
                                    Register array,
                                    intptr_t index,
                                    Register temp);

  Address ElementAddressForRegIndex(bool is_load,
                                    bool is_external,
                                    intptr_t cid,
                                    intptr_t index_scale,
                                    Register array,
                                    Register index);

  // If allocation tracing for |cid| is enabled, this jumps to the |trace|
  // label, which will allocate in the runtime where tracing occurs.
  void MaybeTraceAllocation(intptr_t cid,
                            Register temp_reg,
                            Label* trace,
                            bool inline_isolate = true);

  // Inlined allocation of an instance of class 'cls', code has no runtime
  // calls. Jump to 'failure' if the instance cannot be allocated here.
  // Allocated instance is returned in 'instance_reg'.
  // Only the tags field of the object is initialized.
  void TryAllocate(const Class& cls,
                   Label* failure,
                   Register instance_reg,
                   Register temp_reg);

  void TryAllocateArray(intptr_t cid,
                        intptr_t instance_size,
                        Label* failure,
                        Register instance,
                        Register end_address,
                        Register temp1,
                        Register temp2);

  // Emit data (e.g., an encoded instruction or immediate) in the instruction
  // stream.
  void Emit(int32_t value);

  // On some other platforms, we draw a distinction between safe and unsafe
  // smis.
  static bool IsSafe(const Object& object) { return true; }
  static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }

  bool constant_pool_allowed() const {
    return constant_pool_allowed_;
  }
  void set_constant_pool_allowed(bool b) {
    constant_pool_allowed_ = b;
  }

 private:
  AssemblerBuffer buffer_;  // Contains position independent code.
  ObjectPoolWrapper object_pool_wrapper_;

  int32_t prologue_offset_;

  bool use_far_branches_;

#if 0
  // If you are thinking of using one or both of these instructions directly,
  // you should probably use LoadImmediate instead.
  // Moved to ARM32::AssemblerARM32::movw
  void movw(Register rd, uint16_t imm16, Condition cond = AL);
  // Moved to ARM32::AssemblerARM32::movt
  void movt(Register rd, uint16_t imm16, Condition cond = AL);
#endif
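
  // movw writes a 16-bit immediate into the low half of rd and zeroes the
  // high half; movt writes the high half and leaves the low half alone. A
  // sketch of how an arbitrary 32-bit constant can be materialized on ARMv7,
  // which is the kind of sequence LoadImmediate hides:
  //
  //   movw r0, #0x5678   ; r0 = 0x00005678
  //   movt r0, #0x1234   ; r0 = 0x12345678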

  void BindARMv6(Label* label);
  void BindARMv7(Label* label);

  void LoadWordFromPoolOffset(Register rd,
                              int32_t offset,
                              Register pp,
                              Condition cond);

  void BranchLink(const ExternalLabel* label);

  class CodeComment : public ZoneAllocated {
   public:
    CodeComment(intptr_t pc_offset, const String& comment)
        : pc_offset_(pc_offset), comment_(comment) { }

    intptr_t pc_offset() const { return pc_offset_; }
    const String& comment() const { return comment_; }

   private:
    intptr_t pc_offset_;
    const String& comment_;

    DISALLOW_COPY_AND_ASSIGN(CodeComment);
  };

  GrowableArray<CodeComment*> comments_;

  bool constant_pool_allowed_;

  void LoadObjectHelper(Register rd,
                        const Object& object,
                        Condition cond,
                        bool is_unique,
                        Register pp);

#if 0
  // Moved to ARM32::AssemblerARM32::emitType01()
  void EmitType01(Condition cond,
                  int type,
                  Opcode opcode,
                  int set_cc,
                  Register rn,
                  Register rd,
                  Operand o);

  // Moved to ARM32::AssemblerARM32::emitType05()
  void EmitType5(Condition cond, int32_t offset, bool link);

  // Moved to ARM32::AssemblerARM32::emitMemOp()
  void EmitMemOp(Condition cond,
                 bool load,
                 bool byte,
                 Register rd,
                 Address ad);

  // Moved to ARM32::AssemblerARM32::emitMemOpEnc3();
  void EmitMemOpAddressMode3(Condition cond,
                             int32_t mode,
                             Register rd,
                             Address ad);

  // Moved to ARM32::AssemblerARM32::emitMultiMemOp()
  void EmitMultiMemOp(Condition cond,
                      BlockAddressMode am,
                      bool load,
                      Register base,
                      RegList regs);
#endif

  void EmitShiftImmediate(Condition cond,
                          Shift opcode,
                          Register rd,
                          Register rm,
                          Operand o);

  void EmitShiftRegister(Condition cond,
                         Shift opcode,
                         Register rd,
                         Register rm,
                         Operand o);

#if 0
  // Moved to ARM32::AssemblerARM32::emitMulOp()
  void EmitMulOp(Condition cond,
                 int32_t opcode,
                 Register rd,
                 Register rn,
                 Register rm,
                 Register rs);

  // Moved to ARM32::AssemblerARM32::emitDivOp();
  void EmitDivOp(Condition cond,
                 int32_t opcode,
                 Register rd,
                 Register rn,
                 Register rm);
#endif

  void EmitMultiVSMemOp(Condition cond,
                        BlockAddressMode am,
                        bool load,
                        Register base,
                        SRegister start,
                        uint32_t count);

  void EmitMultiVDMemOp(Condition cond,
                        BlockAddressMode am,
                        bool load,
                        Register base,
                        DRegister start,
                        int32_t count);

#if 0
  // Moved to ARM32::AssemblerARM32::emitVFPsss
  void EmitVFPsss(Condition cond,
                  int32_t opcode,
                  SRegister sd,
                  SRegister sn,
                  SRegister sm);

  // Moved to ARM32::AssemblerARM32::emitVFPddd
  void EmitVFPddd(Condition cond,
                  int32_t opcode,
                  DRegister dd,
                  DRegister dn,
                  DRegister dm);

  // Moved to ARM32::AssemblerARM32::emitVFPsd
  void EmitVFPsd(Condition cond,
                 int32_t opcode,
                 SRegister sd,
                 DRegister dm);

  // Moved to ARM32::AssemblerARM32::emitVFPds
  void EmitVFPds(Condition cond,
                 int32_t opcode,
                 DRegister dd,
                 SRegister sm);

  // Moved to ARM32::AssemblerARM32::emitSIMDqqq()
  void EmitSIMDqqq(int32_t opcode, OperandSize sz,
                   QRegister qd, QRegister qn, QRegister qm);
#endif

  void EmitSIMDddd(int32_t opcode, OperandSize sz,
                   DRegister dd, DRegister dn, DRegister dm);

  void EmitFarBranch(Condition cond, int32_t offset, bool link);
#if 0
  // Moved to ARM32::AssemblerARM32::emitBranch()
  void EmitBranch(Condition cond, Label* label, bool link);
  // Moved to ARM32::AssemblerARM32::encodeBranchOffset().
  int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
  // Moved to ARM32::AssemblerARM32::decodeBranchOffset().
  static int32_t DecodeBranchOffset(int32_t inst);
#endif
  int32_t EncodeTstOffset(int32_t offset, int32_t inst);
  int32_t DecodeTstOffset(int32_t inst);

  void StoreIntoObjectFilter(Register object, Register value, Label* no_update);

  // Shorter filtering sequence that assumes that value is not a smi.
  void StoreIntoObjectFilterNoSmi(Register object,
                                  Register value,
                                  Label* no_update);

  // Helpers for write-barrier verification.

  // Returns VerifiedMemory::offset() as an Operand.
  Operand GetVerifiedMemoryShadow();
  // Writes value to [base + offset] and also its shadow location, if enabled.
  void WriteShadowedField(Register base,
                          intptr_t offset,
                          Register value,
                          Condition cond = AL);
  void WriteShadowedFieldPair(Register base,
                              intptr_t offset,
                              Register value_even,
                              Register value_odd,
                              Condition cond = AL);
  // Writes new_value to address and its shadow location, if enabled, after
  // verifying that its old value matches its shadow.
  void VerifiedWrite(const Address& address,
                     Register new_value,
                     FieldContent old_content);

#if 0
  // Added the following missing operations:
  //
  //   ARM32::AssemblerARM32::uxt() (uxtb and uxth)
  //   ARM32::AssemblerARM32::vpop()
  //   ARM32::AssemblerARM32::vpush()
  //   ARM32::AssemblerARM32::rbit()
  //   ARM32::AssemblerARM32::vbslq()
  //   ARM32::AssemblerARM32::veord()
  //   ARM32::AssemblerARM32::vld1qr()
  //   ARM32::AssemblerARM32::vshlqc()
  //   ARM32::AssemblerARM32::vshrqic()
  //   ARM32::AssemblerARM32::vshrquc()
  //   ARM32::AssemblerARM32::vst1qr()
  //   ARM32::AssemblerARM32::vmorqi()
  //   ARM32::AssemblerARM32::vmovqc()
#endif

  DISALLOW_ALLOCATION();
  DISALLOW_COPY_AND_ASSIGN(Assembler);
};

}  // namespace dart

#endif  // VM_ASSEMBLER_ARM_H_