1 // Copyright (c) 1994-2006 Sun Microsystems Inc. 2 // All Rights Reserved. 3 // 4 // Redistribution and use in source and binary forms, with or without 5 // modification, are permitted provided that the following conditions 6 // are met: 7 // 8 // - Redistributions of source code must retain the above copyright notice, 9 // this list of conditions and the following disclaimer. 10 // 11 // - Redistribution in binary form must reproduce the above copyright 12 // notice, this list of conditions and the following disclaimer in the 13 // documentation and/or other materials provided with the 14 // distribution. 15 // 16 // - Neither the name of Sun Microsystems or the names of contributors may 17 // be used to endorse or promote products derived from this software without 18 // specific prior written permission. 19 // 20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED 31 // OF THE POSSIBILITY OF SUCH DAMAGE. 32 33 // The original source code covered by the above license above has been 34 // modified significantly by Google Inc. 35 // Copyright 2012 the V8 project authors. All rights reserved. 
36 37 // A light-weight ARM Assembler 38 // Generates user mode instructions for the ARM architecture up to version 5 39 40 #ifndef V8_CODEGEN_ARM_ASSEMBLER_ARM_H_ 41 #define V8_CODEGEN_ARM_ASSEMBLER_ARM_H_ 42 43 #include <stdio.h> 44 #include <memory> 45 #include <vector> 46 47 #include "src/codegen/arm/constants-arm.h" 48 #include "src/codegen/arm/register-arm.h" 49 #include "src/codegen/assembler.h" 50 #include "src/codegen/constant-pool.h" 51 #include "src/numbers/double.h" 52 #include "src/utils/boxed-float.h" 53 54 namespace v8 { 55 namespace internal { 56 57 class SafepointTableBuilder; 58 59 // Coprocessor number 60 enum Coprocessor { 61 p0 = 0, 62 p1 = 1, 63 p2 = 2, 64 p3 = 3, 65 p4 = 4, 66 p5 = 5, 67 p6 = 6, 68 p7 = 7, 69 p8 = 8, 70 p9 = 9, 71 p10 = 10, 72 p11 = 11, 73 p12 = 12, 74 p13 = 13, 75 p14 = 14, 76 p15 = 15 77 }; 78 79 // ----------------------------------------------------------------------------- 80 // Machine instruction Operands 81 82 // Class Operand represents a shifter operand in data processing instructions 83 class V8_EXPORT_PRIVATE Operand { 84 public: 85 // immediate 86 V8_INLINE explicit Operand(int32_t immediate, 87 RelocInfo::Mode rmode = RelocInfo::NONE) rmode_(rmode)88 : rmode_(rmode) { 89 value_.immediate = immediate; 90 } 91 V8_INLINE static Operand Zero(); 92 V8_INLINE explicit Operand(const ExternalReference& f); 93 explicit Operand(Handle<HeapObject> handle); 94 V8_INLINE explicit Operand(Smi value); 95 96 // rm 97 V8_INLINE explicit Operand(Register rm); 98 99 // rm <shift_op> shift_imm 100 explicit Operand(Register rm, ShiftOp shift_op, int shift_imm); SmiUntag(Register rm)101 V8_INLINE static Operand SmiUntag(Register rm) { 102 return Operand(rm, ASR, kSmiTagSize); 103 } PointerOffsetFromSmiKey(Register key)104 V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) { 105 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 106 return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize); 107 } 
DoubleOffsetFromSmiKey(Register key)108 V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) { 109 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2); 110 return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize); 111 } 112 113 // rm <shift_op> rs 114 explicit Operand(Register rm, ShiftOp shift_op, Register rs); 115 116 static Operand EmbeddedNumber(double number); // Smi or HeapNumber. 117 static Operand EmbeddedStringConstant(const StringConstantBase* str); 118 119 // Return true if this is a register operand. IsRegister()120 bool IsRegister() const { 121 return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL && 122 shift_imm_ == 0; 123 } 124 // Return true if this is a register operand shifted with an immediate. IsImmediateShiftedRegister()125 bool IsImmediateShiftedRegister() const { 126 return rm_.is_valid() && !rs_.is_valid(); 127 } 128 // Return true if this is a register operand shifted with a register. IsRegisterShiftedRegister()129 bool IsRegisterShiftedRegister() const { 130 return rm_.is_valid() && rs_.is_valid(); 131 } 132 133 // Return the number of actual instructions required to implement the given 134 // instruction for this particular operand. This can be a single instruction, 135 // if no load into a scratch register is necessary, or anything between 2 and 136 // 4 instructions when we need to load from the constant pool (depending upon 137 // whether the constant pool entry is in the small or extended section). If 138 // the instruction this operand is used for is a MOV or MVN instruction the 139 // actual instruction to use is required for this calculation. For other 140 // instructions instr is ignored. 141 // 142 // The value returned is only valid as long as no entries are added to the 143 // constant pool between this call and the actual instruction being emitted. 
144 int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const; 145 bool MustOutputRelocInfo(const Assembler* assembler) const; 146 immediate()147 inline int32_t immediate() const { 148 DCHECK(IsImmediate()); 149 DCHECK(!IsHeapObjectRequest()); 150 return value_.immediate; 151 } IsImmediate()152 bool IsImmediate() const { return !rm_.is_valid(); } 153 heap_object_request()154 HeapObjectRequest heap_object_request() const { 155 DCHECK(IsHeapObjectRequest()); 156 return value_.heap_object_request; 157 } IsHeapObjectRequest()158 bool IsHeapObjectRequest() const { 159 DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); 160 DCHECK_IMPLIES(is_heap_object_request_, 161 rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || 162 rmode_ == RelocInfo::CODE_TARGET); 163 return is_heap_object_request_; 164 } 165 rm()166 Register rm() const { return rm_; } rs()167 Register rs() const { return rs_; } shift_op()168 ShiftOp shift_op() const { return shift_op_; } 169 170 private: 171 Register rm_ = no_reg; 172 Register rs_ = no_reg; 173 ShiftOp shift_op_; 174 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg 175 union Value { Value()176 Value() {} 177 HeapObjectRequest heap_object_request; // if is_heap_object_request_ 178 int32_t immediate; // otherwise 179 } value_; // valid if rm_ == no_reg 180 bool is_heap_object_request_ = false; 181 RelocInfo::Mode rmode_; 182 183 friend class Assembler; 184 }; 185 186 // Class MemOperand represents a memory operand in load and store instructions 187 class V8_EXPORT_PRIVATE MemOperand { 188 public: 189 // [rn +/- offset] Offset/NegOffset 190 // [rn +/- offset]! 
PreIndex/NegPreIndex 191 // [rn], +/- offset PostIndex/NegPostIndex 192 // offset is any signed 32-bit value; offset is first loaded to a scratch 193 // register if it does not fit the addressing mode (12-bit unsigned and sign 194 // bit) 195 explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset); 196 197 // [rn +/- rm] Offset/NegOffset 198 // [rn +/- rm]! PreIndex/NegPreIndex 199 // [rn], +/- rm PostIndex/NegPostIndex 200 explicit MemOperand(Register rn, Register rm, AddrMode am = Offset); 201 202 // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset 203 // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex 204 // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex 205 explicit MemOperand(Register rn, Register rm, ShiftOp shift_op, int shift_imm, 206 AddrMode am = Offset); 207 V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array, 208 Register key, 209 AddrMode am = Offset) { 210 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 211 return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am); 212 } 213 set_offset(int32_t offset)214 void set_offset(int32_t offset) { 215 DCHECK(rm_ == no_reg); 216 offset_ = offset; 217 } 218 offset()219 uint32_t offset() const { 220 DCHECK(rm_ == no_reg); 221 return offset_; 222 } 223 rn()224 Register rn() const { return rn_; } rm()225 Register rm() const { return rm_; } am()226 AddrMode am() const { return am_; } 227 OffsetIsUint12Encodable()228 bool OffsetIsUint12Encodable() const { 229 return offset_ >= 0 ? 
is_uint12(offset_) : is_uint12(-offset_); 230 } 231 232 private: 233 Register rn_; // base 234 Register rm_; // register offset 235 int32_t offset_; // valid if rm_ == no_reg 236 ShiftOp shift_op_; 237 int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg 238 AddrMode am_; // bits P, U, and W 239 240 friend class Assembler; 241 }; 242 243 // Class NeonMemOperand represents a memory operand in load and 244 // store NEON instructions 245 class V8_EXPORT_PRIVATE NeonMemOperand { 246 public: 247 // [rn {:align}] Offset 248 // [rn {:align}]! PostIndex 249 explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0); 250 251 // [rn {:align}], rm PostIndex 252 explicit NeonMemOperand(Register rn, Register rm, int align = 0); 253 rn()254 Register rn() const { return rn_; } rm()255 Register rm() const { return rm_; } align()256 int align() const { return align_; } 257 258 private: 259 void SetAlignment(int align); 260 261 Register rn_; // base 262 Register rm_; // register increment 263 int align_; 264 }; 265 266 // Class NeonListOperand represents a list of NEON registers 267 class NeonListOperand { 268 public: 269 explicit NeonListOperand(DoubleRegister base, int register_count = 1) base_(base)270 : base_(base), register_count_(register_count) {} NeonListOperand(QwNeonRegister q_reg)271 explicit NeonListOperand(QwNeonRegister q_reg) 272 : base_(q_reg.low()), register_count_(2) {} base()273 DoubleRegister base() const { return base_; } register_count()274 int register_count() { return register_count_; } length()275 int length() const { return register_count_ - 1; } type()276 NeonListType type() const { 277 switch (register_count_) { 278 default: 279 UNREACHABLE(); 280 // Fall through. 
281 case 1: 282 return nlt_1; 283 case 2: 284 return nlt_2; 285 case 3: 286 return nlt_3; 287 case 4: 288 return nlt_4; 289 } 290 } 291 292 private: 293 DoubleRegister base_; 294 int register_count_; 295 }; 296 297 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { 298 public: 299 // Create an assembler. Instructions and relocation information are emitted 300 // into a buffer, with the instructions starting from the beginning and the 301 // relocation information starting from the end of the buffer. See CodeDesc 302 // for a detailed comment on the layout (globals.h). 303 // 304 // If the provided buffer is nullptr, the assembler allocates and grows its 305 // own buffer. Otherwise it takes ownership of the provided buffer. 306 explicit Assembler(const AssemblerOptions&, 307 std::unique_ptr<AssemblerBuffer> = {}); 308 309 ~Assembler() override; 310 AbortedCodeGeneration()311 void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); } 312 313 // GetCode emits any pending (non-emitted) code and fills the descriptor desc. 314 static constexpr int kNoHandlerTable = 0; 315 static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; 316 void GetCode(Isolate* isolate, CodeDesc* desc, 317 SafepointTableBuilder* safepoint_table_builder, 318 int handler_table_offset); 319 320 // Convenience wrapper for code without safepoint or handler tables. GetCode(Isolate * isolate,CodeDesc * desc)321 void GetCode(Isolate* isolate, CodeDesc* desc) { 322 GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); 323 } 324 325 // Label operations & relative jumps (PPUM Appendix D) 326 // 327 // Takes a branch opcode (cc) and a label (L) and generates 328 // either a backward branch or a forward branch and links it 329 // to the label fixup chain. 
Usage: 330 // 331 // Label L; // unbound label 332 // j(cc, &L); // forward branch to unbound label 333 // bind(&L); // bind label to the current pc 334 // j(cc, &L); // backward branch to bound label 335 // bind(&L); // illegal: a label may be bound only once 336 // 337 // Note: The same Label can be used for forward and backward branches 338 // but it may be bound only once. 339 340 void bind(Label* L); // binds an unbound label L to the current code position 341 342 // Returns the branch offset to the given label from the current code position 343 // Links the label to the current position if it is still unbound 344 // Manages the jump elimination optimization if the second parameter is true. 345 int branch_offset(Label* L); 346 347 // Returns true if the given pc address is the start of a constant pool load 348 // instruction sequence. 349 V8_INLINE static bool is_constant_pool_load(Address pc); 350 351 // Return the address in the constant pool of the code target address used by 352 // the branch/call instruction at pc, or the object in a mov. 353 V8_INLINE static Address constant_pool_entry_address(Address pc, 354 Address constant_pool); 355 356 // Read/Modify the code target address in the branch/call instruction at pc. 357 // The isolate argument is unused (and may be nullptr) when skipping flushing. 358 V8_INLINE static Address target_address_at(Address pc, Address constant_pool); 359 V8_INLINE static void set_target_address_at( 360 Address pc, Address constant_pool, Address target, 361 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); 362 363 // This sets the branch destination (which is in the constant pool on ARM). 364 // This is for calls and branches within generated code. 365 inline static void deserialization_set_special_target_at( 366 Address constant_pool_entry, Code code, Address target); 367 368 // Get the size of the special target encoded at 'location'. 
369 inline static int deserialization_special_target_size(Address location); 370 371 // This sets the internal reference at the pc. 372 inline static void deserialization_set_target_internal_reference_at( 373 Address pc, Address target, 374 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); 375 376 // Here we are patching the address in the constant pool, not the actual call 377 // instruction. The address in the constant pool is the same size as a 378 // pointer. 379 static constexpr int kSpecialTargetSize = kPointerSize; 380 GetScratchRegisterList()381 RegList* GetScratchRegisterList() { return &scratch_register_list_; } GetScratchVfpRegisterList()382 VfpRegList* GetScratchVfpRegisterList() { 383 return &scratch_vfp_register_list_; 384 } 385 386 // --------------------------------------------------------------------------- 387 // Code generation 388 389 // Insert the smallest number of nop instructions 390 // possible to align the pc offset to a multiple 391 // of m. m must be a power of 2 (>= 4). 392 void Align(int m); 393 // Insert the smallest number of zero bytes possible to align the pc offset 394 // to a mulitple of m. m must be a power of 2 (>= 2). 395 void DataAlign(int m); 396 // Aligns code to something that's optimal for a jump target for the platform. 
397 void CodeTargetAlign(); 398 399 // Branch instructions 400 void b(int branch_offset, Condition cond = al, 401 RelocInfo::Mode rmode = RelocInfo::NONE); 402 void bl(int branch_offset, Condition cond = al, 403 RelocInfo::Mode rmode = RelocInfo::NONE); 404 void blx(int branch_offset); // v5 and above 405 void blx(Register target, Condition cond = al); // v5 and above 406 void bx(Register target, Condition cond = al); // v5 and above, plus v4t 407 408 // Convenience branch instructions using labels 409 void b(Label* L, Condition cond = al); b(Condition cond,Label * L)410 void b(Condition cond, Label* L) { b(L, cond); } 411 void bl(Label* L, Condition cond = al); bl(Condition cond,Label * L)412 void bl(Condition cond, Label* L) { bl(L, cond); } 413 void blx(Label* L); // v5 and above 414 415 // Data-processing instructions 416 417 void and_(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 418 Condition cond = al); 419 void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC, 420 Condition cond = al); 421 422 void eor(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 423 Condition cond = al); 424 void eor(Register dst, Register src1, Register src2, SBit s = LeaveCC, 425 Condition cond = al); 426 427 void sub(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 428 Condition cond = al); 429 void sub(Register dst, Register src1, Register src2, SBit s = LeaveCC, 430 Condition cond = al); 431 432 void rsb(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 433 Condition cond = al); 434 435 void add(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 436 Condition cond = al); 437 void add(Register dst, Register src1, Register src2, SBit s = LeaveCC, 438 Condition cond = al); 439 440 void adc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 441 Condition cond = al); 442 443 void sbc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 444 
Condition cond = al); 445 446 void rsc(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 447 Condition cond = al); 448 449 void tst(Register src1, const Operand& src2, Condition cond = al); 450 void tst(Register src1, Register src2, Condition cond = al); 451 452 void teq(Register src1, const Operand& src2, Condition cond = al); 453 454 void cmp(Register src1, const Operand& src2, Condition cond = al); 455 void cmp(Register src1, Register src2, Condition cond = al); 456 457 void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al); 458 459 void cmn(Register src1, const Operand& src2, Condition cond = al); 460 461 void orr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 462 Condition cond = al); 463 void orr(Register dst, Register src1, Register src2, SBit s = LeaveCC, 464 Condition cond = al); 465 466 void mov(Register dst, const Operand& src, SBit s = LeaveCC, 467 Condition cond = al); 468 void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al); 469 470 // Load the position of the label relative to the generated code object 471 // pointer in a register. 472 void mov_label_offset(Register dst, Label* label); 473 474 // ARMv7 instructions for loading a 32 bit immediate in two instructions. 475 // The constant for movw and movt should be in the range 0-0xffff. 
476 void movw(Register reg, uint32_t immediate, Condition cond = al); 477 void movt(Register reg, uint32_t immediate, Condition cond = al); 478 479 void bic(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 480 Condition cond = al); 481 482 void mvn(Register dst, const Operand& src, SBit s = LeaveCC, 483 Condition cond = al); 484 485 // Shift instructions 486 487 void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 488 Condition cond = al); 489 490 void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 491 Condition cond = al); 492 493 void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC, 494 Condition cond = al); 495 496 // Multiply instructions 497 498 void mla(Register dst, Register src1, Register src2, Register srcA, 499 SBit s = LeaveCC, Condition cond = al); 500 501 void mls(Register dst, Register src1, Register src2, Register srcA, 502 Condition cond = al); 503 504 void sdiv(Register dst, Register src1, Register src2, Condition cond = al); 505 506 void udiv(Register dst, Register src1, Register src2, Condition cond = al); 507 508 void mul(Register dst, Register src1, Register src2, SBit s = LeaveCC, 509 Condition cond = al); 510 511 void smmla(Register dst, Register src1, Register src2, Register srcA, 512 Condition cond = al); 513 514 void smmul(Register dst, Register src1, Register src2, Condition cond = al); 515 516 void smlal(Register dstL, Register dstH, Register src1, Register src2, 517 SBit s = LeaveCC, Condition cond = al); 518 519 void smull(Register dstL, Register dstH, Register src1, Register src2, 520 SBit s = LeaveCC, Condition cond = al); 521 522 void umlal(Register dstL, Register dstH, Register src1, Register src2, 523 SBit s = LeaveCC, Condition cond = al); 524 525 void umull(Register dstL, Register dstH, Register src1, Register src2, 526 SBit s = LeaveCC, Condition cond = al); 527 528 // Miscellaneous arithmetic instructions 529 530 void clz(Register dst, 
Register src, Condition cond = al); // v5 and above 531 532 // Saturating instructions. v6 and above. 533 534 // Unsigned saturate. 535 // 536 // Saturate an optionally shifted signed value to an unsigned range. 537 // 538 // usat dst, #satpos, src 539 // usat dst, #satpos, src, lsl #sh 540 // usat dst, #satpos, src, asr #sh 541 // 542 // Register dst will contain: 543 // 544 // 0, if s < 0 545 // (1 << satpos) - 1, if s > ((1 << satpos) - 1) 546 // s, otherwise 547 // 548 // where s is the contents of src after shifting (if used.) 549 void usat(Register dst, int satpos, const Operand& src, Condition cond = al); 550 551 // Bitfield manipulation instructions. v7 and above. 552 553 void ubfx(Register dst, Register src, int lsb, int width, 554 Condition cond = al); 555 556 void sbfx(Register dst, Register src, int lsb, int width, 557 Condition cond = al); 558 559 void bfc(Register dst, int lsb, int width, Condition cond = al); 560 561 void bfi(Register dst, Register src, int lsb, int width, Condition cond = al); 562 563 void pkhbt(Register dst, Register src1, const Operand& src2, 564 Condition cond = al); 565 566 void pkhtb(Register dst, Register src1, const Operand& src2, 567 Condition cond = al); 568 569 void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al); 570 void sxtab(Register dst, Register src1, Register src2, int rotate = 0, 571 Condition cond = al); 572 void sxth(Register dst, Register src, int rotate = 0, Condition cond = al); 573 void sxtah(Register dst, Register src1, Register src2, int rotate = 0, 574 Condition cond = al); 575 576 void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al); 577 void uxtab(Register dst, Register src1, Register src2, int rotate = 0, 578 Condition cond = al); 579 void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al); 580 void uxth(Register dst, Register src, int rotate = 0, Condition cond = al); 581 void uxtah(Register dst, Register src1, Register src2, int rotate = 
0, 582 Condition cond = al); 583 584 // Reverse the bits in a register. 585 void rbit(Register dst, Register src, Condition cond = al); 586 void rev(Register dst, Register src, Condition cond = al); 587 588 // Status register access instructions 589 590 void mrs(Register dst, SRegister s, Condition cond = al); 591 void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al); 592 593 // Load/Store instructions 594 void ldr(Register dst, const MemOperand& src, Condition cond = al); 595 void str(Register src, const MemOperand& dst, Condition cond = al); 596 void ldrb(Register dst, const MemOperand& src, Condition cond = al); 597 void strb(Register src, const MemOperand& dst, Condition cond = al); 598 void ldrh(Register dst, const MemOperand& src, Condition cond = al); 599 void strh(Register src, const MemOperand& dst, Condition cond = al); 600 void ldrsb(Register dst, const MemOperand& src, Condition cond = al); 601 void ldrsh(Register dst, const MemOperand& src, Condition cond = al); 602 void ldrd(Register dst1, Register dst2, const MemOperand& src, 603 Condition cond = al); 604 void strd(Register src1, Register src2, const MemOperand& dst, 605 Condition cond = al); 606 607 // Load literal from a pc relative address. 
608 void ldr_pcrel(Register dst, int imm12, Condition cond = al); 609 610 // Load/Store exclusive instructions 611 void ldrex(Register dst, Register src, Condition cond = al); 612 void strex(Register src1, Register src2, Register dst, Condition cond = al); 613 void ldrexb(Register dst, Register src, Condition cond = al); 614 void strexb(Register src1, Register src2, Register dst, Condition cond = al); 615 void ldrexh(Register dst, Register src, Condition cond = al); 616 void strexh(Register src1, Register src2, Register dst, Condition cond = al); 617 void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al); 618 void strexd(Register res, Register src1, Register src2, Register dst, 619 Condition cond = al); 620 621 // Preload instructions 622 void pld(const MemOperand& address); 623 624 // Load/Store multiple instructions 625 void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al); 626 void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al); 627 628 // Exception-generating instructions and debugging support 629 void stop(Condition cond = al, int32_t code = kDefaultStopCode); 630 631 void bkpt(uint32_t imm16); // v5 and above 632 void svc(uint32_t imm24, Condition cond = al); 633 634 // Synchronization instructions. 635 // On ARMv6, an equivalent CP15 operation will be used. 636 void dmb(BarrierOption option); 637 void dsb(BarrierOption option); 638 void isb(BarrierOption option); 639 640 // Conditional speculation barrier. 
641 void csdb(); 642 643 // Coprocessor instructions 644 645 void cdp(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, 646 CRegister crm, int opcode_2, Condition cond = al); 647 648 void cdp2(Coprocessor coproc, int opcode_1, CRegister crd, CRegister crn, 649 CRegister crm, 650 int opcode_2); // v5 and above 651 652 void mcr(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, 653 CRegister crm, int opcode_2 = 0, Condition cond = al); 654 655 void mcr2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, 656 CRegister crm, 657 int opcode_2 = 0); // v5 and above 658 659 void mrc(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, 660 CRegister crm, int opcode_2 = 0, Condition cond = al); 661 662 void mrc2(Coprocessor coproc, int opcode_1, Register rd, CRegister crn, 663 CRegister crm, 664 int opcode_2 = 0); // v5 and above 665 666 void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src, 667 LFlag l = Short, Condition cond = al); 668 void ldc(Coprocessor coproc, CRegister crd, Register base, int option, 669 LFlag l = Short, Condition cond = al); 670 671 void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src, 672 LFlag l = Short); // v5 and above 673 void ldc2(Coprocessor coproc, CRegister crd, Register base, int option, 674 LFlag l = Short); // v5 and above 675 676 // Support for VFP. 677 // All these APIs support S0 to S31 and D0 to D31. 
678 679 void vldr(const DwVfpRegister dst, const Register base, int offset, 680 const Condition cond = al); 681 void vldr(const DwVfpRegister dst, const MemOperand& src, 682 const Condition cond = al); 683 684 void vldr(const SwVfpRegister dst, const Register base, int offset, 685 const Condition cond = al); 686 void vldr(const SwVfpRegister dst, const MemOperand& src, 687 const Condition cond = al); 688 689 void vstr(const DwVfpRegister src, const Register base, int offset, 690 const Condition cond = al); 691 void vstr(const DwVfpRegister src, const MemOperand& dst, 692 const Condition cond = al); 693 694 void vstr(const SwVfpRegister src, const Register base, int offset, 695 const Condition cond = al); 696 void vstr(const SwVfpRegister src, const MemOperand& dst, 697 const Condition cond = al); 698 699 void vldm(BlockAddrMode am, Register base, DwVfpRegister first, 700 DwVfpRegister last, Condition cond = al); 701 702 void vstm(BlockAddrMode am, Register base, DwVfpRegister first, 703 DwVfpRegister last, Condition cond = al); 704 705 void vldm(BlockAddrMode am, Register base, SwVfpRegister first, 706 SwVfpRegister last, Condition cond = al); 707 708 void vstm(BlockAddrMode am, Register base, SwVfpRegister first, 709 SwVfpRegister last, Condition cond = al); 710 711 void vmov(const SwVfpRegister dst, Float32 imm); 712 void vmov(const DwVfpRegister dst, Double imm, 713 const Register extra_scratch = no_reg); 714 void vmov(const SwVfpRegister dst, const SwVfpRegister src, 715 const Condition cond = al); 716 void vmov(const DwVfpRegister dst, const DwVfpRegister src, 717 const Condition cond = al); 718 void vmov(const DwVfpRegister dst, const Register src1, const Register src2, 719 const Condition cond = al); 720 void vmov(const Register dst1, const Register dst2, const DwVfpRegister src, 721 const Condition cond = al); 722 void vmov(const SwVfpRegister dst, const Register src, 723 const Condition cond = al); 724 void vmov(const Register dst, const SwVfpRegister src, 
            const Condition cond = al);
  // VFP scalar conversions. Each instruction converts a single value between
  // integer and floating-point representations; |mode| selects the rounding
  // behaviour (round-to-zero by default, per the kDefaultRoundToZero default).
  void vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  void vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src,
                    VFPConversionMode mode = kDefaultRoundToZero,
                    const Condition cond = al);
  // Fixed-point to double-precision conversion. Note there is no separate
  // source operand: the ARM encoding of this VCVT form converts |dst| in
  // place, treating it as a fixed-point value with |fraction_bits| fraction
  // bits.
  void vcvt_f64_s32(const DwVfpRegister dst, int fraction_bits,
                    const Condition cond = al);

  // Moves between a core register and the VFP status register (FPSCR).
  // vmrs copies FPSCR into |dst|; vmsr copies the named register into FPSCR.
  void vmrs(const Register dst, const Condition cond = al);
  void vmsr(const Register dst, const Condition cond = al);

  // VFP scalar arithmetic; double- and single-precision overloads.
  void vneg(const DwVfpRegister dst, const DwVfpRegister src,
            const Condition cond = al);
  void vneg(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vabs(const DwVfpRegister dst, const DwVfpRegister src,
            const Condition cond = al);
  void vabs(const SwVfpRegister dst, const SwVfpRegister src,
            const Condition cond = al);
  void vadd(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vsub(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmul(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  // Fused multiply-accumulate / multiply-subtract (dst += / -= src1 * src2).
  void vmla(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vmls(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  void vdiv(const DwVfpRegister dst, const DwVfpRegister src1,
            const DwVfpRegister src2, const Condition cond = al);
  void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
            const SwVfpRegister src2, const Condition cond = al);
  // Compares, setting the VFP status flags (read back with vmrs).
  void vcmp(const DwVfpRegister src1, const DwVfpRegister src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
            const Condition cond = al);
  // NOTE(review): the immediate forms presumably require src2 == 0.0, since
  // the VCMP encoding only supports compare-with-zero — confirm against the
  // .cc implementation.
  void vcmp(const DwVfpRegister src1, const double src2,
            const Condition cond = al);
  void vcmp(const SwVfpRegister src1, const float src2,
            const Condition cond = al);

  // ARMv8 IEEE-754-2008 min/max (NaN-propagating maxNum/minNum semantics).
  void vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
              const SwVfpRegister src2);
  void vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
              const DwVfpRegister src2);
  void vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
              const SwVfpRegister src2);

  // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
  void vsel(const Condition cond, const DwVfpRegister dst,
            const DwVfpRegister src1, const DwVfpRegister src2);
  void vsel(const Condition cond, const SwVfpRegister dst,
            const SwVfpRegister src1, const SwVfpRegister src2);

  void vsqrt(const DwVfpRegister dst, const DwVfpRegister src,
             const Condition cond = al);
  void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
             const Condition cond = al);

  // ARMv8 rounding instructions (Scalar).
  // a = ties-to-away, n = ties-to-even, m = toward -inf, p = toward +inf,
  // z = toward zero. Only the round-to-zero variants take a condition.
  void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
  void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
              const Condition cond = al);
  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
              const Condition cond = al);

  // Support for NEON.

  // All these APIs support D0 to D31 and Q0 to Q15.
  void vld1(NeonSize size, const NeonListOperand& dst,
            const NeonMemOperand& src);
  // vld1s (single element to one lane).
  void vld1s(NeonSize size, const NeonListOperand& dst, uint8_t index,
             const NeonMemOperand& src);
  // vld1r: load one element and replicate it to all lanes.
  void vld1r(NeonSize size, const NeonListOperand& dst,
             const NeonMemOperand& src);
  void vst1(NeonSize size, const NeonListOperand& src,
            const NeonMemOperand& dst);
  // dt represents the narrower type
  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
  // dst_dt represents the narrower type, src_dt represents the src type.
  void vqmovn(NeonDataType dst_dt, NeonDataType src_dt, DwVfpRegister dst,
              QwNeonRegister src);

  // Only unconditional core <-> scalar moves are currently supported.
  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
  void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);

  // Immediate and register-to-register NEON moves.
  void vmov(DwVfpRegister dst, uint64_t imm);
  void vmov(QwNeonRegister dst, uint64_t imm);
  void vmov(QwNeonRegister dst, QwNeonRegister src);
  // Duplicate a core register or a vector lane into every lane of dst.
  void vdup(NeonSize size, QwNeonRegister dst, Register src);
  void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
  void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);

  // Per-lane integer <-> float conversions.
  void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);

  void vmvn(QwNeonRegister dst, QwNeonRegister src);
  void vswp(DwVfpRegister dst, DwVfpRegister src);
  void vswp(QwNeonRegister dst, QwNeonRegister src);
  // Overloads without a NeonSize operate on float lanes; the NeonSize
  // overloads operate on integer lanes of the given element size.
  void vabs(QwNeonRegister dst, QwNeonRegister src);
  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vneg(QwNeonRegister dst, QwNeonRegister src);
  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);

  // Bitwise operations (lane-size agnostic).
  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbic(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  // Arithmetic: no-size overloads are float; NeonSize/NeonDataType overloads
  // are integer (the q-prefixed forms saturate).
  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  // Widening multiply-accumulate / multiply (D x D -> Q).
  void vmlal(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vmul(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmull(NeonDataType size, QwNeonRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  // Pairwise operations on D registers.
  void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);

  // ARMv8 rounding instructions (NEON).
  void vrintm(NeonDataType dt, const QwNeonRegister dst,
              const QwNeonRegister src);
  void vrintn(NeonDataType dt, const QwNeonRegister dst,
              const QwNeonRegister src);
  void vrintp(NeonDataType dt, const QwNeonRegister dst,
              const QwNeonRegister src);
  void vrintz(NeonDataType dt, const QwNeonRegister dst,
              const QwNeonRegister src);

  // Shifts: by immediate or by per-lane register amount.
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
            int shift);
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
            QwNeonRegister shift);
  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
            int shift);
  // Shift-and-insert (left/right).
  void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  // vrecpe and vrsqrte only support floating point lanes.
  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  // Per-lane comparisons producing all-ones / all-zeros lane masks.
  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  // Rounding halving add.
  void vrhadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
              QwNeonRegister src2);
  // Extract a vector from the byte-wise concatenation of src1:src2.
  void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
            int bytes);
  // Permutes operating on both operands in place.
  void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  // Table lookup (vtbl zeroes out-of-range lanes; vtbx leaves them intact).
  void vtbl(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
  void vtbx(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);

  // Pseudo instructions

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  void nop(int type = 0);  // 0 is the default non-marking type.
983 984 void push(Register src, Condition cond = al) { 985 str(src, MemOperand(sp, 4, NegPreIndex), cond); 986 } 987 988 void pop(Register dst, Condition cond = al) { 989 ldr(dst, MemOperand(sp, 4, PostIndex), cond); 990 } 991 992 void pop(); 993 994 void vpush(QwNeonRegister src, Condition cond = al) { 995 vstm(db_w, sp, src.low(), src.high(), cond); 996 } 997 998 void vpush(DwVfpRegister src, Condition cond = al) { 999 vstm(db_w, sp, src, src, cond); 1000 } 1001 1002 void vpush(SwVfpRegister src, Condition cond = al) { 1003 vstm(db_w, sp, src, src, cond); 1004 } 1005 1006 void vpop(DwVfpRegister dst, Condition cond = al) { 1007 vldm(ia_w, sp, dst, dst, cond); 1008 } 1009 1010 // Jump unconditionally to given label. jmp(Label * L)1011 void jmp(Label* L) { b(L, al); } 1012 1013 // Check the code size generated from label to here. SizeOfCodeGeneratedSince(Label * label)1014 int SizeOfCodeGeneratedSince(Label* label) { 1015 return pc_offset() - label->pos(); 1016 } 1017 1018 // Check the number of instructions generated from label to here. InstructionsGeneratedSince(Label * label)1019 int InstructionsGeneratedSince(Label* label) { 1020 return SizeOfCodeGeneratedSince(label) / kInstrSize; 1021 } 1022 1023 // Check whether an immediate fits an addressing mode 1 instruction. 1024 static bool ImmediateFitsAddrMode1Instruction(int32_t imm32); 1025 1026 // Check whether an immediate fits an addressing mode 2 instruction. 1027 bool ImmediateFitsAddrMode2Instruction(int32_t imm32); 1028 1029 // Class for scoping postponing the constant pool generation. 
  // RAII helper: blocks constant pool emission for its lifetime.
  // Scopes may be nested; the pool is unblocked when the outermost scope ends.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };

  // Unused on this architecture.
  void MaybeEmitOutOfLineConstantPool() {}

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);

  // Record the emission of a constant pool.
  //
  // The emission of constant pool depends on the size of the code generated
  // and the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the
  // constant pools and cause the version of the code with debugger support to
  // have constant pools generated in different places.
  // Recording the position and size of emitted constant pools allows to
  // correctly compute the offset mappings between the different versions of a
  // function in all situations.
  //
  // The parameter indicates the size of the constant pool (in bytes),
  // including the marker and branch over the data.
  void RecordConstPool(int size);

  // Writes a single byte or word of data in the code stream. Used
  // for inline tables, e.g., jump-tables. CheckConstantPool() should be
  // called before any use of db/dd/dq/dp to ensure that constant pools
  // are not emitted as part of the tables generated.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  void dp(uintptr_t data) { dd(data); }  // Pointers are 32-bit on ARM.

  // Read/patch instructions at a buffer offset (non-static) or at an
  // absolute address (static).
  Instr instr_at(int pos) {
    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
  }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
  }
  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }

  // Classification and field-extraction helpers for already-emitted
  // instructions, used when patching code in place.
  static Condition GetCondition(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static bool IsVldrDRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static int GetVldrDRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
static bool IsBOrBlPcImmediateOffset(Instr instr); 1112 static bool IsVldrDPcImmediateOffset(Instr instr); 1113 static bool IsBlxReg(Instr instr); 1114 static bool IsBlxIp(Instr instr); 1115 static bool IsTstImmediate(Instr instr); 1116 static bool IsCmpRegister(Instr instr); 1117 static bool IsCmpImmediate(Instr instr); 1118 static Register GetCmpImmediateRegister(Instr instr); 1119 static int GetCmpImmediateRawImmediate(Instr instr); 1120 static bool IsNop(Instr instr, int type = NON_MARKING_NOP); 1121 static bool IsMovImmed(Instr instr); 1122 static bool IsOrrImmed(Instr instr); 1123 static bool IsMovT(Instr instr); 1124 static Instr GetMovTPattern(); 1125 static bool IsMovW(Instr instr); 1126 static Instr GetMovWPattern(); 1127 static Instr EncodeMovwImmediate(uint32_t immediate); 1128 static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate); 1129 static int DecodeShiftImm(Instr instr); 1130 static Instr PatchShiftImm(Instr instr, int immed); 1131 1132 // Constants in pools are accessed via pc relative addressing, which can 1133 // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point 1134 // PC-relative loads, thereby defining a maximum distance between the 1135 // instruction and the accessed constant. 1136 static constexpr int kMaxDistToIntPool = 4 * KB; 1137 // All relocations could be integer, it therefore acts as the limit. 1138 static constexpr int kMinNumPendingConstants = 4; 1139 static constexpr int kMaxNumPending32Constants = 1140 kMaxDistToIntPool / kInstrSize; 1141 1142 // Postpone the generation of the constant pool for the specified number of 1143 // instructions. 1144 void BlockConstPoolFor(int instructions); 1145 1146 // Check if is time to emit a constant pool. 
  void CheckConstPool(bool force_emit, bool require_jump);

  // Emit the constant pool if the next scheduled check point has been
  // reached (cheap fast path used after every emitted instruction).
  void MaybeCheckConstPool() {
    if (pc_offset() >= next_buffer_check_) {
      CheckConstPool(false, true);
    }
  }

  // Move a 32-bit immediate into a register, potentially via the constant
  // pool.
  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
      Address pc_) const;

 protected:
  // Bytes left between the emission cursor and the relocation writer, which
  // grows downward from the end of the buffer.
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos
  int target_at(int pos);

  // Patch branch instruction at pos to branch to given branch target pos
  void target_at_put(int pos, int target_pos);

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool() {
    if (const_pool_blocked_nesting_++ == 0) {
      // Prevent constant pool checks happening by setting the next check to
      // the biggest possible offset.
      next_buffer_check_ = kMaxInt;
    }
  }

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool() {
    if (--const_pool_blocked_nesting_ == 0) {
#ifdef DEBUG
      // Max pool start (if we need a jump and an alignment).
      int start = pc_offset() + kInstrSize + 2 * kPointerSize;
      // Check the constant pool hasn't been blocked for too long.
      DCHECK(pending_32_bit_constants_.empty() ||
             (start < first_const_pool_32_use_ + kMaxDistToIntPool));
#endif
      // Two cases:
      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
      //    still blocked
      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
      //    trigger a check.
      next_buffer_check_ = no_const_pool_before_;
    }
  }

  // True while emission is blocked, either by an active scope or by an
  // explicit "no pool before this offset" request (BlockConstPoolFor).
  bool is_const_pool_blocked() const {
    return (const_pool_blocked_nesting_ > 0) ||
           (pc_offset() < no_const_pool_before_);
  }

  // D16-D31 require the VFP32DREGS feature; without it only the low 16
  // double registers (and correspondingly the low 8 Q registers) exist.
  bool VfpRegisterIsAvailable(DwVfpRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters);
  }

  bool VfpRegisterIsAvailable(QwNeonRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
  }

  inline void emit(Instr x);

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences
  // do not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 32;
  STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);

  // Relocation info generation
  // Each relocation is encoded as a variable size value
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // ConstantPoolEntry records are used during code generation as temporary
  // containers for constants and code target addresses until they are
  // emitted to the constant pool. These records are temporarily stored in a
  // separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need
  // one pending relocation entry per instruction.

  // The buffers of pending constant pool entries.
  std::vector<ConstantPoolEntry> pending_32_bit_constants_;

  // Scratch registers available for use by the Assembler.
  RegList scratch_register_list_;
  VfpRegList scratch_vfp_register_list_;

 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  int next_buffer_check_;  // pc offset of next buffer check

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after
  // unconditional jumps or after returns from functions (in dead code
  // locations). If a long code sequence does not contain unconditional
  // jumps, it is necessary to emit the constant pool before the pool gets
  // too far from the location it is accessed from. In this case, we emit a
  // jump over the emitted constant pool.
  // Constants in the pool may be addresses of functions that gets relocated;
  // if so, a relocation info entry is associated to the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is
  // not an exact science, and that we rely on some slop to not overrun
  // buffers.
  static constexpr int kCheckPoolIntervalInst = 32;
  static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;        // Block emission before this pc offset.

  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_const_pool_32_use_;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  inline void CheckBuffer();
  void GrowBuffer();

  // Instruction generation
  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
  // Attempt to encode operand |x| for instruction |instr| and return true on
  // success. The result will be encoded in |instr| directly. This method may
  // change the opcode if deemed beneficial, for instance, MOV may be turned
  // into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this
  // method may fail is that the operand is an immediate that cannot be
  // encoded.
  bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);

  void AddrMode2(Instr instr, Register rd, const MemOperand& x);
  void AddrMode3(Instr instr, Register rd, const MemOperand& x);
  void AddrMode4(Instr instr, Register rn, RegList rl);
  void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
                            intptr_t value);
  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  int WriteCodeComments();

  friend class RelocInfo;
  friend class BlockConstPoolScope;
  friend class EnsureSpace;
  friend class UseScratchRegisterScope;
};

// Helper that guarantees buffer headroom before emitting; see the inline
// definition for the actual check.
class EnsureSpace {
 public:
  V8_INLINE explicit EnsureSpace(Assembler* assembler);
};

// Assembler specialized for patching a fixed number of instructions at an
// existing code address rather than generating fresh code.
class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(const AssemblerOptions& options, byte* address,
                    int instructions);
  ~PatchingAssembler();

  void Emit(Address addr);
  void PadWithNops();
};

// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be
// returned at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list is modified by some other means. Note that this
// scope can be nested but the destructors need to run in the opposite order
// as the constructors. We do not have assertions for this.
class V8_EXPORT_PRIVATE UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  // Take a register from the list and return it.
  Register Acquire();
  SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
  LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
  // D/Q acquisition additionally checks the register is usable on the
  // current CPU (high D registers require VFP32DREGS).
  DwVfpRegister AcquireD() {
    DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }
  QwNeonRegister AcquireQ() {
    QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }

  // Check if we have registers available to acquire.
  bool CanAcquire() const {
    return *assembler_->GetScratchRegisterList() != 0;
  }
  bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }

 private:
  friend class Assembler;
  friend class TurboAssembler;

  template <typename T>
  bool CanAcquireVfp() const;

  template <typename T>
  T AcquireVfp();

  Assembler* assembler_;
  // Available scratch registers at the start of this scope.
  RegList old_available_;
  VfpRegList old_available_vfp_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM_ASSEMBLER_ARM_H_