// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_

#include <stdio.h>
#include "assembler.h"
#include "constants-mips.h"
#include "serialize.h"

namespace v8 {
namespace internal {

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
//    compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
//    the register initialization to depend on the particular initialization
//    order (which appears to be different on OS X, Linux, and Windows for the
//    installed versions of C++ we tried). Using a struct permits C-style
//    "initialization". Also, the Register objects cannot be const as this
//    forces initialization stubs in MSVC, making us dependent on
//    initialization order.
//
// 3) By not using an enum, we are possibly preventing the compiler from
//    doing certain constant folds, which may significantly reduce the
//    code generated for some assembly instructions (because they boil down
//    to a few constants). If this is a problem, we could change the code
//    such that we use an enum in optimized mode, and the struct in debug
//    mode. This way we get the compile-time error checking in debug mode
//    and best performance in optimized code.


// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.

// Core register.
struct Register {
  static const int kNumRegisters = v8::internal::kNumRegisters;
  static const int kNumAllocatableRegisters = 14;  // v0 through t7.
  static const int kSizeInBytes = 4;

  static int ToAllocationIndex(Register reg) {
    return reg.code() - 2;  // zero_reg and 'at' are skipped.
  }

  static Register FromAllocationIndex(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    return from_code(index + 2);  // zero_reg and 'at' are skipped.
  }

  static const char* AllocationIndexToString(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    const char* const names[] = {
      "v0",
      "v1",
      "a0",
      "a1",
      "a2",
      "a3",
      "t0",
      "t1",
      "t2",
      "t3",
      "t4",
      "t5",
      "t6",
      "t7",
    };
    return names[index];
  }

  static Register from_code(int code) {
    Register r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
  bool is(Register reg) const { return code_ == reg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};

#define REGISTER(N, C) \
  const int kRegister_ ## N ## _Code = C; \
  const Register N = { C }

REGISTER(no_reg, -1);
// Always zero.
REGISTER(zero_reg, 0);
// at: Reserved for synthetic instructions.
REGISTER(at, 1);
// v0, v1: Used when returning multiple values from subroutines.
REGISTER(v0, 2);
REGISTER(v1, 3);
// a0 - a3: Used to pass non-FP parameters.
REGISTER(a0, 4);
REGISTER(a1, 5);
REGISTER(a2, 6);
REGISTER(a3, 7);
// t0 - t9: Can be used without reservation, act as temporary registers and are
// allowed to be destroyed by subroutines.
REGISTER(t0, 8);
REGISTER(t1, 9);
REGISTER(t2, 10);
REGISTER(t3, 11);
REGISTER(t4, 12);
REGISTER(t5, 13);
REGISTER(t6, 14);
REGISTER(t7, 15);
// s0 - s7: Subroutine register variables. Subroutines that write to these
// registers must restore their values before exiting so that the caller can
// expect the values to be preserved.
REGISTER(s0, 16);
REGISTER(s1, 17);
REGISTER(s2, 18);
REGISTER(s3, 19);
REGISTER(s4, 20);
REGISTER(s5, 21);
REGISTER(s6, 22);
REGISTER(s7, 23);
REGISTER(t8, 24);
REGISTER(t9, 25);
// k0, k1: Reserved for system calls and interrupt handlers.
REGISTER(k0, 26);
REGISTER(k1, 27);
// gp: Reserved.
REGISTER(gp, 28);
// sp: Stack pointer.
REGISTER(sp, 29);
// fp: Frame pointer.
REGISTER(fp, 30);
// ra: Return address pointer.
REGISTER(ra, 31);

#undef REGISTER


int ToNumber(Register reg);

Register ToRegister(int num);
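// Illustrative sketch only (not part of the original interface documentation):
// allocatable registers map onto allocation indices by skipping zero_reg and
// 'at', so v0 is index 0, v1 is index 1, a0 is index 2, and t7 is index 13.
// For example:
//
//   Register r = Register::FromAllocationIndex(2);                // a0
//   int index = Register::ToAllocationIndex(r);                   // 2
//   const char* name = Register::AllocationIndexToString(index);  // "a0"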
// Coprocessor register.
struct FPURegister {
  static const int kNumRegisters = v8::internal::kNumFPURegisters;

  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
  // to the number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
  // the number of Double regs (64-bit regs, or FPU-reg-pairs).

  // A few double registers are reserved: one as a scratch register and one to
  // hold 0.0.
  //  f28: 0.0
  //  f30: scratch register.
  static const int kNumReservedRegisters = 2;
  static const int kNumAllocatableRegisters = kNumRegisters / 2 -
      kNumReservedRegisters;


  inline static int ToAllocationIndex(FPURegister reg);

  static FPURegister FromAllocationIndex(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    return from_code(index * 2);
  }

  static const char* AllocationIndexToString(int index) {
    ASSERT(index >= 0 && index < kNumAllocatableRegisters);
    const char* const names[] = {
      "f0",
      "f2",
      "f4",
      "f6",
      "f8",
      "f10",
      "f12",
      "f14",
      "f16",
      "f18",
      "f20",
      "f22",
      "f24",
      "f26"
    };
    return names[index];
  }

  static FPURegister from_code(int code) {
    FPURegister r = { code };
    return r;
  }

  bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters; }
  bool is(FPURegister creg) const { return code_ == creg.code_; }
  FPURegister low() const {
    // Find the low reg of a Double-reg pair, which is the reg itself.
    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.code_ = code_;
    ASSERT(reg.is_valid());
    return reg;
  }
  FPURegister high() const {
    // Find the high reg of a Double-reg pair, which is reg + 1.
    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.code_ = code_ + 1;
    ASSERT(reg.is_valid());
    return reg;
  }

  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }
  void setcode(int f) {
    code_ = f;
    ASSERT(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int code_;
};

// V8 now supports the O32 ABI, and the FPU registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even-numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern MIPS hardware also supports 32 64-bit registers, via setting the
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For the O32 ABI, Floats and Doubles refer to the same set of 32 32-bit
// registers.
typedef FPURegister DoubleRegister;
typedef FPURegister FloatRegister;
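// Illustrative sketch only: under the O32 layout described above, a
// DoubleRegister always carries an even code, and its 32-bit halves can be
// obtained with low() and high(). For example, the double register f4
// occupies the pair (f4, f5):
//
//   DoubleRegister d = FPURegister::from_code(4);
//   FPURegister lo = d.low();   // f4, the low 32 bits.
//   FPURegister hi = d.high();  // f5, the high 32 bits.
//
// Allocation indices count double registers, so FromAllocationIndex(2)
// yields f4 (register code = index * 2).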
const FPURegister no_freg = { -1 };

const FPURegister f0 = { 0 };  // Return value in hard float mode.
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };

// Register aliases.
// cp is assumed to be a callee-saved register.
static const Register& kLithiumScratchReg = s3;   // Scratch register.
static const Register& kLithiumScratchReg2 = s4;  // Scratch register.
static const Register& kRootRegister = s6;        // Roots array pointer.
static const Register& cp = s7;                   // JavaScript context pointer.
static const DoubleRegister& kLithiumScratchDouble = f30;
static const FPURegister& kDoubleRegZero = f28;

// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
  bool is_valid() const { return code_ == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
  int code() const {
    ASSERT(is_valid());
    return code_;
  }
  int bit() const {
    ASSERT(is_valid());
    return 1 << code_;
  }
  void setcode(int f) {
    code_ = f;
    ASSERT(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int code_;
};

const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };


// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};


// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  explicit MemOperand(Register rn, int32_t offset = 0);
  int32_t offset() const { return offset_; }

  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

 private:
  int32_t offset_;

  friend class Assembler;
};
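// Illustrative sketch only: every memory access is expressed as a base
// register plus a signed offset. A typical load/store through the assembler
// (the '__' shorthand for the assembler is assumed here) might look like:
//
//   MemOperand field(a0, 12);           // 12(a0)
//   if (field.OffsetIsInt16Encodable()) {
//     __ lw(t0, field);                 // t0 = *(a0 + 12)
//     __ sw(t0, MemOperand(sp, 0));     // store it to the top stack slot
//   }
//
// Offsets that do not fit in 16 bits are materialized into 'at' first (see
// LoadRegPlusOffsetToAt further down).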
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
class CpuFeatures : public AllStatic {
 public:
  // Detect features of the target CPU. Set safe defaults if the serializer
  // is enabled (snapshots must be portable).
  static void Probe();

  // Check whether a feature is supported by the target CPU.
  static bool IsSupported(CpuFeature f) {
    ASSERT(initialized_);
    if (f == FPU && !FLAG_enable_fpu) return false;
    return (supported_ & (1u << f)) != 0;
  }


#ifdef DEBUG
  // Check whether a feature is currently enabled.
  static bool IsEnabled(CpuFeature f) {
    ASSERT(initialized_);
    Isolate* isolate = Isolate::UncheckedCurrent();
    if (isolate == NULL) {
      // When no isolate is available, work as if we're running in
      // release mode.
      return IsSupported(f);
    }
    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
    return (enabled & (1u << f)) != 0;
  }
#endif

  // Enable a specified feature within a scope.
  class Scope BASE_EMBEDDED {
#ifdef DEBUG

   public:
    explicit Scope(CpuFeature f) {
      unsigned mask = 1u << f;
      ASSERT(CpuFeatures::IsSupported(f));
      ASSERT(!Serializer::enabled() ||
             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
      isolate_ = Isolate::UncheckedCurrent();
      old_enabled_ = 0;
      if (isolate_ != NULL) {
        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
      }
    }
    ~Scope() {
      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
      if (isolate_ != NULL) {
        isolate_->set_enabled_cpu_features(old_enabled_);
      }
    }

   private:
    Isolate* isolate_;
    unsigned old_enabled_;
#else

   public:
    explicit Scope(CpuFeature f) {}
#endif
  };

  class TryForceFeatureScope BASE_EMBEDDED {
   public:
    explicit TryForceFeatureScope(CpuFeature f)
        : old_supported_(CpuFeatures::supported_) {
      if (CanForce()) {
        CpuFeatures::supported_ |= (1u << f);
      }
    }

    ~TryForceFeatureScope() {
      if (CanForce()) {
        CpuFeatures::supported_ = old_supported_;
      }
    }

   private:
    static bool CanForce() {
      // It's only safe to temporarily force support of CPU features
      // when there's only a single isolate, which is guaranteed when
      // the serializer is enabled.
      return Serializer::enabled();
    }

    const unsigned old_supported_;
  };

 private:
#ifdef DEBUG
  static bool initialized_;
#endif
  static unsigned supported_;
  static unsigned found_by_runtime_probing_;

  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
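// Illustrative sketch only: generated FPU code is normally guarded by a
// feature check plus a Scope, roughly like this (the '__' assembler shorthand
// is assumed):
//
//   if (CpuFeatures::IsSupported(FPU)) {
//     CpuFeatures::Scope scope(FPU);
//     __ mtc1(a0, f12);
//     __ cvt_d_w(f12, f12);
//   } else {
//     // Fall back to a runtime call or an integer-only path.
//   }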
class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
  ~Assembler();

  // Overrides the default provided by FLAG_debug_code.
  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.
  void bind(Label* L);  // Binds an unbound label L to the current code
                        // position.

  // Determines if the Label is bound and near enough so that a branch
  // instruction can be used to reach it, instead of a jump instruction.
  bool is_near(Label* L);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // Manages the jump elimination optimization if the second parameter is true.
  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
    int32_t o = branch_offset(L, jump_elimination_allowed);
    ASSERT((o & 3) == 0);  // Assert the offset is aligned.
    return o >> 2;
  }
  uint32_t jump_address(Label* L);

  // Puts a label's target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
  static void set_target_address_at(Address pc, Address target);

  static void JumpLabelToJumpRegister(Address pc);

  static void QuietNaN(HeapObject* nan);

  // This sets the branch destination (which gets loaded at the call address).
  // This is for calls and branches within generated code. The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
      Address instruction_payload, Address target) {
    set_target_address_at(
        instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
        target);
  }
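  // Illustrative sketch only: kInstructionsFor32BitConstant and kInstrSize
  // (declared below) step back from the instruction payload to the lui/ori
  // pair that holds a 32-bit target, which set_target_address_at() rewrites
  // in place. Conceptually, for a pc pointing at such a pair:
  //
  //   Address old_target = Assembler::target_address_at(pc);
  //   Assembler::set_target_address_at(pc, new_target);
  //   // The lui/ori immediates at pc now encode new_target.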
  // This sets the branch destination.
  // This is for calls and branches to runtime code.
  inline static void set_external_target_at(Address instruction_payload,
                                            Address target) {
    set_target_address_at(instruction_payload, target);
  }

  // Size of an instruction.
  static const int kInstrSize = sizeof(Instr);

  // Difference between address of current opcode and target address offset.
  static const int kBranchPCOffset = 4;

  // Here we are patching the address in the LUI/ORI instruction pair.
  // These values are used in the serialization process and must be zero for
  // the MIPS platform, as Code, Embedded Object or External-reference pointers
  // are split across two consecutive instructions and don't exist separately
  // in the code, so the serializer should not step forwards in memory after
  // a target is resolved and written.
  static const int kSpecialTargetSize = 0;

  // Number of consecutive instructions used to store a 32-bit constant.
  // Before the jump optimizations, this constant was used in
  // RelocInfo::target_address_address() to tell the serializer the address of
  // the instruction that follows the LUI/ORI instruction pair. Now, with the
  // new jump optimization, where the jump-through-register instruction that
  // usually follows the LUI/ORI pair is substituted with J/JAL, this constant
  // equals 3 instructions (LUI+ORI+J/JAL/JR/JALR).
  static const int kInstructionsFor32BitConstant = 3;

  // Distance between the instruction referring to the address of the call
  // target and the return address.
  static const int kCallTargetAddressOffset = 4 * kInstrSize;

  // Distance between start of patched return sequence and the emitted address
  // to jump to.
  static const int kPatchReturnSequenceAddressOffset = 0;

  // Distance between start of patched debug break slot and the emitted address
  // to jump to.
  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;

  // Difference between address of current opcode and value read from pc
  // register.
  static const int kPcLoadDelta = 4;

  // Number of instructions used for the JS return sequence. The constant is
  // used by the debugger to patch the JS return sequence.
  static const int kJSReturnSequenceInstructions = 7;
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstrSize;


  // ---------------------------------------------------------------------------
  // Code generation.

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Aligns code to something that's optimal for a jump target for the
  // platform.
  void CodeTargetAlign();

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };
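  // Illustrative sketch only: a "marking" nop is an sll of zero_reg by the
  // marker value, so it executes as a no-op but can be recognized later
  // (the '__' assembler shorthand is assumed):
  //
  //   __ nop(Assembler::PROPERTY_ACCESS_INLINED);
  //   ...
  //   Instr instr = Assembler::instr_at(pc);
  //   bool marked =
  //       Assembler::IsNop(instr, Assembler::PROPERTY_ACCESS_INLINED);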
  // Type == 0 is the default non-marking type.
  void nop(unsigned int type = 0) {
    ASSERT(type < 32);
    sll(zero_reg, zero_reg, type, true);
  }


  // --------Branch-and-jump-instructions----------
  // We don't use the likely variants of instructions.
  void b(int16_t offset);
  void b(Label* L) { b(branch_offset(L, false) >> 2); }
  void bal(int16_t offset);
  void bal(Label* L) { bal(branch_offset(L, false) >> 2); }

  void beq(Register rs, Register rt, int16_t offset);
  void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, branch_offset(L, false) >> 2);
  }
  void bgez(Register rs, int16_t offset);
  void bgezal(Register rs, int16_t offset);
  void bgtz(Register rs, int16_t offset);
  void blez(Register rs, int16_t offset);
  void bltz(Register rs, int16_t offset);
  void bltzal(Register rs, int16_t offset);
  void bne(Register rs, Register rt, int16_t offset);
  void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, branch_offset(L, false) >> 2);
  }

  // Never use the int16_t b(l)cond version with a branch offset
  // instead of using the Label* version.

  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
  void j(int32_t target);
  void jal(int32_t target);
  void jalr(Register rs, Register rd = ra);
  void jr(Register target);
  void j_or_jr(int32_t target, Register rs);
  void jal_or_jalr(int32_t target, Register rs);


  //-------Data-processing-instructions---------

  // Arithmetic.
  void addu(Register rd, Register rs, Register rt);
  void subu(Register rd, Register rs, Register rt);
  void mult(Register rs, Register rt);
  void multu(Register rs, Register rt);
  void div(Register rs, Register rt);
  void divu(Register rs, Register rt);
  void mul(Register rd, Register rs, Register rt);

  void addiu(Register rd, Register rs, int32_t j);

  // Logical.
  void and_(Register rd, Register rs, Register rt);
  void or_(Register rd, Register rs, Register rt);
  void xor_(Register rd, Register rs, Register rt);
  void nor(Register rd, Register rs, Register rt);

  void andi(Register rd, Register rs, int32_t j);
  void ori(Register rd, Register rs, int32_t j);
  void xori(Register rd, Register rs, int32_t j);
  void lui(Register rd, int32_t j);
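  // Illustrative sketch only: loading a full 32-bit immediate is done with a
  // lui/ori pair (this is the sequence that kInstructionsFor32BitConstant and
  // the target-address patching above refer to); '__' shorthand assumed:
  //
  //   int32_t imm = 0x12345678;
  //   __ lui(t0, (imm >> 16) & 0xffff);  // t0 = 0x12340000
  //   __ ori(t0, t0, imm & 0xffff);      // t0 = 0x12345678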
  // Shifts.
  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
  // and may cause problems in normal code. coming_from_nop makes sure this
  // doesn't happen.
  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
  void sllv(Register rd, Register rt, Register rs);
  void srl(Register rd, Register rt, uint16_t sa);
  void srlv(Register rd, Register rt, Register rs);
  void sra(Register rt, Register rd, uint16_t sa);
  void srav(Register rt, Register rd, Register rs);
  void rotr(Register rd, Register rt, uint16_t sa);
  void rotrv(Register rd, Register rt, Register rs);


  //------------Memory-instructions-------------

  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
  void lw(Register rd, const MemOperand& rs);
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
  void sb(Register rd, const MemOperand& rs);
  void sh(Register rd, const MemOperand& rs);
  void sw(Register rd, const MemOperand& rs);
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);


  //-------------Misc-instructions--------------

  // Break / Trap instructions.
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  // Bit twiddling.
  void clz(Register rd, Register rs);
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);

  //--------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  void mtc1(Register rt, FPURegister fs);
  void mfc1(Register rt, FPURegister fs);

  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);
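  // Illustrative sketch only (little-endian word layout assumed): a double in
  // memory can be loaded in one go with ldc1, or built up from its halves
  // with mtc1 using the even/odd pairing described for FPURegister above:
  //
  //   __ ldc1(f12, MemOperand(a0, 0));  // Load a full double into (f12, f13).
  //   __ lw(t0, MemOperand(a0, 0));     // Or load the two words...
  //   __ lw(t1, MemOperand(a0, 4));
  //   __ mtc1(t0, f12);                 // ...and move them to the low
  //   __ mtc1(t1, f13);                 // and high halves of the pair.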
  // Conversion.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);

  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);

  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);

  // Conditions and branches.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);

  void bc1f(int16_t offset, uint16_t cc = 0);
  void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(branch_offset(L, false) >> 2, cc);
  }
  void bc1t(int16_t offset, uint16_t cc = 0);
  void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(branch_offset(L, false) >> 2, cc);
  }
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Class for scoped postponing of trampoline pool generation.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };

  // Debugging.

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();
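  // Illustrative sketch only: a sequence that must stay contiguous (for
  // example, one that is later patched as a unit) is emitted under a
  // BlockTrampolinePoolScope inside a code-generation method, so that no
  // trampoline pool is inserted in the middle of it ('__' shorthand assumed):
  //
  //   {
  //     Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
  //     __ lui(at, 0x1234);       // These three instructions are guaranteed
  //     __ ori(at, at, 0x5678);   // to be emitted back to back.
  //     __ jr(at);
  //     __ nop();                 // Branch delay slot.
  //   }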
  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(unsigned ast_id) {
    ASSERT(recorded_ast_id_ == kNoASTId);
    recorded_ast_id_ = ast_id;
  }

  unsigned RecordedAstId() {
    ASSERT(recorded_ast_id_ != kNoASTId);
    return recorded_ast_id_;
  }

  void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream. Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);

  int32_t pc_offset() const { return pc_ - buffer_; }

  PositionsRecorder* positions_recorder() { return &positions_recorder_; }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there are fewer than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
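  // Illustrative sketch only: code patching reads an instruction word,
  // classifies it with the predicates declared below, and writes a modified
  // word back:
  //
  //   Instr instr = Assembler::instr_at(pc);
  //   if (Assembler::IsLw(instr)) {
  //     int16_t offset = Assembler::GetLwOffset(instr);
  //     Assembler::instr_at_put(pc,
  //                             Assembler::SetLwOffset(instr, offset + 4));
  //   }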
  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);

  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);

  static bool IsAndImmediate(Instr instr);

  void CheckTrampolinePool();

 protected:
  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  unsigned recorded_ast_id_;

  bool emit_debug_code() const { return emit_debug_code_; }

  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int32_t pos);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int32_t pos, int32_t target_pos);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  void StartBlockGrowBuffer() {
    ASSERT(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    ASSERT(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

 private:
  // Code buffer:
  // The buffer into which code and relocation info are generated.
  byte* buffer_;
  int buffer_size_;
  // True if the assembler owns the buffer, false if the buffer is external.
  bool own_buffer_;

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1 * KB / 2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;
  byte* pc_;  // The program counter - moves forward.


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of the next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position; before this we cannot do instruction elimination.
  int last_bound_pos_;
  // Code emission.
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);
  inline void CheckTrampolinePoolQuick();

  // Instruction generation.
  // We have 3 different kinds of encoding layout on MIPS.
  // However, due to the many different types of objects encoded in the same
  // fields, we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare
  // a few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);


  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         Register rt,
                         int32_t j);
  void GenInstrImmediate(Opcode opcode,
                         Register rs,
                         SecondaryField SF,
                         int32_t j);
  void GenInstrImmediate(Opcode opcode,
                         Register r1,
                         FPURegister r2,
                         int32_t j);


  void GenInstrJump(Opcode opcode,
                    uint32_t address);

  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of
  // one instruction size, so the total amount for labels is equal to
  // label_count * kInstrSize.
  class Trampoline {
   public:
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() {
      return start_;
    }
    int end() {
      return end_;
    }
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
        ASSERT(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If a trampoline is emitted, the generated code is becoming large. As this
  // is already a slow case which can possibly break our code generation for
  // the extreme case, we use this information to trigger a different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  static const int kInvalidSlotPos = -1;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;

  PositionsRecorder positions_recorder_;
  bool emit_debug_code_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
};


class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

} }  // namespace v8::internal

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_