1 // Copyright 2021 the V8 project authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be 3 // found in the LICENSE file. 4 5 #ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ 6 #define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ 7 8 #include <stdio.h> 9 10 #include <memory> 11 #include <set> 12 13 #include "src/codegen/assembler.h" 14 #include "src/codegen/external-reference.h" 15 #include "src/codegen/label.h" 16 #include "src/codegen/loong64/constants-loong64.h" 17 #include "src/codegen/loong64/register-loong64.h" 18 #include "src/codegen/machine-type.h" 19 #include "src/objects/contexts.h" 20 #include "src/objects/smi.h" 21 22 namespace v8 { 23 namespace internal { 24 25 class SafepointTableBuilder; 26 27 // ----------------------------------------------------------------------------- 28 // Machine instruction Operands. 29 constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize; 30 constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1; 31 // Class Operand represents a shifter operand in data processing instructions. 32 class Operand { 33 public: 34 // Immediate. 35 V8_INLINE explicit Operand(int64_t immediate, 36 RelocInfo::Mode rmode = RelocInfo::NO_INFO) rm_(no_reg)37 : rm_(no_reg), rmode_(rmode) { 38 value_.immediate = immediate; 39 } Operand(const ExternalReference & f)40 V8_INLINE explicit Operand(const ExternalReference& f) 41 : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) { 42 value_.immediate = static_cast<int64_t>(f.address()); 43 } 44 V8_INLINE explicit Operand(const char* s); 45 explicit Operand(Handle<HeapObject> handle); Operand(Smi value)46 V8_INLINE explicit Operand(Smi value) 47 : rm_(no_reg), rmode_(RelocInfo::NO_INFO) { 48 value_.immediate = static_cast<intptr_t>(value.ptr()); 49 } 50 51 static Operand EmbeddedNumber(double number); // Smi or HeapNumber. 52 static Operand EmbeddedStringConstant(const StringConstantBase* str); 53 54 // Register. 
Operand(Register rm)55 V8_INLINE explicit Operand(Register rm) : rm_(rm) {} 56 57 // Return true if this is a register operand. 58 V8_INLINE bool is_reg() const; 59 60 inline int64_t immediate() const; 61 IsImmediate()62 bool IsImmediate() const { return !rm_.is_valid(); } 63 heap_object_request()64 HeapObjectRequest heap_object_request() const { 65 DCHECK(IsHeapObjectRequest()); 66 return value_.heap_object_request; 67 } 68 IsHeapObjectRequest()69 bool IsHeapObjectRequest() const { 70 DCHECK_IMPLIES(is_heap_object_request_, IsImmediate()); 71 DCHECK_IMPLIES(is_heap_object_request_, 72 rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT || 73 rmode_ == RelocInfo::CODE_TARGET); 74 return is_heap_object_request_; 75 } 76 rm()77 Register rm() const { return rm_; } 78 rmode()79 RelocInfo::Mode rmode() const { return rmode_; } 80 81 private: 82 Register rm_; 83 union Value { Value()84 Value() {} 85 HeapObjectRequest heap_object_request; // if is_heap_object_request_ 86 int64_t immediate; // otherwise 87 } value_; // valid if rm_ == no_reg 88 bool is_heap_object_request_ = false; 89 RelocInfo::Mode rmode_; 90 91 friend class Assembler; 92 friend class MacroAssembler; 93 }; 94 95 // Class MemOperand represents a memory operand in load and store instructions. 96 // 1: base_reg + off_imm( si12 | si14<<2) 97 // 2: base_reg + offset_reg 98 class V8_EXPORT_PRIVATE MemOperand { 99 public: 100 explicit MemOperand(Register rj, int32_t offset = 0); 101 explicit MemOperand(Register rj, Register offset = no_reg); base()102 Register base() const { return base_; } index()103 Register index() const { return index_; } offset()104 int32_t offset() const { return offset_; } 105 hasIndexReg()106 bool hasIndexReg() const { return index_ != no_reg; } 107 108 private: 109 Register base_; // base 110 Register index_; // index 111 int32_t offset_; // offset 112 113 friend class Assembler; 114 }; 115 116 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { 117 public: 118 // Create an assembler. 
Instructions and relocation information are emitted 119 // into a buffer, with the instructions starting from the beginning and the 120 // relocation information starting from the end of the buffer. See CodeDesc 121 // for a detailed comment on the layout (globals.h). 122 // 123 // If the provided buffer is nullptr, the assembler allocates and grows its 124 // own buffer. Otherwise it takes ownership of the provided buffer. 125 explicit Assembler(const AssemblerOptions&, 126 std::unique_ptr<AssemblerBuffer> = {}); 127 ~Assembler()128 virtual ~Assembler() {} 129 130 // GetCode emits any pending (non-emitted) code and fills the descriptor desc. 131 static constexpr int kNoHandlerTable = 0; 132 static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr; 133 void GetCode(Isolate* isolate, CodeDesc* desc, 134 SafepointTableBuilder* safepoint_table_builder, 135 int handler_table_offset); 136 137 // Convenience wrapper for code without safepoint or handler tables. GetCode(Isolate * isolate,CodeDesc * desc)138 void GetCode(Isolate* isolate, CodeDesc* desc) { 139 GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable); 140 } 141 142 // Unused on this architecture. MaybeEmitOutOfLineConstantPool()143 void MaybeEmitOutOfLineConstantPool() {} 144 145 // Loong64 uses BlockTrampolinePool to prevent generating trampoline inside a 146 // continuous instruction block. In the destructor of BlockTrampolinePool, it 147 // must check if it needs to generate trampoline immediately, if it does not 148 // do this, the branch range will go beyond the max branch offset, that means 149 // the pc_offset after call CheckTrampolinePool may have changed. So we use 150 // pc_for_safepoint_ here for safepoint record. pc_offset_for_safepoint()151 int pc_offset_for_safepoint() { 152 return static_cast<int>(pc_for_safepoint_ - buffer_start_); 153 } 154 155 // TODO(LOONG_dev): LOONG64 Check this comment 156 // Label operations & relative jumps (PPUM Appendix D). 
157 // 158 // Takes a branch opcode (cc) and a label (L) and generates 159 // either a backward branch or a forward branch and links it 160 // to the label fixup chain. Usage: 161 // 162 // Label L; // unbound label 163 // j(cc, &L); // forward branch to unbound label 164 // bind(&L); // bind label to the current pc 165 // j(cc, &L); // backward branch to bound label 166 // bind(&L); // illegal: a label may be bound only once 167 // 168 // Note: The same Label can be used for forward and backward branches 169 // but it may be bound only once. 170 void bind(Label* L); // Binds an unbound label L to current code position. 171 172 enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; 173 174 // Determines if Label is bound and near enough so that branch instruction 175 // can be used to reach it, instead of jump instruction. 176 // c means conditinal branch, a means always branch. 177 bool is_near_c(Label* L); 178 bool is_near(Label* L, OffsetSize bits); 179 bool is_near_a(Label* L); 180 181 int BranchOffset(Instr instr); 182 183 // Returns the branch offset to the given label from the current code 184 // position. Links the label to the current position if it is still unbound. 185 // Manages the jump elimination optimization if the second parameter is true. 
186 int32_t branch_offset_helper(Label* L, OffsetSize bits); branch_offset(Label * L)187 inline int32_t branch_offset(Label* L) { 188 return branch_offset_helper(L, OffsetSize::kOffset16); 189 } branch_offset21(Label * L)190 inline int32_t branch_offset21(Label* L) { 191 return branch_offset_helper(L, OffsetSize::kOffset21); 192 } branch_offset26(Label * L)193 inline int32_t branch_offset26(Label* L) { 194 return branch_offset_helper(L, OffsetSize::kOffset26); 195 } shifted_branch_offset(Label * L)196 inline int32_t shifted_branch_offset(Label* L) { 197 return branch_offset(L) >> 2; 198 } shifted_branch_offset21(Label * L)199 inline int32_t shifted_branch_offset21(Label* L) { 200 return branch_offset21(L) >> 2; 201 } shifted_branch_offset26(Label * L)202 inline int32_t shifted_branch_offset26(Label* L) { 203 return branch_offset26(L) >> 2; 204 } 205 uint64_t jump_address(Label* L); 206 uint64_t jump_offset(Label* L); 207 uint64_t branch_long_offset(Label* L); 208 209 // Puts a labels target address at the given position. 210 // The high 8 bits are set to zero. 211 void label_at_put(Label* L, int at_offset); 212 213 // Read/Modify the code target address in the branch/call instruction at pc. 214 // The isolate argument is unused (and may be nullptr) when skipping flushing. 215 static Address target_address_at(Address pc); 216 V8_INLINE static void set_target_address_at( 217 Address pc, Address target, 218 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { 219 set_target_value_at(pc, target, icache_flush_mode); 220 } 221 // On LOONG64 there is no Constant Pool so we skip that parameter. 
target_address_at(Address pc,Address constant_pool)222 V8_INLINE static Address target_address_at(Address pc, 223 Address constant_pool) { 224 return target_address_at(pc); 225 } 226 V8_INLINE static void set_target_address_at( 227 Address pc, Address constant_pool, Address target, 228 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) { 229 set_target_address_at(pc, target, icache_flush_mode); 230 } 231 232 static void set_target_value_at( 233 Address pc, uint64_t target, 234 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); 235 236 static void JumpLabelToJumpRegister(Address pc); 237 238 // This sets the branch destination (which gets loaded at the call address). 239 // This is for calls and branches within generated code. The serializer 240 // has already deserialized the lui/ori instructions etc. 241 inline static void deserialization_set_special_target_at( 242 Address instruction_payload, Code code, Address target); 243 244 // Get the size of the special target encoded at 'instruction_payload'. 245 inline static int deserialization_special_target_size( 246 Address instruction_payload); 247 248 // This sets the internal reference at the pc. 249 inline static void deserialization_set_target_internal_reference_at( 250 Address pc, Address target, 251 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); 252 253 // Here we are patching the address in the LUI/ORI instruction pair. 254 // These values are used in the serialization process and must be zero for 255 // LOONG platform, as Code, Embedded Object or External-reference pointers 256 // are split across two consecutive instructions and don't exist separately 257 // in the code, so the serializer should not step forwards in memory after 258 // a target is resolved and written. 259 static constexpr int kSpecialTargetSize = 0; 260 261 // Number of consecutive instructions used to store 32bit/64bit constant. 
262 // This constant was used in RelocInfo::target_address_address() function 263 // to tell serializer address of the instruction that follows 264 // LUI/ORI instruction pair. 265 // TODO(LOONG_dev): check this 266 static constexpr int kInstructionsFor64BitConstant = 4; 267 268 // Max offset for instructions with 16-bit offset field 269 static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1; 270 271 // Max offset for instructions with 21-bit offset field 272 static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1; 273 274 // Max offset for compact branch instructions with 26-bit offset field 275 static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1; 276 277 static constexpr int kTrampolineSlotsSize = 2 * kInstrSize; 278 GetScratchRegisterList()279 RegList* GetScratchRegisterList() { return &scratch_register_list_; } 280 281 // --------------------------------------------------------------------------- 282 // Code generation. 283 284 // Insert the smallest number of nop instructions 285 // possible to align the pc offset to a multiple 286 // of m. m must be a power of 2 (>= 4). 287 void Align(int m); 288 // Insert the smallest number of zero bytes possible to align the pc offset 289 // to a mulitple of m. m must be a power of 2 (>= 2). 290 void DataAlign(int m); 291 // Aligns code to something that's optimal for a jump target for the platform. 292 void CodeTargetAlign(); LoopHeaderAlign()293 void LoopHeaderAlign() { CodeTargetAlign(); } 294 295 // Different nop operations are used by the code generator to detect certain 296 // states of the generated code. 297 enum NopMarkerTypes { 298 NON_MARKING_NOP = 0, 299 DEBUG_BREAK_NOP, 300 // IC markers. 301 PROPERTY_ACCESS_INLINED, 302 PROPERTY_ACCESS_INLINED_CONTEXT, 303 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, 304 // Helper values. 305 LAST_CODE_MARKER, 306 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, 307 }; 308 309 // Type == 0 is the default non-marking nop. 
For LoongArch this is a 310 // andi(zero_reg, zero_reg, 0). 311 void nop(unsigned int type = 0) { 312 DCHECK_LT(type, 32); 313 andi(zero_reg, zero_reg, type); 314 } 315 316 // --------Branch-and-jump-instructions---------- 317 // We don't use likely variant of instructions. 318 void b(int32_t offset); b(Label * L)319 inline void b(Label* L) { b(shifted_branch_offset26(L)); } 320 void bl(int32_t offset); bl(Label * L)321 inline void bl(Label* L) { bl(shifted_branch_offset26(L)); } 322 323 void beq(Register rj, Register rd, int32_t offset); beq(Register rj,Register rd,Label * L)324 inline void beq(Register rj, Register rd, Label* L) { 325 beq(rj, rd, shifted_branch_offset(L)); 326 } 327 void bne(Register rj, Register rd, int32_t offset); bne(Register rj,Register rd,Label * L)328 inline void bne(Register rj, Register rd, Label* L) { 329 bne(rj, rd, shifted_branch_offset(L)); 330 } 331 void blt(Register rj, Register rd, int32_t offset); blt(Register rj,Register rd,Label * L)332 inline void blt(Register rj, Register rd, Label* L) { 333 blt(rj, rd, shifted_branch_offset(L)); 334 } 335 void bge(Register rj, Register rd, int32_t offset); bge(Register rj,Register rd,Label * L)336 inline void bge(Register rj, Register rd, Label* L) { 337 bge(rj, rd, shifted_branch_offset(L)); 338 } 339 void bltu(Register rj, Register rd, int32_t offset); bltu(Register rj,Register rd,Label * L)340 inline void bltu(Register rj, Register rd, Label* L) { 341 bltu(rj, rd, shifted_branch_offset(L)); 342 } 343 void bgeu(Register rj, Register rd, int32_t offset); bgeu(Register rj,Register rd,Label * L)344 inline void bgeu(Register rj, Register rd, Label* L) { 345 bgeu(rj, rd, shifted_branch_offset(L)); 346 } 347 void beqz(Register rj, int32_t offset); beqz(Register rj,Label * L)348 inline void beqz(Register rj, Label* L) { 349 beqz(rj, shifted_branch_offset21(L)); 350 } 351 void bnez(Register rj, int32_t offset); bnez(Register rj,Label * L)352 inline void bnez(Register rj, Label* L) { 353 bnez(rj, 
shifted_branch_offset21(L)); 354 } 355 356 void jirl(Register rd, Register rj, int32_t offset); 357 358 void bceqz(CFRegister cj, int32_t si21); bceqz(CFRegister cj,Label * L)359 inline void bceqz(CFRegister cj, Label* L) { 360 bceqz(cj, shifted_branch_offset21(L)); 361 } 362 void bcnez(CFRegister cj, int32_t si21); bcnez(CFRegister cj,Label * L)363 inline void bcnez(CFRegister cj, Label* L) { 364 bcnez(cj, shifted_branch_offset21(L)); 365 } 366 367 // -------Data-processing-instructions--------- 368 369 // Arithmetic. 370 void add_w(Register rd, Register rj, Register rk); 371 void add_d(Register rd, Register rj, Register rk); 372 void sub_w(Register rd, Register rj, Register rk); 373 void sub_d(Register rd, Register rj, Register rk); 374 375 void addi_w(Register rd, Register rj, int32_t si12); 376 void addi_d(Register rd, Register rj, int32_t si12); 377 378 void addu16i_d(Register rd, Register rj, int32_t si16); 379 380 void alsl_w(Register rd, Register rj, Register rk, int32_t sa2); 381 void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2); 382 void alsl_d(Register rd, Register rj, Register rk, int32_t sa2); 383 384 void lu12i_w(Register rd, int32_t si20); 385 void lu32i_d(Register rd, int32_t si20); 386 void lu52i_d(Register rd, Register rj, int32_t si12); 387 388 void slt(Register rd, Register rj, Register rk); 389 void sltu(Register rd, Register rj, Register rk); 390 void slti(Register rd, Register rj, int32_t si12); 391 void sltui(Register rd, Register rj, int32_t si12); 392 393 void pcaddi(Register rd, int32_t si20); 394 void pcaddu12i(Register rd, int32_t si20); 395 void pcaddu18i(Register rd, int32_t si20); 396 void pcalau12i(Register rd, int32_t si20); 397 398 void and_(Register rd, Register rj, Register rk); 399 void or_(Register rd, Register rj, Register rk); 400 void xor_(Register rd, Register rj, Register rk); 401 void nor(Register rd, Register rj, Register rk); 402 void andn(Register rd, Register rj, Register rk); 403 void orn(Register rd, 
Register rj, Register rk); 404 405 void andi(Register rd, Register rj, int32_t ui12); 406 void ori(Register rd, Register rj, int32_t ui12); 407 void xori(Register rd, Register rj, int32_t ui12); 408 409 void mul_w(Register rd, Register rj, Register rk); 410 void mulh_w(Register rd, Register rj, Register rk); 411 void mulh_wu(Register rd, Register rj, Register rk); 412 void mul_d(Register rd, Register rj, Register rk); 413 void mulh_d(Register rd, Register rj, Register rk); 414 void mulh_du(Register rd, Register rj, Register rk); 415 416 void mulw_d_w(Register rd, Register rj, Register rk); 417 void mulw_d_wu(Register rd, Register rj, Register rk); 418 419 void div_w(Register rd, Register rj, Register rk); 420 void mod_w(Register rd, Register rj, Register rk); 421 void div_wu(Register rd, Register rj, Register rk); 422 void mod_wu(Register rd, Register rj, Register rk); 423 void div_d(Register rd, Register rj, Register rk); 424 void mod_d(Register rd, Register rj, Register rk); 425 void div_du(Register rd, Register rj, Register rk); 426 void mod_du(Register rd, Register rj, Register rk); 427 428 // Shifts. 
429 void sll_w(Register rd, Register rj, Register rk); 430 void srl_w(Register rd, Register rj, Register rk); 431 void sra_w(Register rd, Register rj, Register rk); 432 void rotr_w(Register rd, Register rj, Register rk); 433 434 void slli_w(Register rd, Register rj, int32_t ui5); 435 void srli_w(Register rd, Register rj, int32_t ui5); 436 void srai_w(Register rd, Register rj, int32_t ui5); 437 void rotri_w(Register rd, Register rj, int32_t ui5); 438 439 void sll_d(Register rd, Register rj, Register rk); 440 void srl_d(Register rd, Register rj, Register rk); 441 void sra_d(Register rd, Register rj, Register rk); 442 void rotr_d(Register rd, Register rj, Register rk); 443 444 void slli_d(Register rd, Register rj, int32_t ui6); 445 void srli_d(Register rd, Register rj, int32_t ui6); 446 void srai_d(Register rd, Register rj, int32_t ui6); 447 void rotri_d(Register rd, Register rj, int32_t ui6); 448 449 // Bit twiddling. 450 void ext_w_b(Register rd, Register rj); 451 void ext_w_h(Register rd, Register rj); 452 453 void clo_w(Register rd, Register rj); 454 void clz_w(Register rd, Register rj); 455 void cto_w(Register rd, Register rj); 456 void ctz_w(Register rd, Register rj); 457 void clo_d(Register rd, Register rj); 458 void clz_d(Register rd, Register rj); 459 void cto_d(Register rd, Register rj); 460 void ctz_d(Register rd, Register rj); 461 462 void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2); 463 void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3); 464 465 void revb_2h(Register rd, Register rj); 466 void revb_4h(Register rd, Register rj); 467 void revb_2w(Register rd, Register rj); 468 void revb_d(Register rd, Register rj); 469 470 void revh_2w(Register rd, Register rj); 471 void revh_d(Register rd, Register rj); 472 473 void bitrev_4b(Register rd, Register rj); 474 void bitrev_8b(Register rd, Register rj); 475 476 void bitrev_w(Register rd, Register rj); 477 void bitrev_d(Register rd, Register rj); 478 479 void bstrins_w(Register 
rd, Register rj, int32_t msbw, int32_t lsbw); 480 void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); 481 482 void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw); 483 void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd); 484 485 void maskeqz(Register rd, Register rj, Register rk); 486 void masknez(Register rd, Register rj, Register rk); 487 488 // Memory-instructions 489 void ld_b(Register rd, Register rj, int32_t si12); 490 void ld_h(Register rd, Register rj, int32_t si12); 491 void ld_w(Register rd, Register rj, int32_t si12); 492 void ld_d(Register rd, Register rj, int32_t si12); 493 void ld_bu(Register rd, Register rj, int32_t si12); 494 void ld_hu(Register rd, Register rj, int32_t si12); 495 void ld_wu(Register rd, Register rj, int32_t si12); 496 void st_b(Register rd, Register rj, int32_t si12); 497 void st_h(Register rd, Register rj, int32_t si12); 498 void st_w(Register rd, Register rj, int32_t si12); 499 void st_d(Register rd, Register rj, int32_t si12); 500 501 void ldx_b(Register rd, Register rj, Register rk); 502 void ldx_h(Register rd, Register rj, Register rk); 503 void ldx_w(Register rd, Register rj, Register rk); 504 void ldx_d(Register rd, Register rj, Register rk); 505 void ldx_bu(Register rd, Register rj, Register rk); 506 void ldx_hu(Register rd, Register rj, Register rk); 507 void ldx_wu(Register rd, Register rj, Register rk); 508 void stx_b(Register rd, Register rj, Register rk); 509 void stx_h(Register rd, Register rj, Register rk); 510 void stx_w(Register rd, Register rj, Register rk); 511 void stx_d(Register rd, Register rj, Register rk); 512 513 void ldptr_w(Register rd, Register rj, int32_t si14); 514 void ldptr_d(Register rd, Register rj, int32_t si14); 515 void stptr_w(Register rd, Register rj, int32_t si14); 516 void stptr_d(Register rd, Register rj, int32_t si14); 517 518 void amswap_w(Register rd, Register rk, Register rj); 519 void amswap_d(Register rd, Register rk, Register rj); 
520 void amadd_w(Register rd, Register rk, Register rj); 521 void amadd_d(Register rd, Register rk, Register rj); 522 void amand_w(Register rd, Register rk, Register rj); 523 void amand_d(Register rd, Register rk, Register rj); 524 void amor_w(Register rd, Register rk, Register rj); 525 void amor_d(Register rd, Register rk, Register rj); 526 void amxor_w(Register rd, Register rk, Register rj); 527 void amxor_d(Register rd, Register rk, Register rj); 528 void ammax_w(Register rd, Register rk, Register rj); 529 void ammax_d(Register rd, Register rk, Register rj); 530 void ammin_w(Register rd, Register rk, Register rj); 531 void ammin_d(Register rd, Register rk, Register rj); 532 void ammax_wu(Register rd, Register rk, Register rj); 533 void ammax_du(Register rd, Register rk, Register rj); 534 void ammin_wu(Register rd, Register rk, Register rj); 535 void ammin_du(Register rd, Register rk, Register rj); 536 537 void amswap_db_w(Register rd, Register rk, Register rj); 538 void amswap_db_d(Register rd, Register rk, Register rj); 539 void amadd_db_w(Register rd, Register rk, Register rj); 540 void amadd_db_d(Register rd, Register rk, Register rj); 541 void amand_db_w(Register rd, Register rk, Register rj); 542 void amand_db_d(Register rd, Register rk, Register rj); 543 void amor_db_w(Register rd, Register rk, Register rj); 544 void amor_db_d(Register rd, Register rk, Register rj); 545 void amxor_db_w(Register rd, Register rk, Register rj); 546 void amxor_db_d(Register rd, Register rk, Register rj); 547 void ammax_db_w(Register rd, Register rk, Register rj); 548 void ammax_db_d(Register rd, Register rk, Register rj); 549 void ammin_db_w(Register rd, Register rk, Register rj); 550 void ammin_db_d(Register rd, Register rk, Register rj); 551 void ammax_db_wu(Register rd, Register rk, Register rj); 552 void ammax_db_du(Register rd, Register rk, Register rj); 553 void ammin_db_wu(Register rd, Register rk, Register rj); 554 void ammin_db_du(Register rd, Register rk, Register 
rj); 555 556 void ll_w(Register rd, Register rj, int32_t si14); 557 void ll_d(Register rd, Register rj, int32_t si14); 558 void sc_w(Register rd, Register rj, int32_t si14); 559 void sc_d(Register rd, Register rj, int32_t si14); 560 561 void dbar(int32_t hint); 562 void ibar(int32_t hint); 563 564 // Break instruction 565 void break_(uint32_t code, bool break_as_stop = false); 566 void stop(uint32_t code = kMaxStopCode); 567 568 // Arithmetic. 569 void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk); 570 void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk); 571 void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk); 572 void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk); 573 void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk); 574 void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk); 575 void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk); 576 void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk); 577 578 void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 579 void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 580 void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 581 void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 582 void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 583 void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 584 void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 585 void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa); 586 587 void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk); 588 void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk); 589 void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk); 590 void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk); 591 592 void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk); 593 void fmaxa_d(FPURegister fd, 
FPURegister fj, FPURegister fk); 594 void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk); 595 void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk); 596 597 void fabs_s(FPURegister fd, FPURegister fj); 598 void fabs_d(FPURegister fd, FPURegister fj); 599 void fneg_s(FPURegister fd, FPURegister fj); 600 void fneg_d(FPURegister fd, FPURegister fj); 601 602 void fsqrt_s(FPURegister fd, FPURegister fj); 603 void fsqrt_d(FPURegister fd, FPURegister fj); 604 void frecip_s(FPURegister fd, FPURegister fj); 605 void frecip_d(FPURegister fd, FPURegister fj); 606 void frsqrt_s(FPURegister fd, FPURegister fj); 607 void frsqrt_d(FPURegister fd, FPURegister fj); 608 609 void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk); 610 void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk); 611 void flogb_s(FPURegister fd, FPURegister fj); 612 void flogb_d(FPURegister fd, FPURegister fj); 613 void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk); 614 void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk); 615 616 void fclass_s(FPURegister fd, FPURegister fj); 617 void fclass_d(FPURegister fd, FPURegister fj); 618 619 void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk, 620 CFRegister cd); 621 void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk, 622 CFRegister cd); 623 624 void fcvt_s_d(FPURegister fd, FPURegister fj); 625 void fcvt_d_s(FPURegister fd, FPURegister fj); 626 627 void ffint_s_w(FPURegister fd, FPURegister fj); 628 void ffint_s_l(FPURegister fd, FPURegister fj); 629 void ffint_d_w(FPURegister fd, FPURegister fj); 630 void ffint_d_l(FPURegister fd, FPURegister fj); 631 void ftint_w_s(FPURegister fd, FPURegister fj); 632 void ftint_w_d(FPURegister fd, FPURegister fj); 633 void ftint_l_s(FPURegister fd, FPURegister fj); 634 void ftint_l_d(FPURegister fd, FPURegister fj); 635 636 void ftintrm_w_s(FPURegister fd, FPURegister fj); 637 void ftintrm_w_d(FPURegister fd, FPURegister fj); 638 void 
ftintrm_l_s(FPURegister fd, FPURegister fj); 639 void ftintrm_l_d(FPURegister fd, FPURegister fj); 640 void ftintrp_w_s(FPURegister fd, FPURegister fj); 641 void ftintrp_w_d(FPURegister fd, FPURegister fj); 642 void ftintrp_l_s(FPURegister fd, FPURegister fj); 643 void ftintrp_l_d(FPURegister fd, FPURegister fj); 644 void ftintrz_w_s(FPURegister fd, FPURegister fj); 645 void ftintrz_w_d(FPURegister fd, FPURegister fj); 646 void ftintrz_l_s(FPURegister fd, FPURegister fj); 647 void ftintrz_l_d(FPURegister fd, FPURegister fj); 648 void ftintrne_w_s(FPURegister fd, FPURegister fj); 649 void ftintrne_w_d(FPURegister fd, FPURegister fj); 650 void ftintrne_l_s(FPURegister fd, FPURegister fj); 651 void ftintrne_l_d(FPURegister fd, FPURegister fj); 652 653 void frint_s(FPURegister fd, FPURegister fj); 654 void frint_d(FPURegister fd, FPURegister fj); 655 656 void fmov_s(FPURegister fd, FPURegister fj); 657 void fmov_d(FPURegister fd, FPURegister fj); 658 659 void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk); 660 661 void movgr2fr_w(FPURegister fd, Register rj); 662 void movgr2fr_d(FPURegister fd, Register rj); 663 void movgr2frh_w(FPURegister fd, Register rj); 664 665 void movfr2gr_s(Register rd, FPURegister fj); 666 void movfr2gr_d(Register rd, FPURegister fj); 667 void movfrh2gr_s(Register rd, FPURegister fj); 668 669 void movgr2fcsr(Register rj, FPUControlRegister fcsr = FCSR0); 670 void movfcsr2gr(Register rd, FPUControlRegister fcsr = FCSR0); 671 672 void movfr2cf(CFRegister cd, FPURegister fj); 673 void movcf2fr(FPURegister fd, CFRegister cj); 674 675 void movgr2cf(CFRegister cd, Register rj); 676 void movcf2gr(Register rd, CFRegister cj); 677 678 void fld_s(FPURegister fd, Register rj, int32_t si12); 679 void fld_d(FPURegister fd, Register rj, int32_t si12); 680 void fst_s(FPURegister fd, Register rj, int32_t si12); 681 void fst_d(FPURegister fd, Register rj, int32_t si12); 682 683 void fldx_s(FPURegister fd, Register rj, Register rk); 684 
void fldx_d(FPURegister fd, Register rj, Register rk); 685 void fstx_s(FPURegister fd, Register rj, Register rk); 686 void fstx_d(FPURegister fd, Register rj, Register rk); 687 688 // Check the code size generated from label to here. SizeOfCodeGeneratedSince(Label * label)689 int SizeOfCodeGeneratedSince(Label* label) { 690 return pc_offset() - label->pos(); 691 } 692 693 // Check the number of instructions generated from label to here. InstructionsGeneratedSince(Label * label)694 int InstructionsGeneratedSince(Label* label) { 695 return SizeOfCodeGeneratedSince(label) / kInstrSize; 696 } 697 698 // Class for scoping postponing the trampoline pool generation. 699 class V8_NODISCARD BlockTrampolinePoolScope { 700 public: BlockTrampolinePoolScope(Assembler * assem)701 explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { 702 assem_->StartBlockTrampolinePool(); 703 } ~BlockTrampolinePoolScope()704 ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); } 705 706 private: 707 Assembler* assem_; 708 709 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); 710 }; 711 712 // Class for postponing the assembly buffer growth. Typically used for 713 // sequences of instructions that must be emitted as a unit, before 714 // buffer growth (and relocation) can occur. 715 // This blocking scope is not nestable. 716 class V8_NODISCARD BlockGrowBufferScope { 717 public: BlockGrowBufferScope(Assembler * assem)718 explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) { 719 assem_->StartBlockGrowBuffer(); 720 } ~BlockGrowBufferScope()721 ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); } 722 723 private: 724 Assembler* assem_; 725 726 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope); 727 }; 728 729 // Record a deoptimization reason that can be used by a log or cpu profiler. 730 // Use --trace-deopt to enable. 
731 void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id, 732 SourcePosition position, int id); 733 734 static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc, 735 intptr_t pc_delta); 736 static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc, 737 intptr_t pc_delta); 738 739 // Writes a single byte or word of data in the code stream. Used for 740 // inline tables, e.g., jump-tables. 741 void db(uint8_t data); 742 void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); 743 void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO); 744 void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { 745 dq(data, rmode); 746 } 747 void dd(Label* label); 748 749 // Postpone the generation of the trampoline pool for the specified number of 750 // instructions. 751 void BlockTrampolinePoolFor(int instructions); 752 753 // Check if there is less than kGap bytes available in the buffer. 754 // If this is the case, we need to grow the buffer before emitting 755 // an instruction or relocation information. overflow()756 inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; } 757 758 // Get the number of bytes available in the buffer. available_space()759 inline intptr_t available_space() const { 760 return reloc_info_writer.pos() - pc_; 761 } 762 763 // Read/patch instructions. instr_at(Address pc)764 static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); } instr_at_put(Address pc,Instr instr)765 static void instr_at_put(Address pc, Instr instr) { 766 *reinterpret_cast<Instr*>(pc) = instr; 767 } instr_at(int pos)768 Instr instr_at(int pos) { 769 return *reinterpret_cast<Instr*>(buffer_start_ + pos); 770 } instr_at_put(int pos,Instr instr)771 void instr_at_put(int pos, Instr instr) { 772 *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr; 773 } 774 775 // Check if an instruction is a branch of some kind. 
776 static bool IsBranch(Instr instr); 777 static bool IsB(Instr instr); 778 static bool IsBz(Instr instr); 779 static bool IsNal(Instr instr); 780 781 static bool IsBeq(Instr instr); 782 static bool IsBne(Instr instr); 783 784 static bool IsJump(Instr instr); 785 static bool IsMov(Instr instr, Register rd, Register rs); 786 static bool IsPcAddi(Instr instr, Register rd, int32_t si20); 787 788 static bool IsJ(Instr instr); 789 static bool IsLu12i_w(Instr instr); 790 static bool IsOri(Instr instr); 791 static bool IsLu32i_d(Instr instr); 792 static bool IsLu52i_d(Instr instr); 793 794 static bool IsNop(Instr instr, unsigned int type); 795 796 static Register GetRjReg(Instr instr); 797 static Register GetRkReg(Instr instr); 798 static Register GetRdReg(Instr instr); 799 800 static uint32_t GetRj(Instr instr); 801 static uint32_t GetRjField(Instr instr); 802 static uint32_t GetRk(Instr instr); 803 static uint32_t GetRkField(Instr instr); 804 static uint32_t GetRd(Instr instr); 805 static uint32_t GetRdField(Instr instr); 806 static uint32_t GetSa2(Instr instr); 807 static uint32_t GetSa3(Instr instr); 808 static uint32_t GetSa2Field(Instr instr); 809 static uint32_t GetSa3Field(Instr instr); 810 static uint32_t GetOpcodeField(Instr instr); 811 static uint32_t GetFunction(Instr instr); 812 static uint32_t GetFunctionField(Instr instr); 813 static uint32_t GetImmediate16(Instr instr); 814 static uint32_t GetLabelConst(Instr instr); 815 816 static bool IsAddImmediate(Instr instr); 817 static Instr SetAddImmediateOffset(Instr instr, int16_t offset); 818 819 static bool IsAndImmediate(Instr instr); 820 static bool IsEmittedConstant(Instr instr); 821 822 void CheckTrampolinePool(); 823 824 // Get the code target object for a pc-relative call or jump. 
825 V8_INLINE Handle<Code> relative_code_target_object_handle_at( 826 Address pc_) const; 827 UnboundLabelsCount()828 inline int UnboundLabelsCount() { return unbound_labels_count_; } 829 830 protected: 831 // Helper function for memory load/store. 832 void AdjustBaseAndOffset(MemOperand* src); 833 834 inline static void set_target_internal_reference_encoded_at(Address pc, 835 Address target); 836 buffer_space()837 int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; } 838 839 // Decode branch instruction at pos and return branch target pos. 840 int target_at(int pos, bool is_internal); 841 842 // Patch branch instruction at pos to branch to given branch target pos. 843 void target_at_put(int pos, int target_pos, bool is_internal); 844 845 // Say if we need to relocate with this mode. 846 bool MustUseReg(RelocInfo::Mode rmode); 847 848 // Record reloc info for current pc_. 849 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); 850 851 // Block the emission of the trampoline pool before pc_offset. BlockTrampolinePoolBefore(int pc_offset)852 void BlockTrampolinePoolBefore(int pc_offset) { 853 if (no_trampoline_pool_before_ < pc_offset) 854 no_trampoline_pool_before_ = pc_offset; 855 } 856 StartBlockTrampolinePool()857 void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; } 858 EndBlockTrampolinePool()859 void EndBlockTrampolinePool() { 860 trampoline_pool_blocked_nesting_--; 861 if (trampoline_pool_blocked_nesting_ == 0) { 862 CheckTrampolinePoolQuick(1); 863 } 864 } 865 is_trampoline_pool_blocked()866 bool is_trampoline_pool_blocked() const { 867 return trampoline_pool_blocked_nesting_ > 0; 868 } 869 has_exception()870 bool has_exception() const { return internal_trampoline_exception_; } 871 is_trampoline_emitted()872 bool is_trampoline_emitted() const { return trampoline_emitted_; } 873 874 // Temporarily block automatic assembly buffer growth. 
StartBlockGrowBuffer()875 void StartBlockGrowBuffer() { 876 DCHECK(!block_buffer_growth_); 877 block_buffer_growth_ = true; 878 } 879 EndBlockGrowBuffer()880 void EndBlockGrowBuffer() { 881 DCHECK(block_buffer_growth_); 882 block_buffer_growth_ = false; 883 } 884 is_buffer_growth_blocked()885 bool is_buffer_growth_blocked() const { return block_buffer_growth_; } 886 887 void CheckTrampolinePoolQuick(int extra_instructions = 0) { 888 if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) { 889 CheckTrampolinePool(); 890 } 891 } 892 set_pc_for_safepoint()893 void set_pc_for_safepoint() { pc_for_safepoint_ = pc_; } 894 895 private: 896 // Avoid overflows for displacements etc. 897 static const int kMaximalBufferSize = 512 * MB; 898 899 // Buffer size and constant pool distance are checked together at regular 900 // intervals of kBufferCheckInterval emitted bytes. 901 static constexpr int kBufferCheckInterval = 1 * KB / 2; 902 903 // Code generation. 904 // The relocation writer's position is at least kGap bytes below the end of 905 // the generated instructions. This is so that multi-instruction sequences do 906 // not have to check for overflow. The same is true for writes of large 907 // relocation info entries. 908 static constexpr int kGap = 64; 909 STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap); 910 911 // Repeated checking whether the trampoline pool should be emitted is rather 912 // expensive. By default we only check again once a number of instructions 913 // has been generated. 914 static constexpr int kCheckConstIntervalInst = 32; 915 static constexpr int kCheckConstInterval = 916 kCheckConstIntervalInst * kInstrSize; 917 918 int next_buffer_check_; // pc offset of next buffer check. 919 920 // Emission of the trampoline pool may be blocked in some code sequences. 921 int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. 922 int no_trampoline_pool_before_; // Block emission before this pc offset. 

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Code emission.
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x);
  inline void emit(uint64_t x);
  template <typename T>
  inline void EmitHelper(T x);
  inline void EmitHelper(Instr x);

  // Low-level instruction encoders. GenB/GenBJ emit branch/jump formats,
  // GenCmp/GenSel FP compare/select formats; the integer suffixes in the
  // comments and parameter names give the immediate field widths.
  void GenB(Opcode opcode, Register rj, int32_t si21);  // opcode:6
  void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq);
  void GenB(Opcode opcode, int32_t si26);
  void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16);
  void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj,
              CFRegister cd);
  void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj,
              FPURegister rd);

  // Register-format encoders, one overload per operand combination of
  // general, FP, FP-control and condition-flag registers.
  void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true);
  void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd);
  void GenRegister(Opcode opcode, Register rj, FPURegister fd);
  void GenRegister(Opcode opcode, FPURegister fj, Register rd);
  void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd);
  void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd);
  void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd);
  void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd);
  void GenRegister(Opcode opcode, Register rj, CFRegister cd);
  void GenRegister(Opcode opcode, CFRegister cj, Register rd);

  void GenRegister(Opcode opcode, Register rk, Register rj, Register rd);
  void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
                   FPURegister fd);

  void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
                   FPURegister fj, FPURegister fd);
  void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd);

  // Immediate-format encoders; the bitN parameter names give the width of
  // the encoded immediate field.
  void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
              Register rd);
  void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
              Register rd);
  void GenImm(Opcode opcode, int32_t bit20, Register rd);
  void GenImm(Opcode opcode, int32_t bit15);
  void GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
              int32_t value_bits);  // 6 | 12 | 14 | 16
  void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd);

  // Labels.
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L, bool is_internal);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count * kInstrSize.
  class Trampoline {
   public:
    // Default: an empty trampoline with no slots.
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    // A trampoline at pc offset `start` with `slot_count` branch slots.
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() { return start_; }
    int end() { return end_; }
    // Claims the next free slot and returns its pc offset, or
    // kInvalidSlotPos when the trampoline is full.
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int start_;            // pc offset of the first slot.
    int end_;              // pc offset one past the last slot.
    int next_slot_;        // pc offset of the next free slot.
    int free_slot_count_;  // Remaining unclaimed slots.
  };

  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // After trampoline is emitted, long branches are used in generated code for
  // the forward branches whose target offsets could be beyond reach of branch
  // instruction. We use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  static constexpr int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbounded internal reference
  // labels.
  std::set<int64_t> internal_reference_positions_;
  bool is_internal_reference(Label* L) {
    return internal_reference_positions_.find(L->pos()) !=
           internal_reference_positions_.end();
  }

  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  // Keep track of the last Call's position to ensure that safepoint can get the
  // correct information even if there is a trampoline immediately after the
  // Call.
1065 byte* pc_for_safepoint_; 1066 1067 RegList scratch_register_list_; 1068 1069 private: 1070 void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); 1071 1072 int WriteCodeComments(); 1073 1074 friend class RegExpMacroAssemblerLOONG64; 1075 friend class RelocInfo; 1076 friend class BlockTrampolinePoolScope; 1077 friend class EnsureSpace; 1078 }; 1079 1080 class EnsureSpace { 1081 public: 1082 explicit inline EnsureSpace(Assembler* assembler); 1083 }; 1084 1085 class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { 1086 public: 1087 explicit UseScratchRegisterScope(Assembler* assembler); 1088 ~UseScratchRegisterScope(); 1089 1090 Register Acquire(); 1091 bool hasAvailable() const; 1092 Include(const RegList & list)1093 void Include(const RegList& list) { *available_ |= list; } Exclude(const RegList & list)1094 void Exclude(const RegList& list) { available_->clear(list); } 1095 void Include(const Register& reg1, const Register& reg2 = no_reg) { 1096 RegList list({reg1, reg2}); 1097 Include(list); 1098 } 1099 void Exclude(const Register& reg1, const Register& reg2 = no_reg) { 1100 RegList list({reg1, reg2}); 1101 Exclude(list); 1102 } 1103 1104 private: 1105 RegList* available_; 1106 RegList old_available_; 1107 }; 1108 1109 } // namespace internal 1110 } // namespace v8 1111 1112 #endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_ 1113