• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8 
9 #ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
10 #define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
11 
12 #include "src/codegen/assembler.h"
13 #include "src/codegen/loong64/assembler-loong64.h"
14 #include "src/common/globals.h"
15 #include "src/objects/tagged-index.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 // Forward declarations.
21 enum class AbortReason : uint8_t;
22 
23 // Flags used for LeaveExitFrame function.
24 enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
25 
26 // Flags used for the li macro-assembler function.
27 enum LiFlags {
28   // If the constant value can be represented in just 12 bits, then
29   // optimize the li to use a single instruction, rather than lu12i_w/lu32i_d/
30   // lu52i_d/ori sequence. A number of other optimizations that emits less than
31   // maximum number of instructions exists.
32   OPTIMIZE_SIZE = 0,
33   // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence),
34   // even if the constant could be loaded with just one, so that this value is
35   // patchable later.
36   CONSTANT_SIZE = 1,
37   // For address loads only 3 instruction are required. Used to mark
38   // constant load that will be used as address without relocation
39   // information. It ensures predictable code size, so specific sites
40   // in code are patchable.
41   ADDRESS_LOAD = 2
42 };
43 
44 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
45 
46 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
47                                    Register reg3 = no_reg,
48                                    Register reg4 = no_reg,
49                                    Register reg5 = no_reg,
50                                    Register reg6 = no_reg);
51 
52 // -----------------------------------------------------------------------------
53 // Static helper functions.
54 
55 #define SmiWordOffset(offset) (offset + kPointerSize / 2)
56 
57 // Generate a MemOperand for loading a field from an object.
FieldMemOperand(Register object,int offset)58 inline MemOperand FieldMemOperand(Register object, int offset) {
59   return MemOperand(object, offset - kHeapObjectTag);
60 }
61 
62 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
63  public:
64   using TurboAssemblerBase::TurboAssemblerBase;
65 
66   // Activation support.
67   void EnterFrame(StackFrame::Type type);
EnterFrame(StackFrame::Type type,bool load_constant_pool_pointer_reg)68   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
69     // Out-of-line constant pool not implemented on loong64.
70     UNREACHABLE();
71   }
72   void LeaveFrame(StackFrame::Type type);
73 
AllocateStackSpace(Register bytes)74   void AllocateStackSpace(Register bytes) { Sub_d(sp, sp, bytes); }
75 
AllocateStackSpace(int bytes)76   void AllocateStackSpace(int bytes) {
77     DCHECK_GE(bytes, 0);
78     if (bytes == 0) return;
79     Sub_d(sp, sp, Operand(bytes));
80   }
81 
82   // Generates function and stub prologue code.
83   void StubPrologue(StackFrame::Type type);
84   void Prologue();
85 
InitializeRootRegister()86   void InitializeRootRegister() {
87     ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
88     li(kRootRegister, Operand(isolate_root));
89   }
90 
91   // Jump unconditionally to given label.
92   // Use rather b(Label) for code generation.
jmp(Label * L)93   void jmp(Label* L) { Branch(L); }
94 
95   // -------------------------------------------------------------------------
96   // Debugging.
97 
98   void Trap();
99   void DebugBreak();
100 
101   // Calls Abort(msg) if the condition cc is not satisfied.
102   // Use --debug_code to enable.
103   void Assert(Condition cc, AbortReason reason, Register rj, Operand rk);
104 
105   // Like Assert(), but always enabled.
106   void Check(Condition cc, AbortReason reason, Register rj, Operand rk);
107 
108   // Print a message to stdout and abort execution.
109   void Abort(AbortReason msg);
110 
111   void Branch(Label* label, bool need_link = false);
112   void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
113               bool need_link = false);
114   void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
115                    bool need_link = false);
116   void Branch(Label* L, Condition cond, Register rj, RootIndex index);
117 
118   // Floating point branches
119   void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
120                   CFRegister cd = FCC0) {
121     CompareF(cmp1, cmp2, cc, cd, true);
122   }
123 
124   void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
125                        CFRegister cd = FCC0) {
126     CompareIsNanF(cmp1, cmp2, cd, true);
127   }
128 
129   void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
130                   CFRegister cd = FCC0) {
131     CompareF(cmp1, cmp2, cc, cd, false);
132   }
133 
134   void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
135                        CFRegister cd = FCC0) {
136     CompareIsNanF(cmp1, cmp2, cd, false);
137   }
138 
139   void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
140   void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
141 
142   void BranchTrueF(Label* target, CFRegister cc = FCC0);
143   void BranchFalseF(Label* target, CFRegister cc = FCC0);
144 
145   static int InstrCountForLi64Bit(int64_t value);
146   inline void LiLower32BitHelper(Register rd, Operand j);
147   void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
148   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
149   inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
150     li(rd, Operand(j), mode);
151   }
152   inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
153     li(rd, Operand(static_cast<int64_t>(j)), mode);
154   }
155   void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
156   void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
157   void li(Register dst, const StringConstantBase* string,
158           LiFlags mode = OPTIMIZE_SIZE);
159 
160   void LoadFromConstantsTable(Register destination, int constant_index) final;
161   void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
162   void LoadRootRelative(Register destination, int32_t offset) final;
163 
Move(Register output,MemOperand operand)164   inline void Move(Register output, MemOperand operand) {
165     Ld_d(output, operand);
166   }
167 
168   inline void GenPCRelativeJump(Register rd, int64_t offset);
169   inline void GenPCRelativeJumpAndLink(Register rd, int64_t offset);
170 
171 // Jump, Call, and Ret pseudo instructions implementing inter-working.
172 #define COND_ARGS                              \
173   Condition cond = al, Register rj = zero_reg, \
174             const Operand &rk = Operand(zero_reg)
175 
176   void Jump(Register target, COND_ARGS);
177   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
178   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
179   // Deffer from li, this method save target to the memory, and then load
180   // it to register use ld_d, it can be used in wasm jump table for concurrent
181   // patching.
182   void PatchAndJump(Address target);
183   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
184   void Jump(const ExternalReference& reference);
185   void Call(Register target, COND_ARGS);
186   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
187   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
188             COND_ARGS);
189   void Call(Label* target);
190 
191   // Load the builtin given by the Smi in |builtin_index| into the same
192   // register.
193   void LoadEntryFromBuiltinIndex(Register builtin);
194   void LoadEntryFromBuiltin(Builtin builtin, Register destination);
195   MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
196 
197   void CallBuiltinByIndex(Register builtin);
198   void CallBuiltin(Builtin builtin);
199 
200   void LoadCodeObjectEntry(Register destination, Register code_object);
201   void CallCodeObject(Register code_object);
202 
203   void JumpCodeObject(Register code_object,
204                       JumpMode jump_mode = JumpMode::kJump);
205 
206   // Generates an instruction sequence s.t. the return address points to the
207   // instruction following the call.
208   // The return address on the stack is used by frame iteration.
209   void StoreReturnAddressAndCall(Register target);
210 
211   void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
212                              DeoptimizeKind kind, Label* ret,
213                              Label* jump_deoptimization_entry_label);
214 
215   void Ret(COND_ARGS);
216 
217   // Emit code to discard a non-negative number of pointer-sized elements
218   // from the stack, clobbering only the sp register.
219   void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
220             const Operand& op = Operand(no_reg));
221 
222   enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
223   enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
224   void DropArguments(Register count, ArgumentsCountType type,
225                      ArgumentsCountMode mode, Register scratch = no_reg);
226   void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
227                                        ArgumentsCountType type,
228                                        ArgumentsCountMode mode,
229                                        Register scratch = no_reg);
230 
231   void Ld_d(Register rd, const MemOperand& rj);
232   void St_d(Register rd, const MemOperand& rj);
233 
234   void Push(Handle<HeapObject> handle);
235   void Push(Smi smi);
236 
Push(Register src)237   void Push(Register src) {
238     Add_d(sp, sp, Operand(-kPointerSize));
239     St_d(src, MemOperand(sp, 0));
240   }
241 
242   // Push two registers. Pushes leftmost register first (to highest address).
Push(Register src1,Register src2)243   void Push(Register src1, Register src2) {
244     Sub_d(sp, sp, Operand(2 * kPointerSize));
245     St_d(src1, MemOperand(sp, 1 * kPointerSize));
246     St_d(src2, MemOperand(sp, 0 * kPointerSize));
247   }
248 
249   // Push three registers. Pushes leftmost register first (to highest address).
Push(Register src1,Register src2,Register src3)250   void Push(Register src1, Register src2, Register src3) {
251     Sub_d(sp, sp, Operand(3 * kPointerSize));
252     St_d(src1, MemOperand(sp, 2 * kPointerSize));
253     St_d(src2, MemOperand(sp, 1 * kPointerSize));
254     St_d(src3, MemOperand(sp, 0 * kPointerSize));
255   }
256 
257   // Push four registers. Pushes leftmost register first (to highest address).
Push(Register src1,Register src2,Register src3,Register src4)258   void Push(Register src1, Register src2, Register src3, Register src4) {
259     Sub_d(sp, sp, Operand(4 * kPointerSize));
260     St_d(src1, MemOperand(sp, 3 * kPointerSize));
261     St_d(src2, MemOperand(sp, 2 * kPointerSize));
262     St_d(src3, MemOperand(sp, 1 * kPointerSize));
263     St_d(src4, MemOperand(sp, 0 * kPointerSize));
264   }
265 
266   // Push five registers. Pushes leftmost register first (to highest address).
Push(Register src1,Register src2,Register src3,Register src4,Register src5)267   void Push(Register src1, Register src2, Register src3, Register src4,
268             Register src5) {
269     Sub_d(sp, sp, Operand(5 * kPointerSize));
270     St_d(src1, MemOperand(sp, 4 * kPointerSize));
271     St_d(src2, MemOperand(sp, 3 * kPointerSize));
272     St_d(src3, MemOperand(sp, 2 * kPointerSize));
273     St_d(src4, MemOperand(sp, 1 * kPointerSize));
274     St_d(src5, MemOperand(sp, 0 * kPointerSize));
275   }
276 
277   enum PushArrayOrder { kNormal, kReverse };
278   void PushArray(Register array, Register size, Register scratch,
279                  Register scratch2, PushArrayOrder order = kNormal);
280 
281   void MaybeSaveRegisters(RegList registers);
282   void MaybeRestoreRegisters(RegList registers);
283 
284   void CallEphemeronKeyBarrier(Register object, Operand offset,
285                                SaveFPRegsMode fp_mode);
286 
287   void CallRecordWriteStubSaveRegisters(
288       Register object, Operand offset,
289       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
290       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
291   void CallRecordWriteStub(
292       Register object, Register slot_address,
293       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
294       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
295 
296   // For a given |object| and |offset|:
297   //   - Move |object| to |dst_object|.
298   //   - Compute the address of the slot pointed to by |offset| in |object| and
299   //     write it to |dst_slot|.
300   // This method makes sure |object| and |offset| are allowed to overlap with
301   // the destination registers.
302   void MoveObjectAndSlot(Register dst_object, Register dst_slot,
303                          Register object, Operand offset);
304 
305   // Push multiple registers on the stack.
306   // Registers are saved in numerical order, with higher numbered registers
307   // saved in higher memory addresses.
308   void MultiPush(RegList regs);
309   void MultiPush(RegList regs1, RegList regs2);
310   void MultiPush(RegList regs1, RegList regs2, RegList regs3);
311   void MultiPushFPU(DoubleRegList regs);
312 
313   // Calculate how much stack space (in bytes) are required to store caller
314   // registers excluding those specified in the arguments.
315   int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
316                                       Register exclusion1 = no_reg,
317                                       Register exclusion2 = no_reg,
318                                       Register exclusion3 = no_reg) const;
319 
320   // Push caller saved registers on the stack, and return the number of bytes
321   // stack pointer is adjusted.
322   int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
323                       Register exclusion2 = no_reg,
324                       Register exclusion3 = no_reg);
325   // Restore caller saved registers from the stack, and return the number of
326   // bytes stack pointer is adjusted.
327   int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
328                      Register exclusion2 = no_reg,
329                      Register exclusion3 = no_reg);
330 
Pop(Register dst)331   void Pop(Register dst) {
332     Ld_d(dst, MemOperand(sp, 0));
333     Add_d(sp, sp, Operand(kPointerSize));
334   }
335 
336   // Pop two registers. Pops rightmost register first (from lower address).
Pop(Register src1,Register src2)337   void Pop(Register src1, Register src2) {
338     DCHECK(src1 != src2);
339     Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
340     Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
341     Add_d(sp, sp, 2 * kPointerSize);
342   }
343 
344   // Pop three registers. Pops rightmost register first (from lower address).
Pop(Register src1,Register src2,Register src3)345   void Pop(Register src1, Register src2, Register src3) {
346     Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
347     Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
348     Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
349     Add_d(sp, sp, 3 * kPointerSize);
350   }
351 
352   // Pops multiple values from the stack and load them in the
353   // registers specified in regs. Pop order is the opposite as in MultiPush.
354   void MultiPop(RegList regs);
355   void MultiPop(RegList regs1, RegList regs2);
356   void MultiPop(RegList regs1, RegList regs2, RegList regs3);
357 
358   void MultiPopFPU(DoubleRegList regs);
359 
360 #define DEFINE_INSTRUCTION(instr)                          \
361   void instr(Register rd, Register rj, const Operand& rk); \
362   void instr(Register rd, Register rj, Register rk) {      \
363     instr(rd, rj, Operand(rk));                            \
364   }                                                        \
365   void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); }
366 
367 #define DEFINE_INSTRUCTION2(instr)                                 \
368   void instr(Register rj, const Operand& rk);                      \
369   void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \
370   void instr(Register rj, int32_t j) { instr(rj, Operand(j)); }
371 
372   DEFINE_INSTRUCTION(Add_w)
DEFINE_INSTRUCTION(Add_d)373   DEFINE_INSTRUCTION(Add_d)
374   DEFINE_INSTRUCTION(Div_w)
375   DEFINE_INSTRUCTION(Div_wu)
376   DEFINE_INSTRUCTION(Div_du)
377   DEFINE_INSTRUCTION(Mod_w)
378   DEFINE_INSTRUCTION(Mod_wu)
379   DEFINE_INSTRUCTION(Div_d)
380   DEFINE_INSTRUCTION(Sub_w)
381   DEFINE_INSTRUCTION(Sub_d)
382   DEFINE_INSTRUCTION(Mod_d)
383   DEFINE_INSTRUCTION(Mod_du)
384   DEFINE_INSTRUCTION(Mul_w)
385   DEFINE_INSTRUCTION(Mulh_w)
386   DEFINE_INSTRUCTION(Mulh_wu)
387   DEFINE_INSTRUCTION(Mul_d)
388   DEFINE_INSTRUCTION(Mulh_d)
389   DEFINE_INSTRUCTION2(Div_w)
390   DEFINE_INSTRUCTION2(Div_d)
391   DEFINE_INSTRUCTION2(Div_wu)
392   DEFINE_INSTRUCTION2(Div_du)
393 
394   DEFINE_INSTRUCTION(And)
395   DEFINE_INSTRUCTION(Or)
396   DEFINE_INSTRUCTION(Xor)
397   DEFINE_INSTRUCTION(Nor)
398   DEFINE_INSTRUCTION2(Neg)
399   DEFINE_INSTRUCTION(Andn)
400   DEFINE_INSTRUCTION(Orn)
401 
402   DEFINE_INSTRUCTION(Slt)
403   DEFINE_INSTRUCTION(Sltu)
404   DEFINE_INSTRUCTION(Slti)
405   DEFINE_INSTRUCTION(Sltiu)
406   DEFINE_INSTRUCTION(Sle)
407   DEFINE_INSTRUCTION(Sleu)
408   DEFINE_INSTRUCTION(Sgt)
409   DEFINE_INSTRUCTION(Sgtu)
410   DEFINE_INSTRUCTION(Sge)
411   DEFINE_INSTRUCTION(Sgeu)
412 
413   DEFINE_INSTRUCTION(Rotr_w)
414   DEFINE_INSTRUCTION(Rotr_d)
415 
416 #undef DEFINE_INSTRUCTION
417 #undef DEFINE_INSTRUCTION2
418 #undef DEFINE_INSTRUCTION3
419 
420   void SmiTag(Register dst, Register src) {
421     STATIC_ASSERT(kSmiTag == 0);
422     if (SmiValuesAre32Bits()) {
423       slli_d(dst, src, 32);
424     } else {
425       DCHECK(SmiValuesAre31Bits());
426       add_w(dst, src, src);
427     }
428   }
429 
SmiTag(Register reg)430   void SmiTag(Register reg) { SmiTag(reg, reg); }
431 
432   void SmiUntag(Register dst, const MemOperand& src);
SmiUntag(Register dst,Register src)433   void SmiUntag(Register dst, Register src) {
434     if (SmiValuesAre32Bits()) {
435       srai_d(dst, src, kSmiShift);
436     } else {
437       DCHECK(SmiValuesAre31Bits());
438       srai_w(dst, src, kSmiShift);
439     }
440   }
441 
SmiUntag(Register reg)442   void SmiUntag(Register reg) { SmiUntag(reg, reg); }
443 
444   // Left-shifted from int32 equivalent of Smi.
SmiScale(Register dst,Register src,int scale)445   void SmiScale(Register dst, Register src, int scale) {
446     if (SmiValuesAre32Bits()) {
447       // The int portion is upper 32-bits of 64-bit word.
448       srai_d(dst, src, kSmiShift - scale);
449     } else {
450       DCHECK(SmiValuesAre31Bits());
451       DCHECK_GE(scale, kSmiTagSize);
452       slli_w(dst, src, scale - kSmiTagSize);
453     }
454   }
455 
456   // On LoongArch64, we should sign-extend 32-bit values.
SmiToInt32(Register smi)457   void SmiToInt32(Register smi) {
458     if (FLAG_enable_slow_asserts) {
459       AssertSmi(smi);
460     }
461     DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
462     SmiUntag(smi);
463   }
464 
465   // Abort execution if argument is a smi, enabled via --debug-code.
466   void AssertNotSmi(Register object);
467   void AssertSmi(Register object);
468 
469   int CalculateStackPassedWords(int num_reg_arguments,
470                                 int num_double_arguments);
471 
472   // Before calling a C-function from generated code, align arguments on stack.
473   // After aligning the frame, non-register arguments must be stored on the
474   // stack, after the argument-slots using helper: CFunctionArgumentOperand().
475   // The argument count assumes all arguments are word sized.
476   // Some compilers/platforms require the stack to be aligned when calling
477   // C++ code.
478   // Needs a scratch register to do some arithmetic. This register will be
479   // trashed.
480   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
481                             Register scratch);
482   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
483 
484   // Calls a C function and cleans up the space for arguments allocated
485   // by PrepareCallCFunction. The called function is not allowed to trigger a
486   // garbage collection, since that might move the code and invalidate the
487   // return address (unless this is somehow accounted for by the called
488   // function).
489   void CallCFunction(ExternalReference function, int num_arguments);
490   void CallCFunction(Register function, int num_arguments);
491   void CallCFunction(ExternalReference function, int num_reg_arguments,
492                      int num_double_arguments);
493   void CallCFunction(Register function, int num_reg_arguments,
494                      int num_double_arguments);
495 
496   // See comments at the beginning of Builtins::Generate_CEntry.
PrepareCEntryArgs(int num_args)497   inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
PrepareCEntryFunction(const ExternalReference & ref)498   inline void PrepareCEntryFunction(const ExternalReference& ref) {
499     li(a1, ref);
500   }
501 
502   void CheckPageFlag(const Register& object, int mask, Condition cc,
503                      Label* condition_met);
504 #undef COND_ARGS
505 
506   // Performs a truncating conversion of a floating point number as used by
507   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
508   // Exits with 'result' holding the answer.
509   void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
510                          DoubleRegister double_input, StubCallMode stub_mode);
511 
512   // Conditional move.
513   void Movz(Register rd, Register rj, Register rk);
514   void Movn(Register rd, Register rj, Register rk);
515 
516   void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0);
517   void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0);
518 
519   void LoadZeroIfConditionNotZero(Register dest, Register condition);
520   void LoadZeroIfConditionZero(Register dest, Register condition);
521   void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk,
522                            Condition cond);
523 
524   void Clz_w(Register rd, Register rj);
525   void Clz_d(Register rd, Register rj);
526   void Ctz_w(Register rd, Register rj);
527   void Ctz_d(Register rd, Register rj);
528   void Popcnt_w(Register rd, Register rj);
529   void Popcnt_d(Register rd, Register rj);
530 
531   void ExtractBits(Register dest, Register source, Register pos, int size,
532                    bool sign_extend = false);
533   void InsertBits(Register dest, Register source, Register pos, int size);
534 
535   void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lswb);
536   void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
537   void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
538   void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
539   void Neg_s(FPURegister fd, FPURegister fj);
540   void Neg_d(FPURegister fd, FPURegister fk);
541 
542   // Convert single to unsigned word.
543   void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch);
544   void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch);
545 
546   // Change endianness
547   void ByteSwapSigned(Register dest, Register src, int operand_size);
548   void ByteSwapUnsigned(Register dest, Register src, int operand_size);
549 
550   void Ld_b(Register rd, const MemOperand& rj);
551   void Ld_bu(Register rd, const MemOperand& rj);
552   void St_b(Register rd, const MemOperand& rj);
553 
554   void Ld_h(Register rd, const MemOperand& rj);
555   void Ld_hu(Register rd, const MemOperand& rj);
556   void St_h(Register rd, const MemOperand& rj);
557 
558   void Ld_w(Register rd, const MemOperand& rj);
559   void Ld_wu(Register rd, const MemOperand& rj);
560   void St_w(Register rd, const MemOperand& rj);
561 
562   void Fld_s(FPURegister fd, const MemOperand& src);
563   void Fst_s(FPURegister fj, const MemOperand& dst);
564 
565   void Fld_d(FPURegister fd, const MemOperand& src);
566   void Fst_d(FPURegister fj, const MemOperand& dst);
567 
568   void Ll_w(Register rd, const MemOperand& rj);
569   void Sc_w(Register rd, const MemOperand& rj);
570 
571   void Ll_d(Register rd, const MemOperand& rj);
572   void Sc_d(Register rd, const MemOperand& rj);
573 
574   // These functions assume (and assert) that src1!=src2. It is permitted
575   // for the result to alias either input register.
576   void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
577                   Label* out_of_line);
578   void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
579                   Label* out_of_line);
580   void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
581                   Label* out_of_line);
582   void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
583                   Label* out_of_line);
584 
585   // Generate out-of-line cases for the macros above.
586   void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
587   void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
588   void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
589   void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
590 
IsDoubleZeroRegSet()591   bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
592 
mov(Register rd,Register rj)593   void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); }
594 
Move(Register dst,Handle<HeapObject> handle)595   inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
Move(Register dst,Smi smi)596   inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
597 
Move(Register dst,Register src)598   inline void Move(Register dst, Register src) {
599     if (dst != src) {
600       mov(dst, src);
601     }
602   }
603 
FmoveLow(Register dst_low,FPURegister src)604   inline void FmoveLow(Register dst_low, FPURegister src) {
605     movfr2gr_s(dst_low, src);
606   }
607 
608   void FmoveLow(FPURegister dst, Register src_low);
609 
Move(FPURegister dst,FPURegister src)610   inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
611 
Move_d(FPURegister dst,FPURegister src)612   inline void Move_d(FPURegister dst, FPURegister src) {
613     if (dst != src) {
614       fmov_d(dst, src);
615     }
616   }
617 
Move_s(FPURegister dst,FPURegister src)618   inline void Move_s(FPURegister dst, FPURegister src) {
619     if (dst != src) {
620       fmov_s(dst, src);
621     }
622   }
623 
Move(FPURegister dst,float imm)624   void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
Move(FPURegister dst,double imm)625   void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
626   void Move(FPURegister dst, uint32_t src);
627   void Move(FPURegister dst, uint64_t src);
628 
629   // AddOverflow_d sets overflow register to a negative value if
630   // overflow occured, otherwise it is zero or positive
631   void AddOverflow_d(Register dst, Register left, const Operand& right,
632                      Register overflow);
633   // SubOverflow_d sets overflow register to a negative value if
634   // overflow occured, otherwise it is zero or positive
635   void SubOverflow_d(Register dst, Register left, const Operand& right,
636                      Register overflow);
637   // MulOverflow_w sets overflow register to zero if no overflow occured
638   void MulOverflow_w(Register dst, Register left, const Operand& right,
639                      Register overflow);
640 
641   // TODO(LOONG_dev): LOONG64 Remove this constant
642   // Number of instructions needed for calculation of switch table entry address
643   static const int kSwitchTablePrologueSize = 5;
644 
645   // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
646   // functor/function with 'Label *func(size_t index)' declaration.
647   template <typename Func>
648   void GenerateSwitchTable(Register index, size_t case_count,
649                            Func GetLabelFunction);
650 
651   // Load an object from the root table.
652   void LoadRoot(Register destination, RootIndex index) final;
653   void LoadRoot(Register destination, RootIndex index, Condition cond,
654                 Register src1, const Operand& src2);
655 
656   void LoadMap(Register destination, Register object);
657 
658   // If the value is a NaN, canonicalize the value else, do nothing.
659   void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
660 
661   // ---------------------------------------------------------------------------
662   // FPU macros. These do not handle special cases like NaN or +- inf.
663 
664   // Convert unsigned word to double.
665   void Ffint_d_uw(FPURegister fd, FPURegister fj);
  void Ffint_d_uw(FPURegister fd, Register rj);

  // Convert unsigned long to double.
  void Ffint_d_ul(FPURegister fd, FPURegister fj);
  void Ffint_d_ul(FPURegister fd, Register rj);

  // Convert unsigned word to float.
  void Ffint_s_uw(FPURegister fd, FPURegister fj);
  void Ffint_s_uw(FPURegister fd, Register rj);

  // Convert unsigned long to float.
  void Ffint_s_ul(FPURegister fd, FPURegister fj);
  void Ffint_s_ul(FPURegister fd, Register rj);

  // Convert double to unsigned word. |scratch| is clobbered.
  void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch);
  void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch);

  // Convert single to unsigned word. |scratch| is clobbered.
  void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch);

  // Convert double to unsigned long. |scratch| is clobbered.
  // NOTE(review): when |result| is provided it presumably receives a
  // success/failure flag for the conversion — confirm against the .cc.
  void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch,
                    Register result = no_reg);
  void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch,
                    Register result = no_reg);

  // Convert single to unsigned long. |scratch| is clobbered; see the note on
  // |result| above Ftintrz_ul_d.
  void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch,
                    Register result = no_reg);
  void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch,
                    Register result = no_reg);

  // Round double functions
  void Trunc_d(FPURegister fd, FPURegister fj);
  void Round_d(FPURegister fd, FPURegister fj);
  void Floor_d(FPURegister fd, FPURegister fj);
  void Ceil_d(FPURegister fd, FPURegister fj);

  // Round float functions
  void Trunc_s(FPURegister fd, FPURegister fj);
  void Round_s(FPURegister fd, FPURegister fj);
  void Floor_s(FPURegister fd, FPURegister fj);
  void Ceil_s(FPURegister fd, FPURegister fj);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label);
JumpIfEqual(Register a,int32_t b,Label * dest)715   void JumpIfEqual(Register a, int32_t b, Label* dest) {
716     UseScratchRegisterScope temps(this);
717     Register scratch = temps.Acquire();
718     li(scratch, Operand(b));
719     Branch(dest, eq, a, Operand(scratch));
720   }
721 
JumpIfLessThan(Register a,int32_t b,Label * dest)722   void JumpIfLessThan(Register a, int32_t b, Label* dest) {
723     UseScratchRegisterScope temps(this);
724     Register scratch = temps.Acquire();
725     li(scratch, Operand(b));
726     Branch(dest, lt, a, Operand(scratch));
727   }
728 
  // Push a standard frame, consisting of ra, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Load Scaled Address instructions. Parameter sa (shift argument) must be
  // between [1, 31] (inclusive). The scratch register may be clobbered.
  // NOTE(review): per LoongArch `alsl` semantics this presumably computes
  // rd = (rj << sa) + rk — confirm against the .cc implementation.
  void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
              Register scratch = t7);
  void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
              Register scratch = t7);

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler. No code is emitted on this architecture.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }
755 
 protected:
  inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
  inline int32_t GetOffset(Label* L, OffsetSize bits);

 private:
  bool has_double_zero_reg_set_ = false;

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);

  bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
                             const Operand& rk, bool need_link);

  // Floating-point compare, writing the result into condition-flag register
  // |cd|. |f32| selects single-precision (true) vs double-precision (false).
  void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
                CFRegister cd, bool f32 = true);

  // NaN check for a floating-point pair; |f32| selects precision as above.
  void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
                     bool f32 = true);

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  // Shared implementations backing the Trunc_/Round_/Floor_/Ceil_ wrappers.
  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);

  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);

  // Push a fixed frame, consisting of ra, fp.
  void PushCommonFrame(Register marker_reg = no_reg);
};
790 
// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  // It assumes that the arguments are located below the stack pointer.
  // argc is the number of arguments not including the receiver.
  // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
  // reversed arguments order.
  // Note: |argc| is currently unused; the receiver is always loaded from the
  // top of the stack.
  void LoadReceiver(Register dest, Register argc) {
    Ld_d(dest, MemOperand(sp, 0));
  }

  // Store |rec| into the receiver slot at the top of the stack.
  // Note: |argc| and |scratch| are currently unused.
  void StoreReceiver(Register rec, Register argc, Register scratch) {
    St_d(rec, MemOperand(sp, 0));
  }

  bool IsNear(Label* L, Condition cond, int rs_reg);

  // Swap two registers.  If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  // Push the root-list value at |index| onto the stack.
  void PushRoot(RootIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_equal, eq, with, Operand(scratch));
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_not_equal, ne, with, Operand(scratch));
  }

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(Register value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written.  |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value, RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline);

  // ---------------------------------------------------------------------------
  // Pseudo-instructions.

  // Convert double to unsigned long. |scratch| is clobbered.
  void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch);

  // Double-to-long conversions with truncate/nearest/floor/ceiling rounding.
  void Ftintrz_l_d(FPURegister fd, FPURegister fj);
  void Ftintrne_l_d(FPURegister fd, FPURegister fj);
  void Ftintrm_l_d(FPURegister fd, FPURegister fj);
  void Ftintrp_l_d(FPURegister fd, FPURegister fj);

  // Double-to-word conversions with truncate/nearest/floor/ceiling rounding.
  void Ftintrz_w_d(FPURegister fd, FPURegister fj);
  void Ftintrne_w_d(FPURegister fd, FPURegister fj);
  void Ftintrm_w_d(FPURegister fd, FPURegister fj);
  void Ftintrp_w_d(FPURegister fd, FPURegister fj);

  // Fused multiply-add / multiply-subtract.
  void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
  void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
  void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
  void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
  }

  void LoadNativeContextSlot(Register dst, int index);

  // Load the initial map from the global function. The registers
  // function and map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function, Register map,
                                    Register scratch);

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeType type);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeType type);
  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeType type);

  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // -------------------------------------------------------------------------
  // Support functions.

  void GetObjectType(Register function, Register map, Register type_reg);

  void GetInstanceTypeRange(Register map, Register type_reg,
                            InstanceType lower_limit, Register range);

  // -------------------------------------------------------------------------
  // Runtime calls.

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid and an explicit
  // argument count instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToOffHeapInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  // Increment a native-code counter; no-op unless --native-code-counters.
  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitIncrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);
  // Decrement a native-code counter; no-op unless --native-code-counters.
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2) {
    if (!FLAG_native_code_counters) return;
    EmitDecrementCounter(counter, value, scratch1, scratch2);
  }
  void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
                            Register scratch2);

  // -------------------------------------------------------------------------
  // Stack limit utilities

  enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Register scratch1,
                          Register scratch2, Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Smi utilities.

  // Test if the register contains a smi: leaves the smi-tag bits of |value|
  // in |scratch| (zero iff |value| is a smi).
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a callable JSFunction, enabled via
  // --debug-code.
  void AssertCallableFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // Extract the bit field described by Field (kShift/kSize) from src into dst.
  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
  }

  // In-place variant of the above.
  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeType type);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
1061 
1062 template <typename Func>
GenerateSwitchTable(Register index,size_t case_count,Func GetLabelFunction)1063 void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
1064                                          Func GetLabelFunction) {
1065   UseScratchRegisterScope scope(this);
1066   Register scratch = scope.Acquire();
1067   BlockTrampolinePoolFor(3 + case_count);
1068 
1069   pcaddi(scratch, 3);
1070   alsl_d(scratch, index, scratch, kInstrSizeLog2);
1071   jirl(zero_reg, scratch, 0);
1072   for (size_t index = 0; index < case_count; ++index) {
1073     b(GetLabelFunction(index));
1074   }
1075 }
1076 
1077 #define ACCESS_MASM(masm) masm->
1078 
1079 }  // namespace internal
1080 }  // namespace v8
1081 
1082 #endif  // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
1083