1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
6 #error This header must be included via macro-assembler.h
7 #endif
8 
9 #ifndef V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
10 #define V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
11 
12 #include "src/codegen/assembler.h"
13 #include "src/codegen/riscv64/assembler-riscv64.h"
14 #include "src/common/globals.h"
15 #include "src/execution/isolate-data.h"
16 #include "src/objects/tagged-index.h"
17 
18 namespace v8 {
19 namespace internal {
20 
21 // Forward declarations.
22 enum class AbortReason : uint8_t;
23 
24 // Reserved Register Usage Summary.
25 //
26 // Registers t5, t6, and t3 are reserved for use by the MacroAssembler.
27 //
28 // The programmer should know that the MacroAssembler may clobber these three,
29 // but won't touch other registers except in special cases.
30 //
31 // TODO(RISCV): Cannot find info about this ABI. We chose t6 for now.
32 // Per the RISC-V ABI, register t6 must be used for indirect function call
33 // via 'jalr t6' or 'jr t6' instructions. This is relied upon by gcc when
34 // trying to update gp register for position-independent-code. Whenever
35 // RISC-V generated code calls C code, it must be via t6 register.
36 
37 // Flags used for LeaveExitFrame function.
38 enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
39 
40 // Flags used for the li macro-assembler function.
41 enum LiFlags {
42   // If the constant value can be represented in just 16 bits, then
43   // optimize the li to use a single instruction, rather than a lui/ori/slli
44   // sequence. A number of other optimizations that emit fewer than the
45   // maximum number of instructions also exist.
46   OPTIMIZE_SIZE = 0,
47   // Always use 8 instructions (lui/addi/slliw sequence), even if the
48   // constant
49   // could be loaded with just one, so that this value is patchable later.
50   CONSTANT_SIZE = 1,
51   // For address loads, 8 instructions are required. Used to mark a
52   // constant load that will be used as an address without relocation
53   // information. It ensures a predictable code size, so that specific sites
54   // in the code are patchable.
55   ADDRESS_LOAD = 2
56 };
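
// Usage sketch (illustrative; |masm| is an assumed TurboAssembler* and |imm|
// an assumed 64-bit constant):
//   masm->li(t0, Operand(imm), OPTIMIZE_SIZE);   // shortest possible sequence
//   masm->li(t0, Operand(imm), CONSTANT_SIZE);   // fixed-length, patchable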
57 
58 enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
59 
60 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
61                                    Register reg3 = no_reg,
62                                    Register reg4 = no_reg,
63                                    Register reg5 = no_reg,
64                                    Register reg6 = no_reg);
65 
66 // -----------------------------------------------------------------------------
67 // Static helper functions.
68 
69 #if defined(V8_TARGET_LITTLE_ENDIAN)
70 #define SmiWordOffset(offset) (offset + kSystemPointerSize / 2)
71 #else
72 #define SmiWordOffset(offset) offset
73 #endif
74 
75 // Generate a MemOperand for loading a field from an object.
76 inline MemOperand FieldMemOperand(Register object, int offset) {
77   return MemOperand(object, offset - kHeapObjectTag);
78 }
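
// Usage sketch (illustrative; |obj| is an assumed register holding a tagged
// HeapObject pointer, and pointer compression is assumed to be disabled):
//   masm->Ld(t0, FieldMemOperand(obj, HeapObject::kMapOffset));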
79 
80 // Generate a MemOperand for storing arguments 5..N on the stack
81 // when calling CallCFunction().
82 // TODO(plind): Currently ONLY used for O32. Should be fixed for
83 //              n64, and used in RegExp code, and other places
84 //              with more than 8 arguments.
85 inline MemOperand CFunctionArgumentOperand(int index) {
86   DCHECK_GT(index, kCArgSlotCount);
87   // Argument 5 takes the slot just past the four Arg-slots.
88   int offset = (index - 5) * kSystemPointerSize + kCArgsSlotsSize;
89   return MemOperand(sp, offset);
90 }
91 
92 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
93  public:
94   using TurboAssemblerBase::TurboAssemblerBase;
95 
96   // Activation support.
97   void EnterFrame(StackFrame::Type type);
98   void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
99     // Out-of-line constant pool not implemented on RISC-V.
100     UNREACHABLE();
101   }
102   void LeaveFrame(StackFrame::Type type);
103 
104   // Generates function and stub prologue code.
105   void StubPrologue(StackFrame::Type type);
106   void Prologue();
107 
108   void InitializeRootRegister() {
109     ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
110     li(kRootRegister, Operand(isolate_root));
111 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
112     LoadRootRelative(kPtrComprCageBaseRegister,
113                      IsolateData::cage_base_offset());
114 #endif
115   }
116 
117   // Jump unconditionally to given label.
118   void jmp(Label* L) { Branch(L); }
119 
120   // -------------------------------------------------------------------------
121   // Debugging.
122 
123   void Trap();
124   void DebugBreak();
125 
126   // Calls Abort(msg) if the condition cc is not satisfied.
127   // Use --debug_code to enable.
128   void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
129 
130   // Like Assert(), but always enabled.
131   void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
132 
133   // Print a message to stdout and abort execution.
134   void Abort(AbortReason msg);
135 
136   // Arguments macros.
137 #define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
138 #define COND_ARGS cond, r1, r2
139 
140   // Cases when relocation is not needed.
141 #define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
142   void Name(target_type target);                     \
143   void Name(target_type target, COND_TYPED_ARGS);
144 
145 #define DECLARE_BRANCH_PROTOTYPES(Name)   \
146   DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
147   DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
148 
149   DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
150   DECLARE_BRANCH_PROTOTYPES(BranchShort)
151 
152   void Branch(Label* target);
153   void Branch(int32_t target);
154   void BranchLong(Label* L);
155   void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
156               Label::Distance near_jump = Label::kFar);
157   void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
158               Label::Distance near_jump = Label::kFar);
159 #undef DECLARE_BRANCH_PROTOTYPES
160 #undef COND_TYPED_ARGS
161 #undef COND_ARGS
162 
163   void AllocateStackSpace(Register bytes) { Sub64(sp, sp, bytes); }
164 
165   void AllocateStackSpace(int bytes) {
166     DCHECK_GE(bytes, 0);
167     if (bytes == 0) return;
168     Sub64(sp, sp, Operand(bytes));
169   }
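
  // Usage sketch (illustrative): reserve two pointer-sized spill slots with
  //   masm->AllocateStackSpace(2 * kSystemPointerSize);
  // and release them later with a matching Add64 on sp, or with Drop(2).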
170 
171   inline void NegateBool(Register rd, Register rs) { Xor(rd, rs, 1); }
172 
173   // Compare float, if any operand is NaN, result is false except for NE
174   void CompareF32(Register rd, FPUCondition cc, FPURegister cmp1,
175                   FPURegister cmp2);
176   // Compare double, if any operand is NaN, result is false except for NE
177   void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
178                   FPURegister cmp2);
179   void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
180   void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
181   void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
182   void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
183 
184   // Floating point branches
185   void BranchTrueShortF(Register rs, Label* target);
186   void BranchFalseShortF(Register rs, Label* target);
187 
188   void BranchTrueF(Register rs, Label* target);
189   void BranchFalseF(Register rs, Label* target);
190 
191   void Branch(Label* L, Condition cond, Register rs, RootIndex index);
192 
193   static int InstrCountForLi64Bit(int64_t value);
194   inline void LiLower32BitHelper(Register rd, Operand j);
195   void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
196   // Load int32 in the rd register.
197   void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
198   inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
199     li(rd, Operand(j), mode);
200   }
201 
202   inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
203   void li(Register dst, Handle<HeapObject> value,
204           RelocInfo::Mode rmode = RelocInfo::FULL_EMBEDDED_OBJECT);
205   void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
206   void li(Register dst, const StringConstantBase* string,
207           LiFlags mode = OPTIMIZE_SIZE);
208 
209   void LoadFromConstantsTable(Register destination, int constant_index) final;
210   void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
211   void LoadRootRelative(Register destination, int32_t offset) final;
212 
213   inline void GenPCRelativeJump(Register rd, int64_t imm32) {
214     DCHECK(is_int32(imm32 + 0x800));
215     int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
216     int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
217     auipc(rd, Hi20);  // Read PC + Hi20 into scratch.
218     jr(rd, Lo12);     // jump PC + Hi20 + Lo12
219   }
220 
221   inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
222     DCHECK(is_int32(imm32 + 0x800));
223     int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
224     int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
225     auipc(rd, Hi20);  // Read PC + Hi20 into scratch.
226     jalr(rd, Lo12);   // jump PC + Hi20 + Lo12
227   }
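
  // Worked example (illustrative): for imm32 = 0x12345,
  //   Hi20 = (0x12345 + 0x800) >> 12 = 0x12 and Lo12 = 0x345,
  // so auipc adds 0x12000 and the jump adds 0x345, reaching pc + 0x12345.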
228 // Jump, Call, and Ret pseudo instructions implementing inter-working.
229 #define COND_ARGS                              \
230   Condition cond = al, Register rs = zero_reg, \
231             const Operand &rt = Operand(zero_reg)
232 
233   void Jump(Register target, COND_ARGS);
234   void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
235   void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
236   // Differs from li: this method saves the target to memory and then loads
237   // it into a register using ld, so it can be used in the wasm jump table
238   // for concurrent patching.
239   void PatchAndJump(Address target);
240   void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
241   void Jump(const ExternalReference& reference);
242   void Call(Register target, COND_ARGS);
243   void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
244   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
245             COND_ARGS);
246   void Call(Label* target);
247   void LoadAddress(
248       Register dst, Label* target,
249       RelocInfo::Mode rmode = RelocInfo::INTERNAL_REFERENCE_ENCODED);
250 
251   // Load the builtin given by the Smi in |builtin| into the same
252   // register.
253   void LoadEntryFromBuiltinIndex(Register builtin);
254   void LoadEntryFromBuiltin(Builtin builtin, Register destination);
255   MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
256   void CallBuiltinByIndex(Register builtin);
257   void CallBuiltin(Builtin builtin);
258   void TailCallBuiltin(Builtin builtin);
259 
260   void LoadCodeObjectEntry(Register destination, Register code_object);
261   void CallCodeObject(Register code_object);
262   void JumpCodeObject(Register code_object,
263                       JumpMode jump_mode = JumpMode::kJump);
264 
265   // Generates an instruction sequence s.t. the return address points to the
266   // instruction following the call.
267   // The return address on the stack is used by frame iteration.
268   void StoreReturnAddressAndCall(Register target);
269 
270   void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
271                              DeoptimizeKind kind, Label* ret,
272                              Label* jump_deoptimization_entry_label);
273 
274   void Ret(COND_ARGS);
275 
276   // Emit code to discard a non-negative number of pointer-sized elements
277   // from the stack, clobbering only the sp register.
278   void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
279             const Operand& op = Operand(no_reg));
280 
281   // Trivial case of DropAndRet that only emits 2 instructions.
282   void DropAndRet(int drop);
283 
284   void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
285 
286   void Ld(Register rd, const MemOperand& rs);
287   void Sd(Register rd, const MemOperand& rs);
288 
289   void push(Register src) {
290     Add64(sp, sp, Operand(-kSystemPointerSize));
291     Sd(src, MemOperand(sp, 0));
292   }
293   void Push(Register src) { push(src); }
294   void Push(Handle<HeapObject> handle);
295   void Push(Smi smi);
296 
297   // Push two registers. Pushes leftmost register first (to highest address).
298   void Push(Register src1, Register src2) {
299     Sub64(sp, sp, Operand(2 * kSystemPointerSize));
300     Sd(src1, MemOperand(sp, 1 * kSystemPointerSize));
301     Sd(src2, MemOperand(sp, 0 * kSystemPointerSize));
302   }
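
  // Usage sketch (illustrative): Push(a0, a1) stores a0 at sp+8 and a1 at
  // sp+0, so a later Pop(a0, a1) restores both registers to their old values.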
303 
304   // Push three registers. Pushes leftmost register first (to highest address).
305   void Push(Register src1, Register src2, Register src3) {
306     Sub64(sp, sp, Operand(3 * kSystemPointerSize));
307     Sd(src1, MemOperand(sp, 2 * kSystemPointerSize));
308     Sd(src2, MemOperand(sp, 1 * kSystemPointerSize));
309     Sd(src3, MemOperand(sp, 0 * kSystemPointerSize));
310   }
311 
312   // Push four registers. Pushes leftmost register first (to highest address).
313   void Push(Register src1, Register src2, Register src3, Register src4) {
314     Sub64(sp, sp, Operand(4 * kSystemPointerSize));
315     Sd(src1, MemOperand(sp, 3 * kSystemPointerSize));
316     Sd(src2, MemOperand(sp, 2 * kSystemPointerSize));
317     Sd(src3, MemOperand(sp, 1 * kSystemPointerSize));
318     Sd(src4, MemOperand(sp, 0 * kSystemPointerSize));
319   }
320 
321   // Push five registers. Pushes leftmost register first (to highest address).
322   void Push(Register src1, Register src2, Register src3, Register src4,
323             Register src5) {
324     Sub64(sp, sp, Operand(5 * kSystemPointerSize));
325     Sd(src1, MemOperand(sp, 4 * kSystemPointerSize));
326     Sd(src2, MemOperand(sp, 3 * kSystemPointerSize));
327     Sd(src3, MemOperand(sp, 2 * kSystemPointerSize));
328     Sd(src4, MemOperand(sp, 1 * kSystemPointerSize));
329     Sd(src5, MemOperand(sp, 0 * kSystemPointerSize));
330   }
331 
332   void Push(Register src, Condition cond, Register tst1, Register tst2) {
333     // Since we don't have conditional execution we use a Branch.
334     Branch(3, cond, tst1, Operand(tst2));
335     Sub64(sp, sp, Operand(kSystemPointerSize));
336     Sd(src, MemOperand(sp, 0));
337   }
338 
339   enum PushArrayOrder { kNormal, kReverse };
340   void PushArray(Register array, Register size, PushArrayOrder order = kNormal);
341 
342   void MaybeSaveRegisters(RegList registers);
343   void MaybeRestoreRegisters(RegList registers);
344 
345   void CallEphemeronKeyBarrier(Register object, Register slot_address,
346                                SaveFPRegsMode fp_mode);
347 
348   void CallRecordWriteStubSaveRegisters(
349       Register object, Register slot_address,
350       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
351       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
352   void CallRecordWriteStub(
353       Register object, Register slot_address,
354       RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
355       StubCallMode mode = StubCallMode::kCallBuiltinPointer);
356 
357   // Push multiple registers on the stack.
358   // Registers are saved in numerical order, with higher numbered registers
359   // saved in higher memory addresses.
360   void MultiPush(RegList regs);
361   void MultiPushFPU(DoubleRegList regs);
362 
363   // Calculate how much stack space (in bytes) is required to store caller
364   // registers, excluding those specified in the arguments.
365   int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
366                                       Register exclusion1 = no_reg,
367                                       Register exclusion2 = no_reg,
368                                       Register exclusion3 = no_reg) const;
369 
370   // Push caller saved registers on the stack, and return the number of bytes
371   // by which the stack pointer is adjusted.
372   int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
373                       Register exclusion2 = no_reg,
374                       Register exclusion3 = no_reg);
375   // Restore caller saved registers from the stack, and return the number of
376   // bytes by which the stack pointer is adjusted.
377   int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
378                      Register exclusion2 = no_reg,
379                      Register exclusion3 = no_reg);
380 
381   void pop(Register dst) {
382     Ld(dst, MemOperand(sp, 0));
383     Add64(sp, sp, Operand(kSystemPointerSize));
384   }
385   void Pop(Register dst) { pop(dst); }
386 
387   // Pop two registers. Pops rightmost register first (from lower address).
388   void Pop(Register src1, Register src2) {
389     DCHECK(src1 != src2);
390     Ld(src2, MemOperand(sp, 0 * kSystemPointerSize));
391     Ld(src1, MemOperand(sp, 1 * kSystemPointerSize));
392     Add64(sp, sp, 2 * kSystemPointerSize);
393   }
394 
395   // Pop three registers. Pops rightmost register first (from lower address).
396   void Pop(Register src1, Register src2, Register src3) {
397     Ld(src3, MemOperand(sp, 0 * kSystemPointerSize));
398     Ld(src2, MemOperand(sp, 1 * kSystemPointerSize));
399     Ld(src1, MemOperand(sp, 2 * kSystemPointerSize));
400     Add64(sp, sp, 3 * kSystemPointerSize);
401   }
402 
403   void Pop(uint32_t count = 1) {
404     Add64(sp, sp, Operand(count * kSystemPointerSize));
405   }
406 
407   // Pops multiple values from the stack and loads them into the
408   // registers specified in regs. Pop order is the opposite of MultiPush.
409   void MultiPop(RegList regs);
410   void MultiPopFPU(DoubleRegList regs);
411 
412 #define DEFINE_INSTRUCTION(instr)                          \
413   void instr(Register rd, Register rs, const Operand& rt); \
414   void instr(Register rd, Register rs, Register rt) {      \
415     instr(rd, rs, Operand(rt));                            \
416   }                                                        \
417   void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
418 
419 #define DEFINE_INSTRUCTION2(instr)                                 \
420   void instr(Register rs, const Operand& rt);                      \
421   void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
422   void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
423 
424 #define DEFINE_INSTRUCTION3(instr) void instr(Register rd, int64_t imm);
425 
426   DEFINE_INSTRUCTION(Add32)
427   DEFINE_INSTRUCTION(Add64)
428   DEFINE_INSTRUCTION(Div32)
429   DEFINE_INSTRUCTION(Divu32)
430   DEFINE_INSTRUCTION(Divu64)
431   DEFINE_INSTRUCTION(Mod32)
432   DEFINE_INSTRUCTION(Modu32)
433   DEFINE_INSTRUCTION(Div64)
434   DEFINE_INSTRUCTION(Sub32)
435   DEFINE_INSTRUCTION(Sub64)
436   DEFINE_INSTRUCTION(Mod64)
437   DEFINE_INSTRUCTION(Modu64)
438   DEFINE_INSTRUCTION(Mul32)
439   DEFINE_INSTRUCTION(Mulh32)
440   DEFINE_INSTRUCTION(Mul64)
441   DEFINE_INSTRUCTION(Mulh64)
442   DEFINE_INSTRUCTION2(Div32)
443   DEFINE_INSTRUCTION2(Div64)
444   DEFINE_INSTRUCTION2(Divu32)
445   DEFINE_INSTRUCTION2(Divu64)
446 
447   DEFINE_INSTRUCTION(And)
448   DEFINE_INSTRUCTION(Or)
449   DEFINE_INSTRUCTION(Xor)
450   DEFINE_INSTRUCTION(Nor)
451   DEFINE_INSTRUCTION2(Neg)
452 
453   DEFINE_INSTRUCTION(Slt)
454   DEFINE_INSTRUCTION(Sltu)
455   DEFINE_INSTRUCTION(Sle)
456   DEFINE_INSTRUCTION(Sleu)
457   DEFINE_INSTRUCTION(Sgt)
458   DEFINE_INSTRUCTION(Sgtu)
459   DEFINE_INSTRUCTION(Sge)
460   DEFINE_INSTRUCTION(Sgeu)
461   DEFINE_INSTRUCTION(Seq)
462   DEFINE_INSTRUCTION(Sne)
463 
464   DEFINE_INSTRUCTION(Sll64)
465   DEFINE_INSTRUCTION(Sra64)
466   DEFINE_INSTRUCTION(Srl64)
467   DEFINE_INSTRUCTION(Sll32)
468   DEFINE_INSTRUCTION(Sra32)
469   DEFINE_INSTRUCTION(Srl32)
470 
471   DEFINE_INSTRUCTION2(Seqz)
472   DEFINE_INSTRUCTION2(Snez)
473 
474   DEFINE_INSTRUCTION(Ror)
475   DEFINE_INSTRUCTION(Dror)
476 
477   DEFINE_INSTRUCTION3(Li)
478   DEFINE_INSTRUCTION2(Mv)
479 
480 #undef DEFINE_INSTRUCTION
481 #undef DEFINE_INSTRUCTION2
482 #undef DEFINE_INSTRUCTION3
483 
484   void SmiUntag(Register dst, const MemOperand& src);
485   void SmiUntag(Register dst, Register src) {
486     DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
487     if (COMPRESS_POINTERS_BOOL) {
488       sraiw(dst, src, kSmiShift);
489     } else {
490       srai(dst, src, kSmiShift);
491     }
492   }
493 
494   void SmiUntag(Register reg) { SmiUntag(reg, reg); }
495   void SmiToInt32(Register smi);
496 
497   // Enabled via --debug-code.
498   void AssertNotSmi(Register object,
499                     AbortReason reason = AbortReason::kOperandIsASmi);
500   void AssertSmi(Register object,
501                  AbortReason reason = AbortReason::kOperandIsASmi);
502 
503   int CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments);
504 
505   // Before calling a C-function from generated code, align arguments on stack.
506   // After aligning the frame, non-register arguments must be stored on the
507   // stack, using helper: CFunctionArgumentOperand().
508   // The argument count assumes all arguments are word sized.
509   // Some compilers/platforms require the stack to be aligned when calling
510   // C++ code.
511   // Needs a scratch register to do some arithmetic. This register will be
512   // trashed.
513   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
514                             Register scratch);
515   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
516 
517   // Arguments 1-8 are placed in registers a0 through a7 respectively.
518   // Arguments 9..n are stored to stack
519 
520   // Calls a C function and cleans up the space for arguments allocated
521   // by PrepareCallCFunction. The called function is not allowed to trigger a
522   // garbage collection, since that might move the code and invalidate the
523   // return address (unless this is somehow accounted for by the called
524   // function).
525   void CallCFunction(ExternalReference function, int num_arguments);
526   void CallCFunction(Register function, int num_arguments);
527   void CallCFunction(ExternalReference function, int num_reg_arguments,
528                      int num_double_arguments);
529   void CallCFunction(Register function, int num_reg_arguments,
530                      int num_double_arguments);
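
  // Usage sketch (illustrative; |ref| is an assumed ExternalReference to a
  // two-argument C function, and a0/a1 already hold its arguments):
  //   masm->PrepareCallCFunction(2, t0);
  //   masm->CallCFunction(ref, 2);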
531   void MovFromFloatResult(DoubleRegister dst);
532   void MovFromFloatParameter(DoubleRegister dst);
533 
534   // These functions abstract parameter passing for the three different ways
535   // we call C functions from generated code.
536   void MovToFloatParameter(DoubleRegister src);
537   void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
538   void MovToFloatResult(DoubleRegister src);
539 
540   // See comments at the beginning of Builtins::Generate_CEntry.
541   inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
542   inline void PrepareCEntryFunction(const ExternalReference& ref) {
543     li(a1, ref);
544   }
545 
546   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
547                      Label* condition_met);
548 #undef COND_ARGS
549 
550   // Performs a truncating conversion of a floating point number as used by
551   // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
552   // Exits with 'result' holding the answer.
553   void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
554                          DoubleRegister double_input, StubCallMode stub_mode);
555 
556   void CompareI(Register rd, Register rs, const Operand& rt, Condition cond);
557 
558   void LoadZeroIfConditionNotZero(Register dest, Register condition);
559   void LoadZeroIfConditionZero(Register dest, Register condition);
560 
561   void SignExtendByte(Register rd, Register rs) {
562     slli(rd, rs, 64 - 8);
563     srai(rd, rd, 64 - 8);
564   }
565 
566   void SignExtendShort(Register rd, Register rs) {
567     slli(rd, rs, 64 - 16);
568     srai(rd, rd, 64 - 16);
569   }
570 
571   void SignExtendWord(Register rd, Register rs) { sext_w(rd, rs); }
572   void ZeroExtendWord(Register rd, Register rs) {
573     slli(rd, rs, 32);
574     srli(rd, rd, 32);
575   }
576 
577   void Clz32(Register rd, Register rs);
578   void Clz64(Register rd, Register rs);
579   void Ctz32(Register rd, Register rs);
580   void Ctz64(Register rd, Register rs);
581   void Popcnt32(Register rd, Register rs, Register scratch);
582   void Popcnt64(Register rd, Register rs, Register scratch);
583 
584   // The bit field starting at bit pos and extending for size bits is
585   // extracted from rs and stored zero/sign-extended and right-justified in rt.
586   void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size,
587                    bool sign_extend = false);
588   void ExtractBits(Register dest, Register source, Register pos, int size,
589                    bool sign_extend = false) {
590     sra(dest, source, pos);
591     ExtractBits(dest, dest, 0, size, sign_extend);
592   }
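
  // Worked example (illustrative): ExtractBits(rt, rs, 3, 4) extracts bits
  // [3..6] of rs; for rs = 0b1011'0100 the result is 0b0110 (decimal 6).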
593 
594   // Insert bits [0, size) of source to bits [pos, pos+size) of dest
595   void InsertBits(Register dest, Register source, Register pos, int size);
596 
597   void Neg_s(FPURegister fd, FPURegister fs);
598   void Neg_d(FPURegister fd, FPURegister fs);
599 
600   // Change endianness
601   void ByteSwap(Register dest, Register src, int operand_size,
602                 Register scratch);
603 
604   void Clear_if_nan_d(Register rd, FPURegister fs);
605   void Clear_if_nan_s(Register rd, FPURegister fs);
606   // Convert single to unsigned word.
607   void Trunc_uw_s(Register rd, FPURegister fs, Register result = no_reg);
608 
609   // helper functions for unaligned load/store
610   template <int NBYTES, bool IS_SIGNED>
611   void UnalignedLoadHelper(Register rd, const MemOperand& rs);
612   template <int NBYTES>
613   void UnalignedStoreHelper(Register rd, const MemOperand& rs,
614                             Register scratch_other = no_reg);
615 
616   template <int NBYTES>
617   void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
618                             Register scratch);
619   template <int NBYTES>
620   void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs,
621                              Register scratch);
622 
623   template <typename Reg_T, typename Func>
624   void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
625   template <typename Reg_T, typename Func>
626   void AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator);
627 
628   template <int NBYTES, bool LOAD_SIGNED>
629   void LoadNBytes(Register rd, const MemOperand& rs, Register scratch);
630   template <int NBYTES, bool LOAD_SIGNED>
631   void LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0,
632                                     Register scratch1);
633   // load/store macros
634   void Ulh(Register rd, const MemOperand& rs);
635   void Ulhu(Register rd, const MemOperand& rs);
636   void Ush(Register rd, const MemOperand& rs);
637 
638   void Ulw(Register rd, const MemOperand& rs);
639   void Ulwu(Register rd, const MemOperand& rs);
640   void Usw(Register rd, const MemOperand& rs);
641 
642   void Uld(Register rd, const MemOperand& rs);
643   void Usd(Register rd, const MemOperand& rs);
644 
645   void ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch);
646   void UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch);
647 
648   void ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch);
649   void UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch);
650 
651   void Lb(Register rd, const MemOperand& rs);
652   void Lbu(Register rd, const MemOperand& rs);
653   void Sb(Register rd, const MemOperand& rs);
654 
655   void Lh(Register rd, const MemOperand& rs);
656   void Lhu(Register rd, const MemOperand& rs);
657   void Sh(Register rd, const MemOperand& rs);
658 
659   void Lw(Register rd, const MemOperand& rs);
660   void Lwu(Register rd, const MemOperand& rs);
661   void Sw(Register rd, const MemOperand& rs);
662 
663   void LoadFloat(FPURegister fd, const MemOperand& src);
664   void StoreFloat(FPURegister fs, const MemOperand& dst);
665 
666   void LoadDouble(FPURegister fd, const MemOperand& src);
667   void StoreDouble(FPURegister fs, const MemOperand& dst);
668 
669   void Ll(Register rd, const MemOperand& rs);
670   void Sc(Register rd, const MemOperand& rs);
671 
672   void Lld(Register rd, const MemOperand& rs);
673   void Scd(Register rd, const MemOperand& rs);
674 
675   void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2);
676   void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2);
677   void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2);
678   void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2);
679   template <typename F>
680   void FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2,
681                          MaxMinKind kind);
682 
683   bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
684   bool IsSingleZeroRegSet() { return has_single_zero_reg_set_; }
685 
686   inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
687 
688   inline void Move(Register dst, Register src) {
689     if (dst != src) {
690       mv(dst, src);
691     }
692   }
693 
694   inline void MoveDouble(FPURegister dst, FPURegister src) {
695     if (dst != src) fmv_d(dst, src);
696   }
697 
698   inline void MoveFloat(FPURegister dst, FPURegister src) {
699     if (dst != src) fmv_s(dst, src);
700   }
701 
702   inline void Move(FPURegister dst, FPURegister src) { MoveDouble(dst, src); }
703 
704   inline void Move(Register dst_low, Register dst_high, FPURegister src) {
705     fmv_x_d(dst_high, src);
706     fmv_x_w(dst_low, src);
707     srli(dst_high, dst_high, 32);
708   }
709 
710   inline void Move(Register dst, FPURegister src) { fmv_x_d(dst, src); }
711 
712   inline void Move(FPURegister dst, Register src) { fmv_d_x(dst, src); }
713 
714   // Extract sign-extended word from high-half of FPR to GPR
715   inline void ExtractHighWordFromF64(Register dst_high, FPURegister src) {
716     fmv_x_d(dst_high, src);
717     srai(dst_high, dst_high, 32);
718   }
719 
720   // Insert low-word from GPR (src_high) to the high-half of FPR (dst)
721   void InsertHighWordF64(FPURegister dst, Register src_high);
722 
723   // Extract sign-extended word from low-half of FPR to GPR
724   inline void ExtractLowWordFromF64(Register dst_low, FPURegister src) {
725     fmv_x_w(dst_low, src);
726   }
727 
728   // Insert low-word from GPR (src_low) to the low-half of FPR (dst)
729   void InsertLowWordF64(FPURegister dst, Register src_low);
730 
731   void LoadFPRImmediate(FPURegister dst, float imm) {
732     LoadFPRImmediate(dst, bit_cast<uint32_t>(imm));
733   }
734   void LoadFPRImmediate(FPURegister dst, double imm) {
735     LoadFPRImmediate(dst, bit_cast<uint64_t>(imm));
736   }
737   void LoadFPRImmediate(FPURegister dst, uint32_t src);
738   void LoadFPRImmediate(FPURegister dst, uint64_t src);
739 
740   // AddOverflow64 sets overflow register to a negative value if
741   // overflow occurred; otherwise it is zero or positive.
742   void AddOverflow64(Register dst, Register left, const Operand& right,
743                      Register overflow);
744   // SubOverflow64 sets overflow register to a negative value if
745   // overflow occurred; otherwise it is zero or positive.
746   void SubOverflow64(Register dst, Register left, const Operand& right,
747                      Register overflow);
748   // MulOverflow32 sets the overflow register to zero if no overflow occurred.
749   void MulOverflow32(Register dst, Register left, const Operand& right,
750                      Register overflow);
751 
752   // MIPS-style 32-bit unsigned mulh
753   void Mulhu32(Register dst, Register left, const Operand& right,
754                Register left_zero, Register right_zero);
755 
756   // Number of instructions needed for calculation of switch table entry address
757   static const int kSwitchTablePrologueSize = 6;
758 
759   // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
760   // functor/function with 'Label *func(size_t index)' declaration.
761   template <typename Func>
762   void GenerateSwitchTable(Register index, size_t case_count,
763                            Func GetLabelFunction);
764 
765   // Load an object from the root table.
766   void LoadRoot(Register destination, RootIndex index) final;
767   void LoadRoot(Register destination, RootIndex index, Condition cond,
768                 Register src1, const Operand& src2);
769 
770   void LoadMap(Register destination, Register object);
771 
772   // If the value is a NaN, canonicalize the value; otherwise, do nothing.
773   void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
774 
775   // ---------------------------------------------------------------------------
776   // FPU macros. These do not handle special cases like NaN or +- inf.
777 
778   // Convert unsigned word to double.
779   void Cvt_d_uw(FPURegister fd, Register rs);
780 
781   // convert signed word to double.
782   void Cvt_d_w(FPURegister fd, Register rs);
783 
784   // Convert unsigned long to double.
785   void Cvt_d_ul(FPURegister fd, Register rs);
786 
787   // Convert unsigned word to float.
788   void Cvt_s_uw(FPURegister fd, Register rs);
789 
790   // convert signed word to float.
791   void Cvt_s_w(FPURegister fd, Register rs);
792 
793   // Convert unsigned long to float.
794   void Cvt_s_ul(FPURegister fd, Register rs);
795 
796   // Convert double to unsigned word.
797   void Trunc_uw_d(Register rd, FPURegister fs, Register result = no_reg);
798 
799   // Convert double to signed word.
800   void Trunc_w_d(Register rd, FPURegister fs, Register result = no_reg);
801 
802   // Convert single to signed word.
803   void Trunc_w_s(Register rd, FPURegister fs, Register result = no_reg);
804 
805   // Convert double to unsigned long.
806   void Trunc_ul_d(Register rd, FPURegister fs, Register result = no_reg);
807 
808   // Convert double to signed long.
809   void Trunc_l_d(Register rd, FPURegister fs, Register result = no_reg);
810 
811   // Convert single to unsigned long.
812   void Trunc_ul_s(Register rd, FPURegister fs, Register result = no_reg);
813 
814   // Convert single to signed long.
815   void Trunc_l_s(Register rd, FPURegister fs, Register result = no_reg);
816 
817   // Round single to signed word.
818   void Round_w_s(Register rd, FPURegister fs, Register result = no_reg);
819 
820   // Round double to signed word.
821   void Round_w_d(Register rd, FPURegister fs, Register result = no_reg);
822 
823   // Ceil single to signed word.
824   void Ceil_w_s(Register rd, FPURegister fs, Register result = no_reg);
825 
826   // Ceil double to signed word.
827   void Ceil_w_d(Register rd, FPURegister fs, Register result = no_reg);
828 
829   // Floor single to signed word.
830   void Floor_w_s(Register rd, FPURegister fs, Register result = no_reg);
831 
832   // Floor double to signed word.
833   void Floor_w_d(Register rd, FPURegister fs, Register result = no_reg);
834 
835   // Round double functions
836   void Trunc_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
837   void Round_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
838   void Floor_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
839   void Ceil_d_d(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
840 
841   // Round float functions
842   void Trunc_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
843   void Round_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
844   void Floor_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
845   void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
846 
847   void Ceil_f(VRegister dst, VRegister src, Register scratch,
848               VRegister v_scratch);
849 
850   void Ceil_d(VRegister dst, VRegister src, Register scratch,
851               VRegister v_scratch);
852 
853   void Floor_f(VRegister dst, VRegister src, Register scratch,
854                VRegister v_scratch);
855   void Floor_d(VRegister dst, VRegister src, Register scratch,
856                VRegister v_scratch);
857   void Trunc_f(VRegister dst, VRegister src, Register scratch,
858                VRegister v_scratch);
859   void Trunc_d(VRegister dst, VRegister src, Register scratch,
860                VRegister v_scratch);
861   void Round_f(VRegister dst, VRegister src, Register scratch,
862                VRegister v_scratch);
863   void Round_d(VRegister dst, VRegister src, Register scratch,
864                VRegister v_scratch);
865 
866   // -------------------------------------------------------------------------
867   // Smi utilities.
868 
869   void SmiTag(Register dst, Register src) {
870     STATIC_ASSERT(kSmiTag == 0);
871     if (SmiValuesAre32Bits()) {
872       // Smi goes to upper 32
873       slli(dst, src, 32);
874     } else {
875       DCHECK(SmiValuesAre31Bits());
876       // Smi is shifted left by 1
877       Add32(dst, src, src);
878     }
879   }
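
  // Worked example (illustrative, SmiValuesAre32Bits): SmiTag turns the
  // integer 5 into 5 << 32 = 0x0000'0005'0000'0000; SmiUntag shifts it back.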
880 
881   void SmiTag(Register reg) { SmiTag(reg, reg); }
882 
883   // Jump if the register contains a smi.
884   void JumpIfSmi(Register value, Label* smi_label);
885 
886   void JumpIfEqual(Register a, int32_t b, Label* dest) {
887     Branch(dest, eq, a, Operand(b));
888   }
889 
890   void JumpIfLessThan(Register a, int32_t b, Label* dest) {
891     Branch(dest, lt, a, Operand(b));
892   }
893 
894   // Push a standard frame, consisting of ra, fp, context and JS function.
895   void PushStandardFrame(Register function_reg);
896 
897   // Get the actual activation frame alignment for target environment.
898   static int ActivationFrameAlignment();
899 
900   // Calculate a scaled address (rd) as rt + (rs << sa).
901   void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa);
902 
903   // Compute the start of the generated instruction stream from the current PC.
904   // This is an alternative to embedding the {CodeObject} handle as a reference.
905   void ComputeCodeStartAddress(Register dst);
906 
907   // Control-flow integrity:
908 
909   // Define a function entrypoint. This doesn't emit any code for this
910   // architecture, as control-flow integrity is not supported for it.
911   void CodeEntry() {}
912   // Define an exception handler.
913   void ExceptionHandler() {}
914   // Define an exception handler and bind a label.
915   void BindExceptionHandler(Label* label) { bind(label); }
916 
917   // ---------------------------------------------------------------------------
918   // Pointer compression Support
919 
920   // Loads a field containing a HeapObject and decompresses it if pointer
921   // compression is enabled.
922   void LoadTaggedPointerField(const Register& destination,
923                               const MemOperand& field_operand);
924 
925   // Loads a field containing any tagged value and decompresses it if necessary.
926   void LoadAnyTaggedField(const Register& destination,
927                           const MemOperand& field_operand);
928 
929   // Loads a field containing a tagged signed value and decompresses it if
930   // necessary.
931   void LoadTaggedSignedField(const Register& destination,
932                              const MemOperand& field_operand);
933 
934   // Loads a field containing a smi value and untags it.
935   void SmiUntagField(Register dst, const MemOperand& src);
936 
937   // Compresses and stores tagged value to given on-heap location.
938   void StoreTaggedField(const Register& value,
939                         const MemOperand& dst_field_operand);
940 
941   void DecompressTaggedSigned(const Register& destination,
942                               const MemOperand& field_operand);
943   void DecompressTaggedPointer(const Register& destination,
944                                const MemOperand& field_operand);
945   void DecompressTaggedPointer(const Register& destination,
946                                const Register& source);
947   void DecompressAnyTagged(const Register& destination,
948                            const MemOperand& field_operand);
949   void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
950     if (COMPRESS_POINTERS_BOOL) {
951       Sub32(rd, rs1, rs2);
952     } else {
953       Sub64(rd, rs1, rs2);
954     }
955   }
956   // Wasm into RVV
957   void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
958                           Vlmul lmul) {
959     VU.set(kScratchReg, sew, lmul);
960     VRegister Vsrc = idx != 0 ? kSimd128ScratchReg : src;
961     if (idx != 0) {
962       vslidedown_vi(kSimd128ScratchReg, src, idx);
963     }
964     vmv_xs(dst, Vsrc);
965   }
966 
967   void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
968                  Vlmul lmul);
969 
970   void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
971                  Vlmul lmul);
972   void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
973                   Vlmul lmul);
974   void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
975                   Vlmul lmul);
976   void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
977                   Vlmul lmul);
978   void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
979                   Vlmul lmul);
980   void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
981 
982   void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
983   void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
984 
985  protected:
986   inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
987   inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
988 
989  private:
990   bool has_double_zero_reg_set_ = false;
991   bool has_single_zero_reg_set_ = false;
992 
993   // Performs a truncating conversion of a floating point number as used by
994   // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
995   // succeeds, otherwise falls through if result is saturated. On return
996   // 'result' either holds answer, or is clobbered on fall through.
997   void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
998                                   Label* done);
999 
1000   void CallCFunctionHelper(Register function, int num_reg_arguments,
1001                            int num_double_arguments);
1002 
1003   // TODO(RISCV) Reorder parameters so out parameters come last.
1004   bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
1005   bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
1006                        Register* scratch, const Operand& rt);
1007 
1008   void BranchShortHelper(int32_t offset, Label* L);
1009   bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
1010                          const Operand& rt);
1011   bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
1012                         const Operand& rt);
1013 
1014   void BranchAndLinkShortHelper(int32_t offset, Label* L);
1015   void BranchAndLinkShort(int32_t offset);
1016   void BranchAndLinkShort(Label* L);
1017   bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
1018                                 Register rs, const Operand& rt);
1019   bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
1020                                Register rs, const Operand& rt);
1021   void BranchAndLinkLong(Label* L);
1022 
1023   template <typename F_TYPE>
1024   void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
1025                    RoundingMode mode);
1026 
1027   template <typename F>
1028   void RoundHelper(VRegister dst, VRegister src, Register scratch,
1029                    VRegister v_scratch, RoundingMode frm);
1030 
1031   template <typename TruncFunc>
1032   void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
1033                                    TruncFunc trunc);
1034 
1035   // Push a fixed frame, consisting of ra, fp.
1036   void PushCommonFrame(Register marker_reg = no_reg);
1037 };
1038 
1039 // MacroAssembler implements a collection of frequently used macros.
1040 class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
1041  public:
1042   using TurboAssembler::TurboAssembler;
1043 
1044   // It assumes that the arguments are located below the stack pointer.
1045   // argc is the number of arguments not including the receiver.
1046   // TODO(victorgomes): Remove this function once we stick with the reversed
1047   // arguments order.
1048   void LoadReceiver(Register dest, Register argc) {
1049     Ld(dest, MemOperand(sp, 0));
1050   }
1051 
1052   void StoreReceiver(Register rec, Register argc, Register scratch) {
1053     Sd(rec, MemOperand(sp, 0));
1054   }
1055 
1056   bool IsNear(Label* L, Condition cond, int rs_reg);
1057 
1058   // Swap two registers.  If the scratch register is omitted then a slightly
1059   // less efficient form using xor instead of mov is emitted.
1060   void Swap(Register reg1, Register reg2, Register scratch = no_reg);
1061 
1062   void PushRoot(RootIndex index) {
1063     UseScratchRegisterScope temps(this);
1064     Register scratch = temps.Acquire();
1065     LoadRoot(scratch, index);
1066     Push(scratch);
1067   }
1068 
1069   // Compare the object in a register to a value and jump if they are equal.
1070   void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
1071     UseScratchRegisterScope temps(this);
1072     Register scratch = temps.Acquire();
1073     LoadRoot(scratch, index);
1074     Branch(if_equal, eq, with, Operand(scratch));
1075   }
1076 
1077   // Compare the object in a register to a value and jump if they are not equal.
1078   void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
1079     UseScratchRegisterScope temps(this);
1080     Register scratch = temps.Acquire();
1081     LoadRoot(scratch, index);
1082     Branch(if_not_equal, ne, with, Operand(scratch));
1083   }
1084 
1085   // Checks if value is in range [lower_limit, higher_limit] using a single
1086   // comparison.
1087   void JumpIfIsInRange(Register value, unsigned lower_limit,
1088                        unsigned higher_limit, Label* on_in_range);
1089 
1090   // ---------------------------------------------------------------------------
1091   // GC Support
1092 
1093   // Notify the garbage collector that we wrote a pointer into an object.
1094   // |object| is the object being stored into, |value| is the object being
1095   // stored.  value and scratch registers are clobbered by the operation.
1096   // The offset is the offset from the start of the object, not the offset from
1097   // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
1098   void RecordWriteField(
1099       Register object, int offset, Register value, RAStatus ra_status,
1100       SaveFPRegsMode save_fp,
1101       RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1102       SmiCheck smi_check = SmiCheck::kInline);
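
  // Usage sketch (illustrative; |object| and |value| are assumed registers,
  // |offset| an assumed field offset, and ra has already been saved):
  //   masm->RecordWriteField(object, offset, value, kRAHasBeenSaved,
  //                          SaveFPRegsMode::kIgnore);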
1103 
1104   // For a given |object| notify the garbage collector that the slot |address|
1105   // has been written.  |value| is the object being stored. The value and
1106   // address registers are clobbered by the operation.
1107   void RecordWrite(
1108       Register object, Operand offset, Register value, RAStatus ra_status,
1109       SaveFPRegsMode save_fp,
1110       RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
1111       SmiCheck smi_check = SmiCheck::kInline);
1112 
1113   // void Pref(int32_t hint, const MemOperand& rs);
1114 
1115   // ---------------------------------------------------------------------------
1116   // Pseudo-instructions.
1117 
1118   void LoadWordPair(Register rd, const MemOperand& rs);
1119   void StoreWordPair(Register rd, const MemOperand& rs);
1120 
1121   void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1122   void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1123   void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1124   void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
1125 
1126   // Enter exit frame.
1127   // argc - argument count to be dropped by LeaveExitFrame.
1128   // save_doubles - saves FPU registers on stack.
1129   // stack_space - extra stack space.
1130   void EnterExitFrame(bool save_doubles, int stack_space = 0,
1131                       StackFrame::Type frame_type = StackFrame::EXIT);
1132 
1133   // Leave the current exit frame.
1134   void LeaveExitFrame(bool save_doubles, Register arg_count,
1135                       bool do_return = NO_EMIT_RETURN,
1136                       bool argument_count_is_length = false);
1137 
1138   // Make sure the stack is aligned. Only emits code in debug mode.
1139   void AssertStackIsAligned();
1140 
1141   // Load the global proxy from the current context.
1142   void LoadGlobalProxy(Register dst) {
1143     LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
1144   }
1145 
1146   void LoadNativeContextSlot(Register dst, int index);
1147 
1148   // Load the initial map from the global function. The registers
1150   // function and map can be the same; function is then overwritten.
1150   void LoadGlobalFunctionInitialMap(Register function, Register map,
1151                                     Register scratch);
1152 
1153   // -------------------------------------------------------------------------
1154   // JavaScript invokes.
1155 
1156   // Invoke the JavaScript function code by either calling or jumping.
1157   void InvokeFunctionCode(Register function, Register new_target,
1158                           Register expected_parameter_count,
1159                           Register actual_parameter_count, InvokeType type);
1160 
1161   // On function call, call into the debugger if necessary.
1162   void CheckDebugHook(Register fun, Register new_target,
1163                       Register expected_parameter_count,
1164                       Register actual_parameter_count);
1165 
1166   // Invoke the JavaScript function in the given register. Changes the
1167   // current context to the context in the function before invoking.
1168   void InvokeFunctionWithNewTarget(Register function, Register new_target,
1169                                    Register actual_parameter_count,
1170                                    InvokeType type);
1171   void InvokeFunction(Register function, Register expected_parameter_count,
1172                       Register actual_parameter_count, InvokeType type);
1173 
1174   // Exception handling.
1175 
1176   // Push a new stack handler and link into stack handler chain.
1177   void PushStackHandler();
1178 
1179   // Unlink the stack handler on top of the stack from the stack handler chain.
1180   // Must preserve the result register.
1181   void PopStackHandler();
1182 
1183   // -------------------------------------------------------------------------
1184   // Support functions.
1185 
1186   void GetObjectType(Register function, Register map, Register type_reg);
1187 
1188   void GetInstanceTypeRange(Register map, Register type_reg,
1189                             InstanceType lower_limit, Register range);
1190 
1191   // -------------------------------------------------------------------------
1192   // Runtime calls.
1193 
1194   // Call a runtime routine.
1195   void CallRuntime(const Runtime::Function* f, int num_arguments,
1196                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
1197 
1198   // Convenience function: Same as above, but takes the fid instead.
1199   void CallRuntime(Runtime::FunctionId fid,
1200                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1201     const Runtime::Function* function = Runtime::FunctionForId(fid);
1202     CallRuntime(function, function->nargs, save_doubles);
1203   }
1204 
1205   // Convenience function: Same as above, but takes the fid instead.
1206   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
1207                    SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
1208     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
1209   }
1210 
1211   // Convenience function: tail call a runtime routine (jump).
1212   void TailCallRuntime(Runtime::FunctionId fid);
1213 
1214   // Jump to the builtin routine.
1215   void JumpToExternalReference(const ExternalReference& builtin,
1216                                bool builtin_exit_frame = false);
1217 
1218   // Generates a trampoline to jump to the off-heap instruction stream.
1219   void JumpToOffHeapInstructionStream(Address entry);
1220 
1221   // ---------------------------------------------------------------------------
1222   // In-place weak references.
1223   void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
1224 
1225   // -------------------------------------------------------------------------
1226   // StatsCounter support.
1227 
1228   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1229                         Register scratch2) {
1230     if (!FLAG_native_code_counters) return;
1231     EmitIncrementCounter(counter, value, scratch1, scratch2);
1232   }
1233   void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
1234                             Register scratch2);
1235   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1236                         Register scratch2) {
1237     if (!FLAG_native_code_counters) return;
1238     EmitDecrementCounter(counter, value, scratch1, scratch2);
1239   }
1240   void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
1241                             Register scratch2);
1242 
1243   // -------------------------------------------------------------------------
1244   // Stack limit utilities
1245 
1246   enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
1247   void LoadStackLimit(Register destination, StackLimitKind kind);
1248   void StackOverflowCheck(Register num_args, Register scratch1,
1249                           Register scratch2, Label* stack_overflow,
1250                           Label* done = nullptr);
1251 
1252   // Left-shifted from int32 equivalent of Smi.
1253   void SmiScale(Register dst, Register src, int scale) {
1254     if (SmiValuesAre32Bits()) {
1255       // The int portion is upper 32-bits of 64-bit word.
1256       srai(dst, src, (kSmiShift - scale) & 0x3F);
1257     } else {
1258       DCHECK(SmiValuesAre31Bits());
1259       DCHECK_GE(scale, kSmiTagSize);
1260       slliw(dst, src, scale - kSmiTagSize);
1261     }
1262   }
1263 
1264   // Test if the register contains a smi.
1265   inline void SmiTst(Register value, Register scratch) {
1266     And(scratch, value, Operand(kSmiTagMask));
1267   }
1268 
1269   enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
1270   enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
1271   void DropArguments(Register count, ArgumentsCountType type,
1272                      ArgumentsCountMode mode, Register scratch = no_reg);
1273   void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
1274                                        ArgumentsCountType type,
1275                                        ArgumentsCountMode mode,
1276                                        Register scratch = no_reg);
1277 
1278   // Jump if the register contains a non-smi.
1279   void JumpIfNotSmi(Register value, Label* not_smi_label);
1280 
1281 
1282   // Abort execution if argument is not a Constructor, enabled via --debug-code.
1283   void AssertConstructor(Register object);
1284 
1285   // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1286   void AssertFunction(Register object);
1287 
1288   // Abort execution if argument is not a callable JSFunction, enabled via
1289   // --debug-code.
1290   void AssertCallableFunction(Register object);
1291 
1292   // Abort execution if argument is not a JSBoundFunction,
1293   // enabled via --debug-code.
1294   void AssertBoundFunction(Register object);
1295 
1296   // Abort execution if argument is not a JSGeneratorObject (or subclass),
1297   // enabled via --debug-code.
1298   void AssertGeneratorObject(Register object);
1299 
1300   // Abort execution if argument is not undefined or an AllocationSite, enabled
1301   // via --debug-code.
1302   void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1303 
1304   template <typename Field>
1305   void DecodeField(Register dst, Register src) {
1306     ExtractBits(dst, src, Field::kShift, Field::kSize);
1307   }
1308 
1309   template <typename Field>
1310   void DecodeField(Register reg) {
1311     DecodeField<Field>(reg, reg);
1312   }
1313 
1314  private:
1315   // Helper functions for generating invokes.
1316   void InvokePrologue(Register expected_parameter_count,
1317                       Register actual_parameter_count, Label* done,
1318                       InvokeType type);
1319 
1320   // Compute memory operands for safepoint stack slots.
1321   static int SafepointRegisterStackIndex(int reg_code);
1322 
1323   // Needs access to SafepointRegisterStackIndex for compiled frame
1324   // traversal.
1325   friend class CommonFrame;
1326 
1327   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
1328 };
1329 
1330 template <typename Func>
1331 void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
1332                                          Func GetLabelFunction) {
1333   // Ensure that dd-ed labels following this instruction use 8-byte aligned
1334   // addresses.
1335   BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
1336                          kSwitchTablePrologueSize);
1337   UseScratchRegisterScope temps(this);
1338   Register scratch = temps.Acquire();
1339   Register scratch2 = temps.Acquire();
1340 
1341   Align(8);
1342   // Load the address from the jump table at index and jump to it
1343   auipc(scratch, 0);  // Load the current PC into scratch
1344   slli(scratch2, index,
1345        kSystemPointerSizeLog2);  // scratch2 = offset of indexth entry
1346   add(scratch2, scratch2,
1347       scratch);  // scratch2 = (saved PC) + (offset of indexth entry)
1348   ld(scratch2, scratch2,
1349      6 * kInstrSize);  // Add the size of these 6 instructions to the
1350                        // offset, then load
1351   jr(scratch2);        // Jump to the address loaded from the table
1352   nop();               // For 16-byte alignment
1353   for (size_t index = 0; index < case_count; ++index) {
1354     dd(GetLabelFunction(index));
1355   }
1356 }
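
// Usage sketch (illustrative; |index| is an assumed register and |case_labels|
// an assumed array of labels bound elsewhere by the caller):
//   masm->GenerateSwitchTable(index, 3,
//                             [&](size_t i) { return &case_labels[i]; });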
1357 
1358 #define ACCESS_MASM(masm) masm->
1359 
1360 }  // namespace internal
1361 }  // namespace v8
1362 
1363 #endif  // V8_CODEGEN_RISCV64_MACRO_ASSEMBLER_RISCV64_H_
1364