// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_

#include "src/assembler.h"
#include "src/globals.h"
#include "src/mips/assembler-mips.h"
#include "src/turbo-assembler.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = v0;
constexpr Register kReturnRegister1 = v1;
constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
constexpr Register kInterpreterDispatchTableRegister = t6;

constexpr Register kJavaScriptCallArgCountRegister = a0;
constexpr Register kJavaScriptCallCodeStartRegister = a2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;

constexpr Register kOffHeapTrampolineRegister = at;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
constexpr Register kWasmInstanceRegister = a0;

// Forward declarations.
enum class AbortReason : uint8_t;

// Reserved Register Usage Summary.
//
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
//
// The programmer should know that the MacroAssembler may clobber these three,
// but won't touch other registers except in special cases.
//
// Per the MIPS ABI, register t9 must be used for indirect function calls
// via the 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
// updating the gp register for position-independent code. Whenever MIPS
// generated code calls C code, it must do so through the t9 register.
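// As an illustration only (the MacroAssembler emits the exact sequence), a
// call into C code therefore takes the form:
//   li(t9, Operand(function_reference));  // 'function_reference' is a
//                                         // placeholder name.
//   Call(t9);                             // Emits 'jalr t9' plus delay slot.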

// Flags used for LeaveExitFrame function.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,
  NO_EMIT_RETURN = false
};

// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,
  PROTECT
};

// Flags used for the li macro-assembler function.
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
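// For example, a value that back-patching code will later rewrite should be
// loaded with the fixed-size form (register and operand purely illustrative):
//   li(scratch, Operand(some_patchable_value), CONSTANT_SIZE);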

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}

// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
inline MemOperand CFunctionArgumentOperand(int index) {
  DCHECK_GT(index, kCArgSlotCount);
  // Argument 5 takes the slot just past the four Arg-slots.
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
  return MemOperand(sp, offset);
}
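// For example, CFunctionArgumentOperand(5) resolves to
// MemOperand(sp, kCArgsSlotsSize), i.e. the first word past the four reserved
// argument slots, and each subsequent index advances by one kPointerSize.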

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
    // Out-of-line constant pool not implemented on mips.
    UNREACHABLE();
  }
  void LeaveFrame(StackFrame::Type type);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
    Addu(kRootRegister, kRootRegister, kRootRegisterBias);
  }

  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for example
  // in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Prefer b(Label) for code generation.
  void jmp(Label* L) { Branch(L); }

  // -------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);

  // Like Assert(), but always enabled.
  void Check(Condition cc, AbortReason reason, Register rs, Operand rt);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  inline bool AllowThisStubCall(CodeStub* stub);

  // Arguments macros.
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
#define COND_ARGS cond, r1, r2

  // Cases when relocation is not needed.
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type)           \
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
  inline void Name(BranchDelaySlot bd, target_type target) {   \
    Name(target, bd);                                          \
  }                                                            \
  void Name(target_type target,                                \
            COND_TYPED_ARGS,                                   \
            BranchDelaySlot bd = PROTECT);                     \
  inline void Name(BranchDelaySlot bd,                         \
                   target_type target,                         \
                   COND_TYPED_ARGS) {                          \
    Name(target, COND_ARGS, bd);                               \
  }

#define DECLARE_BRANCH_PROTOTYPES(Name)   \
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
  DECLARE_NORELOC_PROTOTYPE(Name, int32_t)

  DECLARE_BRANCH_PROTOTYPES(Branch)
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
  DECLARE_BRANCH_PROTOTYPES(BranchShort)

#undef DECLARE_BRANCH_PROTOTYPES
#undef COND_TYPED_ARGS
#undef COND_ARGS

  // Floating point branches
  void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
    CompareF(S, cc, cmp1, cmp2);
  }

  void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
    CompareIsNanF(S, cmp1, cmp2);
  }

  void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
    CompareF(D, cc, cmp1, cmp2);
  }

  void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
    CompareIsNanF(D, cmp1, cmp2);
  }

  void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT);
  void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT);

  void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT);
  void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT);

  // MSA Branches
  void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
                 MSARegister wt, BranchDelaySlot bd = PROTECT);

  void Branch(Label* L, Condition cond, Register rs, Heap::RootListIndex index,
              BranchDelaySlot bdslot = PROTECT);

  // Load an int32 into the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
  void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  void Jump(Register target, int16_t offset = 0, COND_ARGS);
  void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
  void Jump(Register target, const Operand& offset, COND_ARGS);
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Register target, int16_t offset = 0, COND_ARGS);
  void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            COND_ARGS);
  void Call(Label* target);

  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
    Call(target, rmode);
  }

  void Ret(COND_ARGS);
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
                  Register rs = zero_reg,
                  const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count,
            Condition cond = cc_always,
            Register reg = no_reg,
            const Operand& op = Operand(no_reg));

  // Trivial case of DropAndRet that utilizes the delay slot and only emits
  // 2 instructions.
  void DropAndRet(int drop);

  void DropAndRet(int drop,
                  Condition cond,
                  Register reg,
                  const Operand& op);

  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  void Push(Register src) { push(src); }
  void Push(Handle<HeapObject> handle);
  void Push(Smi* smi);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    Subu(sp, sp, Operand(5 * kPointerSize));
    sw(src1, MemOperand(sp, 4 * kPointerSize));
    sw(src2, MemOperand(sp, 3 * kPointerSize));
    sw(src3, MemOperand(sp, 2 * kPointerSize));
    sw(src4, MemOperand(sp, 1 * kPointerSize));
    sw(src5, MemOperand(sp, 0 * kPointerSize));
  }

  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);

  // Push multiple registers on the stack.
  // Registers are saved in numerical order, with higher numbered registers
  // saved in higher memory addresses.
  void MultiPush(RegList regs);
  void MultiPushFPU(RegList regs);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    DCHECK(src1 != src2);
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    lw(src3, MemOperand(sp, 0 * kPointerSize));
    lw(src2, MemOperand(sp, 1 * kPointerSize));
    lw(src1, MemOperand(sp, 2 * kPointerSize));
    Addu(sp, sp, 3 * kPointerSize);
  }

  void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); }

  // Pops multiple values from the stack and loads them into the registers
  // specified in regs. Pop order is the opposite of MultiPush.
  void MultiPop(RegList regs);
  void MultiPopFPU(RegList regs);

  // Load Scaled Address instructions. Parameter sa (shift argument) must be
  // in the range [1, 31]. On pre-r6 architectures the scratch register
  // may be clobbered.
  void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
           Register scratch = at);
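  // As used by GenerateSwitchTable() below, Lsa(rd, rs, rt, sa) computes
  // rd = rs + (rt << sa), i.e. a base register plus a scaled index.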

#define DEFINE_INSTRUCTION(instr)                          \
  void instr(Register rd, Register rs, const Operand& rt); \
  void instr(Register rd, Register rs, Register rt) {      \
    instr(rd, rs, Operand(rt));                            \
  }                                                        \
  void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }

#define DEFINE_INSTRUCTION2(instr)                                 \
  void instr(Register rs, const Operand& rt);                      \
  void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
  void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }

#define DEFINE_INSTRUCTION3(instr)                                            \
  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) {      \
    instr(rd_hi, rd_lo, rs, Operand(rt));                                     \
  }                                                                           \
  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) {        \
    instr(rd_hi, rd_lo, rs, Operand(j));                                      \
  }

  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION(Div);
  DEFINE_INSTRUCTION(Divu);
  DEFINE_INSTRUCTION(Mod);
  DEFINE_INSTRUCTION(Modu);
  DEFINE_INSTRUCTION(Mulh);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION(Mulhu);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  DEFINE_INSTRUCTION3(Div);
  DEFINE_INSTRUCTION3(Mul);
  DEFINE_INSTRUCTION3(Mulu);

  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);
  DEFINE_INSTRUCTION(Sle);
  DEFINE_INSTRUCTION(Sleu);
  DEFINE_INSTRUCTION(Sgt);
  DEFINE_INSTRUCTION(Sgtu);
  DEFINE_INSTRUCTION(Sge);
  DEFINE_INSTRUCTION(Sgeu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
#undef DEFINE_INSTRUCTION3

  void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }

  void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Neither |callee_args_count| nor |caller_args_count_reg| includes the
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on the
  // stack and add space for the four mips argument slots.
  // After aligning the frame, non-register arguments must be stored on the
  // stack, after the argument slots, using the helper
  // CFunctionArgumentOperand().
  // The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // Arguments 1-4 are placed in registers a0 through a3 respectively.
  // Arguments 5..n are stored on the stack using:
  //   sw(t0, CFunctionArgumentOperand(5));

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);
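
  // A minimal sketch of a five-argument C call, following the comments above
  // (the external reference name and argument values are assumptions made
  // purely for illustration):
  //   PrepareCallCFunction(5, t8);          // Align sp, reserve 4 arg slots.
  //   li(a0, Operand(arg0));                // Arguments 1-4 go in a0..a3.
  //   li(a1, Operand(arg1));
  //   li(a2, Operand(arg2));
  //   li(a3, Operand(arg3));
  //   li(t0, Operand(arg4));
  //   sw(t0, CFunctionArgumentOperand(5));  // Argument 5 goes on the stack.
  //   CallCFunction(ExternalReference::some_c_function(), 5);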
  void MovFromFloatResult(DoubleRegister dst);
  void MovFromFloatParameter(DoubleRegister dst);

  // There are two ways of passing double arguments on MIPS, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // See comments at the beginning of Builtins::Generate_CEntry.
  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(a1, ref);
  }

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  void CallStubDelayed(CodeStub* stub, COND_ARGS);
#undef COND_ARGS

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Branches to 'done'
  // if it succeeds, otherwise falls through if the result is saturated. On
  // return, 'result' either holds the answer or is clobbered on fall-through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  // Conditional move.
  void Movz(Register rd, Register rs, Register rt);
  void Movn(Register rd, Register rs, Register rt);
  void Movt(Register rd, Register rs, uint16_t cc = 0);
  void Movf(Register rd, Register rs, uint16_t cc = 0);

  void LoadZeroIfFPUCondition(Register dest);
  void LoadZeroIfNotFPUCondition(Register dest);

  void LoadZeroIfConditionNotZero(Register dest, Register condition);
  void LoadZeroIfConditionZero(Register dest, Register condition);
  void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt,
                           Condition cond);

  void Clz(Register rd, Register rs);
  void Ctz(Register rd, Register rs);
  void Popcnt(Register rd, Register rs);

  // Int64Lowering instructions
  void AddPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void SubPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void MulPair(Register dst_low, Register dst_high, Register left_low,
               Register left_high, Register right_low, Register right_high,
               Register scratch1, Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShlPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void ShrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift, Register scratch1,
               Register scratch2);

  void SarPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift, Register scratch);

  // MIPS32 R2 instruction macro.
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ExtractBits(Register dest, Register source, Register pos, int size,
                   bool sign_extend = false);
  void InsertBits(Register dest, Register source, Register pos, int size);

  void Seb(Register rd, Register rt);
  void Seh(Register rd, Register rt);
  void Neg_s(FPURegister fd, FPURegister fs);
  void Neg_d(FPURegister fd, FPURegister fs);

  // MIPS32 R6 instruction macros.
  void Bovc(Register rt, Register rs, Label* L);
  void Bnvc(Register rt, Register rs, Label* L);

  // Convert single to unsigned word.
  void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch);

  void Trunc_w_d(FPURegister fd, FPURegister fs);
  void Round_w_d(FPURegister fd, FPURegister fs);
  void Floor_w_d(FPURegister fd, FPURegister fs);
  void Ceil_w_d(FPURegister fd, FPURegister fs);

  // Round double functions
  void Trunc_d_d(FPURegister fd, FPURegister fs);
  void Round_d_d(FPURegister fd, FPURegister fs);
  void Floor_d_d(FPURegister fd, FPURegister fs);
  void Ceil_d_d(FPURegister fd, FPURegister fs);

  // Round float functions
  void Trunc_s_s(FPURegister fd, FPURegister fs);
  void Round_s_s(FPURegister fd, FPURegister fs);
  void Floor_s_s(FPURegister fd, FPURegister fs);
  void Ceil_s_s(FPURegister fd, FPURegister fs);

  // FP32 mode: Move the general-purpose register into
  // the high part of the double-register pair.
  // FP64 mode: Move the general-purpose register into
  // the higher 32 bits of the 64-bit coprocessor register,
  // while leaving the low bits unchanged.
  void Mthc1(Register rt, FPURegister fs);

  // FP32 mode: Move the high part of the double-register pair into
  // a general-purpose register.
  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register
  // into a general-purpose register.
  void Mfhc1(Register rt, FPURegister fs);

  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);
  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
              FPURegister scratch);

  // Change endianness
  void ByteSwapSigned(Register dest, Register src, int operand_size);
  void ByteSwapUnsigned(Register dest, Register src, int operand_size);

  void Ulh(Register rd, const MemOperand& rs);
  void Ulhu(Register rd, const MemOperand& rs);
  void Ush(Register rd, const MemOperand& rs, Register scratch);

  void Ulw(Register rd, const MemOperand& rs);
  void Usw(Register rd, const MemOperand& rs);

  void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch);
  void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch);

  void Ldc1(FPURegister fd, const MemOperand& src);
  void Sdc1(FPURegister fs, const MemOperand& dst);

  void Ll(Register rd, const MemOperand& rs);
  void Sc(Register rd, const MemOperand& rs);

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
  // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
  // handled in out-of-line code. The specific behaviour depends on supported
  // instructions.
  //
  // These functions assume (and assert) that src1 != src2. It is permitted
  // for the result to alias either input register.
  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
                  Label* out_of_line);
  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
                  Label* out_of_line);
  void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
                  Label* out_of_line);
  void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
                  Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
  void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
                           DoubleRegister src2);
  void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
                           DoubleRegister src2);
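
  // A typical emission pattern for these helpers looks roughly like the
  // following (label and register names are illustrative only):
  //   Label out_of_line, done;
  //   Float64Max(dst, lhs, rhs, &out_of_line);
  //   Branch(&done);
  //   bind(&out_of_line);
  //   Float64MaxOutOfLine(dst, lhs, rhs);
  //   bind(&done);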

  bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }

  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }

  inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
  inline void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }

  inline void Move(Register dst, Register src) {
    if (dst != src) {
      mov(dst, src);
    }
  }

  inline void Move_d(FPURegister dst, FPURegister src) {
    if (dst != src) {
      mov_d(dst, src);
    }
  }

  inline void Move_s(FPURegister dst, FPURegister src) {
    if (dst != src) {
      mov_s(dst, src);
    }
  }

  inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }

  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(Register dst_high, FPURegister src) {
    Mfhc1(dst_high, src);
  }

  inline void FmoveHigh(FPURegister dst, Register src_high) {
    Mthc1(src_high, dst);
  }

  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }

  void FmoveLow(FPURegister dst, Register src_low);

  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    Mthc1(src_high, dst);
  }

  void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
  void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
  void Move(FPURegister dst, uint32_t src);
  void Move(FPURegister dst, uint64_t src);

  // -------------------------------------------------------------------------
  // Overflow operations.

  // AddOverflow sets the overflow register to a negative value if overflow
  // occurred, otherwise it is zero or positive.
  void AddOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // SubOverflow sets the overflow register to a negative value if overflow
  // occurred, otherwise it is zero or positive.
  void SubOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
  // MulOverflow sets the overflow register to zero if no overflow occurred.
  void MulOverflow(Register dst, Register left, const Operand& right,
                   Register overflow);
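
  // A caller would typically branch on the sign of the overflow register,
  // e.g. (register and label names are illustrative only):
  //   AddOverflow(result, left, Operand(right), overflow);
  //   Branch(&on_overflow, lt, overflow, Operand(zero_reg));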

  // Number of instructions needed to calculate the switch table entry address.
#ifdef _MIPS_ARCH_MIPS32R6
  static constexpr int kSwitchTablePrologueSize = 5;
#else
  static constexpr int kSwitchTablePrologueSize = 10;
#endif
  // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
  // functor/function with a 'Label* func(size_t index)' declaration.
  template <typename Func>
  void GenerateSwitchTable(Register index, size_t case_count,
                           Func GetLabelFunction);
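
  // For example, given an array of case labels (names are illustrative only):
  //   Label* labels[kCaseCount];
  //   ...
  //   GenerateSwitchTable(index, kCaseCount,
  //                       [&labels](size_t i) { return labels[i]; });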

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index) override;
  void LoadRoot(Register destination, Heap::RootListIndex index, Condition cond,
                Register src1, const Operand& src2);

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);

  // ---------------------------------------------------------------------------
  // FPU macros. These do not handle special cases like NaN or +- inf.

  // Convert unsigned word to double.
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);

  // Convert double to unsigned word.
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
  void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
                 BranchDelaySlot bd = PROTECT);

  void JumpIfEqual(Register a, int32_t b, Label* dest) {
    li(kScratchReg, Operand(b));
    Branch(dest, eq, a, Operand(kScratchReg));
  }

  void JumpIfLessThan(Register a, int32_t b, Label* dest) {
    li(kScratchReg, Operand(b));
    Branch(dest, lt, a, Operand(kScratchReg));
  }

  // Push a standard frame, consisting of ra, fp, context and JS function.
  void PushStandardFrame(Register function_reg);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  void ResetSpeculationPoisonRegister();

 protected:
  void BranchLong(Label* L, BranchDelaySlot bdslot);

  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);

  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);

 private:
  bool has_double_zero_reg_set_ = false;

  void CallCFunctionHelper(Register function_base, int16_t function_offset,
                           int num_reg_arguments, int num_double_arguments);

  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
                FPURegister cmp2);

  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
                     FPURegister cmp2);

  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
                      MSARegister wt, BranchDelaySlot bd = PROTECT);

  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits);
  bool CalculateOffset(Label* L, int32_t& offset, OffsetSize bits,
                       Register& scratch, const Operand& rt);

  void BranchShortHelperR6(int32_t offset, Label* L);
  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
                           Register rs, const Operand& rt);
  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
                         const Operand& rt, BranchDelaySlot bdslot);
  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
                        const Operand& rt, BranchDelaySlot bdslot);

  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
  void BranchAndLinkShortHelper(int16_t offset, Label* L,
                                BranchDelaySlot bdslot);
  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
                                  Register rs, const Operand& rt);
  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
                                Register rs, const Operand& rt,
                                BranchDelaySlot bdslot);
  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
                               Register rs, const Operand& rt,
                               BranchDelaySlot bdslot);
  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);

  template <typename RoundFunc>
  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
                   RoundFunc round);

  template <typename RoundFunc>
  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
                  RoundFunc round);

  // Push a fixed frame, consisting of ra, fp.
  void PushCommonFrame(Register marker_reg = no_reg);
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}
  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);

  // Swap two registers. If the scratch register is omitted then a slightly
  // less efficient form using xor instead of mov is emitted.
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);

  void PushRoot(Heap::RootListIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_equal, eq, with, Operand(scratch));
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Branch(if_not_equal, ne, with, Operand(scratch));
  }

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. The value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      RAStatus ra_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value, RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
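
  // A minimal sketch of emitting the write barrier after storing |value| into
  // a field of |object| (the field offset, registers and flags here are
  // assumptions made purely for illustration):
  //   sw(value, FieldMemOperand(object, JSObject::kPropertiesOrHashOffset));
  //   RecordWriteField(object, JSObject::kPropertiesOrHashOffset, value,
  //                    scratch, kRAHasNotBeenSaved, kDontSaveFPRegs);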

  void Pref(int32_t hint, const MemOperand& rs);

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  // Truncates a double using a specific rounding mode, and writes the value
  // to the result register.
  // The except_flag will contain any exceptions caused by the instruction.
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
  // exception is masked.
  void EmitFPUTruncate(
      FPURoundingMode rounding_mode, Register result,
      DoubleRegister double_input, Register scratch,
      DoubleRegister double_scratch, Register except_flag,
      CheckForInexactConversion check_inexact = kDontCheckForInexactConversion);

  // Enter exit frame.
  // argc - argument count to be dropped by LeaveExitFrame.
  // save_doubles - saves FPU registers on stack, currently disabled.
  // stack_space - extra stack space.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame.
  void LeaveExitFrame(bool save_doubles, Register arg_count,
                      bool do_return = NO_EMIT_RETURN,
                      bool argument_count_is_length = false);

  // Make sure the stack is aligned. Only emits code in debug mode.
  void AssertStackIsAligned();

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // -------------------------------------------------------------------------
  // JavaScript invokes.

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // Frame restart support.
  void MaybeDropFrames();

  // Exception handling.

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // -------------------------------------------------------------------------
  // Support functions.

  void GetObjectType(Register function,
                     Register map,
                     Register type_reg);

  // -------------------------------------------------------------------------
  // Runtime calls.

#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT

  // Call a code stub.
  void CallStub(CodeStub* stub,
                COND_ARGS);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, COND_ARGS);

#undef COND_ARGS

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }
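
  // For example (mirroring a common pattern in the builtins), a no-argument
  // runtime call can simply be emitted as:
  //   CallRuntime(Runtime::kThrowStackOverflow);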

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to the builtin routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               BranchDelaySlot bd = PROTECT,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // -------------------------------------------------------------------------
  // StatsCounter support.

  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);

  // -------------------------------------------------------------------------
  // Smi utilities.

  void SmiTag(Register reg) {
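    // Adding the value to itself shifts it left by one bit, which applies the
    // smi tag (kSmiTag == 0, kSmiTagSize == 1 on 32-bit platforms).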
    Addu(reg, reg, reg);
  }

  void SmiTag(Register dst, Register src) { Addu(dst, src, src); }

  // Test if the register contains a smi.
  inline void SmiTst(Register value, Register scratch) {
    And(scratch, value, Operand(kSmiTagMask));
  }

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value,
                    Label* not_smi_label,
                    Register scratch = at,
                    BranchDelaySlot bd = PROTECT);

  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ext(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

template <typename Func>
void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                         Func GetLabelFunction) {
  Label here;
  BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
  UseScratchRegisterScope temps(this);
  Register scratch = temps.Acquire();
  if (kArchVariant >= kMips32r6) {
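    // addiupc(scratch, 5) computes the address five instructions ahead of the
    // addiupc itself, which is where the dd() jump table below begins (the
    // addiupc, lsa, lw, jr and delay-slot nop fill those five slots).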
    addiupc(scratch, 5);
    Lsa(scratch, scratch, index, kPointerSizeLog2);
    lw(scratch, MemOperand(scratch));
  } else {
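    // Without addiupc, materialize the table base with a bal: after the bal,
    // ra holds the address of 'here', and the table starts six instructions
    // later (addu, pop(ra)'s lw and Addu, lw, jr, and the delay-slot nop),
    // hence the 6 * kInstrSize offset in the lw below.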
    push(ra);
    bal(&here);
    sll(scratch, index, kPointerSizeLog2);  // Branch delay slot.
    bind(&here);
    addu(scratch, scratch, ra);
    pop(ra);
    lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
  }
  jr(scratch);
  nop();  // Branch delay slot nop.
  for (size_t index = 0; index < case_count; ++index) {
    dd(GetLabelFunction(index));
  }
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_