1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_MIPS
6 
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer/deoptimizer.h"
12 #include "src/execution/frame-constants.h"
13 #include "src/execution/frames.h"
14 #include "src/logging/counters.h"
15 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
16 #include "src/codegen/macro-assembler-inl.h"
17 #include "src/codegen/mips/constants-mips.h"
18 #include "src/codegen/register-configuration.h"
19 #include "src/heap/heap-inl.h"
20 #include "src/objects/cell.h"
21 #include "src/objects/foreign.h"
22 #include "src/objects/heap-number.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27 
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-linkage.h"
30 #include "src/wasm/wasm-objects.h"
31 #endif  // V8_ENABLE_WEBASSEMBLY
32 
33 namespace v8 {
34 namespace internal {
35 
36 #define __ ACCESS_MASM(masm)
37 
38 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
39   __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
40   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
41           RelocInfo::CODE_TARGET);
42 }
43 
44 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
45                                            Runtime::FunctionId function_id) {
46   // ----------- S t a t e -------------
47   //  -- a0 : actual argument count
48   //  -- a1 : target function (preserved for callee)
49   //  -- a3 : new target (preserved for callee)
50   // -----------------------------------
51   {
52     FrameScope scope(masm, StackFrame::INTERNAL);
53     // Push a copy of the target function, the new target and the actual
54     // argument count.
55     // Push function as parameter to the runtime call.
56     __ SmiTag(kJavaScriptCallArgCountRegister);
57     __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
58             kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
59 
60     __ CallRuntime(function_id, 1);
61 
62     // Restore target function, new target and actual argument count.
63     __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
64            kJavaScriptCallArgCountRegister);
65     __ SmiUntag(kJavaScriptCallArgCountRegister);
66   }
67 
68   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
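  // The runtime call returns the Code object in v0; compute its entry point and tail-call it.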
69   __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
70   __ Jump(a2);
71 }
72 
73 namespace {
74 
75 enum class ArgumentsElementType {
76   kRaw,    // Push arguments as they are.
77   kHandle  // Dereference arguments before pushing.
78 };
79 
80 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
81                             Register scratch, Register scratch2,
82                             ArgumentsElementType element_type) {
83   DCHECK(!AreAliased(array, argc, scratch));
84   Label loop, entry;
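  // Arguments are pushed from the last one down to the first, so the first
  // argument ends up on top of the stack.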
85   __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots));
86   __ Branch(&entry);
87   __ bind(&loop);
88   __ Lsa(scratch2, array, scratch, kSystemPointerSizeLog2);
89   __ lw(scratch2, MemOperand(scratch2));
90   if (element_type == ArgumentsElementType::kHandle) {
91     __ lw(scratch2, MemOperand(scratch2));
92   }
93   __ push(scratch2);
94   __ bind(&entry);
95   __ Addu(scratch, scratch, Operand(-1));
96   __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
97 }
98 
99 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
100   // ----------- S t a t e -------------
101   //  -- a0     : number of arguments
102   //  -- a1     : constructor function
103   //  -- a3     : new target
104   //  -- cp     : context
105   //  -- ra     : return address
106   //  -- sp[...]: constructor arguments
107   // -----------------------------------
108 
109   // Enter a construct frame.
110   {
111     FrameScope scope(masm, StackFrame::CONSTRUCT);
112 
113     // Preserve the incoming parameters on the stack.
114     __ SmiTag(a0);
115     __ Push(cp, a0);
116     __ SmiUntag(a0);
117     // Set up pointer to first argument (skip receiver).
118     __ Addu(
119         t2, fp,
120         Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
121     // Copy arguments and receiver to the expression stack.
122     // t2: Pointer to start of arguments.
123     // a0: Number of arguments.
124     Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw);
125     // The receiver for the builtin/api call.
126     __ PushRoot(RootIndex::kTheHoleValue);
127 
128     // Call the function.
129     // a0: number of arguments (untagged)
130     // a1: constructor function
131     // a3: new target
132     __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
133 
134     // Restore context from the frame.
135     __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
136     // Restore smi-tagged arguments count from the frame.
137     __ lw(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
138     // Leave construct frame.
139   }
140 
141   // Remove caller arguments from the stack and return.
142   __ DropArguments(t3, TurboAssembler::kCountIsSmi,
143                    TurboAssembler::kCountIncludesReceiver);
144   __ Ret();
145 }
146 
147 }  // namespace
148 
149 // The construct stub for ES5 constructor functions and ES6 class constructors.
150 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
151   // ----------- S t a t e -------------
152   //  --      a0: number of arguments (untagged)
153   //  --      a1: constructor function
154   //  --      a3: new target
155   //  --      cp: context
156   //  --      ra: return address
157   //  -- sp[...]: constructor arguments
158   // -----------------------------------
159 
160   // Enter a construct frame.
161   FrameScope scope(masm, StackFrame::MANUAL);
162   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
163   __ EnterFrame(StackFrame::CONSTRUCT);
164 
165   // Preserve the incoming parameters on the stack.
166   __ SmiTag(a0);
167   __ Push(cp, a0, a1);
168   __ PushRoot(RootIndex::kTheHoleValue);
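  // The hole pushed above occupies the padding slot in the frame layout shown below.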
169   __ Push(a3);
170 
171   // ----------- S t a t e -------------
172   //  --        sp[0*kPointerSize]: new target
173   //  --        sp[1*kPointerSize]: padding
174   //  -- a1 and sp[2*kPointerSize]: constructor function
175   //  --        sp[3*kPointerSize]: number of arguments (tagged)
176   //  --        sp[4*kPointerSize]: context
177   // -----------------------------------
178 
179   __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
180   __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
181   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
182   __ JumpIfIsInRange(
183       t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
184       static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
185       &not_create_implicit_receiver);
186 
187   // If not derived class constructor: Allocate the new receiver object.
188   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
189                       t2, t3);
190   __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
191   __ Branch(&post_instantiation_deopt_entry);
192 
193   // Else: use TheHoleValue as receiver for constructor call
194   __ bind(&not_create_implicit_receiver);
195   __ LoadRoot(v0, RootIndex::kTheHoleValue);
196 
197   // ----------- S t a t e -------------
198   //  --                          v0: receiver
199   //  -- Slot 4 / sp[0*kPointerSize]: new target
200   //  -- Slot 3 / sp[1*kPointerSize]: padding
201   //  -- Slot 2 / sp[2*kPointerSize]: constructor function
202   //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
203   //  -- Slot 0 / sp[4*kPointerSize]: context
204   // -----------------------------------
205   // Deoptimizer enters here.
206   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
207       masm->pc_offset());
208   __ bind(&post_instantiation_deopt_entry);
209 
210   // Restore new target.
211   __ Pop(a3);
212 
213   // Push the allocated receiver to the stack.
214   __ Push(v0);
215 
216   // We need two copies because we may have to return the original one
217   // and the calling conventions dictate that the called function pops the
218   // receiver. The second copy is pushed after the arguments; we save it in
219   // s0 since v0 will hold the call's return value.
220   __ mov(s0, v0);
221 
222   // Set up pointer to last argument.
223   __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
224                           kSystemPointerSize));
225 
226   // ----------- S t a t e -------------
227   //  --                 a3: new target
228   //  -- sp[0*kPointerSize]: implicit receiver
229   //  -- sp[1*kPointerSize]: implicit receiver
230   //  -- sp[2*kPointerSize]: padding
231   //  -- sp[3*kPointerSize]: constructor function
232   //  -- sp[4*kPointerSize]: number of arguments (tagged)
233   //  -- sp[5*kPointerSize]: context
234   // -----------------------------------
235 
236   // Restore constructor function and argument count.
237   __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
238   __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
239   __ SmiUntag(a0);
240 
241   Label stack_overflow;
242   __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
243 
244   // TODO(victorgomes): When the arguments adaptor is completely removed, we
245   // should get the formal parameter count and copy the arguments in its
246   // correct position (including any undefined), instead of delaying this to
247   // InvokeFunction.
248 
249   // Copy arguments and receiver to the expression stack.
250   // t2: Pointer to start of argument.
251   // a0: Number of arguments.
252   Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw);
253 
254   // We need two copies because we may have to return the original one
255   // and the calling conventions dictate that the called function pops the
256   // receiver. The second copy is pushed after the arguments.
257   __ Push(s0);
258 
259   // Call the function.
260   __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
261 
262   // ----------- S t a t e -------------
263   //  --                 v0: constructor result
264   //  -- sp[0*kPointerSize]: implicit receiver
265   //  -- sp[1*kPointerSize]: padding
266   //  -- sp[2*kPointerSize]: constructor function
267   //  -- sp[3*kPointerSize]: number of arguments
268   //  -- sp[4*kPointerSize]: context
269   // -----------------------------------
270 
271   // Store offset of return address for deoptimizer.
272   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
273       masm->pc_offset());
274 
275   // If the result is an object (in the ECMA sense), we should get rid
276   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
277   // on page 74.
278   Label use_receiver, do_throw, leave_and_return, check_receiver;
279 
280   // If the result is undefined, we jump out to using the implicit receiver.
281   __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver);
282 
283   // Otherwise we do a smi check and fall through to check if the return value
284   // is a valid receiver.
285 
286   // Throw away the result of the constructor invocation and use the
287   // on-stack receiver as the result.
288   __ bind(&use_receiver);
289   __ lw(v0, MemOperand(sp, 0 * kPointerSize));
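  // If the on-stack receiver is still the hole (a derived constructor that did
  // not return an object), throw.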
290   __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
291 
292   __ bind(&leave_and_return);
293   // Restore smi-tagged arguments count from the frame.
294   __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
295   // Leave construct frame.
296   __ LeaveFrame(StackFrame::CONSTRUCT);
297 
298   // Remove caller arguments from the stack and return.
299   __ DropArguments(a1, TurboAssembler::kCountIsSmi,
300                    TurboAssembler::kCountIncludesReceiver);
301   __ Ret();
302 
303   __ bind(&check_receiver);
304   // If the result is a smi, it is *not* an object in the ECMA sense.
305   __ JumpIfSmi(v0, &use_receiver);
306 
307   // If the type of the result (stored in its map) is less than
308   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
309   __ GetObjectType(v0, t2, t2);
310   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
311   __ Branch(&leave_and_return, greater_equal, t2,
312             Operand(FIRST_JS_RECEIVER_TYPE));
313   __ Branch(&use_receiver);
314 
315   __ bind(&do_throw);
316   // Restore the context from the frame.
317   __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
318   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
319   __ break_(0xCC);
320 
321   __ bind(&stack_overflow);
322   // Restore the context from the frame.
323   __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
324   __ CallRuntime(Runtime::kThrowStackOverflow);
325   // Unreachable code.
326   __ break_(0xCC);
327 }
328 
329 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
330   Generate_JSBuiltinsConstructStubHelper(masm);
331 }
332 
333 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
334   FrameScope scope(masm, StackFrame::INTERNAL);
335   __ Push(a1);
336   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
337 }
338 
339 // Clobbers scratch1 and scratch2; preserves all other registers.
340 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
341                                         Register scratch1, Register scratch2) {
342   ASM_CODE_COMMENT(masm);
343   // Check the stack for overflow. We are not trying to catch
344   // interruptions (e.g. debug break and preemption) here, so the "real stack
345   // limit" is checked.
346   Label okay;
347   __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
348   // Make scratch1 the space we have left. The stack might already be
349   // overflowed here, which will cause scratch1 to become negative.
350   __ Subu(scratch1, sp, scratch1);
351   // Check if the arguments will overflow the stack.
352   __ sll(scratch2, argc, kPointerSizeLog2);
353   // Signed comparison.
354   __ Branch(&okay, gt, scratch1, Operand(scratch2));
355 
356   // Out of stack space.
357   __ CallRuntime(Runtime::kThrowStackOverflow);
358 
359   __ bind(&okay);
360 }
361 
362 namespace {
363 
364 // Used by JSEntryTrampoline to locate the C++ parameters passed to JSEntryVariant.
365 constexpr int kPushedStackSpace =
366     kCArgsSlotsSize + (kNumCalleeSaved + 1) * kPointerSize +
367     kNumCalleeSavedFPU * kDoubleSize + 4 * kPointerSize +
368     EntryFrameConstants::kCallerFPOffset;
369 
370 // Called with the native C calling convention. The corresponding function
371 // signature is either:
372 //
373 //   using JSEntryFunction = GeneratedCode<Address(
374 //       Address root_register_value, Address new_target, Address target,
375 //       Address receiver, intptr_t argc, Address** argv)>;
376 // or
377 //   using JSEntryFunction = GeneratedCode<Address(
378 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
379 //
380 // Passes through a0, a1, a2, a3 and stack to JSEntryTrampoline.
381 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
382                              Builtin entry_trampoline) {
383   Label invoke, handler_entry, exit;
384 
385   int pushed_stack_space = kCArgsSlotsSize;
386   {
387     NoRootArrayScope no_root_array(masm);
388 
389     // Registers:
390     // a0: root_register_value
391 
392     // Save callee saved registers on the stack.
393     __ MultiPush(kCalleeSaved | ra);
394     pushed_stack_space +=
395         kNumCalleeSaved * kPointerSize + kPointerSize /* ra */;
396 
397     // Save callee-saved FPU registers.
398     __ MultiPushFPU(kCalleeSavedFPU);
399     pushed_stack_space += kNumCalleeSavedFPU * kDoubleSize;
400 
401     // Set up the reserved register for 0.0.
402     __ Move(kDoubleRegZero, 0.0);
403 
404     // Initialize the root register.
405     // C calling convention. The first argument is passed in a0.
406     __ mov(kRootRegister, a0);
407   }
408 
409   // We build an EntryFrame.
410   __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
411   __ li(t2, Operand(StackFrame::TypeToMarker(type)));
412   __ li(t1, Operand(StackFrame::TypeToMarker(type)));
413   __ li(t4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
414                                       masm->isolate()));
415   __ lw(t0, MemOperand(t4));
416   __ Push(t3, t2, t1, t0);
417   pushed_stack_space += 4 * kPointerSize;
418 
419   // Clear c_entry_fp, now we've pushed its previous value to the stack.
420   // If the c_entry_fp is not already zero and we don't clear it, the
421   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
422   // frames on top.
423   __ Sw(zero_reg, MemOperand(t4));
424 
425   // Set up frame pointer for the frame to be pushed.
426   __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
427   pushed_stack_space += EntryFrameConstants::kCallerFPOffset;
428 
429   // Registers:
430   // a0: root_register_value
431   //
432   // Stack:
433   // caller fp          |
434   // function slot      | entry frame
435   // context slot       |
436   // bad fp (0xFF...F)  |
437   // callee saved registers + ra
438   // 4 args slots
439 
440   // If this is the outermost JS call, set js_entry_sp value.
441   Label non_outermost_js;
442   ExternalReference js_entry_sp = ExternalReference::Create(
443       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
444   __ li(t1, js_entry_sp);
445   __ lw(t2, MemOperand(t1));
446   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
447   __ sw(fp, MemOperand(t1));
448   __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
449   Label cont;
450   __ b(&cont);
451   __ nop();  // Branch delay slot nop.
452   __ bind(&non_outermost_js);
453   __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
454   __ bind(&cont);
455   __ push(t0);
456 
457   // Jump to a faked try block that does the invoke, with a faked catch
458   // block that sets the pending exception.
459   __ jmp(&invoke);
460   __ bind(&handler_entry);
461 
462   // Store the current pc as the handler offset. It's used later to create the
463   // handler table.
464   masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
465 
466   // Caught exception: Store result (exception) in the pending exception
467   // field in the JSEnv and return a failure sentinel.  Coming in here the
468   // fp will be invalid because the PushStackHandler below sets it to 0 to
469   // signal the existence of the JSEntry frame.
470   __ li(t0, ExternalReference::Create(
471                 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
472   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
473   __ LoadRoot(v0, RootIndex::kException);
474   __ b(&exit);  // b exposes branch delay slot.
475   __ nop();     // Branch delay slot nop.
476 
477   // Invoke: Link this frame into the handler chain.
478   __ bind(&invoke);
479   __ PushStackHandler();
480   // If an exception not caught by another handler occurs, this handler
481   // returns control to the code after the jmp(&invoke) above, which
482   // restores all kCalleeSaved registers (including cp and fp) to their
483   // saved values before returning a failure to C.
484   //
485   // Preserve a1, a2 and a3 passed by C++ and pass them to the trampoline.
486   //
487   // Stack:
488   // handler frame
489   // entry frame
490   // callee saved registers + ra
491   // 4 args slots
492   //
493   // Invoke the function by calling through JS entry trampoline builtin and
494   // pop the faked function when we return.
495   Handle<Code> trampoline_code =
496       masm->isolate()->builtins()->code_handle(entry_trampoline);
497   DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
498   USE(pushed_stack_space);
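  // kPushedStackSpace must match the stack space pushed above; the entry
  // trampoline uses it to locate argc and argv on the C++ caller's stack.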
499   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
500 
501   // Unlink this frame from the handler chain.
502   __ PopStackHandler();
503 
504   __ bind(&exit);  // v0 holds result
505   // Check if the current stack frame is marked as the outermost JS frame.
506   Label non_outermost_js_2;
507   __ pop(t1);
508   __ Branch(&non_outermost_js_2, ne, t1,
509             Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
510   __ li(t1, ExternalReference(js_entry_sp));
511   __ sw(zero_reg, MemOperand(t1));
512   __ bind(&non_outermost_js_2);
513 
514   // Restore the top frame descriptors from the stack.
515   __ pop(t1);
516   __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
517                                       masm->isolate()));
518   __ sw(t1, MemOperand(t0));
519 
520   // Reset the stack to the callee saved registers.
521   __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
522 
523   // Restore callee-saved fpu registers.
524   __ MultiPopFPU(kCalleeSavedFPU);
525 
526   // Restore callee saved registers from the stack.
527   __ MultiPop(kCalleeSaved | ra);
528   // Return.
529   __ Jump(ra);
530 }
531 
532 }  // namespace
533 
534 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
535   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
536 }
537 
538 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
539   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
540                           Builtin::kJSConstructEntryTrampoline);
541 }
542 
543 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
544   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
545                           Builtin::kRunMicrotasksTrampoline);
546 }
547 
548 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
549                                              bool is_construct) {
550   // ----------- S t a t e -------------
551   //  -- a0: root_register_value (unused)
552   //  -- a1: new.target
553   //  -- a2: function
554   //  -- a3: receiver_pointer
555   //  -- [fp + kPushedStackSpace + 0 * kPointerSize]: argc
556   //  -- [fp + kPushedStackSpace + 1 * kPointerSize]: argv
557   // -----------------------------------
558 
559   // Enter an internal frame.
560   {
561     FrameScope scope(masm, StackFrame::INTERNAL);
562 
563     // Setup the context (we need to use the caller context from the isolate).
564     ExternalReference context_address = ExternalReference::Create(
565         IsolateAddressId::kContextAddress, masm->isolate());
566     __ li(cp, context_address);
567     __ lw(cp, MemOperand(cp));
568 
569     // Push the function onto the stack.
570     __ Push(a2);
571 
572     __ lw(s0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
573     __ lw(a0,
574           MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
575     __ lw(s0,
576           MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
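    // a0 now holds argc and s0 holds argv, the fifth and sixth C arguments
    // passed on the stack by the C++ caller.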
577 
578     // Check if we have enough stack space to push all arguments.
579     // Clobbers a2 and t0.
580     __ mov(t1, a0);
581     Generate_CheckStackOverflow(masm, t1, t0, t2);
582 
583     // Copy arguments to the stack.
584     // a0: argc
585     // s0: argv, i.e. points to first arg
586     Generate_PushArguments(masm, s0, a0, t2, t0, ArgumentsElementType::kHandle);
587 
588     // Push the receiver.
589     __ Push(a3);
590 
591     // a0: argc
592     // a1: function
593     // a3: new.target
594     __ mov(a3, a1);
595     __ mov(a1, a2);
596 
597     // Initialize all JavaScript callee-saved registers, since they will be seen
598     // by the garbage collector as part of handlers.
599     __ LoadRoot(t0, RootIndex::kUndefinedValue);
600     __ mov(s0, t0);
601     __ mov(s1, t0);
602     __ mov(s2, t0);
603     __ mov(s3, t0);
604     __ mov(s4, t0);
605     __ mov(s5, t0);
606     // s6 holds the root address. Do not clobber.
607     // s7 is cp. Do not init.
608 
609     // Invoke the code.
610     Handle<Code> builtin = is_construct
611                                ? BUILTIN_CODE(masm->isolate(), Construct)
612                                : masm->isolate()->builtins()->Call();
613     __ Call(builtin, RelocInfo::CODE_TARGET);
614 
615     // Leave internal frame.
616   }
617 
618   __ Jump(ra);
619 }
620 
621 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
622   Generate_JSEntryTrampolineHelper(masm, false);
623 }
624 
625 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
626   Generate_JSEntryTrampolineHelper(masm, true);
627 }
628 
629 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
630   // a1: microtask_queue
631   __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
632   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
633 }
634 
635 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
636                                  Register scratch) {
637   DCHECK(!AreAliased(code, scratch));
638   // Verify that the code kind is baseline code via the CodeKind.
639   __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset));
640   __ DecodeField<Code::KindField>(scratch);
641   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
642             Operand(static_cast<int>(CodeKind::BASELINE)));
643 }
644 
645 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
646                                                     Register sfi_data,
647                                                     Register scratch1,
648                                                     Label* is_baseline) {
649   ASM_CODE_COMMENT(masm);
650   Label done;
651 
652   __ GetObjectType(sfi_data, scratch1, scratch1);
653   if (FLAG_debug_code) {
654     Label not_baseline;
655     __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
656     AssertCodeIsBaseline(masm, sfi_data, scratch1);
657     __ Branch(is_baseline);
658     __ bind(&not_baseline);
659   } else {
660     __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
661   }
662   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
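  // sfi_data is an InterpreterData; unwrap it to get the BytecodeArray.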
663   __ lw(sfi_data,
664         FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
665 
666   __ bind(&done);
667 }
668 
669 // static
670 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
671   // ----------- S t a t e -------------
672   //  -- v0 : the value to pass to the generator
673   //  -- a1 : the JSGeneratorObject to resume
674   //  -- ra : return address
675   // -----------------------------------
676 
677   // Store input value into generator object.
678   __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
679   __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
680                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
681 
682   // Check that a1 is still valid, RecordWrite might have clobbered it.
683   __ AssertGeneratorObject(a1);
684 
685   // Load suspended function and context.
686   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
687   __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
688 
689   // Flood function if we are stepping.
690   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
691   Label stepping_prepared;
692   ExternalReference debug_hook =
693       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
694   __ li(t1, debug_hook);
695   __ lb(t1, MemOperand(t1));
696   __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
697 
698   // Flood function if we need to continue stepping in the suspended generator.
699   ExternalReference debug_suspended_generator =
700       ExternalReference::debug_suspended_generator_address(masm->isolate());
701   __ li(t1, debug_suspended_generator);
702   __ lw(t1, MemOperand(t1));
703   __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
704   __ bind(&stepping_prepared);
705 
706   // Check the stack for overflow. We are not trying to catch interruptions
707   // (i.e. debug break and preemption) here, so check the "real stack limit".
708   Label stack_overflow;
709   __ LoadStackLimit(kScratchReg,
710                     MacroAssembler::StackLimitKind::kRealStackLimit);
711   __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
712 
713   // ----------- S t a t e -------------
714   //  -- a1    : the JSGeneratorObject to resume
715   //  -- t0    : generator function
716   //  -- cp    : generator context
717   //  -- ra    : return address
718   // -----------------------------------
719 
720   // Copy the function arguments from the generator object's register file.
721 
722   __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
723   __ lhu(a3,
724          FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
725   __ Subu(a3, a3, Operand(kJSArgcReceiverSlots));
726   __ lw(t1,
727         FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
728   {
729     Label done_loop, loop;
730     __ bind(&loop);
731     __ Subu(a3, a3, Operand(1));
732     __ Branch(&done_loop, lt, a3, Operand(zero_reg));
733     __ Lsa(kScratchReg, t1, a3, kPointerSizeLog2);
734     __ Lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
735     __ Push(kScratchReg);
736     __ Branch(&loop);
737     __ bind(&done_loop);
738     // Push receiver.
739     __ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
740     __ Push(kScratchReg);
741   }
742 
743   // Underlying function needs to have bytecode available.
744   if (FLAG_debug_code) {
745     Label is_baseline;
746     __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
747     __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
748     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
749     __ GetObjectType(a3, a3, a3);
750     __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
751               Operand(BYTECODE_ARRAY_TYPE));
752     __ bind(&is_baseline);
753   }
754 
755   // Resume (Ignition/TurboFan) generator object.
756   {
757     __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
758     __ lhu(a0, FieldMemOperand(
759                    a0, SharedFunctionInfo::kFormalParameterCountOffset));
760     // We abuse new.target both to indicate that this is a resume call and to
761     // pass in the generator object.  In ordinary calls, new.target is always
762     // undefined because generator functions are non-constructable.
763     __ Move(a3, a1);
764     __ Move(a1, t0);
765     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
766     __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
767     __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
768     __ Jump(a2);
769   }
770 
771   __ bind(&prepare_step_in_if_stepping);
772   {
773     FrameScope scope(masm, StackFrame::INTERNAL);
774     __ Push(a1, t0);
775     // Push hole as receiver since we do not use it for stepping.
776     __ PushRoot(RootIndex::kTheHoleValue);
777     __ CallRuntime(Runtime::kDebugOnFunctionCall);
778     __ Pop(a1);
779   }
780   __ Branch(USE_DELAY_SLOT, &stepping_prepared);
781   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
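  // The load above executes in the branch delay slot, reloading the suspended
  // function into t0 before rejoining &stepping_prepared.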
782 
783   __ bind(&prepare_step_in_suspended_generator);
784   {
785     FrameScope scope(masm, StackFrame::INTERNAL);
786     __ Push(a1);
787     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
788     __ Pop(a1);
789   }
790   __ Branch(USE_DELAY_SLOT, &stepping_prepared);
791   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
792 
793   __ bind(&stack_overflow);
794   {
795     FrameScope scope(masm, StackFrame::INTERNAL);
796     __ CallRuntime(Runtime::kThrowStackOverflow);
797     __ break_(0xCC);  // This should be unreachable.
798   }
799 }
800 
801 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
802                                                 Register optimized_code,
803                                                 Register closure,
804                                                 Register scratch1,
805                                                 Register scratch2) {
806   ASM_CODE_COMMENT(masm);
807   // Store code entry in the closure.
808   __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
809   __ mov(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
810   __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
811                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
812                       RememberedSetAction::kOmit, SmiCheck::kOmit);
813 }
814 
815 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
816                                   Register scratch2) {
817   ASM_CODE_COMMENT(masm);
818   Register params_size = scratch1;
819 
820   // Get the size of the formal parameters + receiver (in bytes).
821   __ lw(params_size,
822         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
823   __ lw(params_size,
824         FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
825 
826   Register actual_params_size = scratch2;
827   // Compute the size of the actual parameters + receiver (in bytes).
828   __ Lw(actual_params_size,
829         MemOperand(fp, StandardFrameConstants::kArgCOffset));
830   __ sll(actual_params_size, actual_params_size, kPointerSizeLog2);
831 
832   // If actual is bigger than formal, then we should use it to free up the stack
833   // arguments.
834   __ slt(t2, params_size, actual_params_size);
835   __ movn(params_size, actual_params_size, t2);
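  // i.e. params_size = max(params_size, actual_params_size).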
836 
837   // Leave the frame (also dropping the register file).
838   __ LeaveFrame(StackFrame::INTERPRETED);
839 
840   // Drop receiver + arguments.
841   __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
842                    TurboAssembler::kCountIncludesReceiver);
843 }
844 
845 // Tail-call |function_id| if |actual_state| == |expected_state|
846 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
847                                          Register actual_state,
848                                          TieringState expected_state,
849                                          Runtime::FunctionId function_id) {
850   ASM_CODE_COMMENT(masm);
851   Label no_match;
852   __ Branch(&no_match, ne, actual_state,
853             Operand(static_cast<int>(expected_state)));
854   GenerateTailCallToReturnedCode(masm, function_id);
855   __ bind(&no_match);
856 }
857 
858 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
859                                       Register optimized_code_entry,
860                                       Register scratch1, Register scratch2) {
861   // ----------- S t a t e -------------
862   //  -- a0 : actual argument count
863   //  -- a3 : new target (preserved for callee if needed, and caller)
864   //  -- a1 : target function (preserved for callee if needed, and caller)
865   // -----------------------------------
866   DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
867 
868   Register closure = a1;
869   Label heal_optimized_code_slot;
870 
871   // If the optimized code is cleared, go to runtime to update the optimization
872   // marker field.
873   __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
874                    &heal_optimized_code_slot);
875 
876   // Check if the optimized code is marked for deopt. If it is, call the
877   // runtime to clear it.
878   __ Lw(scratch1,
879         FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
880   __ Lw(scratch1,
881         FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
882   __ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
883   __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
884 
885   // Optimized code is good, get it into the closure and link the closure into
886   // the optimized functions list, then tail call the optimized code.
887   // The feedback vector is no longer used, so re-use it as a scratch
888   // register.
889   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
890                                       scratch1, scratch2);
891   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
892   __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
893   __ Jump(a2);
894 
895   // Optimized code slot contains deoptimized code or code is cleared and
896   // optimized code marker isn't updated. Evict the code, update the marker
897   // and re-enter the closure's code.
898   __ bind(&heal_optimized_code_slot);
899   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
900 }
901 
902 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
903                               Register tiering_state) {
904   // ----------- S t a t e -------------
905   //  -- a0 : actual argument count
906   //  -- a3 : new target (preserved for callee if needed, and caller)
907   //  -- a1 : target function (preserved for callee if needed, and caller)
908   //  -- feedback vector (preserved for caller if needed)
909   //  -- tiering_state : an int32 containing a non-zero optimization
910   //  marker.
911   // -----------------------------------
912   ASM_CODE_COMMENT(masm);
913   DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
914 
915   TailCallRuntimeIfStateEquals(masm, tiering_state,
916                                TieringState::kRequestTurbofan_Synchronous,
917                                Runtime::kCompileTurbofan_Synchronous);
918   TailCallRuntimeIfStateEquals(masm, tiering_state,
919                                TieringState::kRequestTurbofan_Concurrent,
920                                Runtime::kCompileTurbofan_Concurrent);
921 
922   __ stop();
923 }
924 
925 // Advance the current bytecode offset. This simulates what all bytecode
926 // handlers do upon completion of the underlying operation. Will bail out to a
927 // label if the bytecode (without prefix) is a return bytecode. Will not advance
928 // the bytecode offset if the current bytecode is a JumpLoop, instead just
929 // re-executing the JumpLoop to jump to the correct bytecode.
930 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
931                                           Register bytecode_array,
932                                           Register bytecode_offset,
933                                           Register bytecode, Register scratch1,
934                                           Register scratch2, Register scratch3,
935                                           Label* if_return) {
936   ASM_CODE_COMMENT(masm);
937   Register bytecode_size_table = scratch1;
938 
939   // The bytecode offset value will be increased by one in wide and extra wide
940   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
941   // will restore the original bytecode. In order to simplify the code, we have
942   // a backup of it.
943   Register original_bytecode_offset = scratch3;
944   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
945                      bytecode_size_table, original_bytecode_offset));
946   __ Move(original_bytecode_offset, bytecode_offset);
947   __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
948 
949   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
950   Label process_bytecode, extra_wide;
951   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
952   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
953   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
954   STATIC_ASSERT(3 ==
955                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
956   __ Branch(&process_bytecode, hi, bytecode, Operand(3));
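  // The low bit distinguishes the ExtraWide prefixes (odd) from the Wide ones (even).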
957   __ And(scratch2, bytecode, Operand(1));
958   __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
959 
960   // Load the next bytecode and update table to the wide scaled table.
961   __ Addu(bytecode_offset, bytecode_offset, Operand(1));
962   __ Addu(scratch2, bytecode_array, bytecode_offset);
963   __ lbu(bytecode, MemOperand(scratch2));
964   __ Addu(bytecode_size_table, bytecode_size_table,
965           Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
966   __ jmp(&process_bytecode);
967 
968   __ bind(&extra_wide);
969   // Load the next bytecode and update table to the extra wide scaled table.
970   __ Addu(bytecode_offset, bytecode_offset, Operand(1));
971   __ Addu(scratch2, bytecode_array, bytecode_offset);
972   __ lbu(bytecode, MemOperand(scratch2));
973   __ Addu(bytecode_size_table, bytecode_size_table,
974           Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
975 
976   __ bind(&process_bytecode);
977 
978 // Bailout to the return label if this is a return bytecode.
979 #define JUMP_IF_EQUAL(NAME)          \
980   __ Branch(if_return, eq, bytecode, \
981             Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
982   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
983 #undef JUMP_IF_EQUAL
984 
985   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
986   // of the loop.
987   Label end, not_jump_loop;
988   __ Branch(&not_jump_loop, ne, bytecode,
989             Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
990   // We need to restore the original bytecode_offset since we might have
991   // increased it to skip the wide / extra-wide prefix bytecode.
992   __ Move(bytecode_offset, original_bytecode_offset);
993   __ jmp(&end);
994 
995   __ bind(&not_jump_loop);
996   // Otherwise, load the size of the current bytecode and advance the offset.
997   __ Addu(scratch2, bytecode_size_table, bytecode);
998   __ lb(scratch2, MemOperand(scratch2));
999   __ Addu(bytecode_offset, bytecode_offset, scratch2);
1000 
1001   __ bind(&end);
1002 }
1003 
1004 // Read off the optimization state in the feedback vector and check if there
1005 // is optimized code or a tiering state that needs to be processed.
1006 static void LoadTieringStateAndJumpIfNeedsProcessing(
1007     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1008     Label* has_optimized_code_or_state) {
1009   ASM_CODE_COMMENT(masm);
1010   Register scratch = t6;
1011   __ Lw(optimization_state,
1012         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1013   __ And(
1014       scratch, optimization_state,
1015       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1016   __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
1017 }
1018 
1019 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1020     MacroAssembler* masm, Register optimization_state,
1021     Register feedback_vector) {
1022   ASM_CODE_COMMENT(masm);
1023   Label maybe_has_optimized_code;
1024   // Check if a tiering request (optimization marker) is pending.
1025   {
1026     UseScratchRegisterScope temps(masm);
1027     Register scratch = temps.Acquire();
1028     __ And(scratch, optimization_state,
1029            Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
1030     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
1031   }
1032 
1033   Register tiering_state = optimization_state;
1034   __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1035   MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1036 
1037   __ bind(&maybe_has_optimized_code);
1038   Register optimized_code_entry = optimization_state;
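  // Note: optimization_state, tiering_state and optimized_code_entry all alias
  // the same register, so the load below also initializes optimized_code_entry.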
1039   __ Lw(tiering_state,
1040         FieldMemOperand(feedback_vector,
1041                         FeedbackVector::kMaybeOptimizedCodeOffset));
1042 
1043   TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
1044 }
1045 
1046 namespace {
1047 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1048                                  Register bytecode_array) {
1049   // Reset code age and the OSR state (optimized to a single write).
1050   static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1051   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1052   __ sw(zero_reg,
1053         FieldMemOperand(bytecode_array,
1054                         BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1055 }
1056 
1057 }  // namespace
1058 
1059 // static
1060 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1061   UseScratchRegisterScope temps(masm);
1062   temps.Include({s1, s2});
1063   auto descriptor =
1064       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1065   Register closure = descriptor.GetRegisterParameter(
1066       BaselineOutOfLinePrologueDescriptor::kClosure);
1067   // Load the feedback vector from the closure.
1068   Register feedback_vector = temps.Acquire();
1069   __ Lw(feedback_vector,
1070         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1071   __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1072   if (FLAG_debug_code) {
1073     UseScratchRegisterScope temps(masm);
1074     Register scratch = temps.Acquire();
1075     __ GetObjectType(feedback_vector, scratch, scratch);
1076     __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
1077               Operand(FEEDBACK_VECTOR_TYPE));
1078   }
1079   // Check for a tiering state.
1080   Label has_optimized_code_or_state;
1081   Register optimization_state = no_reg;
1082   {
1083     UseScratchRegisterScope temps(masm);
1084     optimization_state = temps.Acquire();
1085     // optimization_state will be used only in |has_optimized_code_or_state|
1086     // and outside it can be reused.
1087     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1088                                              feedback_vector,
1089                                              &has_optimized_code_or_state);
1090   }
1091   // Increment invocation count for the function.
1092   {
1093     UseScratchRegisterScope temps(masm);
1094     Register invocation_count = temps.Acquire();
1095     __ Lw(invocation_count,
1096           FieldMemOperand(feedback_vector,
1097                           FeedbackVector::kInvocationCountOffset));
1098     __ Addu(invocation_count, invocation_count, Operand(1));
1099     __ Sw(invocation_count,
1100           FieldMemOperand(feedback_vector,
1101                           FeedbackVector::kInvocationCountOffset));
1102   }
1103 
1104   FrameScope frame_scope(masm, StackFrame::MANUAL);
1105   {
1106     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1107     // Normally the first thing we'd do here is Push(ra, fp), but we already
1108     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1109     // value of ra before the call to this BaselineOutOfLinePrologue builtin.
1110     Register callee_context = descriptor.GetRegisterParameter(
1111         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1112     Register callee_js_function = descriptor.GetRegisterParameter(
1113         BaselineOutOfLinePrologueDescriptor::kClosure);
1114     __ Push(callee_context, callee_js_function);
1115     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1116     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1117 
1118     Register argc = descriptor.GetRegisterParameter(
1119         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1120     // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1121     // the frame, so load it into a register.
1122     Register bytecode_array = descriptor.GetRegisterParameter(
1123         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1124     ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1125     __ Push(argc, bytecode_array);
1126 
1127     // Baseline code frames store the feedback vector where interpreter would
1128     // store the bytecode offset.
1129     if (FLAG_debug_code) {
1130       UseScratchRegisterScope temps(masm);
1131       Register invocation_count = temps.Acquire();
1132       __ GetObjectType(feedback_vector, invocation_count, invocation_count);
1133       __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
1134                 Operand(FEEDBACK_VECTOR_TYPE));
1135     }
1136     // Our stack is currently aligned. We have to push something along with
1137     // the feedback vector to keep it that way -- we may as well start
1138     // initialising the register frame.
1139     // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
1140     // `undefined` in the accumulator register, to skip the load in the baseline
1141     // code.
1142     __ Push(feedback_vector);
1143   }
1144 
1145   Label call_stack_guard;
1146   Register frame_size = descriptor.GetRegisterParameter(
1147       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1148   {
1149     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1150     // Stack check. This folds the checks for both the interrupt stack limit
1151     // check and the real stack limit into one by just checking for the
1152     // interrupt limit. The interrupt limit is either equal to the real stack
1153     // limit or tighter. By ensuring we have space until that limit after
1154     // building the frame we can quickly precheck both at once.
1155     UseScratchRegisterScope temps(masm);
1156     Register sp_minus_frame_size = temps.Acquire();
1157     __ Subu(sp_minus_frame_size, sp, frame_size);
1158     Register interrupt_limit = temps.Acquire();
1159     __ LoadStackLimit(interrupt_limit,
1160                       MacroAssembler::StackLimitKind::kInterruptStackLimit);
1161     __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1162               Operand(interrupt_limit));
1163   }
1164 
1165   // Do "fast" return to the caller pc in ra.
1166   // TODO(v8:11429): Document this frame setup better.
1167   __ Ret();
1168 
1169   __ bind(&has_optimized_code_or_state);
1170   {
1171     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1172     UseScratchRegisterScope temps(masm);
1173     temps.Exclude(optimization_state);
1174     // Ensure the optimization_state is not allocated again.
1175     // Drop the frame created by the baseline call.
1176     __ Pop(ra, fp);
1177     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1178                                                  feedback_vector);
1179     __ Trap();
1180   }
1181 
1182   __ bind(&call_stack_guard);
1183   {
1184     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1185     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1186     // Save incoming new target or generator
1187     __ Push(kJavaScriptCallNewTargetRegister);
1188     __ SmiTag(frame_size);
1189     __ Push(frame_size);
1190     __ CallRuntime(Runtime::kStackGuardWithGap);
1191     __ Pop(kJavaScriptCallNewTargetRegister);
1192   }
1193   __ Ret();
1194   temps.Exclude({kScratchReg, kScratchReg2});
1195 }
1196 
1197 // Generate code for entering a JS function with the interpreter.
1198 // On entry to the function the receiver and arguments have been pushed on the
1199 // stack left to right.
1200 //
1201 // The live registers are:
1202 //   o a0 : actual argument count
1203 //   o a1: the JS function object being called.
1204 //   o a3: the incoming new target or generator object
1205 //   o cp: our context
1206 //   o fp: the caller's frame pointer
1207 //   o sp: stack pointer
1208 //   o ra: return address
1209 //
1210 // The function builds an interpreter frame.  See InterpreterFrameConstants in
1211 // frame-constants.h for its layout.
1212 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1213   Register closure = a1;
1214   Register feedback_vector = a2;
1215 
1216   // Get the bytecode array from the function object and load it into
1217   // kInterpreterBytecodeArrayRegister.
1218   __ lw(kScratchReg,
1219         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1220   __ lw(kInterpreterBytecodeArrayRegister,
1221         FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
1222   Label is_baseline;
1223   GetSharedFunctionInfoBytecodeOrBaseline(
1224       masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
1225 
1226   // The bytecode array could have been flushed from the shared function info,
1227   // if so, call into CompileLazy.
1228   Label compile_lazy;
1229   __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1230   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1231 
1232   // Load the feedback vector from the closure.
1233   __ lw(feedback_vector,
1234         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1235   __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1236 
1237   Label push_stack_frame;
1238   // Check if feedback vector is valid. If valid, check for optimized code
1239   // and update invocation count. Otherwise, setup the stack frame.
1240   __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1241   __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1242   __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1243 
1244   // Read off the optimization state in the feedback vector, and if there
1245   // is optimized code or a tiering state, call that instead.
1246   Register optimization_state = t0;
1247   __ Lw(optimization_state,
1248         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1249 
1250   // Check if the optimized code slot is not empty or has a tiering state.
1251   Label has_optimized_code_or_state;
1252 
1253   __ andi(t1, optimization_state,
1254           FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
1255   __ Branch(&has_optimized_code_or_state, ne, t1, Operand(zero_reg));
1256 
1257   Label not_optimized;
1258   __ bind(&not_optimized);
1259 
1260   // Increment invocation count for the function.
1261   __ lw(t0, FieldMemOperand(feedback_vector,
1262                             FeedbackVector::kInvocationCountOffset));
1263   __ Addu(t0, t0, Operand(1));
1264   __ sw(t0, FieldMemOperand(feedback_vector,
1265                             FeedbackVector::kInvocationCountOffset));
1266 
1267   // Open a frame scope to indicate that there is a frame on the stack.  The
1268   // MANUAL indicates that the scope shouldn't actually generate code to set up
1269   // the frame (that is done below).
1270   __ bind(&push_stack_frame);
1271   FrameScope frame_scope(masm, StackFrame::MANUAL);
1272   __ PushStandardFrame(closure);
1273 
1274   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1275 
1276   // Load initial bytecode offset.
1277   __ li(kInterpreterBytecodeOffsetRegister,
1278         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1279 
1280   // Push bytecode array and Smi tagged bytecode array offset.
1281   __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
1282   __ Push(kInterpreterBytecodeArrayRegister, t0);
1283 
1284   // Allocate the local and temporary register file on the stack.
1285   Label stack_overflow;
1286   {
1287     // Load frame size from the BytecodeArray object.
1288     __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1289                               BytecodeArray::kFrameSizeOffset));
1290 
1291     // Do a stack check to ensure we don't go over the limit.
1292     __ Subu(t1, sp, Operand(t0));
1293     __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1294     __ Branch(&stack_overflow, lo, t1, Operand(a2));
1295 
1296     // If ok, push undefined as the initial value for all register file entries.
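    // Note that t0 holds the frame size in bytes, so the loop below pushes one
    // word of undefined per iteration until the whole register file is
    // initialized.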
1297     Label loop_header;
1298     Label loop_check;
1299     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1300     __ Branch(&loop_check);
1301     __ bind(&loop_header);
1302     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1303     __ push(kInterpreterAccumulatorRegister);
1304     // Continue loop if not done.
1305     __ bind(&loop_check);
1306     __ Subu(t0, t0, Operand(kPointerSize));
1307     __ Branch(&loop_header, ge, t0, Operand(zero_reg));
1308   }
1309 
1310   // If the bytecode array has a valid incoming new target or generator object
1311   // register, initialize it with the incoming value, which was passed in a3.
1312   Label no_incoming_new_target_or_generator_register;
1313   __ lw(t1, FieldMemOperand(
1314                 kInterpreterBytecodeArrayRegister,
1315                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1316   __ Branch(&no_incoming_new_target_or_generator_register, eq, t1,
1317             Operand(zero_reg));
1318   __ Lsa(t1, fp, t1, kPointerSizeLog2);
1319   __ sw(a3, MemOperand(t1));
1320   __ bind(&no_incoming_new_target_or_generator_register);
1321 
1322   // Perform interrupt stack check.
1323   // TODO(solanes): Merge with the real stack limit check above.
1324   Label stack_check_interrupt, after_stack_check_interrupt;
1325   __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1326   __ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
1327   __ bind(&after_stack_check_interrupt);
1328 
1329   // The accumulator is already loaded with undefined.
1330 
1331   // Load the dispatch table into a register and dispatch to the bytecode
1332   // handler at the current bytecode offset.
1333   Label do_dispatch;
1334   __ bind(&do_dispatch);
1335   __ li(kInterpreterDispatchTableRegister,
1336         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
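  // The handler is dispatch_table[bytecode]: read the bytecode at
  // bytecode_array + offset, scale it by the pointer size, and load the
  // handler's entry address from the dispatch table.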
1337   __ Addu(a0, kInterpreterBytecodeArrayRegister,
1338           kInterpreterBytecodeOffsetRegister);
1339   __ lbu(t3, MemOperand(a0));
1340   __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
1341   __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
1342   __ Call(kJavaScriptCallCodeStartRegister);
1343   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
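  // The pc offset recorded here is used by Generate_InterpreterEnterBytecode
  // below to reconstruct the return address into this dispatch loop.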
1344 
1345   // Any returns to the entry trampoline are either due to the return bytecode
1346   // or the interpreter tail calling a builtin and then a dispatch.
1347 
1348   // Get bytecode array and bytecode offset from the stack frame.
1349   __ lw(kInterpreterBytecodeArrayRegister,
1350         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1351   __ lw(kInterpreterBytecodeOffsetRegister,
1352         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1353   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1354   // Either return, or advance to the next bytecode and dispatch.
1355   Label do_return;
1356   __ Addu(a1, kInterpreterBytecodeArrayRegister,
1357           kInterpreterBytecodeOffsetRegister);
1358   __ lbu(a1, MemOperand(a1));
1359   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1360                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1361                                 t0, &do_return);
1362   __ jmp(&do_dispatch);
1363 
1364   __ bind(&do_return);
1365   // The return value is in v0.
1366   LeaveInterpreterFrame(masm, t0, t1);
1367   __ Jump(ra);
1368 
1369   __ bind(&stack_check_interrupt);
1370   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1371   // for the call to the StackGuard.
1372   __ li(kInterpreterBytecodeOffsetRegister,
1373         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1374                              kFunctionEntryBytecodeOffset)));
1375   __ Sw(kInterpreterBytecodeOffsetRegister,
1376         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1377   __ CallRuntime(Runtime::kStackGuard);
1378 
1379   // After the call, restore the bytecode array, bytecode offset and accumulator
1380   // registers again. Also, restore the bytecode offset in the stack to its
1381   // previous value.
1382   __ Lw(kInterpreterBytecodeArrayRegister,
1383         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1384   __ li(kInterpreterBytecodeOffsetRegister,
1385         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1386   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1387 
1388   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1389   __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1390 
1391   __ jmp(&after_stack_check_interrupt);
1392 
1393   __ bind(&has_optimized_code_or_state);
1394   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1395                                                feedback_vector);
1396   __ bind(&is_baseline);
1397   {
1398     // Load the feedback vector from the closure.
1399     __ Lw(feedback_vector,
1400           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1401     __ Lw(feedback_vector,
1402           FieldMemOperand(feedback_vector, Cell::kValueOffset));
1403 
1404     Label install_baseline_code;
1405     // Check if feedback vector is valid. If not, call prepare for baseline to
1406     // allocate it.
1407     __ Lw(t4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1408     __ lhu(t4, FieldMemOperand(t4, Map::kInstanceTypeOffset));
1409     __ Branch(&install_baseline_code, ne, t4, Operand(FEEDBACK_VECTOR_TYPE));
1410 
1411     // Check for a tiering state.
1412     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1413                                              feedback_vector,
1414                                              &has_optimized_code_or_state);
1415 
1416     // Load the baseline code into the closure.
1417     __ Move(a2, kInterpreterBytecodeArrayRegister);
1418     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1419     ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
1420     __ JumpCodeObject(a2);
1421 
1422     __ bind(&install_baseline_code);
1423     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1424   }
1425 
1426   __ bind(&compile_lazy);
1427   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1428   // Unreachable code.
1429   __ break_(0xCC);
1430 
1431   __ bind(&stack_overflow);
1432   __ CallRuntime(Runtime::kThrowStackOverflow);
1433   // Unreachable code.
1434   __ break_(0xCC);
1435 }
1436 
1437 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1438                                         Register start_address,
1439                                         Register scratch, Register scratch2) {
1440   ASM_CODE_COMMENT(masm);
1441   // Find the address of the last argument.
1442   __ Subu(scratch, num_args, Operand(1));
1443   __ sll(scratch, scratch, kPointerSizeLog2);
1444   __ Subu(start_address, start_address, scratch);
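  // start_address now points at the last argument, so PushArray below can copy
  // all num_args slots onto the stack.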
1445 
1446   // Push the arguments.
1447   __ PushArray(start_address, num_args, scratch, scratch2,
1448                TurboAssembler::PushArrayOrder::kReverse);
1449 }
1450 
1451 // static
1452 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1453     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1454     InterpreterPushArgsMode mode) {
1455   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1456   // ----------- S t a t e -------------
1457   //  -- a0 : the number of arguments
1458   //  -- a2 : the address of the first argument to be pushed. Subsequent
1459   //          arguments should be consecutive above this, in the same order as
1460   //          they are to be pushed onto the stack.
1461   //  -- a1 : the target to call (can be any Object).
1462   // -----------------------------------
1463   Label stack_overflow;
1464   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1465     // The spread argument should not be pushed.
1466     __ Subu(a0, a0, Operand(1));
1467   }
1468 
1469   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1470     __ Subu(t0, a0, Operand(kJSArgcReceiverSlots));
1471   } else {
1472     __ mov(t0, a0);
1473   }
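  // t0 now holds the number of argument slots to copy from the interpreter's
  // register file; the receiver slot is excluded when undefined is pushed for
  // it below.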
1474 
1475   __ StackOverflowCheck(t0, t4, t1, &stack_overflow);
1476 
1477   // This function modifies a2, t4 and t1.
1478   GenerateInterpreterPushArgs(masm, t0, a2, t4, t1);
1479 
1480   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1481     __ PushRoot(RootIndex::kUndefinedValue);
1482   }
1483 
1484   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1485     // Pass the spread in the register a2.
1486     // a2 already points to the penultimate argument; the spread
1487     // is below that.
1488     __ Lw(a2, MemOperand(a2, -kSystemPointerSize));
1489   }
1490 
1491   // Call the target.
1492   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1493     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1494             RelocInfo::CODE_TARGET);
1495   } else {
1496     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1497             RelocInfo::CODE_TARGET);
1498   }
1499 
1500   __ bind(&stack_overflow);
1501   {
1502     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1503     // Unreachable code.
1504     __ break_(0xCC);
1505   }
1506 }
1507 
1508 // static
1509 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1510     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1511   // ----------- S t a t e -------------
1512   // -- a0 : argument count
1513   // -- a3 : new target
1514   // -- a1 : constructor to call
1515   // -- a2 : allocation site feedback if available, undefined otherwise.
1516   // -- t4 : address of the first argument
1517   // -----------------------------------
1518   Label stack_overflow;
1519   __ StackOverflowCheck(a0, t1, t0, &stack_overflow);
1520 
1521   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1522     // The spread argument should not be pushed.
1523     __ Subu(a0, a0, Operand(1));
1524   }
1525 
1526   Register argc_without_receiver = t2;
1527   __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1528 
1529   GenerateInterpreterPushArgs(masm, argc_without_receiver, t4, t1, t0);
1530 
1531   // Push a slot for the receiver.
1532   __ push(zero_reg);
1533 
1534   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1535     // Pass the spread in the register a2.
1536     // t4 already points to the penultimate argument; the spread
1537     // lies in the next interpreter register.
1538     // __ Subu(t4, t4, Operand(kSystemPointerSize));
1539     __ Lw(a2, MemOperand(t4, -kSystemPointerSize));
1540   } else {
1541     __ AssertUndefinedOrAllocationSite(a2, t0);
1542   }
1543 
1544   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1545     __ AssertFunction(a1);
1546 
1547     // Tail call to the array construct stub (still in the caller
1548     // context at this point).
1549     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1550             RelocInfo::CODE_TARGET);
1551   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1552     // Call the constructor with a0, a1, and a3 unmodified.
1553     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1554             RelocInfo::CODE_TARGET);
1555   } else {
1556     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1557     // Call the constructor with a0, a1, and a3 unmodified.
1558     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1559   }
1560 
1561   __ bind(&stack_overflow);
1562   {
1563     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1564     // Unreachable code.
1565     __ break_(0xCC);
1566   }
1567 }
1568 
1569 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1570   // Set the return address to the correct point in the interpreter entry
1571   // trampoline.
1572   Label builtin_trampoline, trampoline_loaded;
1573   Smi interpreter_entry_return_pc_offset(
1574       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1575   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1576 
1577   // If the SFI function_data is an InterpreterData, the function will have a
1578   // custom copy of the interpreter entry trampoline for profiling. If so,
1579   // get the custom trampoline, otherwise grab the entry address of the global
1580   // trampoline.
1581   __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1582   __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1583   __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1584   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1585                    kInterpreterDispatchTableRegister);
1586   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1587             Operand(INTERPRETER_DATA_TYPE));
1588 
1589   __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1590   __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1591   __ Branch(&trampoline_loaded);
1592 
1593   __ bind(&builtin_trampoline);
1594   __ li(t0, ExternalReference::
1595                 address_of_interpreter_entry_trampoline_instruction_start(
1596                     masm->isolate()));
1597   __ lw(t0, MemOperand(t0));
1598 
1599   __ bind(&trampoline_loaded);
1600   __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
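  // ra now points back into the interpreter entry trampoline, just after the
  // Call of the bytecode handler, so returning from the handler resumes the
  // dispatch loop.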
1601 
1602   // Initialize the dispatch table register.
1603   __ li(kInterpreterDispatchTableRegister,
1604         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1605 
1606   // Get the bytecode array pointer from the frame.
1607   __ lw(kInterpreterBytecodeArrayRegister,
1608         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1609 
1610   if (FLAG_debug_code) {
1611     // Check that the function data field is actually a BytecodeArray object.
1612     __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1613     __ Assert(ne,
1614               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1615               kScratchReg, Operand(zero_reg));
1616     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1617     __ Assert(eq,
1618               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1619               a1, Operand(BYTECODE_ARRAY_TYPE));
1620   }
1621 
1622   // Get the target bytecode offset from the frame.
1623   __ lw(kInterpreterBytecodeOffsetRegister,
1624         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1625   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1626 
1627   if (FLAG_debug_code) {
1628     Label okay;
1629     __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1630               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1631     // Unreachable code.
1632     __ break_(0xCC);
1633     __ bind(&okay);
1634   }
1635 
1636   // Dispatch to the target bytecode.
1637   __ Addu(a1, kInterpreterBytecodeArrayRegister,
1638           kInterpreterBytecodeOffsetRegister);
1639   __ lbu(t3, MemOperand(a1));
1640   __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
1641   __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1642   __ Jump(kJavaScriptCallCodeStartRegister);
1643 }
1644 
1645 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1646   // Advance the current bytecode offset stored within the given interpreter
1647   // stack frame. This simulates what all bytecode handlers do upon completion
1648   // of the underlying operation.
1649   __ lw(kInterpreterBytecodeArrayRegister,
1650         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1651   __ lw(kInterpreterBytecodeOffsetRegister,
1652         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1653   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1654 
1655   Label enter_bytecode, function_entry_bytecode;
1656   __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1657             Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1658                     kFunctionEntryBytecodeOffset));
1659 
1660   // Load the current bytecode.
1661   __ Addu(a1, kInterpreterBytecodeArrayRegister,
1662           kInterpreterBytecodeOffsetRegister);
1663   __ lbu(a1, MemOperand(a1));
1664 
1665   // Advance to the next bytecode.
1666   Label if_return;
1667   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1668                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1669                                 t0, &if_return);
1670 
1671   __ bind(&enter_bytecode);
1672   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1673   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1674   __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1675 
1676   Generate_InterpreterEnterBytecode(masm);
1677 
1678   __ bind(&function_entry_bytecode);
1679   // If the code deoptimizes during the implicit function entry stack interrupt
1680   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1681   // not a valid bytecode offset. Detect this case and advance to the first
1682   // actual bytecode.
1683   __ li(kInterpreterBytecodeOffsetRegister,
1684         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1685   __ Branch(&enter_bytecode);
1686 
1687   // We should never take the if_return path.
1688   __ bind(&if_return);
1689   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1690 }
1691 
1692 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1693   Generate_InterpreterEnterBytecode(masm);
1694 }
1695 
1696 namespace {
1697 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1698                                       bool java_script_builtin,
1699                                       bool with_result) {
1700   const RegisterConfiguration* config(RegisterConfiguration::Default());
1701   int allocatable_register_count = config->num_allocatable_general_registers();
1702   UseScratchRegisterScope temps(masm);
1703   Register scratch = temps.Acquire();  // Temp register is not allocatable.
1704   // Register scratch = t3;
1705   if (with_result) {
1706     if (java_script_builtin) {
1707       __ mov(scratch, v0);
1708     } else {
1709       // Overwrite the hole inserted by the deoptimizer with the return value
1710       // from the LAZY deopt point.
1711       __ sw(v0,
1712             MemOperand(
1713                 sp, config->num_allocatable_general_registers() * kPointerSize +
1714                         BuiltinContinuationFrameConstants::kFixedFrameSize));
1715     }
1716   }
1717   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1718     int code = config->GetAllocatableGeneralCode(i);
1719     __ Pop(Register::from_code(code));
1720     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1721       __ SmiUntag(Register::from_code(code));
1722     }
1723   }
1724 
1725   if (with_result && java_script_builtin) {
1726     // Overwrite the hole inserted by the deoptimizer with the return value from
1727     // the LAZY deopt point. a0 contains the arguments count; the return value
1728     // from LAZY is always the last argument.
1729     constexpr int return_value_offset =
1730         BuiltinContinuationFrameConstants::kFixedSlotCount -
1731         kJSArgcReceiverSlots;
1732     __ Addu(a0, a0, Operand(return_value_offset));
1733     __ Lsa(t0, sp, a0, kSystemPointerSizeLog2);
1734     __ Sw(scratch, MemOperand(t0));
1735     // Recover arguments count.
1736     __ Subu(a0, a0, Operand(return_value_offset));
1737   }
1738 
1739   __ lw(fp, MemOperand(
1740                 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1741   // Load builtin index (stored as a Smi) and use it to get the builtin start
1742   // address from the builtins table.
1743   __ Pop(t0);
1744   __ Addu(sp, sp,
1745           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1746   __ Pop(ra);
1747   __ LoadEntryFromBuiltinIndex(t0);
1748   __ Jump(t0);
1749 }
1750 }  // namespace
1751 
1752 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1753   Generate_ContinueToBuiltinHelper(masm, false, false);
1754 }
1755 
1756 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1757     MacroAssembler* masm) {
1758   Generate_ContinueToBuiltinHelper(masm, false, true);
1759 }
1760 
1761 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1762   Generate_ContinueToBuiltinHelper(masm, true, false);
1763 }
1764 
1765 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1766     MacroAssembler* masm) {
1767   Generate_ContinueToBuiltinHelper(masm, true, true);
1768 }
1769 
1770 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1771   {
1772     FrameScope scope(masm, StackFrame::INTERNAL);
1773     __ CallRuntime(Runtime::kNotifyDeoptimized);
1774   }
1775 
1776   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
1777   __ lw(v0, MemOperand(sp, 0 * kPointerSize));
1778   __ Ret(USE_DELAY_SLOT);
1779   // Safe to fill the delay slot: Addu will emit one instruction.
1780   __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove accumulator.
1781 }
1782 
1783 namespace {
1784 
1785 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1786                        Operand offset = Operand(zero_reg)) {
1787   __ Addu(ra, entry_address, offset);
1788   // And "return" to the OSR entry point of the function.
1789   __ Ret();
1790 }
1791 
1792 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1793   {
1794     FrameScope scope(masm, StackFrame::INTERNAL);
1795     __ CallRuntime(Runtime::kCompileOptimizedOSR);
1796   }
1797 
1798   // If the code object is null, just return to the caller.
1799   __ Ret(eq, v0, Operand(Smi::zero()));
1800 
1801   if (is_interpreter) {
1802     // Drop the handler frame that is sitting on top of the actual
1803     // JavaScript frame. This is the case when OSR is triggered from bytecode.
1804     __ LeaveFrame(StackFrame::STUB);
1805   }
1806   // Load deoptimization data from the code object.
1807   // <deopt_data> = <code>[#deoptimization_data_offset]
1808   __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
1809                                kHeapObjectTag));
1810 
1811   // Load the OSR entrypoint offset from the deoptimization data.
1812   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1813   __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1814                                DeoptimizationData::kOsrPcOffsetIndex) -
1815                                kHeapObjectTag));
1816   __ SmiUntag(a1);
1817 
1818   // Compute the target address = code_obj + header_size + osr_offset
1819   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1820   __ Addu(v0, v0, a1);
1821   Generate_OSREntry(masm, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
1822 }
1823 }  // namespace
1824 
1825 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1826   return OnStackReplacement(masm, true);
1827 }
1828 
1829 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1830   __ Lw(kContextRegister,
1831         MemOperand(fp, StandardFrameConstants::kContextOffset));
1832   return OnStackReplacement(masm, false);
1833 }
1834 
1835 // static
1836 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1837   // ----------- S t a t e -------------
1838   //  -- a0    : argc
1839   //  -- sp[0] : receiver
1840   //  -- sp[4] : thisArg
1841   //  -- sp[8] : argArray
1842   // -----------------------------------
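  // For example, for a call like f.apply(thisArg, [x, y]) the stack holds the
  // receiver f at sp[0], thisArg at sp[4] and the array at sp[8].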
1843 
1844   // 1. Load receiver into a1, argArray into a2 (if present), remove all
1845   // arguments from the stack (including the receiver), and push thisArg (if
1846   // present) instead.
1847   {
1848     Label no_arg;
1849     __ LoadRoot(a2, RootIndex::kUndefinedValue);
1850     __ mov(a3, a2);
1851     // Lsa() cannot be used here as the scratch value is used later.
1852     __ lw(a1, MemOperand(sp));  // receiver
1853     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
1854     __ lw(a3, MemOperand(sp, kSystemPointerSize));  // thisArg
1855     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
1856     __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
1857     __ bind(&no_arg);
1858     __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
1859                                        TurboAssembler::kCountIncludesReceiver);
1860   }
1861 
1862   // ----------- S t a t e -------------
1863   //  -- a2    : argArray
1864   //  -- a1    : receiver
1865   //  -- sp[0] : thisArg
1866   // -----------------------------------
1867 
1868   // 2. We don't need to check explicitly for callable receiver here,
1869   // since that's the first thing the Call/CallWithArrayLike builtins
1870   // will do.
1871 
1872   // 3. Tail call with no arguments if argArray is null or undefined.
1873   Label no_arguments;
1874   __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
1875   __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
1876 
1877   // 4a. Apply the receiver to the given argArray.
1878   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1879           RelocInfo::CODE_TARGET);
1880 
1881   // 4b. The argArray is either null or undefined, so we tail call without any
1882   // arguments to the receiver.
1883   __ bind(&no_arguments);
1884   {
1885     __ li(a0, JSParameterCount(0));
1886     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1887   }
1888 }
1889 
1890 // static
1891 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1892   // 1. Get the callable to call (passed as receiver) from the stack.
1893   __ Pop(a1);
1894 
1895   // 2. Make sure we have at least one argument.
1896   // a0: actual number of arguments
1897   {
1898     Label done;
1899     __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
1900     __ PushRoot(RootIndex::kUndefinedValue);
1901     __ Addu(a0, a0, Operand(1));
1902     __ bind(&done);
1903   }
1904 
1905   // 3. Adjust the actual number of arguments.
1906   __ addiu(a0, a0, -1);
1907 
1908   // 4. Call the callable.
1909   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1910 }
1911 
1912 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1913   // ----------- S t a t e -------------
1914   //  -- a0     : argc
1915   //  -- sp[0]  : receiver
1916   //  -- sp[4]  : target         (if argc >= 1)
1917   //  -- sp[8]  : thisArgument   (if argc >= 2)
1918   //  -- sp[12] : argumentsList  (if argc == 3)
1919   // -----------------------------------
1920 
1921   // 1. Load target into a1 (if present), argumentsList into a0 (if present),
1922   // remove all arguments from the stack (including the receiver), and push
1923   // thisArgument (if present) instead.
1924   {
1925     Label no_arg;
1926     __ LoadRoot(a1, RootIndex::kUndefinedValue);
1927     __ mov(a2, a1);
1928     __ mov(a3, a1);
1929     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
1930     __ lw(a1, MemOperand(sp, kSystemPointerSize));  // target
1931     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
1932     __ lw(a3, MemOperand(sp, 2 * kSystemPointerSize));  // thisArgument
1933     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
1934     __ lw(a2, MemOperand(sp, 3 * kSystemPointerSize));  // argumentsList
1935     __ bind(&no_arg);
1936     __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
1937                                        TurboAssembler::kCountIncludesReceiver);
1938   }
1939 
1940   // ----------- S t a t e -------------
1941   //  -- a2    : argumentsList
1942   //  -- a1    : target
1943   //  -- sp[0] : thisArgument
1944   // -----------------------------------
1945 
1946   // 2. We don't need to check explicitly for callable target here,
1947   // since that's the first thing the Call/CallWithArrayLike builtins
1948   // will do.
1949 
1950   // 3. Apply the target to the given argumentsList.
1951   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1952           RelocInfo::CODE_TARGET);
1953 }
1954 
1955 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1956   // ----------- S t a t e -------------
1957   //  -- a0     : argc
1958   //  -- sp[0]  : receiver
1959   //  -- sp[4]  : target
1960   //  -- sp[8]  : argumentsList
1961   //  -- sp[12] : new.target (optional)
1962   // -----------------------------------
1963 
1964   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1965   // new.target into a3 (if present, otherwise use target), remove all
1966   // arguments from the stack (including the receiver), and push thisArgument
1967   // (if present) instead.
1968   {
1969     Label no_arg;
1970     __ LoadRoot(a1, RootIndex::kUndefinedValue);
1971     __ mov(a2, a1);
1972     __ mov(t0, a1);
1973     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
1974     __ lw(a1, MemOperand(sp, kSystemPointerSize));  // target
1975     __ mov(a3, a1);  // new.target defaults to target
1976     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
1977     __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize));  // argumentsList
1978     __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
1979     __ lw(a3, MemOperand(sp, 3 * kSystemPointerSize));  // new.target
1980     __ bind(&no_arg);
1981     __ DropArgumentsAndPushNewReceiver(a0, t0, TurboAssembler::kCountIsInteger,
1982                                        TurboAssembler::kCountIncludesReceiver);
1983   }
1984 
1985   // ----------- S t a t e -------------
1986   //  -- a2    : argumentsList
1987   //  -- a3    : new.target
1988   //  -- a1    : target
1989   //  -- sp[0] : receiver (undefined)
1990   // -----------------------------------
1991 
1992   // 2. We don't need to check explicitly for constructor target here,
1993   // since that's the first thing the Construct/ConstructWithArrayLike
1994   // builtins will do.
1995 
1996   // 3. We don't need to check explicitly for constructor new.target here,
1997   // since that's the second thing the Construct/ConstructWithArrayLike
1998   // builtins will do.
1999 
2000   // 4. Construct the target with the given new.target and argumentsList.
2001   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2002           RelocInfo::CODE_TARGET);
2003 }
2004 
2005 namespace {
2006 
2007 // Allocate new stack space for |count| arguments and shift all existing
2008 // arguments already on the stack. |pointer_to_new_space_out| points to the
2009 // first free slot on the stack to copy additional arguments to and
2010 // |argc_in_out| is updated to include |count|.
2011 void Generate_AllocateSpaceAndShiftExistingArguments(
2012     MacroAssembler* masm, Register count, Register argc_in_out,
2013     Register pointer_to_new_space_out, Register scratch1, Register scratch2,
2014     Register scratch3) {
2015   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2016                      scratch2));
2017   Register old_sp = scratch1;
2018   Register new_space = scratch2;
2019   __ mov(old_sp, sp);
2020   __ sll(new_space, count, kPointerSizeLog2);
2021   __ Subu(sp, sp, Operand(new_space));
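  // The stack has been grown by count slots; the loop below copies the
  // existing argc_in_out arguments (receiver included) down to the new stack
  // top, leaving count free slots just above them. pointer_to_new_space_out
  // ends up pointing at the first of these free slots.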
2022 
2023   Register end = scratch2;
2024   Register value = scratch3;
2025   Register dest = pointer_to_new_space_out;
2026   __ mov(dest, sp);
2027   __ Lsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
2028   Label loop, done;
2029   __ Branch(&done, ge, old_sp, Operand(end));
2030   __ bind(&loop);
2031   __ lw(value, MemOperand(old_sp, 0));
2032   __ sw(value, MemOperand(dest, 0));
2033   __ Addu(old_sp, old_sp, Operand(kSystemPointerSize));
2034   __ Addu(dest, dest, Operand(kSystemPointerSize));
2035   __ Branch(&loop, lt, old_sp, Operand(end));
2036   __ bind(&done);
2037 
2038   // Update total number of arguments.
2039   __ Addu(argc_in_out, argc_in_out, count);
2040 }
2041 
2042 }  // namespace
2043 
2044 // static
2045 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2046                                                Handle<Code> code) {
2047   // ----------- S t a t e -------------
2048   //  -- a1 : target
2049   //  -- a0 : number of parameters on the stack
2050   //  -- a2 : arguments list (a FixedArray)
2051   //  -- t0 : len (number of elements to push from args)
2052   //  -- a3 : new.target (for [[Construct]])
2053   // -----------------------------------
2054   if (FLAG_debug_code) {
2055     // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
2056     Label ok, fail;
2057     __ AssertNotSmi(a2);
2058     __ GetObjectType(a2, t8, t8);
2059     __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
2060     __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2061     __ Branch(&ok, eq, t0, Operand(0));
2062     // Fall through.
2063     __ bind(&fail);
2064     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2065 
2066     __ bind(&ok);
2067   }
2068 
2069   // Check for stack overflow.
2070   Label stack_overflow;
2071   __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
2072 
2073   // Move the arguments already in the stack,
2074   // including the receiver and the return address.
2075   // t0: Number of arguments to make room for.
2076   // a0: Number of arguments already on the stack.
2077   // t4: Points to first free slot on the stack after arguments were shifted.
2078   Generate_AllocateSpaceAndShiftExistingArguments(masm, t0, a0, t4, t3, t1, t2);
2079 
2080   // Push arguments onto the stack (thisArgument is already on the stack).
2081   {
2082     __ mov(t2, zero_reg);
2083     Label done, push, loop;
2084     __ LoadRoot(t1, RootIndex::kTheHoleValue);
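    // Holes in the arguments FixedArray are replaced with undefined before
    // being pushed.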
2085     __ bind(&loop);
2086     __ Branch(&done, eq, t2, Operand(t0));
2087     __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
2088     __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
2089     __ Addu(t2, t2, Operand(1));
2090     __ Branch(&push, ne, t1, Operand(kScratchReg));
2091     __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
2092     __ bind(&push);
2093     __ Sw(kScratchReg, MemOperand(t4, 0));
2094     __ Addu(t4, t4, Operand(kSystemPointerSize));
2095     __ Branch(&loop);
2096     __ bind(&done);
2097   }
2098 
2099   // Tail-call to the actual Call or Construct builtin.
2100   __ Jump(code, RelocInfo::CODE_TARGET);
2101 
2102   __ bind(&stack_overflow);
2103   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2104 }
2105 
2106 // static
2107 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2108                                                       CallOrConstructMode mode,
2109                                                       Handle<Code> code) {
2110   // ----------- S t a t e -------------
2111   //  -- a0 : the number of arguments
2112   //  -- a3 : the new.target (for [[Construct]] calls)
2113   //  -- a1 : the target to call (can be any Object)
2114   //  -- a2 : start index (to support rest parameters)
2115   // -----------------------------------
2116 
2117   // Check if new.target has a [[Construct]] internal method.
2118   if (mode == CallOrConstructMode::kConstruct) {
2119     Label new_target_constructor, new_target_not_constructor;
2120     __ JumpIfSmi(a3, &new_target_not_constructor);
2121     __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
2122     __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2123     __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2124     __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2125     __ bind(&new_target_not_constructor);
2126     {
2127       FrameScope scope(masm, StackFrame::MANUAL);
2128       __ EnterFrame(StackFrame::INTERNAL);
2129       __ Push(a3);
2130       __ CallRuntime(Runtime::kThrowNotConstructor);
2131     }
2132     __ bind(&new_target_constructor);
2133   }
2134 
2135   Label stack_done, stack_overflow;
2136   __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2137   __ Subu(t2, t2, Operand(kJSArgcReceiverSlots));
2138   __ Subu(t2, t2, a2);
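  // t2 = number of caller arguments to forward
  //    = caller's argument count - receiver slot - start index (a2).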
2139   __ Branch(&stack_done, le, t2, Operand(zero_reg));
2140   {
2141     // Check for stack overflow.
2142     __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
2143 
2144     // Forward the arguments from the caller frame.
2145     // Point to the first argument to copy (skipping the receiver).
2146     __ Addu(t3, fp,
2147             Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2148                     kSystemPointerSize));
2149     __ Lsa(t3, t3, a2, kSystemPointerSizeLog2);
2150 
2151     // Move the arguments already in the stack,
2152     // including the receiver and the return address.
2153     // t2: Number of arguments to make room for.
2154     // a0: Number of arguments already on the stack.
2155     // a2: Points to first free slot on the stack after arguments were shifted.
2156     Generate_AllocateSpaceAndShiftExistingArguments(masm, t2, a0, a2, t5, t6,
2157                                                     t7);
2158 
2159     // Copy arguments from the caller frame.
2160     // TODO(victorgomes): Consider using forward order as potentially more cache
2161     // friendly.
2162     {
2163       Label loop;
2164       __ bind(&loop);
2165       {
2166         __ Subu(t2, t2, Operand(1));
2167         __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
2168         __ lw(kScratchReg, MemOperand(kScratchReg));
2169         __ Lsa(t0, a2, t2, kPointerSizeLog2);
2170         __ Sw(kScratchReg, MemOperand(t0));
2171         __ Branch(&loop, ne, t2, Operand(zero_reg));
2172       }
2173     }
2174   }
2175   __ Branch(&stack_done);
2176   __ bind(&stack_overflow);
2177   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2178   __ bind(&stack_done);
2179 
2180   // Tail-call to the {code} handler.
2181   __ Jump(code, RelocInfo::CODE_TARGET);
2182 }
2183 
2184 // static
2185 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2186                                      ConvertReceiverMode mode) {
2187   // ----------- S t a t e -------------
2188   //  -- a0 : the number of arguments
2189   //  -- a1 : the function to call (checked to be a JSFunction)
2190   // -----------------------------------
2191   __ AssertCallableFunction(a1);
2192 
2193   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2194 
2195   // Enter the context of the function; ToObject has to run in the function
2196   // context, and we also need to take the global proxy from the function
2197   // context in case of conversion.
2198   __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2199   // We need to convert the receiver for non-native sloppy mode functions.
2200   Label done_convert;
2201   __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2202   __ And(kScratchReg, a3,
2203          Operand(SharedFunctionInfo::IsNativeBit::kMask |
2204                  SharedFunctionInfo::IsStrictBit::kMask));
2205   __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2206   {
2207     // ----------- S t a t e -------------
2208     //  -- a0 : the number of arguments
2209     //  -- a1 : the function to call (checked to be a JSFunction)
2210     //  -- a2 : the shared function info.
2211     //  -- cp : the function context.
2212     // -----------------------------------
2213 
2214     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2215       // Patch receiver to global proxy.
2216       __ LoadGlobalProxy(a3);
2217     } else {
2218       Label convert_to_object, convert_receiver;
2219       __ LoadReceiver(a3, a0);
2220       __ JumpIfSmi(a3, &convert_to_object);
2221       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2222       __ GetObjectType(a3, t0, t0);
2223       __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
2224       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2225         Label convert_global_proxy;
2226         __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2227         __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2228         __ bind(&convert_global_proxy);
2229         {
2230           // Patch receiver to global proxy.
2231           __ LoadGlobalProxy(a3);
2232         }
2233         __ Branch(&convert_receiver);
2234       }
2235       __ bind(&convert_to_object);
2236       {
2237         // Convert receiver using ToObject.
2238         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2239         // in the fast case? (fall back to AllocateInNewSpace?)
2240         FrameScope scope(masm, StackFrame::INTERNAL);
2241         __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
2242         __ Push(a0, a1);
2243         __ mov(a0, a3);
2244         __ Push(cp);
2245         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2246                 RelocInfo::CODE_TARGET);
2247         __ Pop(cp);
2248         __ mov(a3, v0);
2249         __ Pop(a0, a1);
2250         __ sra(a0, a0, kSmiTagSize);  // Un-tag.
2251       }
2252       __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2253       __ bind(&convert_receiver);
2254     }
2255     __ StoreReceiver(a3, a0, kScratchReg);
2256   }
2257   __ bind(&done_convert);
2258 
2259   // ----------- S t a t e -------------
2260   //  -- a0 : the number of arguments
2261   //  -- a1 : the function to call (checked to be a JSFunction)
2262   //  -- a2 : the shared function info.
2263   //  -- cp : the function context.
2264   // -----------------------------------
2265 
2266   __ lhu(a2,
2267          FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2268   __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2269 }
2270 
2271 // static
2272 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2273   // ----------- S t a t e -------------
2274   //  -- a0 : the number of arguments
2275   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2276   // -----------------------------------
2277   __ AssertBoundFunction(a1);
2278 
2279   // Patch the receiver to [[BoundThis]].
2280   {
2281     __ lw(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2282     __ StoreReceiver(t0, a0, kScratchReg);
2283   }
2284 
2285   // Load [[BoundArguments]] into a2 and length of that into t0.
2286   __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2287   __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
2288   __ SmiUntag(t0);
2289 
2290   // ----------- S t a t e -------------
2291   //  -- a0 : the number of arguments
2292   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2293   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2294   //  -- t0 : the number of [[BoundArguments]]
2295   // -----------------------------------
2296 
2297   // Reserve stack space for the [[BoundArguments]].
2298   {
2299     Label done;
2300     __ sll(t1, t0, kPointerSizeLog2);
2301     __ Subu(t1, sp, Operand(t1));
2302     // Check the stack for overflow. We are not trying to catch interruptions
2303     // (i.e. debug break and preemption) here, so check the "real stack limit".
2304     __ LoadStackLimit(kScratchReg,
2305                       MacroAssembler::StackLimitKind::kRealStackLimit);
2306     __ Branch(&done, hs, t1, Operand(kScratchReg));
2307     {
2308       FrameScope scope(masm, StackFrame::MANUAL);
2309       __ EnterFrame(StackFrame::INTERNAL);
2310       __ CallRuntime(Runtime::kThrowStackOverflow);
2311     }
2312     __ bind(&done);
2313   }
2314 
2315   // Pop receiver.
2316   __ Pop(t1);
2317 
2318   // Push [[BoundArguments]].
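  // The bound arguments are pushed from last to first so that the first bound
  // argument ends up closest to the receiver, which is re-pushed on top below.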
2319   {
2320     Label loop, done_loop;
2321     __ Addu(a0, a0, Operand(t0));
2322     __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2323     __ bind(&loop);
2324     __ Subu(t0, t0, Operand(1));
2325     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
2326     __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2);
2327     __ Lw(kScratchReg, MemOperand(kScratchReg));
2328     __ Push(kScratchReg);
2329     __ Branch(&loop);
2330     __ bind(&done_loop);
2331   }
2332 
2333   // Push receiver.
2334   __ Push(t1);
2335 
2336   // Call the [[BoundTargetFunction]] via the Call builtin.
2337   __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2338   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2339           RelocInfo::CODE_TARGET);
2340 }
2341 
2342 // static
2343 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2344   // ----------- S t a t e -------------
2345   //  -- a0 : the number of arguments
2346   //  -- a1 : the target to call (can be any Object).
2347   // -----------------------------------
2348 
2349   Register argc = a0;
2350   Register target = a1;
2351   Register map = t1;
2352   Register instance_type = t2;
2353   Register scratch = t8;
2354   DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2355 
2356   Label non_callable, class_constructor;
2357   __ JumpIfSmi(target, &non_callable);
2358   __ LoadMap(map, target);
2359   __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2360                           scratch);
2361   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2362           RelocInfo::CODE_TARGET, ls, scratch,
2363           Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
2364                   FIRST_CALLABLE_JS_FUNCTION_TYPE));
2365   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2366           RelocInfo::CODE_TARGET, eq, instance_type,
2367           Operand(JS_BOUND_FUNCTION_TYPE));
2368 
2369   // Check if target has a [[Call]] internal method.
2370   {
2371     Register flags = t1;
2372     __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2373     map = no_reg;
2374     __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2375     __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2376   }
2377 
2378   // Check if target is a proxy and call the CallProxy external builtin.
2379   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
2380           instance_type, Operand(JS_PROXY_TYPE));
2381 
2382   // Check if target is a wrapped function and call the CallWrappedFunction
2383   // external builtin.
2384   __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2385           RelocInfo::CODE_TARGET, eq, instance_type,
2386           Operand(JS_WRAPPED_FUNCTION_TYPE));
2387 
2388   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2389   // Check that the function is not a "classConstructor".
2390   __ Branch(&class_constructor, eq, instance_type,
2391             Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2392 
2393   // 2. Call to something else, which might have a [[Call]] internal method (if
2394   // not we raise an exception).
2395   // Overwrite the original receiver with the (original) target.
2396   __ StoreReceiver(target, argc, kScratchReg);
2397   // Let the "call_as_function_delegate" take care of the rest.
2398   __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2399   __ Jump(masm->isolate()->builtins()->CallFunction(
2400               ConvertReceiverMode::kNotNullOrUndefined),
2401           RelocInfo::CODE_TARGET);
2402 
2403   // 3. Call to something that is not callable.
2404   __ bind(&non_callable);
2405   {
2406     FrameScope scope(masm, StackFrame::INTERNAL);
2407     __ Push(target);
2408     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2409   }
2410 
2411   // 4. The function is a "classConstructor", need to raise an exception.
2412   __ bind(&class_constructor);
2413   {
2414     FrameScope frame(masm, StackFrame::INTERNAL);
2415     __ Push(target);
2416     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2417   }
2418 }
2419 
2420 // static
2421 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2422   // ----------- S t a t e -------------
2423   //  -- a0 : the number of arguments
2424   //  -- a1 : the constructor to call (checked to be a JSFunction)
2425   //  -- a3 : the new target (checked to be a constructor)
2426   // -----------------------------------
2427   __ AssertConstructor(a1);
2428   __ AssertFunction(a1);
2429 
2430   // The calling convention for function-specific ConstructStubs requires
2431   // a2 to contain either an AllocationSite or undefined.
2432   __ LoadRoot(a2, RootIndex::kUndefinedValue);
2433 
2434   Label call_generic_stub;
2435 
2436   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2437   __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2438   __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset));
2439   __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2440   __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg));
2441 
2442   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2443           RelocInfo::CODE_TARGET);
2444 
2445   __ bind(&call_generic_stub);
2446   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2447           RelocInfo::CODE_TARGET);
2448 }
2449 
2450 // static
2451 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2452   // ----------- S t a t e -------------
2453   //  -- a0 : the number of arguments
2454   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2455   //  -- a3 : the new target (checked to be a constructor)
2456   // -----------------------------------
2457   __ AssertConstructor(a1);
2458   __ AssertBoundFunction(a1);
2459 
2460   // Load [[BoundArguments]] into a2 and length of that into t0.
2461   __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2462   __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
2463   __ SmiUntag(t0);
2464 
2465   // ----------- S t a t e -------------
2466   //  -- a0 : the number of arguments
2467   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2468   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2469   //  -- a3 : the new target (checked to be a constructor)
2470   //  -- t0 : the number of [[BoundArguments]]
2471   // -----------------------------------
2472 
2473   // Reserve stack space for the [[BoundArguments]].
2474   {
2475     Label done;
2476     __ sll(t1, t0, kPointerSizeLog2);
2477     __ Subu(t1, sp, Operand(t1));
2478     // Check the stack for overflow. We are not trying to catch interruptions
2479     // (i.e. debug break and preemption) here, so check the "real stack limit".
2480     __ LoadStackLimit(kScratchReg,
2481                       MacroAssembler::StackLimitKind::kRealStackLimit);
2482     __ Branch(&done, hs, t1, Operand(kScratchReg));
2483     {
2484       FrameScope scope(masm, StackFrame::MANUAL);
2485       __ EnterFrame(StackFrame::INTERNAL);
2486       __ CallRuntime(Runtime::kThrowStackOverflow);
2487     }
2488     __ bind(&done);
2489   }
2490 
2491   // Pop receiver.
2492   __ Pop(t1);
2493 
2494   // Push [[BoundArguments]].
2495   {
2496     Label loop, done_loop;
2497     __ Addu(a0, a0, Operand(t0));
2498     __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2499     __ bind(&loop);
2500     __ Subu(t0, t0, Operand(1));
2501     __ Branch(&done_loop, lt, t0, Operand(zero_reg));
2502     __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2);
2503     __ Lw(kScratchReg, MemOperand(kScratchReg));
2504     __ Push(kScratchReg);
2505     __ Branch(&loop);
2506     __ bind(&done_loop);
2507   }
2508 
2509   // Push receiver.
2510   __ Push(t1);
2511 
2512   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2513   {
2514     Label skip_load;
2515     __ Branch(&skip_load, ne, a1, Operand(a3));
2516     __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2517     __ bind(&skip_load);
2518   }
2519 
2520   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2521   __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2522   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2523 }
2524 
2525 // static
2526 void Builtins::Generate_Construct(MacroAssembler* masm) {
2527   // ----------- S t a t e -------------
2528   //  -- a0 : the number of arguments
2529   //  -- a1 : the constructor to call (can be any Object)
2530   //  -- a3 : the new target (either the same as the constructor or
2531   //          the JSFunction on which new was invoked initially)
2532   // -----------------------------------
2533 
2534   Register argc = a0;
2535   Register target = a1;
2536   Register map = t1;
2537   Register instance_type = t2;
2538   Register scratch = t8;
2539   DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2540 
2541   // Check if target is a Smi.
2542   Label non_constructor, non_proxy;
2543   __ JumpIfSmi(target, &non_constructor);
2544 
2545   // Check if target has a [[Construct]] internal method.
2546   __ lw(map, FieldMemOperand(target, HeapObject::kMapOffset));
2547   {
2548     Register flags = t3;
2549     __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2550     __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2551     __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2552   }
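  // From here on the target is known to have a [[Construct]] internal method;
  // the code below dispatches on its instance type (JSFunction,
  // JSBoundFunction, JSProxy, or other exotic constructors).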
2553 
2554   // Dispatch based on instance type.
2555   __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2556   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2557           RelocInfo::CODE_TARGET, ls, scratch,
2558           Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2559 
2560   // Only dispatch to bound functions after checking whether they are
2561   // constructors.
2562   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2563           RelocInfo::CODE_TARGET, eq, instance_type,
2564           Operand(JS_BOUND_FUNCTION_TYPE));
2565 
2566   // Only dispatch to proxies after checking whether they are constructors.
2567   __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2568   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2569           RelocInfo::CODE_TARGET);
2570 
2571   // Called Construct on an exotic Object with a [[Construct]] internal method.
2572   __ bind(&non_proxy);
2573   {
2574     // Overwrite the original receiver with the (original) target.
2575     __ StoreReceiver(target, argc, kScratchReg);
2576     // Let the "call_as_constructor_delegate" take care of the rest.
2577     __ LoadNativeContextSlot(target,
2578                              Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2579     __ Jump(masm->isolate()->builtins()->CallFunction(),
2580             RelocInfo::CODE_TARGET);
2581   }
2582 
2583   // Called Construct on an Object that doesn't have a [[Construct]] internal
2584   // method.
2585   __ bind(&non_constructor);
2586   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2587           RelocInfo::CODE_TARGET);
2588 }
2589 
2590 #if V8_ENABLE_WEBASSEMBLY
2591 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2592   // The function index was put in t0 by the jump table trampoline.
2593   // Convert to Smi for the runtime call.
2594   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2595 
2596   // Compute register lists for parameters to be saved. We save all parameter
2597   // registers (see wasm-linkage.h). They might be overwritten in the runtime
2598   // call below. We don't have any callee-saved registers in wasm, so no need to
2599   // store anything else.
2600   constexpr RegList kSavedGpRegs = ([]() constexpr {
2601     RegList saved_gp_regs;
2602     for (Register gp_param_reg : wasm::kGpParamRegisters) {
2603       saved_gp_regs.set(gp_param_reg);
2604     }
2605 
2606     // All set registers were unique.
2607     CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2608     // The Wasm instance must be part of the saved registers.
2609     CHECK(saved_gp_regs.has(kWasmInstanceRegister));
2610     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2611              saved_gp_regs.Count());
2612     return saved_gp_regs;
2613   })();
2614 
2615   constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2616     DoubleRegList saved_fp_regs;
2617     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2618       saved_fp_regs.set(fp_param_reg);
2619     }
2620 
2621     CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2622     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2623              saved_fp_regs.Count());
2624     return saved_fp_regs;
2625   })();
2626 
2627   {
2628     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2629     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2630 
2631     // Save registers that we need to keep alive across the runtime call.
2632     __ MultiPush(kSavedGpRegs);
2633     __ MultiPushFPU(kSavedFpRegs);
2634 
2635     // Pass the instance and function index as explicit arguments to the
2636     // runtime function.
2637     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2638     // Initialize the JavaScript context with 0. CEntry will use it to
2639     // set the current context on the isolate.
2640     __ Move(kContextRegister, Smi::zero());
2641     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2642 
2643     // Restore registers.
2644     __ MultiPopFPU(kSavedFpRegs);
2645     __ MultiPop(kSavedGpRegs);
2646   }
2647 
2648   // Untag the returned Smi, for later use.
2649   static_assert(!kSavedGpRegs.has(v0));
2650   __ SmiUntag(v0);
2651 
2652   // The runtime function returned the jump table slot offset as a Smi (now
2653   // untagged in v0). Use it to compute the jump target below.
2654   static_assert(!kSavedGpRegs.has(t8));
2655   __ Lw(t8,
2656         MemOperand(kWasmInstanceRegister,
2657                    WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
2658   __ Addu(t8, v0, t8);
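  // t8 = jump table start + slot offset, i.e. the address of the jump table
  // slot for the just-compiled function.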
2659 
2660   // Finally, jump to the jump table slot for the function.
2661   __ Jump(t8);
2662 }
2663 
2664 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2665   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2666   {
2667     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2668 
2669     // Save all parameter registers. They might hold live values; we restore
2670     // them after the runtime call.
2671     __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2672     __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2673 
2674     // Initialize the JavaScript context with 0. CEntry will use it to
2675     // set the current context on the isolate.
2676     __ Move(cp, Smi::zero());
2677     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2678 
2679     // Restore registers.
2680     __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2681     __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2682   }
2683   __ Ret();
2684 }
2685 
2686 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2687   __ Trap();
2688 }
2689 
2690 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2691   // TODO(v8:12191): Implement for this platform.
2692   __ Trap();
2693 }
2694 
2695 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2696   // TODO(v8:12191): Implement for this platform.
2697   __ Trap();
2698 }
2699 
2700 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2701   // TODO(v8:12191): Implement for this platform.
2702   __ Trap();
2703 }
2704 
2705 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2706   // Only needed on x64.
2707   __ Trap();
2708 }
2709 
2710 #endif  // V8_ENABLE_WEBASSEMBLY
2711 
2712 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2713                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2714                                bool builtin_exit_frame) {
2715   // Called from JavaScript; parameters are on stack as if calling JS function
2716   // a0: number of arguments including receiver
2717   // a1: pointer to builtin function
2718   // fp: frame pointer    (restored after C call)
2719   // sp: stack pointer    (restored as callee's sp after C call)
2720   // cp: current context  (C callee-saved)
2721   //
2722   // If argv_mode == ArgvMode::kRegister:
2723   // a2: pointer to the first argument
2724 
2725   if (argv_mode == ArgvMode::kRegister) {
2726     // Move argv into the correct register.
2727     __ mov(s1, a2);
2728   } else {
2729     // Compute the argv pointer in a callee-saved register.
2730     __ Lsa(s1, sp, a0, kPointerSizeLog2);
2731     __ Subu(s1, s1, kPointerSize);
2732   }
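  // s1 = sp + (a0 - 1) * kPointerSize, i.e. the address of the first argument
  // (the highest-addressed slot of the arguments area).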
2733 
2734   // Enter the exit frame that transitions from JavaScript to C++.
2735   FrameScope scope(masm, StackFrame::MANUAL);
2736   __ EnterExitFrame(
2737       save_doubles == SaveFPRegsMode::kSave, 0,
2738       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2739 
2740   // s0: number of arguments  including receiver (C callee-saved)
2741   // s1: pointer to first argument (C callee-saved)
2742   // s2: pointer to builtin function (C callee-saved)
2743 
2744   // Prepare arguments for C routine.
2745   // a0 = argc
2746   __ mov(s0, a0);
2747   __ mov(s2, a1);
2748 
2749   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
2750   // also need to reserve the 4 argument slots on the stack.
2751 
2752   __ AssertStackIsAligned();
2753 
2754   // a0 = argc, a1 = argv, a2 = isolate
2755   __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2756   __ mov(a1, s1);
2757 
2758   __ StoreReturnAddressAndCall(s2);
2759 
2760   // Result returned in v0 or v1:v0 - do not destroy these registers!
2761 
2762   // Check result for exception sentinel.
2763   Label exception_returned;
2764   __ LoadRoot(t0, RootIndex::kException);
2765   __ Branch(&exception_returned, eq, t0, Operand(v0));
2766 
2767   // Check that there is no pending exception, otherwise we
2768   // should have returned the exception sentinel.
2769   if (FLAG_debug_code) {
2770     Label okay;
2771     ExternalReference pending_exception_address = ExternalReference::Create(
2772         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2773     __ li(a2, pending_exception_address);
2774     __ lw(a2, MemOperand(a2));
2775     __ LoadRoot(t0, RootIndex::kTheHoleValue);
2776     // Cannot use Check here, as it attempts to generate a call into the runtime.
2777     __ Branch(&okay, eq, t0, Operand(a2));
2778     __ stop();
2779     __ bind(&okay);
2780   }
2781 
2782   // Exit C frame and return.
2783   // v0:v1: result
2784   // sp: stack pointer
2785   // fp: frame pointer
2786   Register argc = argv_mode == ArgvMode::kRegister
2787                       // We don't want to pop arguments so set argc to no_reg.
2788                       ? no_reg
2789                       // s0: still holds argc (callee-saved).
2790                       : s0;
2791   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
2792 
2793   // Handling of exception.
2794   __ bind(&exception_returned);
2795 
2796   ExternalReference pending_handler_context_address = ExternalReference::Create(
2797       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2798   ExternalReference pending_handler_entrypoint_address =
2799       ExternalReference::Create(
2800           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2801   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2802       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2803   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2804       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2805 
2806   // Ask the runtime for help to determine the handler. This will set v0 to
2807   // contain the current pending exception; do not clobber it.
2808   ExternalReference find_handler =
2809       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2810   {
2811     FrameScope scope(masm, StackFrame::MANUAL);
2812     __ PrepareCallCFunction(3, 0, a0);
2813     __ mov(a0, zero_reg);
2814     __ mov(a1, zero_reg);
2815     __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2816     __ CallCFunction(find_handler, 3);
2817   }
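  // v0 still holds the pending exception. The pending handler context, entry
  // point, fp and sp read below were filled in by the runtime call above.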
2818 
2819   // Retrieve the handler context, SP and FP.
2820   __ li(cp, pending_handler_context_address);
2821   __ lw(cp, MemOperand(cp));
2822   __ li(sp, pending_handler_sp_address);
2823   __ lw(sp, MemOperand(sp));
2824   __ li(fp, pending_handler_fp_address);
2825   __ lw(fp, MemOperand(fp));
2826 
2827   // If the handler is a JS frame, restore the context to the frame. Note
2828   // that cp will hold zero for non-JS frames.
2829   Label zero;
2830   __ Branch(&zero, eq, cp, Operand(zero_reg));
2831   __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2832   __ bind(&zero);
2833 
2834   // Clear c_entry_fp, like we do in `LeaveExitFrame`.
2835   {
2836     UseScratchRegisterScope temps(masm);
2837     Register scratch = temps.Acquire();
2838     __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
2839                                              masm->isolate()));
2840     __ Sw(zero_reg, MemOperand(scratch));
2841   }
2842 
2843   // Compute the handler entry address and jump to it.
2844   __ li(t9, pending_handler_entrypoint_address);
2845   __ lw(t9, MemOperand(t9));
2846   __ Jump(t9);
2847 }
2848 
2849 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2850   Label done;
2851   Register result_reg = t0;
2852 
2853   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2854   Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2855   Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2856   DoubleRegister double_scratch = kScratchDoubleReg;
2857 
2858   // Account for saved regs.
2859   const int kArgumentOffset = 4 * kPointerSize;
2860 
2861   __ Push(result_reg);
2862   __ Push(scratch, scratch2, scratch3);
2863 
2864   // Load double input.
2865   __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
2866 
2867   // Try a conversion to a signed integer.
2868   __ Trunc_w_d(double_scratch, double_scratch);
2869   // Move the converted value into the result register.
2870   __ mfc1(scratch3, double_scratch);
2871 
2872   // Retrieve the FCSR.
2873   __ cfc1(scratch, FCSR);
2874 
2875   // Check for overflow and NaNs.
2876   __ And(scratch, scratch,
2877          kFCSROverflowCauseMask | kFCSRUnderflowCauseMask |
2878              kFCSRInvalidOpCauseMask);
2879   // If we had no exceptions then set result_reg and we are done.
2880   Label error;
2881   __ Branch(&error, ne, scratch, Operand(zero_reg));
2882   __ Move(result_reg, scratch3);
2883   __ Branch(&done);
2884   __ bind(&error);
2885 
2886   // Load the double value and perform a manual truncation.
2887   Register input_high = scratch2;
2888   Register input_low = scratch3;
2889 
2890   __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2891   __ lw(input_high,
2892         MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2893 
2894   Label normal_exponent;
2895   // Extract the biased exponent in result.
2896   __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
2897          HeapNumber::kExponentBits);
2898 
2899   // Check for Infinity and NaNs, which should return 0.
2900   __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
2901   __ Movz(result_reg, zero_reg, scratch);
2902   __ Branch(&done, eq, scratch, Operand(zero_reg));
2903 
2904   // Express exponent as delta to (number of mantissa bits + 31).
2905   __ Subu(result_reg, result_reg,
2906           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2907 
2908   // If the delta is strictly positive, all bits would be shifted away,
2909   // which means that we can return 0.
2910   __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2911   __ mov(result_reg, zero_reg);
2912   __ Branch(&done);
2913 
2914   __ bind(&normal_exponent);
2915   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2916   // Calculate shift.
2917   __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
2918 
2919   // Save the sign.
2920   Register sign = result_reg;
2921   result_reg = no_reg;
2922   __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2923 
2924   // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
2925   // to check for this specific case.
2926   Label high_shift_needed, high_shift_done;
2927   __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2928   __ mov(input_high, zero_reg);
2929   __ Branch(&high_shift_done);
2930   __ bind(&high_shift_needed);
2931 
2932   // Set the implicit 1 before the mantissa part in input_high.
2933   __ Or(input_high, input_high,
2934         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2935   // Shift the mantissa bits to the correct position.
2936   // We don't need to clear non-mantissa bits as they will be shifted away.
2937   // If they weren't, it would mean that the answer is in the 32bit range.
2938   __ sllv(input_high, input_high, scratch);
2939 
2940   __ bind(&high_shift_done);
2941 
2942   // Replace the shifted bits with bits from the lower mantissa word.
2943   Label pos_shift, shift_done;
2944   __ li(kScratchReg, 32);
2945   __ subu(scratch, kScratchReg, scratch);
2946   __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
2947 
2948   // Negate scratch.
2949   __ Subu(scratch, zero_reg, scratch);
2950   __ sllv(input_low, input_low, scratch);
2951   __ Branch(&shift_done);
2952 
2953   __ bind(&pos_shift);
2954   __ srlv(input_low, input_low, scratch);
2955 
2956   __ bind(&shift_done);
2957   __ Or(input_high, input_high, Operand(input_low));
2958   // Restore sign if necessary.
2959   __ mov(scratch, sign);
2960   result_reg = sign;
2961   sign = no_reg;
2962   __ Subu(result_reg, zero_reg, input_high);
2963   __ Movz(result_reg, input_high, scratch);
2964 
2965   __ bind(&done);
2966   __ sw(result_reg, MemOperand(sp, kArgumentOffset));
2967   __ Pop(scratch, scratch2, scratch3);
2968   __ Pop(result_reg);
2969   __ Ret();
2970 }
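
// Illustrative only (not part of the generated builtin): a minimal C++ sketch
// of the truncation semantics implemented above -- truncate toward zero and
// keep the low 32 bits. It assumes <cmath> and <cstdint> are available via
// the existing includes; the function name is hypothetical.
static uint32_t DoubleToInt32Sketch(double input) {
  if (!std::isfinite(input)) return 0;    // Infinity and NaN map to 0.
  double t = std::trunc(input);           // Truncate toward zero.
  double m = std::fmod(t, 4294967296.0);  // Reduce modulo 2^32.
  if (m < 0) m += 4294967296.0;           // Fold negative values into range.
  return static_cast<uint32_t>(m);        // Exact: m is an integer in [0, 2^32).
}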
2971 
2972 namespace {
2973 
2974 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2975   return ref0.address() - ref1.address();
2976 }
2977 
2978 // Calls an API function. Allocates a HandleScope, extracts the returned
2979 // value from the handle, and propagates exceptions. Restores the context.
2980 // stack_space - space to be unwound on exit (includes the call's JS
2981 // arguments space and the additional space allocated for the fast call).
2982 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2983                               ExternalReference thunk_ref, int stack_space,
2984                               MemOperand* stack_space_operand,
2985                               MemOperand return_value_operand) {
2986   ASM_CODE_COMMENT(masm);
2987   Isolate* isolate = masm->isolate();
2988   ExternalReference next_address =
2989       ExternalReference::handle_scope_next_address(isolate);
2990   const int kNextOffset = 0;
2991   const int kLimitOffset = AddressOffset(
2992       ExternalReference::handle_scope_limit_address(isolate), next_address);
2993   const int kLevelOffset = AddressOffset(
2994       ExternalReference::handle_scope_level_address(isolate), next_address);
2995 
2996   DCHECK(function_address == a1 || function_address == a2);
2997 
2998   Label profiler_enabled, end_profiler_check;
2999   __ li(t9, ExternalReference::is_profiling_address(isolate));
3000   __ lb(t9, MemOperand(t9, 0));
3001   __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
3002   __ li(t9, ExternalReference::address_of_runtime_stats_flag());
3003   __ lw(t9, MemOperand(t9, 0));
3004   __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
3005   {
3006     // Call the api function directly.
3007     __ mov(t9, function_address);
3008     __ Branch(&end_profiler_check);
3009   }
3010   __ bind(&profiler_enabled);
3011   {
3012     // Additional parameter is the address of the actual callback.
3013     __ li(t9, thunk_ref);
3014   }
3015   __ bind(&end_profiler_check);
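  // t9 now holds the call target: either the API function itself or the
  // profiling / runtime-stats thunk that wraps it.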
3016 
3017   // Allocate HandleScope in callee-save registers.
3018   __ li(s5, next_address);
3019   __ lw(s0, MemOperand(s5, kNextOffset));
3020   __ lw(s1, MemOperand(s5, kLimitOffset));
3021   __ lw(s2, MemOperand(s5, kLevelOffset));
3022   __ Addu(s2, s2, Operand(1));
3023   __ sw(s2, MemOperand(s5, kLevelOffset));
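  // s0/s1 hold the previous HandleScope next/limit and s2 the bumped level;
  // being callee-saved, they survive the call and are used afterwards to
  // restore the scope (and to check the level in debug builds).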
3024 
3025   __ StoreReturnAddressAndCall(t9);
3026 
3027   Label promote_scheduled_exception;
3028   Label delete_allocated_handles;
3029   Label leave_exit_frame;
3030   Label return_value_loaded;
3031 
3032   // Load value from ReturnValue.
3033   __ lw(v0, return_value_operand);
3034   __ bind(&return_value_loaded);
3035 
3036   // No more valid handles (the result handle was the last one). Restore
3037   // previous handle scope.
3038   __ sw(s0, MemOperand(s5, kNextOffset));
3039   if (FLAG_debug_code) {
3040     __ lw(a1, MemOperand(s5, kLevelOffset));
3041     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3042              Operand(s2));
3043   }
3044   __ Subu(s2, s2, Operand(1));
3045   __ sw(s2, MemOperand(s5, kLevelOffset));
3046   __ lw(kScratchReg, MemOperand(s5, kLimitOffset));
3047   __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3048 
3049   // Leave the API exit frame.
3050   __ bind(&leave_exit_frame);
3051 
3052   if (stack_space_operand == nullptr) {
3053     DCHECK_NE(stack_space, 0);
3054     __ li(s0, Operand(stack_space));
3055   } else {
3056     DCHECK_EQ(stack_space, 0);
3057     // The ExitFrame contains four MIPS argument slots after the call so this
3058     // must be accounted for.
3059     // TODO(jgruber): Investigate if this is needed by the direct call.
3060     __ Drop(kCArgSlotCount);
3061     __ lw(s0, *stack_space_operand);
3062   }
3063 
3064   static constexpr bool kDontSaveDoubles = false;
3065   static constexpr bool kRegisterContainsSlotCount = false;
3066   __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
3067                     kRegisterContainsSlotCount);
3068 
3069   // Check if the function scheduled an exception.
3070   __ LoadRoot(t0, RootIndex::kTheHoleValue);
3071   __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3072   __ lw(t1, MemOperand(kScratchReg));
3073   __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3074 
3075   __ Ret();
3076 
3077   // Re-throw by promoting a scheduled exception.
3078   __ bind(&promote_scheduled_exception);
3079   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3080 
3081   // HandleScope limit has changed. Delete allocated extensions.
3082   __ bind(&delete_allocated_handles);
3083   __ sw(s1, MemOperand(s5, kLimitOffset));
3084   __ mov(s0, v0);
3085   __ mov(a0, v0);
3086   __ PrepareCallCFunction(1, s1);
3087   __ li(a0, ExternalReference::isolate_address(isolate));
3088   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3089   __ mov(v0, s0);
3090   __ jmp(&leave_exit_frame);
3091 }
3092 
3093 }  // namespace
3094 
3095 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3096   // ----------- S t a t e -------------
3097   //  -- cp                  : context
3098   //  -- a1                  : api function address
3099   //  -- a2                  : arguments count
3100   //  -- a3                  : call data
3101   //  -- a0                  : holder
3102   //  -- sp[0]                           : receiver
3103   //  -- sp[1 * kSystemPointerSize]      : first argument
3104   //  -- ...
3105   //  -- sp[(argc) * kSystemPointerSize] : last argument
3106   // -----------------------------------
3107 
3108   Register api_function_address = a1;
3109   Register argc = a2;
3110   Register call_data = a3;
3111   Register holder = a0;
3112   Register scratch = t0;
3113   Register base = t1;  // For addressing MemOperands on the stack.
3114 
3115   DCHECK(!AreAliased(api_function_address, argc, call_data,
3116                      holder, scratch, base));
3117 
3118   using FCA = FunctionCallbackArguments;
3119 
3120   STATIC_ASSERT(FCA::kArgsLength == 6);
3121   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3122   STATIC_ASSERT(FCA::kDataIndex == 4);
3123   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3124   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3125   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3126   STATIC_ASSERT(FCA::kHolderIndex == 0);
3127 
3128   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3129   //
3130   // Target state:
3131   //   sp[0 * kPointerSize]: kHolder
3132   //   sp[1 * kPointerSize]: kIsolate
3133   //   sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3134   //   sp[3 * kPointerSize]: undefined (kReturnValue)
3135   //   sp[4 * kPointerSize]: kData
3136   //   sp[5 * kPointerSize]: undefined (kNewTarget)
3137 
3138   // Set up the base register for addressing through MemOperands. It will point
3139   // at the receiver (located at sp + argc * kPointerSize).
3140   __ Lsa(base, sp, argc, kPointerSizeLog2);
3141 
3142   // Reserve space on the stack.
3143   __ Subu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
3144 
3145   // kHolder.
3146   __ sw(holder, MemOperand(sp, 0 * kPointerSize));
3147 
3148   // kIsolate.
3149   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3150   __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
3151 
3152   // kReturnValueDefaultValue and kReturnValue.
3153   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3154   __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
3155   __ sw(scratch, MemOperand(sp, 3 * kPointerSize));
3156 
3157   // kData.
3158   __ sw(call_data, MemOperand(sp, 4 * kPointerSize));
3159 
3160   // kNewTarget.
3161   __ sw(scratch, MemOperand(sp, 5 * kPointerSize));
3162 
3163   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3164   // We use it below to set up the FunctionCallbackInfo object.
3165   __ mov(scratch, sp);
3166 
3167   // Allocate the v8::Arguments structure in the arguments' space since
3168   // it's not controlled by GC.
3169   static constexpr int kApiStackSpace = 4;
3170   static constexpr bool kDontSaveDoubles = false;
3171   FrameScope frame_scope(masm, StackFrame::MANUAL);
3172   __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3173 
3174   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3175   // Arguments are after the return address (pushed by EnterExitFrame()).
3176   __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
3177 
3178   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3179   // on the stack).
3180   __ Addu(scratch, scratch,
3181           Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3182   __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
3183 
3184   // FunctionCallbackInfo::length_.
3185   __ sw(argc, MemOperand(sp, 3 * kPointerSize));
3186 
3187   // We also store here the amount of stack space to drop after returning
3188   // from the API function.
3189   // Note: Unlike on other architectures, this stores the number of slots to
3190   // drop, not the number of bytes.
3191   __ Addu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3192   __ sw(scratch, MemOperand(sp, 4 * kPointerSize));
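  // scratch now holds FCA::kArgsLength + 1 (receiver) + argc: the total number
  // of stack slots to drop when the callback returns.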
3193 
3194   // v8::InvocationCallback's argument.
3195   DCHECK(!AreAliased(api_function_address, scratch, a0));
3196   __ Addu(a0, sp, Operand(1 * kPointerSize));
3197 
3198   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3199 
3200   // There are two stack slots above the arguments we constructed on the stack.
3201   // TODO(jgruber): Document what these arguments are.
3202   static constexpr int kStackSlotsAboveFCA = 2;
3203   MemOperand return_value_operand(
3204       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3205 
3206   static constexpr int kUseStackSpaceOperand = 0;
3207   MemOperand stack_space_operand(sp, 4 * kPointerSize);
3208 
3209   AllowExternalCallThatCantCauseGC scope(masm);
3210   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3211                            kUseStackSpaceOperand, &stack_space_operand,
3212                            return_value_operand);
3213 }
3214 
3215 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3216   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3217   // name below the exit frame to make GC aware of them.
3218   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3219   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3220   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3221   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3222   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3223   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3224   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3225   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3226 
3227   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3228   Register holder = ApiGetterDescriptor::HolderRegister();
3229   Register callback = ApiGetterDescriptor::CallbackRegister();
3230   Register scratch = t0;
3231   DCHECK(!AreAliased(receiver, holder, callback, scratch));
3232 
3233   Register api_function_address = a2;
3234 
3235   // Here and below +1 is for name() pushed after the args_ array.
3236   using PCA = PropertyCallbackArguments;
3237   __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3238   __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3239   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3240   __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3241   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3242   __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3243   __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3244                                     kPointerSize));
3245   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3246   __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3247   __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3248   // should_throw_on_error -> false
3249   DCHECK_EQ(0, Smi::zero().ptr());
3250   __ sw(zero_reg,
3251         MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3252   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3253   __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
3254 
3255   // v8::PropertyCallbackInfo::args_ array and name handle.
3256   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3257 
3258   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3259   __ mov(a0, sp);                              // a0 = Handle<Name>
3260   __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
3261 
3262   const int kApiStackSpace = 1;
3263   FrameScope frame_scope(masm, StackFrame::MANUAL);
3264   __ EnterExitFrame(false, kApiStackSpace);
3265 
3266   // Create a v8::PropertyCallbackInfo object on the stack and initialize
3267   // its args_ field.
3268   __ sw(a1, MemOperand(sp, 1 * kPointerSize));
3269   __ Addu(a1, sp, Operand(1 * kPointerSize));  // a1 = v8::PropertyCallbackInfo&
3270 
3271   ExternalReference thunk_ref =
3272       ExternalReference::invoke_accessor_getter_callback();
3273 
3274   __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3275   __ lw(api_function_address,
3276         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3277 
3278   // +3 is to skip prolog, return address and name handle.
3279   MemOperand return_value_operand(
3280       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3281   MemOperand* const kUseStackSpaceConstant = nullptr;
3282   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3283                            kStackUnwindSpace, kUseStackSpaceConstant,
3284                            return_value_operand);
3285 }
3286 
3287 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3288   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3289   // purpose Code object) to be able to call into C functions that may trigger
3290   // GC and thus move the caller.
3291   //
3292   // DirectCEntry places the return address on the stack (updated by the GC),
3293   // making the call GC safe. The irregexp backend relies on this.
3294 
3295   // Make room for arguments to fit the C calling convention. Callers use
3296   // EnterExitFrame/LeaveExitFrame, so they handle stack restoring and we
3297   // don't have to do that here. Any caller must drop kCArgsSlotsSize stack
3298   // space after the call.
3299   __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3300 
3301   __ sw(ra, MemOperand(sp, kCArgsSlotsSize));  // Store the return address.
3302   __ Call(t9);                                 // Call the C++ function.
3303   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));  // Reload the return address.
3304 
3305   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3306     // In case of an error the return address may point to a memory area
3307     // filled with kZapValue by the GC. Dereference the address and check for
3308     // this.
3309     __ lw(t0, MemOperand(t9));
3310     __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
3311               Operand(reinterpret_cast<uint32_t>(kZapValue)));
3312   }
3313 
3314   __ Jump(t9);
3315 }
3316 
3317 void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
3318   // This code assumes that cache lines are 32 bytes; if the cache line is
3319   // larger, it will not work correctly.
3320   {
3321     Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
3322         skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
3323         ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
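    // Copy strategy, as a summary of the code below: align the destination,
    // copy 64-byte chunks with prefetching, then a 32-byte chunk, then single
    // words, then the byte tail. When src and dst have different alignments,
    // the unaligned path assembles whole words from the source with lwl/lwr
    // while still performing aligned word stores to the destination.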
3324 
3325     // The size of each prefetch.
3326     uint32_t pref_chunk = 32;
3327     // The maximum size of a prefetch; it must not be less than pref_chunk.
3328     // If the real size of a prefetch is greater than max_pref_size and
3329     // the kPrefHintPrepareForStore hint is used, the code will not work
3330     // correctly.
3331     uint32_t max_pref_size = 128;
3332     DCHECK(pref_chunk < max_pref_size);
3333 
3334     // pref_limit is set based on the fact that we never use an offset
3335     // greater than 5 on a store pref and that a single pref can
3336     // never be larger than max_pref_size.
3337     uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
3338     int32_t pref_hint_load = kPrefHintLoadStreamed;
3339     int32_t pref_hint_store = kPrefHintPrepareForStore;
3340     uint32_t loadstore_chunk = 4;
3341 
3342     // The initial prefetches may fetch bytes that are before the buffer being
3343     // copied. Start copies with an offset of 4 to avoid this situation when
3344     // using kPrefHintPrepareForStore.
3345     DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
3346            pref_chunk * 4 >= max_pref_size);
3347 
3348     // If the size is less than 8, go to lastb. Regardless of size,
3349     // copy the dst pointer to v0 for the return value.
3350     __ slti(t2, a2, 2 * loadstore_chunk);
3351     __ bne(t2, zero_reg, &lastb);
3352     __ mov(v0, a0);  // In delay slot.
3353 
3354     // If src and dst have different alignments, go to unaligned; if they
3355     // have the same alignment (but are not actually aligned) do a partial
3356     // load/store to make them aligned. If they are both already aligned
3357     // we can start copying at aligned.
3358     __ xor_(t8, a1, a0);
3359     __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
3360     __ bne(t8, zero_reg, &unaligned);
3361     __ subu(a3, zero_reg, a0);  // In delay slot.
3362 
3363     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
3364     __ beq(a3, zero_reg, &aligned);        // Already aligned.
3365     __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
3366 
3367     if (kArchEndian == kLittle) {
3368       __ lwr(t8, MemOperand(a1));
3369       __ addu(a1, a1, a3);
3370       __ swr(t8, MemOperand(a0));
3371       __ addu(a0, a0, a3);
3372     } else {
3373       __ lwl(t8, MemOperand(a1));
3374       __ addu(a1, a1, a3);
3375       __ swl(t8, MemOperand(a0));
3376       __ addu(a0, a0, a3);
3377     }
3378     // Now dst/src are both word-aligned. Set a2 to
3379     // count how many bytes we have to copy after all the 64 byte chunks are
3380     // copied and a3 to the dst pointer after all the 64 byte chunks have been
3381     // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
3382     __ bind(&aligned);
3383     __ andi(t8, a2, 0x3F);
3384     __ beq(a2, t8, &chkw);  // Less than 64?
3385     __ subu(a3, a2, t8);    // In delay slot.
3386     __ addu(a3, a0, a3);    // Now a3 is the final dst after loop.
3387 
3388     // When prefetching inside the loop with the kPrefHintPrepareForStore
3389     // hint, the prefetched address a0+x must not go past "t0-32". This means:
3390     // for x=128 the last "safe" a0 address is "t0-160"; alternatively, for
3391     // x=64 the last "safe" a0 address is "t0-96". The current version uses
3392     // "pref hint, 128(a0)", so "t0-160" is the limit.
3393     if (pref_hint_store == kPrefHintPrepareForStore) {
3394       __ addu(t0, a0, a2);          // t0 is the "past the end" address.
3395       __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
3396     }
3397 
3398     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3399     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
3400     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
3401     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
3402 
3403     if (pref_hint_store != kPrefHintPrepareForStore) {
3404       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
3405       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
3406       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
3407     }
3408     __ bind(&loop16w);
3409     __ lw(t0, MemOperand(a1));
3410 
3411     if (pref_hint_store == kPrefHintPrepareForStore) {
3412       __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
3413       __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
3414     }
3415     __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.
3416 
3417     __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3418     __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3419 
3420     __ bind(&skip_pref);
3421     __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
3422     __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
3423     __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
3424     __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
3425     __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
3426     __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
3427     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
3428 
3429     __ sw(t0, MemOperand(a0));
3430     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3431     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3432     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3433     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3434     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3435     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3436     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3437 
3438     __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
3439     __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
3440     __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
3441     __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
3442     __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
3443     __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
3444     __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
3445     __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
3446     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
3447 
3448     __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
3449     __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
3450     __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
3451     __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
3452     __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
3453     __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
3454     __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
3455     __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
3456     __ addiu(a0, a0, 16 * loadstore_chunk);
3457     __ bne(a0, a3, &loop16w);
3458     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
3459     __ mov(a2, t8);
3460 
3461     // Here we have src and dest word-aligned but less than 64 bytes to go.
3462     // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
3463     // down to chk1w to handle the tail end of the copy.
3464     __ bind(&chkw);
3465     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3466     __ andi(t8, a2, 0x1F);
3467     __ beq(a2, t8, &chk1w);  // Less than 32?
3468     __ nop();                // In delay slot.
3469     __ lw(t0, MemOperand(a1));
3470     __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
3471     __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
3472     __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
3473     __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
3474     __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
3475     __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
3476     __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
3477     __ addiu(a1, a1, 8 * loadstore_chunk);
3478     __ sw(t0, MemOperand(a0));
3479     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3480     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3481     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3482     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3483     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3484     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3485     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3486     __ addiu(a0, a0, 8 * loadstore_chunk);
3487 
3488     // Here we have less than 32 bytes to copy. Set up for a loop to copy
3489     // one word at a time. Set a2 to count how many bytes we have to copy
3490     // after all the word chunks are copied and a3 to the dst pointer after
3491     // all the word chunks have been copied. We will loop, incrementing a0
3492     // and a1 until a0 equals a3.
3493     __ bind(&chk1w);
3494     __ andi(a2, t8, loadstore_chunk - 1);
3495     __ beq(a2, t8, &lastb);
3496     __ subu(a3, t8, a2);  // In delay slot.
3497     __ addu(a3, a0, a3);
3498 
3499     __ bind(&wordCopy_loop);
3500     __ lw(t3, MemOperand(a1));
3501     __ addiu(a0, a0, loadstore_chunk);
3502     __ addiu(a1, a1, loadstore_chunk);
3503     __ bne(a0, a3, &wordCopy_loop);
3504     __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
3505 
3506     __ bind(&lastb);
3507     __ Branch(&leave, le, a2, Operand(zero_reg));
3508     __ addu(a3, a0, a2);
3509 
3510     __ bind(&lastbloop);
3511     __ lb(v1, MemOperand(a1));
3512     __ addiu(a0, a0, 1);
3513     __ addiu(a1, a1, 1);
3514     __ bne(a0, a3, &lastbloop);
3515     __ sb(v1, MemOperand(a0, -1));  // In delay slot.
3516 
3517     __ bind(&leave);
3518     __ jr(ra);
3519     __ nop();
3520 
3521     // Unaligned case. Only the dst gets aligned so we need to do partial
3522     // loads of the source followed by normal stores to the dst (once we
3523     // have aligned the destination).
3524     __ bind(&unaligned);
3525     __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
3526     __ beq(a3, zero_reg, &ua_chk16w);
3527     __ subu(a2, a2, a3);  // In delay slot.
3528 
3529     if (kArchEndian == kLittle) {
3530       __ lwr(v1, MemOperand(a1));
3531       __ lwl(v1,
3532              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3533       __ addu(a1, a1, a3);
3534       __ swr(v1, MemOperand(a0));
3535       __ addu(a0, a0, a3);
3536     } else {
3537       __ lwl(v1, MemOperand(a1));
3538       __ lwr(v1,
3539              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3540       __ addu(a1, a1, a3);
3541       __ swl(v1, MemOperand(a0));
3542       __ addu(a0, a0, a3);
3543     }
3544 
3545     // Now the dst (but not the source) is aligned. Set a2 to count how many
3546     // bytes we have to copy after all the 64 byte chunks are copied and a3 to
3547     // the dst pointer after all the 64 byte chunks have been copied. We will
3548     // loop, incrementing a0 and a1 until a0 equals a3.
3549     __ bind(&ua_chk16w);
3550     __ andi(t8, a2, 0x3F);
3551     __ beq(a2, t8, &ua_chkw);
3552     __ subu(a3, a2, t8);  // In delay slot.
3553     __ addu(a3, a0, a3);
3554 
3555     if (pref_hint_store == kPrefHintPrepareForStore) {
3556       __ addu(t0, a0, a2);
3557       __ Subu(t9, t0, pref_limit);
3558     }
3559 
3560     __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3561     __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
3562     __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
3563 
3564     if (pref_hint_store != kPrefHintPrepareForStore) {
3565       __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
3566       __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
3567       __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
3568     }
3569 
3570     __ bind(&ua_loop16w);
3571     __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
3572     if (kArchEndian == kLittle) {
3573       __ lwr(t0, MemOperand(a1));
3574       __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
3575       __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
3576 
3577       if (pref_hint_store == kPrefHintPrepareForStore) {
3578         __ sltu(v1, t9, a0);
3579         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
3580       }
3581       __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
3582 
3583       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3584       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3585 
3586       __ bind(&ua_skip_pref);
3587       __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
3588       __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
3589       __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
3590       __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
3591       __ lwl(t0,
3592              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3593       __ lwl(t1,
3594              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3595       __ lwl(t2,
3596              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3597       __ lwl(t3,
3598              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3599       __ lwl(t4,
3600              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3601       __ lwl(t5,
3602              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3603       __ lwl(t6,
3604              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3605       __ lwl(t7,
3606              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3607     } else {
3608       __ lwl(t0, MemOperand(a1));
3609       __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
3610       __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
3611 
3612       if (pref_hint_store == kPrefHintPrepareForStore) {
3613         __ sltu(v1, t9, a0);
3614         __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
3615       }
3616       __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
3617 
3618       __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3619       __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3620 
3621       __ bind(&ua_skip_pref);
3622       __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
3623       __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
3624       __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
3625       __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
3626       __ lwr(t0,
3627              MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3628       __ lwr(t1,
3629              MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3630       __ lwr(t2,
3631              MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3632       __ lwr(t3,
3633              MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3634       __ lwr(t4,
3635              MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3636       __ lwr(t5,
3637              MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3638       __ lwr(t6,
3639              MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3640       __ lwr(t7,
3641              MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3642     }
3643     __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
3644     __ sw(t0, MemOperand(a0));
3645     __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3646     __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3647     __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3648     __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3649     __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3650     __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3651     __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3652     if (kArchEndian == kLittle) {
3653       __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
3654       __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
3655       __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
3656       __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
3657       __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
3658       __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
3659       __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
3660       __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
3661       __ lwl(t0,
3662              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
3663       __ lwl(t1,
3664              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
3665       __ lwl(t2,
3666              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
3667       __ lwl(t3,
3668              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
3669       __ lwl(t4,
3670              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
3671       __ lwl(t5,
3672              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
3673       __ lwl(t6,
3674              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
3675       __ lwl(t7,
3676              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
3677     } else {
3678       __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
3679       __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
3680       __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
3681       __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
3682       __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
3683       __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
3684       __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
3685       __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
3686       __ lwr(t0,
3687              MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
3688       __ lwr(t1,
3689              MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
3690       __ lwr(t2,
3691              MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
3692       __ lwr(t3,
3693              MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
3694       __ lwr(t4,
3695              MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
3696       __ lwr(t5,
3697              MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
3698       __ lwr(t6,
3699              MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
3700       __ lwr(t7,
3701              MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
3702     }
3703     __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
3704     __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
3705     __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
3706     __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
3707     __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
3708     __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
3709     __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
3710     __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
3711     __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
3712     __ addiu(a0, a0, 16 * loadstore_chunk);
3713     __ bne(a0, a3, &ua_loop16w);
3714     __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
3715     __ mov(a2, t8);
3716 
3717     // Here we have less than 64 bytes left. Check for
3718     // a 32-byte chunk and copy it if there is one. Otherwise jump down to
3719     // ua_chk1w to handle the tail end of the copy.
3720     __ bind(&ua_chkw);
3721     __ Pref(pref_hint_load, MemOperand(a1));
3722     __ andi(t8, a2, 0x1F);
3723 
3724     __ beq(a2, t8, &ua_chk1w);
3725     __ nop();  // In delay slot.
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1));
      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1));
      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);
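    // At this point a2 holds the sub-word tail count (t8 & (loadstore_chunk -
    // 1)) and a3 is the destination address at which the word-at-a-time loop
    // below must stop. A rough C-level sketch of that loop (illustrative
    // only):
    //   while (dst != dst_end) { word = load_unaligned(src); src += 4;
    //                            *dst++ = word; }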

    __ bind(&ua_wordCopy_loop);
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the remaining tail bytes (fewer than one word), one byte at a time.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
}

namespace {

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Generate_DeoptimizationEntry(MacroAssembler* masm,
                                  DeoptimizeKind deopt_kind) {
  Isolate* isolate = masm->isolate();

  // Unlike on ARM, we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.
  static constexpr int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp | ra;

  static constexpr int kDoubleRegsSize =
      kDoubleSize * DoubleRegister::kNumRegisters;

  // Save all FPU registers before messing with them.
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Sdc1(fpu_reg, MemOperand(sp, offset));
  }
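  // The FPU save area now occupies kDoubleRegsSize bytes at the top of the
  // stack, indexed by register code; slots for non-allocatable double
  // registers are simply left as gaps.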

  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps for other registers.
  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs.bits() & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }
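  // From sp upwards the stack now holds one word per core register (only the
  // registers in saved_regs are written, the rest are gaps), followed by the
  // FPU save area reserved above; together they span kSavedRegistersAreaSize
  // bytes (defined below).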

  __ li(a2,
        ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
  __ sw(fp, MemOperand(a2));

  static constexpr int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the address of the location in the code object (a2) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register a3.
  __ mov(a2, ra);
  __ Addu(a3, sp, Operand(kSavedRegistersAreaSize));
  __ Subu(a3, fp, a3);
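  // a3 = fp - (sp + kSavedRegistersAreaSize), i.e. the fp-to-sp delta of the
  // deoptimized frame before the register save area was pushed.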

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(5, t0);
  // Pass four arguments in a0 to a3 and the fifth argument on the stack.
  __ mov(a0, zero_reg);
  Label context_check;
  __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(a1, &context_check);
  __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ li(a1, Operand(static_cast<int>(deopt_kind)));
  // a2: code address or 0 already loaded.
  // a3: Fp-to-sp delta already loaded.
  __ li(t0, ExternalReference::isolate_address(isolate));
  __ sw(t0, CFunctionArgumentOperand(5));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
  }

  // Preserve the "deoptimizer" object in register v0 and load the input
  // frame descriptor pointer into a1 (deoptimizer->input_).
  // Move the deoptimizer object to a0 for the call to
  // Deoptimizer::ComputeOutputFrames() below.
  __ mov(a0, v0);
  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs.bits() & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy FPU registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ Ldc1(f0, MemOperand(sp, src_offset));
    __ Sdc1(f0, MemOperand(a1, dst_offset));
  }

  // Remove the saved registers from the stack.
  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));

  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.
  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Addu(a2, a2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ addiu(a3, a3, sizeof(uint32_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
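  // The loop above is, roughly (illustrative sketch only):
  //   while (sp != limit) *frame_content++ = pop();
  // i.e. every word of the deoptimized frame is copied into
  // input_->frame_content_.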

  // Compute the output frame in the deoptimizer.
  __ push(a0);  // Preserve deoptimizer object across call.
  // a0: deoptimizer object; a1: scratch.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
  }
  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).

  __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: t0 = current "FrameDescription** output_",
  // a1 = one past the last FrameDescription**.
  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
  __ Lsa(a1, t0, a1, kPointerSizeLog2);
  __ BranchShort(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ BranchShort(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Addu(t0, t0, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
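  // The two loops above correspond roughly to (illustrative sketch only):
  //   for (each FrameDescription* frame in output_[0 .. output_count_)) {
  //     for (offset = frame->frame_size(); offset > 0; ) {
  //       offset -= sizeof(uint32_t);
  //       push(word at frame->frame_content() + offset);
  //     }
  //   }
  // so the contents of every output frame are materialized on the real stack.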

  __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
  }

  // Push pc and continuation from the last output frame.
  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(t2);
  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(t2);
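  // The stack now ends with the new frame's pc and, on top of it, the
  // continuation; they are popped into ra and 'at' respectively below.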

  // Technically, restoring 'at' should work unless zero_reg is also restored,
  // but it's safer to check for this.
  DCHECK(!(restored_regs.has(at)));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs.bits() & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }

  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop();
}

}  // namespace

void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}

void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
}

namespace {

// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
// code. Otherwise execution continues with bytecode.
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                         bool next_bytecode,
                                         bool is_osr = false) {
  Label start;
  __ bind(&start);

  // Get function from the frame.
  Register closure = a1;
  __ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

  // Get the Code object from the shared function info.
  Register code_obj = s1;
  __ Lw(code_obj,
        FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ Lw(code_obj,
        FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
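  // code_obj now holds the SharedFunctionInfo's function_data field: baseline
  // code (a CodeT object) if Sparkplug code has been installed, otherwise
  // e.g. a BytecodeArray. The type check below tells the two cases apart.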

  // Check if we have baseline code. For OSR entry it is safe to assume we
  // always have baseline code.
  if (!is_osr) {
    Label start_with_baseline;
    __ GetObjectType(code_obj, t6, t6);
    __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE));

    // Start with bytecode as there is no baseline code.
    Builtin builtin_id = next_bytecode
                             ? Builtin::kInterpreterEnterAtNextBytecode
                             : Builtin::kInterpreterEnterAtBytecode;
    __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
            RelocInfo::CODE_TARGET);

    // Start with baseline code.
    __ bind(&start_with_baseline);
  } else if (FLAG_debug_code) {
    __ GetObjectType(code_obj, t6, t6);
    __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE));
  }

  if (FLAG_debug_code) {
    AssertCodeIsBaseline(masm, code_obj, t2);
  }

  // Load the feedback vector from the closure.
  Register feedback_vector = a2;
  __ Lw(feedback_vector,
        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));

  Label install_baseline_code;
  // Check if the feedback vector is valid. If not, call the runtime below to
  // install baseline code and allocate the vector.
  __ GetObjectType(feedback_vector, t6, t6);
  __ Branch(&install_baseline_code, ne, t6, Operand(FEEDBACK_VECTOR_TYPE));

  // Save BytecodeOffset from the stack frame.
  __ Lw(kInterpreterBytecodeOffsetRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
  // Replace BytecodeOffset with the feedback vector.
  __ Sw(feedback_vector,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_next_executed_bytecode();
  } else {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_bytecode_offset();
  }
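  // Both external references name C++ helpers that map a bytecode offset in
  // the given BytecodeArray to the matching pc offset in the baseline code;
  // the offset returned by the call further down is added to code_obj.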

  Register get_baseline_pc = a3;
  __ li(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  if (!is_osr) {
    __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
              Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                      kFunctionEntryBytecodeOffset));
  }

  __ Subu(kInterpreterBytecodeOffsetRegister,
          kInterpreterBytecodeOffsetRegister,
          (BytecodeArray::kHeaderSize - kHeapObjectTag));

  __ bind(&valid_bytecode_offset);
  // Get bytecode array from the stack frame.
  __ Lw(kInterpreterBytecodeArrayRegister,
        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  // Save the accumulator register, since it's clobbered by the below call.
  __ Push(kInterpreterAccumulatorRegister);
  {
    Register arg_reg_1 = a0;
    Register arg_reg_2 = a1;
    Register arg_reg_3 = a2;
    __ Move(arg_reg_1, code_obj);
    __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, 0, t0);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ Addu(code_obj, code_obj, kReturnRegister0);
  __ Pop(kInterpreterAccumulatorRegister);
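  // code_obj now holds the (tagged) baseline Code pointer plus the pc offset
  // returned by the helper; the Code::kHeaderSize adjustment below turns it
  // into the actual jump target.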

  if (is_osr) {
    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
    // Sparkplug here.
    // TODO(liuyu): Remove this load, as on arm64, after register reallocation.
    __ Lw(kInterpreterBytecodeArrayRegister,
          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj,
                      Operand(Code::kHeaderSize - kHeapObjectTag));
  } else {
    __ Addu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
    __ Jump(code_obj);
  }
  __ Trap();  // Unreachable.

  if (!is_osr) {
    __ bind(&function_entry_bytecode);
    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
    // address of the first bytecode.
    __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
    if (next_bytecode) {
      __ li(get_baseline_pc,
            ExternalReference::baseline_pc_for_bytecode_offset());
    }
    __ Branch(&valid_bytecode_offset);
  }

  __ bind(&install_baseline_code);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(kInterpreterAccumulatorRegister);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
    __ Pop(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ Branch(&start);
}
}  // namespace

void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false);
}

void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, true);
}

void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false, true);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS