1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_LOONG64
6 
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer/deoptimizer.h"
12 #include "src/execution/frame-constants.h"
13 #include "src/execution/frames.h"
14 #include "src/logging/counters.h"
15 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
16 #include "src/codegen/loong64/constants-loong64.h"
17 #include "src/codegen/macro-assembler-inl.h"
18 #include "src/codegen/register-configuration.h"
19 #include "src/heap/heap-inl.h"
20 #include "src/objects/cell.h"
21 #include "src/objects/foreign.h"
22 #include "src/objects/heap-number.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27 
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-linkage.h"
30 #include "src/wasm/wasm-objects.h"
31 #endif  // V8_ENABLE_WEBASSEMBLY
32 
33 namespace v8 {
34 namespace internal {
35 
36 #define __ ACCESS_MASM(masm)
37 
38 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
39   __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
40   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
41           RelocInfo::CODE_TARGET);
42 }
43 
44 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
45                                            Runtime::FunctionId function_id) {
46   // ----------- S t a t e -------------
47   //  -- a0 : actual argument count
48   //  -- a1 : target function (preserved for callee)
49   //  -- a3 : new target (preserved for callee)
50   // -----------------------------------
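  // Roughly: this saves a0/a1/a3, calls the runtime function |function_id|
  // with the target function as its single argument, restores a0/a1/a3, and
  // tail-calls the Code object returned by the runtime (entry loaded into a2).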
51   {
52     FrameScope scope(masm, StackFrame::INTERNAL);
53     // Push a copy of the target function, the new target and the actual
54     // argument count.
55     // Push function as parameter to the runtime call.
56     __ SmiTag(kJavaScriptCallArgCountRegister);
57     __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
58             kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
59 
60     __ CallRuntime(function_id, 1);
61     __ LoadCodeObjectEntry(a2, a0);
62     // Restore target function, new target and actual argument count.
63     __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
64            kJavaScriptCallArgCountRegister);
65     __ SmiUntag(kJavaScriptCallArgCountRegister);
66   }
67 
68   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
69   __ Jump(a2);
70 }
71 
72 namespace {
73 
74 enum class ArgumentsElementType {
75   kRaw,    // Push arguments as they are.
76   kHandle  // Dereference arguments before pushing.
77 };
78 
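// In effect, the helper below pushes the arguments in reverse index order,
// a rough sketch:
//   for (i = argc - kJSArgcReceiverSlots - 1; i >= 0; --i)
//     Push(element_type == kHandle ? *array[i] : array[i]);
// where |array| points to the first argument slot.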
79 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
80                             Register scratch, Register scratch2,
81                             ArgumentsElementType element_type) {
82   DCHECK(!AreAliased(array, argc, scratch));
83   Label loop, entry;
84   __ Sub_d(scratch, argc, Operand(kJSArgcReceiverSlots));
85   __ Branch(&entry);
86   __ bind(&loop);
87   __ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
88   __ Ld_d(scratch2, MemOperand(scratch2, 0));
89   if (element_type == ArgumentsElementType::kHandle) {
90     __ Ld_d(scratch2, MemOperand(scratch2, 0));
91   }
92   __ Push(scratch2);
93   __ bind(&entry);
94   __ Add_d(scratch, scratch, Operand(-1));
95   __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
96 }
97 
98 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
99   // ----------- S t a t e -------------
100   //  -- a0     : number of arguments
101   //  -- a1     : constructor function
102   //  -- a3     : new target
103   //  -- cp     : context
104   //  -- ra     : return address
105   //  -- sp[...]: constructor arguments
106   // -----------------------------------
107 
108   // Enter a construct frame.
109   {
110     FrameScope scope(masm, StackFrame::CONSTRUCT);
111 
112     // Preserve the incoming parameters on the stack.
113     __ SmiTag(a0);
114     __ Push(cp, a0);
115     __ SmiUntag(a0);
116 
117     // Set up pointer to first argument (skip receiver).
118     __ Add_d(
119         t2, fp,
120         Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
121     // Copy arguments and receiver to the expression stack.
122     // t2: Pointer to start of arguments.
123     // a0: Number of arguments.
124     Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw);
125     // The receiver for the builtin/api call.
126     __ PushRoot(RootIndex::kTheHoleValue);
127 
128     // Call the function.
129     // a0: number of arguments (untagged)
130     // a1: constructor function
131     // a3: new target
132     __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
133 
134     // Restore context from the frame.
135     __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
136     // Restore smi-tagged arguments count from the frame.
137     __ Ld_d(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
138     // Leave construct frame.
139   }
140 
141   // Remove caller arguments from the stack and return.
142   __ DropArguments(t3, TurboAssembler::kCountIsSmi,
143                    TurboAssembler::kCountIncludesReceiver, t3);
144   __ Ret();
145 }
146 
147 }  // namespace
148 
149 // The construct stub for ES5 constructor functions and ES6 class constructors.
150 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
151   // ----------- S t a t e -------------
152   //  --      a0: number of arguments (untagged)
153   //  --      a1: constructor function
154   //  --      a3: new target
155   //  --      cp: context
156   //  --      ra: return address
157   //  -- sp[...]: constructor arguments
158   // -----------------------------------
159 
160   // Enter a construct frame.
161   FrameScope scope(masm, StackFrame::MANUAL);
162   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
163   __ EnterFrame(StackFrame::CONSTRUCT);
164 
165   // Preserve the incoming parameters on the stack.
166   __ SmiTag(a0);
167   __ Push(cp, a0, a1);
168   __ PushRoot(RootIndex::kUndefinedValue);
169   __ Push(a3);
170 
171   // ----------- S t a t e -------------
172   //  --        sp[0*kPointerSize]: new target
173   //  --        sp[1*kPointerSize]: padding
174   //  -- a1 and sp[2*kPointerSize]: constructor function
175   //  --        sp[3*kPointerSize]: number of arguments (tagged)
176   //  --        sp[4*kPointerSize]: context
177   // -----------------------------------
178 
179   __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
180   __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
181   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
182   __ JumpIfIsInRange(
183       t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
184       static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
185       &not_create_implicit_receiver);
186 
187   // If not derived class constructor: Allocate the new receiver object.
188   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
189                       t3);
190   __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
191   __ Branch(&post_instantiation_deopt_entry);
192 
193   // Else: use TheHoleValue as receiver for constructor call
194   __ bind(&not_create_implicit_receiver);
195   __ LoadRoot(a0, RootIndex::kTheHoleValue);
196 
197   // ----------- S t a t e -------------
198   //  --                          a0: receiver
199   //  -- Slot 4 / sp[0*kPointerSize]: new target
200   //  -- Slot 3 / sp[1*kPointerSize]: padding
201   //  -- Slot 2 / sp[2*kPointerSize]: constructor function
202   //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
203   //  -- Slot 0 / sp[4*kPointerSize]: context
204   // -----------------------------------
205   // Deoptimizer enters here.
206   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
207       masm->pc_offset());
208   __ bind(&post_instantiation_deopt_entry);
209 
210   // Restore new target.
211   __ Pop(a3);
212 
213   // Push the allocated receiver to the stack.
214   __ Push(a0);
215 
216   // We need two copies because we may have to return the original one
217   // and the calling conventions dictate that the called function pops the
218   // receiver. The second copy is pushed after the arguments; we save it in
219   // a6 since a0 is reused for the argument count below.
220   __ mov(a6, a0);
221 
222   // Set up pointer to last argument.
223   __ Add_d(
224       t2, fp,
225       Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
226 
227   // ----------- S t a t e -------------
228   //  --                 a3: new target
229   //  -- sp[0*kPointerSize]: implicit receiver
230   //  -- sp[1*kPointerSize]: implicit receiver
231   //  -- sp[2*kPointerSize]: padding
232   //  -- sp[3*kPointerSize]: constructor function
233   //  -- sp[4*kPointerSize]: number of arguments (tagged)
234   //  -- sp[5*kPointerSize]: context
235   // -----------------------------------
236 
237   // Restore constructor function and argument count.
238   __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
239   __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
240   __ SmiUntag(a0);
241 
242   Label stack_overflow;
243   __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
244 
245   // TODO(victorgomes): When the arguments adaptor is completely removed, we
246   // should get the formal parameter count and copy the arguments in its
247   // correct position (including any undefined), instead of delaying this to
248   // InvokeFunction.
249 
250   // Copy arguments and receiver to the expression stack.
251   // t2: Pointer to start of argument.
252   // a0: Number of arguments.
253   Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw);
254   // We need two copies because we may have to return the original one
255   // and the calling conventions dictate that the called function pops the
256   // receiver. The second copy is pushed after the arguments.
257   __ Push(a6);
258 
259   // Call the function.
260   __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
261 
262   // ----------- S t a t e -------------
263   //  --                 a0: constructor result
264   //  -- sp[0*kPointerSize]: implicit receiver
265   //  -- sp[1*kPointerSize]: padding
266   //  -- sp[2*kPointerSize]: constructor function
267   //  -- sp[3*kPointerSize]: number of arguments
268   //  -- sp[4*kPointerSize]: context
269   // -----------------------------------
270 
271   // Store offset of return address for deoptimizer.
272   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
273       masm->pc_offset());
274 
275   // If the result is an object (in the ECMA sense), we should get rid
276   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
277   // on page 74.
278   Label use_receiver, do_throw, leave_and_return, check_receiver;
279 
280   // If the result is undefined, we jump out to using the implicit receiver.
281   __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
282 
283   // Otherwise we do a smi check and fall through to check if the return value
284   // is a valid receiver.
285 
286   // Throw away the result of the constructor invocation and use the
287   // on-stack receiver as the result.
288   __ bind(&use_receiver);
289   __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
290   __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
291 
292   __ bind(&leave_and_return);
293   // Restore smi-tagged arguments count from the frame.
294   __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
295   // Leave construct frame.
296   __ LeaveFrame(StackFrame::CONSTRUCT);
297 
298   // Remove caller arguments from the stack and return.
299   __ DropArguments(a1, TurboAssembler::kCountIsSmi,
300                    TurboAssembler::kCountIncludesReceiver, a4);
301   __ Ret();
302 
303   __ bind(&check_receiver);
304   __ JumpIfSmi(a0, &use_receiver);
305 
306   // If the type of the result (stored in its map) is less than
307   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
308   __ GetObjectType(a0, t2, t2);
309   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
310   __ Branch(&leave_and_return, greater_equal, t2,
311             Operand(FIRST_JS_RECEIVER_TYPE));
312   __ Branch(&use_receiver);
313 
314   __ bind(&do_throw);
315   // Restore the context from the frame.
316   __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
317   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
318   __ break_(0xCC);
319 
320   __ bind(&stack_overflow);
321   // Restore the context from the frame.
322   __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
323   __ CallRuntime(Runtime::kThrowStackOverflow);
324   __ break_(0xCC);
325 }
326 
327 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
328   Generate_JSBuiltinsConstructStubHelper(masm);
329 }
330 
331 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
332                                  Register scratch) {
333   DCHECK(!AreAliased(code, scratch));
334   // Verify that the code kind is baseline code via the CodeKind.
335   __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
336   __ DecodeField<Code::KindField>(scratch);
337   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
338             Operand(static_cast<int>(CodeKind::BASELINE)));
339 }
340 
341 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
342 // the more general dispatch.
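// |sfi_data| here is the SharedFunctionInfo's function-data field: it may be
// baseline code (CODET_TYPE, in which case control jumps to |is_baseline|),
// an InterpreterData wrapping a BytecodeArray (which is unwrapped below), or
// already a BytecodeArray, which is left in place.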
343 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
344                                                     Register sfi_data,
345                                                     Register scratch1,
346                                                     Label* is_baseline) {
347   Label done;
348 
349   __ GetObjectType(sfi_data, scratch1, scratch1);
350   if (FLAG_debug_code) {
351     Label not_baseline;
352     __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
353     AssertCodeIsBaseline(masm, sfi_data, scratch1);
354     __ Branch(is_baseline);
355     __ bind(&not_baseline);
356   } else {
357     __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
358   }
359   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
360   __ Ld_d(sfi_data,
361           FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
362 
363   __ bind(&done);
364 }
365 
366 // static
367 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
368   // ----------- S t a t e -------------
369   //  -- a0 : the value to pass to the generator
370   //  -- a1 : the JSGeneratorObject to resume
371   //  -- ra : return address
372   // -----------------------------------
373   // Store input value into generator object.
374   __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
375   __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
376                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
377   // Check that a1 is still valid, RecordWrite might have clobbered it.
378   __ AssertGeneratorObject(a1);
379 
380   // Load suspended function and context.
381   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
382   __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
383 
384   // Flood function if we are stepping.
385   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
386   Label stepping_prepared;
387   ExternalReference debug_hook =
388       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
389   __ li(a5, debug_hook);
390   __ Ld_b(a5, MemOperand(a5, 0));
391   __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
392 
393   // Flood function if we need to continue stepping in the suspended generator.
394   ExternalReference debug_suspended_generator =
395       ExternalReference::debug_suspended_generator_address(masm->isolate());
396   __ li(a5, debug_suspended_generator);
397   __ Ld_d(a5, MemOperand(a5, 0));
398   __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
399   __ bind(&stepping_prepared);
400 
401   // Check the stack for overflow. We are not trying to catch interruptions
402   // (i.e. debug break and preemption) here, so check the "real stack limit".
403   Label stack_overflow;
404   __ LoadStackLimit(kScratchReg,
405                     MacroAssembler::StackLimitKind::kRealStackLimit);
406   __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
407 
408   // ----------- S t a t e -------------
409   //  -- a1    : the JSGeneratorObject to resume
410   //  -- a4    : generator function
411   //  -- cp    : generator context
412   //  -- ra    : return address
413   // -----------------------------------
414 
415   // Push holes for arguments to generator function. Since the parser forced
416   // context allocation for any variables in generators, the actual argument
417   // values have already been copied into the context and these dummy values
418   // will never be used.
419   __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
420   __ Ld_hu(
421       a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
422   __ Sub_d(a3, a3, Operand(kJSArgcReceiverSlots));
423   __ Ld_d(t1, FieldMemOperand(
424                   a1, JSGeneratorObject::kParametersAndRegistersOffset));
425   {
426     Label done_loop, loop;
427     __ bind(&loop);
428     __ Sub_d(a3, a3, Operand(1));
429     __ Branch(&done_loop, lt, a3, Operand(zero_reg));
430     __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
431     __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
432     __ Push(kScratchReg);
433     __ Branch(&loop);
434     __ bind(&done_loop);
435     // Push receiver.
436     __ Ld_d(kScratchReg,
437             FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
438     __ Push(kScratchReg);
439   }
440 
441   // Underlying function needs to have bytecode available.
442   if (FLAG_debug_code) {
443     Label is_baseline;
444     __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
445     __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
446     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
447     __ GetObjectType(a3, a3, a3);
448     __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
449               Operand(BYTECODE_ARRAY_TYPE));
450     __ bind(&is_baseline);
451   }
452 
453   // Resume (Ignition/TurboFan) generator object.
454   {
455     __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
456     __ Ld_hu(a0, FieldMemOperand(
457                      a0, SharedFunctionInfo::kFormalParameterCountOffset));
458     // We abuse new.target both to indicate that this is a resume call and to
459     // pass in the generator object.  In ordinary calls, new.target is always
460     // undefined because generator functions are non-constructable.
461     __ Move(a3, a1);
462     __ Move(a1, a4);
463     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
464     __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
465     __ JumpCodeObject(a2);
466   }
467 
468   __ bind(&prepare_step_in_if_stepping);
469   {
470     FrameScope scope(masm, StackFrame::INTERNAL);
471     __ Push(a1, a4);
472     // Push hole as receiver since we do not use it for stepping.
473     __ PushRoot(RootIndex::kTheHoleValue);
474     __ CallRuntime(Runtime::kDebugOnFunctionCall);
475     __ Pop(a1);
476   }
477   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
478   __ Branch(&stepping_prepared);
479 
480   __ bind(&prepare_step_in_suspended_generator);
481   {
482     FrameScope scope(masm, StackFrame::INTERNAL);
483     __ Push(a1);
484     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
485     __ Pop(a1);
486   }
487   __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
488   __ Branch(&stepping_prepared);
489 
490   __ bind(&stack_overflow);
491   {
492     FrameScope scope(masm, StackFrame::INTERNAL);
493     __ CallRuntime(Runtime::kThrowStackOverflow);
494     __ break_(0xCC);  // This should be unreachable.
495   }
496 }
497 
498 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
499   FrameScope scope(masm, StackFrame::INTERNAL);
500   __ Push(a1);
501   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
502 }
503 
504 // Clobbers scratch1 and scratch2; preserves all other registers.
505 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
506                                         Register scratch1, Register scratch2) {
507   // Check the stack for overflow. We are not trying to catch
508   // interruptions (e.g. debug break and preemption) here, so the "real stack
509   // limit" is checked.
510   Label okay;
511   __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
512   // Make scratch1 the space we have left. The stack might already be
513   // overflowed here, which will cause scratch1 to become negative.
514   __ sub_d(scratch1, sp, scratch1);
515   // Check if the arguments will overflow the stack.
516   __ slli_d(scratch2, argc, kPointerSizeLog2);
517   __ Branch(&okay, gt, scratch1, Operand(scratch2));  // Signed comparison.
518 
519   // Out of stack space.
520   __ CallRuntime(Runtime::kThrowStackOverflow);
521 
522   __ bind(&okay);
523 }
524 
525 namespace {
526 
527 // Called with the native C calling convention. The corresponding function
528 // signature is either:
529 //
530 //   using JSEntryFunction = GeneratedCode<Address(
531 //       Address root_register_value, Address new_target, Address target,
532 //       Address receiver, intptr_t argc, Address** args)>;
533 // or
534 //   using JSEntryFunction = GeneratedCode<Address(
535 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
536 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
537                              Builtin entry_trampoline) {
538   Label invoke, handler_entry, exit;
539 
540   {
541     NoRootArrayScope no_root_array(masm);
542 
543     // Registers:
544     //  either
545     //   a0: root register value
546     //   a1: entry address
547     //   a2: function
548     //   a3: receiver
549     //   a4: argc
550     //   a5: argv
551     //  or
552     //   a0: root register value
553     //   a1: microtask_queue
554 
555     // Save callee saved registers on the stack.
556     __ MultiPush(kCalleeSaved | ra);
557 
558     // Save callee-saved FPU registers.
559     __ MultiPushFPU(kCalleeSavedFPU);
560     // Set up the reserved register for 0.0.
561     __ Move(kDoubleRegZero, 0.0);
562 
563     // Initialize the root register.
564     // C calling convention. The first argument is passed in a0.
565     __ mov(kRootRegister, a0);
566   }
567 
568   // a1: entry address
569   // a2: function
570   // a3: receiver
571   // a4: argc
572   // a5: argv
573 
574   // We build an EntryFrame.
575   __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
576   __ li(s2, Operand(StackFrame::TypeToMarker(type)));
577   __ li(s3, Operand(StackFrame::TypeToMarker(type)));
578   ExternalReference c_entry_fp = ExternalReference::Create(
579       IsolateAddressId::kCEntryFPAddress, masm->isolate());
580   __ li(s5, c_entry_fp);
581   __ Ld_d(s4, MemOperand(s5, 0));
582   __ Push(s1, s2, s3, s4);
583 
584   // Clear c_entry_fp, now we've pushed its previous value to the stack.
585   // If the c_entry_fp is not already zero and we don't clear it, the
586   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
587   // frames on top.
588   __ St_d(zero_reg, MemOperand(s5, 0));
589 
590   // Set up frame pointer for the frame to be pushed.
591   __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
592 
593   // Registers:
594   //  either
595   //   a1: entry address
596   //   a2: function
597   //   a3: receiver
598   //   a4: argc
599   //   a5: argv
600   //  or
601   //   a1: microtask_queue
602   //
603   // Stack:
604   // caller fp          |
605   // function slot      | entry frame
606   // context slot       |
607   // bad fp (0xFF...F)  |
608   // callee saved registers + ra
609   // [ O32: 4 args slots]
610   // args
611 
612   // If this is the outermost JS call, set js_entry_sp value.
613   Label non_outermost_js;
614   ExternalReference js_entry_sp = ExternalReference::Create(
615       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
616   __ li(s1, js_entry_sp);
617   __ Ld_d(s2, MemOperand(s1, 0));
618   __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
619   __ St_d(fp, MemOperand(s1, 0));
620   __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
621   Label cont;
622   __ b(&cont);
623   __ nop();  // Branch delay slot nop.
624   __ bind(&non_outermost_js);
625   __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
626   __ bind(&cont);
627   __ Push(s3);
628 
629   // Jump to a faked try block that does the invoke, with a faked catch
630   // block that sets the pending exception.
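  // Conceptually:
  //   try { <call the JS entry trampoline> }
  //   catch (exception) { pending_exception = exception; return Exception; }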
631   __ jmp(&invoke);
632   __ bind(&handler_entry);
633 
634   // Store the current pc as the handler offset. It's used later to create the
635   // handler table.
636   masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
637 
638   // Caught exception: Store result (exception) in the pending exception
639   // field in the JSEnv and return a failure sentinel.  Coming in here the
640   // fp will be invalid because the PushStackHandler below sets it to 0 to
641   // signal the existence of the JSEntry frame.
642   __ li(s1, ExternalReference::Create(
643                 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
644   __ St_d(a0,
645           MemOperand(s1, 0));  // We come back from 'invoke'. result is in a0.
646   __ LoadRoot(a0, RootIndex::kException);
647   __ b(&exit);  // b exposes branch delay slot.
648   __ nop();     // Branch delay slot nop.
649 
650   // Invoke: Link this frame into the handler chain.
651   __ bind(&invoke);
652   __ PushStackHandler();
653   // If an exception not caught by another handler occurs, this handler
654   // returns control to the code after the bal(&invoke) above, which
655   // restores all kCalleeSaved registers (including cp and fp) to their
656   // saved values before returning a failure to C.
657   //
658   // Registers:
659   //  either
660   //   a0: root register value
661   //   a1: entry address
662   //   a2: function
663   //   a3: receiver
664   //   a4: argc
665   //   a5: argv
666   //  or
667   //   a0: root register value
668   //   a1: microtask_queue
669   //
670   // Stack:
671   // handler frame
672   // entry frame
673   // callee saved registers + ra
674   // [ O32: 4 args slots]
675   // args
676   //
677   // Invoke the function by calling through JS entry trampoline builtin and
678   // pop the faked function when we return.
679 
680   Handle<Code> trampoline_code =
681       masm->isolate()->builtins()->code_handle(entry_trampoline);
682   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
683 
684   // Unlink this frame from the handler chain.
685   __ PopStackHandler();
686 
687   __ bind(&exit);  // a0 holds result
688   // Check if the current stack frame is marked as the outermost JS frame.
689   Label non_outermost_js_2;
690   __ Pop(a5);
691   __ Branch(&non_outermost_js_2, ne, a5,
692             Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
693   __ li(a5, js_entry_sp);
694   __ St_d(zero_reg, MemOperand(a5, 0));
695   __ bind(&non_outermost_js_2);
696 
697   // Restore the top frame descriptors from the stack.
698   __ Pop(a5);
699   __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
700                                       masm->isolate()));
701   __ St_d(a5, MemOperand(a4, 0));
702 
703   // Reset the stack to the callee saved registers.
704   __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
705 
706   // Restore callee-saved fpu registers.
707   __ MultiPopFPU(kCalleeSavedFPU);
708 
709   // Restore callee saved registers from the stack.
710   __ MultiPop(kCalleeSaved | ra);
711   // Return.
712   __ Jump(ra);
713 }
714 
715 }  // namespace
716 
717 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
718   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
719 }
720 
721 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
722   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
723                           Builtin::kJSConstructEntryTrampoline);
724 }
725 
726 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
727   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
728                           Builtin::kRunMicrotasksTrampoline);
729 }
730 
731 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
732                                              bool is_construct) {
733   // ----------- S t a t e -------------
734   //  -- a1: new.target
735   //  -- a2: function
736   //  -- a3: receiver_pointer
737   //  -- a4: argc
738   //  -- a5: argv
739   // -----------------------------------
740 
741   // Enter an internal frame.
742   {
743     FrameScope scope(masm, StackFrame::INTERNAL);
744 
745     // Setup the context (we need to use the caller context from the isolate).
746     ExternalReference context_address = ExternalReference::Create(
747         IsolateAddressId::kContextAddress, masm->isolate());
748     __ li(cp, context_address);
749     __ Ld_d(cp, MemOperand(cp, 0));
750 
751     // Push the function and the receiver onto the stack.
752     __ Push(a2);
753 
754     // Check if we have enough stack space to push all arguments.
755     __ mov(a6, a4);
756     Generate_CheckStackOverflow(masm, a6, a0, s2);
757 
758     // Copy arguments to the stack.
759     // a4: argc
760     // a5: argv, i.e. points to first arg
761     Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle);
762 
763     // Push the receiver.
764     __ Push(a3);
765 
766     // a0: argc
767     // a1: function
768     // a3: new.target
769     __ mov(a3, a1);
770     __ mov(a1, a2);
771     __ mov(a0, a4);
772 
773     // Initialize all JavaScript callee-saved registers, since they will be seen
774     // by the garbage collector as part of handlers.
775     __ LoadRoot(a4, RootIndex::kUndefinedValue);
776     __ mov(a5, a4);
777     __ mov(s1, a4);
778     __ mov(s2, a4);
779     __ mov(s3, a4);
780     __ mov(s4, a4);
781     __ mov(s5, a4);
782     // s6 holds the root address. Do not clobber.
783     // s7 is cp. Do not init.
784 
785     // Invoke the code.
786     Handle<Code> builtin = is_construct
787                                ? BUILTIN_CODE(masm->isolate(), Construct)
788                                : masm->isolate()->builtins()->Call();
789     __ Call(builtin, RelocInfo::CODE_TARGET);
790 
791     // Leave internal frame.
792   }
793   __ Jump(ra);
794 }
795 
796 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
797   Generate_JSEntryTrampolineHelper(masm, false);
798 }
799 
800 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
801   Generate_JSEntryTrampolineHelper(masm, true);
802 }
803 
804 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
805   // a1: microtask_queue
806   __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
807   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
808 }
809 
810 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
811                                                 Register optimized_code,
812                                                 Register closure) {
813   DCHECK(!AreAliased(optimized_code, closure));
814   // Store code entry in the closure.
815   __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
816   __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
817                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
818                       RememberedSetAction::kOmit, SmiCheck::kOmit);
819 }
820 
821 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
822                                   Register scratch2) {
823   Register params_size = scratch1;
824 
825   // Get the size of the formal parameters + receiver (in bytes).
826   __ Ld_d(params_size,
827           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
828   __ Ld_w(params_size,
829           FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
830 
831   Register actual_params_size = scratch2;
832   // Compute the size of the actual parameters + receiver (in bytes).
833   __ Ld_d(actual_params_size,
834           MemOperand(fp, StandardFrameConstants::kArgCOffset));
835   __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
836 
837   // If actual is bigger than formal, then we should use it to free up the stack
838   // arguments.
839   __ slt(t2, params_size, actual_params_size);
840   __ Movn(params_size, actual_params_size, t2);
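  // i.e. params_size = max(params_size, actual_params_size).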
841 
842   // Leave the frame (also dropping the register file).
843   __ LeaveFrame(StackFrame::INTERPRETED);
844 
845   // Drop receiver + arguments.
846   __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
847                    TurboAssembler::kCountIncludesReceiver);
848 }
849 
850 // Tail-call |function_id| if |actual_state| == |expected_state|
851 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
852                                          Register actual_state,
853                                          TieringState expected_state,
854                                          Runtime::FunctionId function_id) {
855   Label no_match;
856   __ Branch(&no_match, ne, actual_state,
857             Operand(static_cast<int>(expected_state)));
858   GenerateTailCallToReturnedCode(masm, function_id);
859   __ bind(&no_match);
860 }
861 
862 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
863                                       Register optimized_code_entry) {
864   // ----------- S t a t e -------------
865   //  -- a0 : actual argument count
866   //  -- a3 : new target (preserved for callee if needed, and caller)
867   //  -- a1 : target function (preserved for callee if needed, and caller)
868   // -----------------------------------
869   DCHECK(!AreAliased(optimized_code_entry, a1, a3));
870 
871   Register closure = a1;
872   Label heal_optimized_code_slot;
873 
874   // If the optimized code is cleared, go to runtime to update the optimization
875   // marker field.
876   __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
877                    &heal_optimized_code_slot);
878 
879   // Check if the optimized code is marked for deopt. If it is, call the
880   // runtime to clear it.
881   __ Ld_d(a6, FieldMemOperand(optimized_code_entry,
882                               Code::kCodeDataContainerOffset));
883   __ Ld_w(a6, FieldMemOperand(a6, CodeDataContainer::kKindSpecificFlagsOffset));
884   __ And(a6, a6, Operand(1 << Code::kMarkedForDeoptimizationBit));
885   __ Branch(&heal_optimized_code_slot, ne, a6, Operand(zero_reg));
886 
887   // Optimized code is good, get it into the closure and link the closure into
888   // the optimized functions list, then tail call the optimized code.
889   // The feedback vector is no longer used, so re-use it as a scratch
890   // register.
891   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
892 
893   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
894   __ LoadCodeObjectEntry(a2, optimized_code_entry);
895   __ Jump(a2);
896 
897   // Optimized code slot contains deoptimized code or code is cleared and
898   // optimized code marker isn't updated. Evict the code, update the marker
899   // and re-enter the closure's code.
900   __ bind(&heal_optimized_code_slot);
901   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
902 }
903 
904 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
905                               Register tiering_state) {
906   // ----------- S t a t e -------------
907   //  -- a0 : actual argument count
908   //  -- a3 : new target (preserved for callee if needed, and caller)
909   //  -- a1 : target function (preserved for callee if needed, and caller)
910   //  -- feedback vector (preserved for caller if needed)
911   //  -- tiering_state : a Smi containing a non-zero tiering state.
912   // -----------------------------------
913   DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
914 
915   TailCallRuntimeIfStateEquals(masm, tiering_state,
916                                TieringState::kRequestTurbofan_Synchronous,
917                                Runtime::kCompileTurbofan_Synchronous);
918   TailCallRuntimeIfStateEquals(masm, tiering_state,
919                                TieringState::kRequestTurbofan_Concurrent,
920                                Runtime::kCompileTurbofan_Concurrent);
921 
922   __ stop();
923 }
924 
925 // Advance the current bytecode offset. This simulates what all bytecode
926 // handlers do upon completion of the underlying operation. Will bail out to a
927 // label if the bytecode (without prefix) is a return bytecode. Will not advance
928 // the bytecode offset if the current bytecode is a JumpLoop, instead just
929 // re-executing the JumpLoop to jump to the correct bytecode.
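// A rough sketch of the logic below:
//   if (bytecode is a Wide/ExtraWide prefix) { ++offset; reload bytecode;
//     switch to the corresponding scaled size table; }
//   if (bytecode is a return bytecode) goto *if_return;
//   if (bytecode == JumpLoop) offset = original_offset;  // re-execute the jump
//   else offset += bytecode_size_table[bytecode];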
930 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
931                                           Register bytecode_array,
932                                           Register bytecode_offset,
933                                           Register bytecode, Register scratch1,
934                                           Register scratch2, Register scratch3,
935                                           Label* if_return) {
936   Register bytecode_size_table = scratch1;
937 
938   // The bytecode offset value will be increased by one in wide and extra wide
939   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
940   // will restore the original bytecode. In order to simplify the code, we have
941   // a backup of it.
942   Register original_bytecode_offset = scratch3;
943   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
944                      bytecode_size_table, original_bytecode_offset));
945   __ Move(original_bytecode_offset, bytecode_offset);
946   __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
947 
948   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
949   Label process_bytecode, extra_wide;
950   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
951   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
952   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
953   STATIC_ASSERT(3 ==
954                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
955   __ Branch(&process_bytecode, hi, bytecode, Operand(3));
956   __ And(scratch2, bytecode, Operand(1));
957   __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
958 
959   // Load the next bytecode and update table to the wide scaled table.
960   __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
961   __ Add_d(scratch2, bytecode_array, bytecode_offset);
962   __ Ld_bu(bytecode, MemOperand(scratch2, 0));
963   __ Add_d(bytecode_size_table, bytecode_size_table,
964            Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
965   __ jmp(&process_bytecode);
966 
967   __ bind(&extra_wide);
968   // Load the next bytecode and update table to the extra wide scaled table.
969   __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
970   __ Add_d(scratch2, bytecode_array, bytecode_offset);
971   __ Ld_bu(bytecode, MemOperand(scratch2, 0));
972   __ Add_d(bytecode_size_table, bytecode_size_table,
973            Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
974 
975   __ bind(&process_bytecode);
976 
977 // Bailout to the return label if this is a return bytecode.
978 #define JUMP_IF_EQUAL(NAME)          \
979   __ Branch(if_return, eq, bytecode, \
980             Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
981   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
982 #undef JUMP_IF_EQUAL
983 
984   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
985   // of the loop.
986   Label end, not_jump_loop;
987   __ Branch(&not_jump_loop, ne, bytecode,
988             Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
989   // We need to restore the original bytecode_offset since we might have
990   // increased it to skip the wide / extra-wide prefix bytecode.
991   __ Move(bytecode_offset, original_bytecode_offset);
992   __ jmp(&end);
993 
994   __ bind(&not_jump_loop);
995   // Otherwise, load the size of the current bytecode and advance the offset.
996   __ Add_d(scratch2, bytecode_size_table, bytecode);
997   __ Ld_b(scratch2, MemOperand(scratch2, 0));
998   __ Add_d(bytecode_offset, bytecode_offset, scratch2);
999 
1000   __ bind(&end);
1001 }
1002 
1003 // Read off the optimization state in the feedback vector and check if there
1004 // is optimized code or a tiering state that needs to be processed.
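// In effect:
//   optimization_state = feedback_vector->flags();
//   if (optimization_state & kHasOptimizedCodeOrTieringStateIsAnyRequestMask)
//     goto *has_optimized_code_or_state;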
1005 static void LoadTieringStateAndJumpIfNeedsProcessing(
1006     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1007     Label* has_optimized_code_or_state) {
1008   ASM_CODE_COMMENT(masm);
1009   Register scratch = t2;
1010   // TODO(liuyu): Remove CHECK
1011   CHECK_NE(t2, optimization_state);
1012   CHECK_NE(t2, feedback_vector);
1013   __ Ld_w(optimization_state,
1014           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1015   __ And(
1016       scratch, optimization_state,
1017       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1018   __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
1019 }
1020 
1021 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1022     MacroAssembler* masm, Register optimization_state,
1023     Register feedback_vector) {
1024   ASM_CODE_COMMENT(masm);
1025   Label maybe_has_optimized_code;
1026   // Check if optimized code marker is available
1027   {
1028     UseScratchRegisterScope temps(masm);
1029     Register scratch = temps.Acquire();
1030     __ And(scratch, optimization_state,
1031            Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
1032     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
1033   }
1034 
1035   Register tiering_state = optimization_state;
1036   __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1037   MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1038 
1039   __ bind(&maybe_has_optimized_code);
1040   Register optimized_code_entry = optimization_state;
1041   __ Ld_d(optimized_code_entry,
1042           FieldMemOperand(feedback_vector,
1043                           FeedbackVector::kMaybeOptimizedCodeOffset));
1044 
1045   TailCallOptimizedCodeSlot(masm, optimized_code_entry);
1046 }
1047 
1048 namespace {
1049 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1050                                  Register bytecode_array) {
1051   // Reset code age and the OSR state (optimized to a single write).
1052   static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1053   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
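  // The OSR state (urgency + install target) and the bytecode age are asserted
  // above to occupy one contiguous 32-bit word, so a single 32-bit store of
  // zero resets all of them (age goes back to kNoAgeBytecodeAge).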
1054   __ St_w(zero_reg,
1055           FieldMemOperand(bytecode_array,
1056                           BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1057 }
1058 
1059 }  // namespace
1060 
1061 // static
1062 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1063   UseScratchRegisterScope temps(masm);
1064   temps.Include({s1, s2});
1065   temps.Exclude({t7});
1066   auto descriptor =
1067       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1068   Register closure = descriptor.GetRegisterParameter(
1069       BaselineOutOfLinePrologueDescriptor::kClosure);
1070   // Load the feedback vector from the closure.
1071   Register feedback_vector = temps.Acquire();
1072   __ Ld_d(feedback_vector,
1073           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1074   __ Ld_d(feedback_vector,
1075           FieldMemOperand(feedback_vector, Cell::kValueOffset));
1076   if (FLAG_debug_code) {
1077     UseScratchRegisterScope temps(masm);
1078     Register scratch = temps.Acquire();
1079     __ GetObjectType(feedback_vector, scratch, scratch);
1080     __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
1081               Operand(FEEDBACK_VECTOR_TYPE));
1082   }
1083   // Check for a tiering state.
1084   Label has_optimized_code_or_state;
1085   Register optimization_state = no_reg;
1086   {
1087     UseScratchRegisterScope temps(masm);
1088     optimization_state = temps.Acquire();
1089     // optimization_state will be used only in |has_optimized_code_or_state|
1090     // and outside it can be reused.
1091     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1092                                              feedback_vector,
1093                                              &has_optimized_code_or_state);
1094   }
1095   // Increment invocation count for the function.
1096   {
1097     UseScratchRegisterScope temps(masm);
1098     Register invocation_count = temps.Acquire();
1099     __ Ld_w(invocation_count,
1100             FieldMemOperand(feedback_vector,
1101                             FeedbackVector::kInvocationCountOffset));
1102     __ Add_w(invocation_count, invocation_count, Operand(1));
1103     __ St_w(invocation_count,
1104             FieldMemOperand(feedback_vector,
1105                             FeedbackVector::kInvocationCountOffset));
1106   }
1107 
1108   FrameScope frame_scope(masm, StackFrame::MANUAL);
1109   {
1110     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1111     // Normally the first thing we'd do here is Push(ra, fp), but we already
1112     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1113     // value ra before the call to this BaselineOutOfLinePrologue builtin.
1114     Register callee_context = descriptor.GetRegisterParameter(
1115         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1116     Register callee_js_function = descriptor.GetRegisterParameter(
1117         BaselineOutOfLinePrologueDescriptor::kClosure);
1118     __ Push(callee_context, callee_js_function);
1119     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1120     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1121 
1122     Register argc = descriptor.GetRegisterParameter(
1123         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1124     // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1125     // the frame, so load it into a register.
1126     Register bytecode_array = descriptor.GetRegisterParameter(
1127         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1128     ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1129     __ Push(argc, bytecode_array);
1130 
1131     // Baseline code frames store the feedback vector where interpreter would
1132     // store the bytecode offset.
1133     if (FLAG_debug_code) {
1134       UseScratchRegisterScope temps(masm);
1135       Register invocation_count = temps.Acquire();
1136       __ GetObjectType(feedback_vector, invocation_count, invocation_count);
1137       __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
1138                 Operand(FEEDBACK_VECTOR_TYPE));
1139     }
1140     // Our stack is currently aligned. We have to push something along with
1141     // the feedback vector to keep it that way -- we may as well start
1142     // initialising the register frame.
1143     // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
1144     // `undefined` in the accumulator register, to skip the load in the baseline
1145     // code.
1146     __ Push(feedback_vector);
1147   }
1148 
1149   Label call_stack_guard;
1150   Register frame_size = descriptor.GetRegisterParameter(
1151       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1152   {
1153     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1154     // Stack check. This folds the checks for both the interrupt stack limit
1155     // check and the real stack limit into one by just checking for the
1156     // interrupt limit. The interrupt limit is either equal to the real stack
1157     // limit or tighter. By ensuring we have space until that limit after
1158     // building the frame we can quickly precheck both at once.
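    // Roughly: if (sp - frame_size < interrupt_limit) goto call_stack_guard.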
1159     UseScratchRegisterScope temps(masm);
1160     Register sp_minus_frame_size = temps.Acquire();
1161     __ Sub_d(sp_minus_frame_size, sp, frame_size);
1162     Register interrupt_limit = temps.Acquire();
1163     __ LoadStackLimit(interrupt_limit,
1164                       MacroAssembler::StackLimitKind::kInterruptStackLimit);
1165     __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1166               Operand(interrupt_limit));
1167   }
1168 
1169   // Do "fast" return to the caller pc in ra.
1170   // TODO(v8:11429): Document this frame setup better.
1171   __ Ret();
1172 
1173   __ bind(&has_optimized_code_or_state);
1174   {
1175     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1176     UseScratchRegisterScope temps(masm);
1177     temps.Exclude(optimization_state);
1178     // Ensure the optimization_state is not allocated again.
1179     // Drop the frame created by the baseline call.
1180     __ Pop(ra, fp);
1181     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1182                                                  feedback_vector);
1183     __ Trap();
1184   }
1185 
1186   __ bind(&call_stack_guard);
1187   {
1188     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1189     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1190     // Save incoming new target or generator
1191     __ Push(kJavaScriptCallNewTargetRegister);
1192     __ SmiTag(frame_size);
1193     __ Push(frame_size);
1194     __ CallRuntime(Runtime::kStackGuardWithGap);
1195     __ Pop(kJavaScriptCallNewTargetRegister);
1196   }
1197   __ Ret();
1198   temps.Exclude({s1, s2});
1199 }
1200 
1201 // Generate code for entering a JS function with the interpreter.
1202 // On entry to the function the receiver and arguments have been pushed on the
1203 // stack left to right.
1204 //
1205 // The live registers are:
1206 //   o a0 : actual argument count
1207 //   o a1: the JS function object being called.
1208 //   o a3: the incoming new target or generator object
1209 //   o cp: our context
1210 //   o fp: the caller's frame pointer
1211 //   o sp: stack pointer
1212 //   o ra: return address
1213 //
1214 // The function builds an interpreter frame.  See InterpreterFrameConstants in
1215 // frame-constants.h for its layout.
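// Roughly, the frame built below contains (growing downwards; see
// InterpreterFrameConstants for the authoritative layout):
//   caller pc, caller fp, context, JSFunction, argc   <- PushStandardFrame
//   bytecode array, bytecode offset (Smi)
//   register file (frame-size slots, initialized to undefined)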
1216 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1217   Register closure = a1;
1218   Register feedback_vector = a2;
1219 
1220   // Get the bytecode array from the function object and load it into
1221   // kInterpreterBytecodeArrayRegister.
1222   __ Ld_d(kScratchReg,
1223           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1224   __ Ld_d(
1225       kInterpreterBytecodeArrayRegister,
1226       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
1227   Label is_baseline;
1228   GetSharedFunctionInfoBytecodeOrBaseline(
1229       masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
1230 
1231   // The bytecode array could have been flushed from the shared function info,
1232   // if so, call into CompileLazy.
1233   Label compile_lazy;
1234   __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1235   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1236 
1237   // Load the feedback vector from the closure.
1238   __ Ld_d(feedback_vector,
1239           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1240   __ Ld_d(feedback_vector,
1241           FieldMemOperand(feedback_vector, Cell::kValueOffset));
1242 
1243   Label push_stack_frame;
1244   // Check if feedback vector is valid. If valid, check for optimized code
1245   // and update invocation count. Otherwise, setup the stack frame.
1246   __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1247   __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
1248   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
1249 
1250   // Read off the optimization state in the feedback vector, and if there
1251   // is optimized code or a tiering state, call that instead.
1252   Register optimization_state = a4;
1253   __ Ld_w(optimization_state,
1254           FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1255 
1256   // Check if the optimized code slot is not empty or has a tiering state.
1257   Label has_optimized_code_or_state;
1258 
1259   __ andi(t0, optimization_state,
1260           FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
1261   __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
1262 
1263   Label not_optimized;
1264   __ bind(&not_optimized);
1265 
1266   // Increment invocation count for the function.
1267   __ Ld_w(a4, FieldMemOperand(feedback_vector,
1268                               FeedbackVector::kInvocationCountOffset));
1269   __ Add_w(a4, a4, Operand(1));
1270   __ St_w(a4, FieldMemOperand(feedback_vector,
1271                               FeedbackVector::kInvocationCountOffset));
1272 
1273   // Open a frame scope to indicate that there is a frame on the stack.  The
1274   // MANUAL indicates that the scope shouldn't actually generate code to set up
1275   // the frame (that is done below).
1276   __ bind(&push_stack_frame);
1277   FrameScope frame_scope(masm, StackFrame::MANUAL);
1278   __ PushStandardFrame(closure);
1279 
1280   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1281 
1282   // Load initial bytecode offset.
1283   __ li(kInterpreterBytecodeOffsetRegister,
1284         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1285 
1286   // Push the bytecode array and the Smi-tagged bytecode offset.
1287   __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
1288   __ Push(kInterpreterBytecodeArrayRegister, a4);
1289 
1290   // Allocate the local and temporary register file on the stack.
1291   Label stack_overflow;
1292   {
1293     // Load frame size (word) from the BytecodeArray object.
1294     __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1295                                 BytecodeArray::kFrameSizeOffset));
1296 
1297     // Do a stack check to ensure we don't go over the limit.
1298     __ Sub_d(a5, sp, Operand(a4));
1299     __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1300     __ Branch(&stack_overflow, lo, a5, Operand(a2));
1301 
1302     // If ok, push undefined as the initial value for all register file entries.
1303     Label loop_header;
1304     Label loop_check;
1305     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1306     __ Branch(&loop_check);
1307     __ bind(&loop_header);
1308     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1309     __ Push(kInterpreterAccumulatorRegister);
1310     // Continue loop if not done.
1311     __ bind(&loop_check);
1312     __ Sub_d(a4, a4, Operand(kPointerSize));
1313     __ Branch(&loop_header, ge, a4, Operand(zero_reg));
1314   }
1315 
1316   // If the bytecode array has a valid incoming new target or generator object
1317   // register, initialize it with the incoming value, which was passed in a3.
1318   Label no_incoming_new_target_or_generator_register;
1319   __ Ld_w(a5, FieldMemOperand(
1320                   kInterpreterBytecodeArrayRegister,
1321                   BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1322   __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1323             Operand(zero_reg));
1324   __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
1325   __ St_d(a3, MemOperand(a5, 0));
1326   __ bind(&no_incoming_new_target_or_generator_register);
1327 
1328   // Perform interrupt stack check.
1329   // TODO(solanes): Merge with the real stack limit check above.
1330   Label stack_check_interrupt, after_stack_check_interrupt;
1331   __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1332   __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1333   __ bind(&after_stack_check_interrupt);
1334 
1335   // The accumulator is already loaded with undefined.
1336 
1337   // Load the dispatch table into a register and dispatch to the bytecode
1338   // handler at the current bytecode offset.
1339   Label do_dispatch;
1340   __ bind(&do_dispatch);
1341   __ li(kInterpreterDispatchTableRegister,
1342         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
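  // Fetch the current bytecode and use it to index into the dispatch table.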
1343   __ Add_d(t5, kInterpreterBytecodeArrayRegister,
1344            kInterpreterBytecodeOffsetRegister);
1345   __ Ld_bu(a7, MemOperand(t5, 0));
1346   __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
1347             kPointerSizeLog2, t7);
1348   __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
1349   __ Call(kJavaScriptCallCodeStartRegister);
1350   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1351 
1352   // Any returns to the entry trampoline are either due to the return bytecode
1353   // or the interpreter tail calling a builtin and then a dispatch.
1354 
1355   // Get bytecode array and bytecode offset from the stack frame.
1356   __ Ld_d(kInterpreterBytecodeArrayRegister,
1357           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1358   __ Ld_d(kInterpreterBytecodeOffsetRegister,
1359           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1360   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1361 
1362   // Either return, or advance to the next bytecode and dispatch.
1363   Label do_return;
1364   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1365            kInterpreterBytecodeOffsetRegister);
1366   __ Ld_bu(a1, MemOperand(a1, 0));
1367   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1368                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1369                                 a4, &do_return);
1370   __ jmp(&do_dispatch);
1371 
1372   __ bind(&do_return);
1373   // The return value is in a0.
1374   LeaveInterpreterFrame(masm, t0, t1);
1375   __ Jump(ra);
1376 
1377   __ bind(&stack_check_interrupt);
1378   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1379   // for the call to the StackGuard.
1380   __ li(kInterpreterBytecodeOffsetRegister,
1381         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1382                              kFunctionEntryBytecodeOffset)));
1383   __ St_d(kInterpreterBytecodeOffsetRegister,
1384           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1385   __ CallRuntime(Runtime::kStackGuard);
1386 
1387   // After the call, restore the bytecode array, bytecode offset and accumulator
1388   // registers again. Also, restore the bytecode offset in the stack to its
1389   // previous value.
1390   __ Ld_d(kInterpreterBytecodeArrayRegister,
1391           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1392   __ li(kInterpreterBytecodeOffsetRegister,
1393         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1394   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1395 
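  // Write the initial bytecode offset back into the frame slot as a Smi.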
1396   __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
1397   __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1398 
1399   __ jmp(&after_stack_check_interrupt);
1400 
1401   __ bind(&has_optimized_code_or_state);
1402   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1403                                                feedback_vector);
1404 
1405   __ bind(&is_baseline);
1406   {
1407     // Load the feedback vector from the closure.
1408     __ Ld_d(feedback_vector,
1409             FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1410     __ Ld_d(feedback_vector,
1411             FieldMemOperand(feedback_vector, Cell::kValueOffset));
1412 
1413     Label install_baseline_code;
1414     // Check if feedback vector is valid. If not, call prepare for baseline to
1415     // allocate it.
1416     __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1417     __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1418     __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1419 
1420     // Check for a tiering state.
1421     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1422                                              feedback_vector,
1423                                              &has_optimized_code_or_state);
1424 
1425     // Load the baseline code into the closure.
1426     __ Move(a2, kInterpreterBytecodeArrayRegister);
1427     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1428     ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
1429     __ JumpCodeObject(a2);
1430 
1431     __ bind(&install_baseline_code);
1432     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1433   }
1434 
1435   __ bind(&compile_lazy);
1436   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1437   // Unreachable code.
1438   __ break_(0xCC);
1439 
1440   __ bind(&stack_overflow);
1441   __ CallRuntime(Runtime::kThrowStackOverflow);
1442   // Unreachable code.
1443   __ break_(0xCC);
1444 }
1445 
1446 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1447                                         Register start_address,
1448                                         Register scratch, Register scratch2) {
1449   // Find the address of the last argument.
1450   __ Sub_d(scratch, num_args, Operand(1));
1451   __ slli_d(scratch, scratch, kPointerSizeLog2);
1452   __ Sub_d(start_address, start_address, scratch);
1453 
1454   // Push the arguments.
1455   __ PushArray(start_address, num_args, scratch, scratch2,
1456                TurboAssembler::PushArrayOrder::kReverse);
1457 }
1458 
1459 // static
1460 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1461     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1462     InterpreterPushArgsMode mode) {
1463   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1464   // ----------- S t a t e -------------
1465   //  -- a0 : the number of arguments
1466   //  -- a2 : the address of the first argument to be pushed. Subsequent
1467   //          arguments should be consecutive above this, in the same order as
1468   //          they are to be pushed onto the stack.
1469   //  -- a1 : the target to call (can be any Object).
1470   // -----------------------------------
1471   Label stack_overflow;
1472   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1473     // The spread argument should not be pushed.
1474     __ Sub_d(a0, a0, Operand(1));
1475   }
1476 
1477   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1478     __ Sub_d(a3, a0, Operand(kJSArgcReceiverSlots));
1479   } else {
1480     __ mov(a3, a0);
1481   }
1482 
1483   __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1484 
1485   // This function modifies a2, t0 and a4.
1486   GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
1487 
1488   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1489     __ PushRoot(RootIndex::kUndefinedValue);
1490   }
1491 
1492   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1493     // Pass the spread in the register a2.
1494     // a2 already points to the penultimate argument; the spread
1495     // is below that.
1496     __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
1497   }
1498 
1499   // Call the target.
1500   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1501     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1502             RelocInfo::CODE_TARGET);
1503   } else {
1504     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1505             RelocInfo::CODE_TARGET);
1506   }
1507 
1508   __ bind(&stack_overflow);
1509   {
1510     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1511     // Unreachable code.
1512     __ break_(0xCC);
1513   }
1514 }
1515 
1516 // static
1517 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1518     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1519   // ----------- S t a t e -------------
1520   // -- a0 : argument count
1521   // -- a3 : new target
1522   // -- a1 : constructor to call
1523   // -- a2 : allocation site feedback if available, undefined otherwise.
1524   // -- a4 : address of the first argument
1525   // -----------------------------------
1526   Label stack_overflow;
1527   __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1528 
1529   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1530     // The spread argument should not be pushed.
1531     __ Sub_d(a0, a0, Operand(1));
1532   }
1533 
1534   Register argc_without_receiver = a6;
1535   __ Sub_d(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1536 
1537   // Push the arguments. This function modifies t0, a4 and a5.
1538   GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
1539 
1540   // Push a slot for the receiver.
1541   __ Push(zero_reg);
1542 
1543   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1544     // Pass the spread in the register a2.
1545     // a4 already points to the penultimate argument, the spread
1546     // lies in the next interpreter register.
1547     __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
1548   } else {
1549     __ AssertUndefinedOrAllocationSite(a2, t0);
1550   }
1551 
1552   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1553     __ AssertFunction(a1);
1554 
1555     // Tail call to the function-specific construct stub (still in the caller
1556     // context at this point).
1557     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1558             RelocInfo::CODE_TARGET);
1559   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1560     // Call the constructor with a0, a1, and a3 unmodified.
1561     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1562             RelocInfo::CODE_TARGET);
1563   } else {
1564     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1565     // Call the constructor with a0, a1, and a3 unmodified.
1566     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1567   }
1568 
1569   __ bind(&stack_overflow);
1570   {
1571     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1572     // Unreachable code.
1573     __ break_(0xCC);
1574   }
1575 }
1576 
1577 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1578   // Set the return address to the correct point in the interpreter entry
1579   // trampoline.
1580   Label builtin_trampoline, trampoline_loaded;
1581   Smi interpreter_entry_return_pc_offset(
1582       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1583   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1584 
1585   // If the SFI function_data is an InterpreterData, the function will have a
1586   // custom copy of the interpreter entry trampoline for profiling. If so,
1587   // get the custom trampoline, otherwise grab the entry address of the global
1588   // trampoline.
1589   __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1590   __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1591   __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1592   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1593                    kInterpreterDispatchTableRegister);
1594   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1595             Operand(INTERPRETER_DATA_TYPE));
1596 
1597   __ Ld_d(t0,
1598           FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1599   __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1600   __ Branch(&trampoline_loaded);
1601 
1602   __ bind(&builtin_trampoline);
1603   __ li(t0, ExternalReference::
1604                 address_of_interpreter_entry_trampoline_instruction_start(
1605                     masm->isolate()));
1606   __ Ld_d(t0, MemOperand(t0, 0));
1607 
1608   __ bind(&trampoline_loaded);
1609   __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
1610 
1611   // Initialize the dispatch table register.
1612   __ li(kInterpreterDispatchTableRegister,
1613         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1614 
1615   // Get the bytecode array pointer from the frame.
1616   __ Ld_d(kInterpreterBytecodeArrayRegister,
1617           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1618 
1619   if (FLAG_debug_code) {
1620     // Check that the function data field is actually a BytecodeArray object.
1621     __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1622     __ Assert(ne,
1623               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1624               kScratchReg, Operand(zero_reg));
1625     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1626     __ Assert(eq,
1627               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1628               a1, Operand(BYTECODE_ARRAY_TYPE));
1629   }
1630 
1631   // Get the target bytecode offset from the frame.
1632   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1633               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1634 
1635   if (FLAG_debug_code) {
1636     Label okay;
1637     __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1638               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1639     // Unreachable code.
1640     __ break_(0xCC);
1641     __ bind(&okay);
1642   }
1643 
1644   // Dispatch to the target bytecode.
1645   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1646            kInterpreterBytecodeOffsetRegister);
1647   __ Ld_bu(a7, MemOperand(a1, 0));
1648   __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
1649   __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
1650   __ Jump(kJavaScriptCallCodeStartRegister);
1651 }
1652 
1653 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1654   // Advance the current bytecode offset stored within the given interpreter
1655   // stack frame. This simulates what all bytecode handlers do upon completion
1656   // of the underlying operation.
1657   __ Ld_d(kInterpreterBytecodeArrayRegister,
1658           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1659   __ Ld_d(kInterpreterBytecodeOffsetRegister,
1660           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1661   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1662 
1663   Label enter_bytecode, function_entry_bytecode;
1664   __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1665             Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1666                     kFunctionEntryBytecodeOffset));
1667 
1668   // Load the current bytecode.
1669   __ Add_d(a1, kInterpreterBytecodeArrayRegister,
1670            kInterpreterBytecodeOffsetRegister);
1671   __ Ld_bu(a1, MemOperand(a1, 0));
1672 
1673   // Advance to the next bytecode.
1674   Label if_return;
1675   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1676                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1677                                 a4, &if_return);
1678 
1679   __ bind(&enter_bytecode);
1680   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1681   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1682   __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1683 
1684   Generate_InterpreterEnterBytecode(masm);
1685 
1686   __ bind(&function_entry_bytecode);
1687   // If the code deoptimizes during the implicit function entry stack interrupt
1688   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1689   // not a valid bytecode offset. Detect this case and advance to the first
1690   // actual bytecode.
1691   __ li(kInterpreterBytecodeOffsetRegister,
1692         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1693   __ Branch(&enter_bytecode);
1694 
1695   // We should never take the if_return path.
1696   __ bind(&if_return);
1697   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1698 }
1699 
1700 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1701   Generate_InterpreterEnterBytecode(masm);
1702 }
1703 
1704 namespace {
1705 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1706                                       bool java_script_builtin,
1707                                       bool with_result) {
1708   const RegisterConfiguration* config(RegisterConfiguration::Default());
1709   int allocatable_register_count = config->num_allocatable_general_registers();
1710   UseScratchRegisterScope temps(masm);
1711   Register scratch = temps.Acquire();
1712   if (with_result) {
1713     if (java_script_builtin) {
1714       __ mov(scratch, a0);
1715     } else {
1716       // Overwrite the hole inserted by the deoptimizer with the return value
1717       // from the LAZY deopt point.
1718       __ St_d(
1719           a0,
1720           MemOperand(
1721               sp, config->num_allocatable_general_registers() * kPointerSize +
1722                       BuiltinContinuationFrameConstants::kFixedFrameSize));
1723     }
1724   }
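  // Restore all allocatable general registers from the stack; for JavaScript
  // builtins, untag the Smi-tagged argument count while doing so.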
1725   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1726     int code = config->GetAllocatableGeneralCode(i);
1727     __ Pop(Register::from_code(code));
1728     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1729       __ SmiUntag(Register::from_code(code));
1730     }
1731   }
1732 
1733   if (with_result && java_script_builtin) {
1734     // Overwrite the hole inserted by the deoptimizer with the return value from
1735     // the LAZY deopt point. a0 contains the arguments count; the return value
1736     // from LAZY is always the last argument.
1737     constexpr int return_value_offset =
1738         BuiltinContinuationFrameConstants::kFixedSlotCount -
1739         kJSArgcReceiverSlots;
1740     __ Add_d(a0, a0, Operand(return_value_offset));
1741     __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7);
1742     __ St_d(scratch, MemOperand(t0, 0));
1743     // Recover arguments count.
1744     __ Sub_d(a0, a0, Operand(return_value_offset));
1745   }
1746 
1747   __ Ld_d(
1748       fp,
1749       MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1750   // Load builtin index (stored as a Smi) and use it to get the builtin start
1751   // address from the builtins table.
1752   __ Pop(t0);
1753   __ Add_d(sp, sp,
1754            Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1755   __ Pop(ra);
1756   __ LoadEntryFromBuiltinIndex(t0);
1757   __ Jump(t0);
1758 }
1759 }  // namespace
1760 
1761 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1762   Generate_ContinueToBuiltinHelper(masm, false, false);
1763 }
1764 
1765 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1766     MacroAssembler* masm) {
1767   Generate_ContinueToBuiltinHelper(masm, false, true);
1768 }
1769 
1770 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1771   Generate_ContinueToBuiltinHelper(masm, true, false);
1772 }
1773 
1774 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1775     MacroAssembler* masm) {
1776   Generate_ContinueToBuiltinHelper(masm, true, true);
1777 }
1778 
1779 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1780   {
1781     FrameScope scope(masm, StackFrame::INTERNAL);
1782     __ CallRuntime(Runtime::kNotifyDeoptimized);
1783   }
1784 
1785   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
1786   __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
1787   __ Add_d(sp, sp, Operand(1 * kPointerSize));  // Remove state.
1788   __ Ret();
1789 }
1790 
1791 namespace {
1792 
1793 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1794                        Operand offset = Operand(zero_reg)) {
1795   __ Add_d(ra, entry_address, offset);
1796   // And "return" to the OSR entry point of the function.
1797   __ Ret();
1798 }
1799 
1800 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1801   {
1802     FrameScope scope(masm, StackFrame::INTERNAL);
1803     __ CallRuntime(Runtime::kCompileOptimizedOSR);
1804   }
1805 
1806   // If the code object is null, just return to the caller.
1807   __ Ret(eq, a0, Operand(Smi::zero()));
1808 
1809   if (is_interpreter) {
1810     // Drop the handler frame that is sitting on top of the actual
1811     // JavaScript frame. This is the case when OSR is triggered from bytecode.
1812     __ LeaveFrame(StackFrame::STUB);
1813   }
1814 
1815   // Load deoptimization data from the code object.
1816   // <deopt_data> = <code>[#deoptimization_data_offset]
1817   __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
1818                                  kHeapObjectTag));
1819 
1820   // Load the OSR entrypoint offset from the deoptimization data.
1821   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1822   __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1823                                      DeoptimizationData::kOsrPcOffsetIndex) -
1824                                      kHeapObjectTag));
1825 
1826   // Compute the target address = code_obj + header_size + osr_offset
1827   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1828   __ Add_d(a0, a0, a1);
1829   Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
1830 }
1831 }  // namespace
1832 
1833 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1834   return OnStackReplacement(masm, true);
1835 }
1836 
1837 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1838   __ Ld_d(kContextRegister,
1839           MemOperand(fp, StandardFrameConstants::kContextOffset));
1840   return OnStackReplacement(masm, false);
1841 }
1842 
1843 // static
1844 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1845   // ----------- S t a t e -------------
1846   //  -- a0    : argc
1847   //  -- sp[0] : receiver
1848   //  -- sp[4] : thisArg
1849   //  -- sp[8] : argArray
1850   // -----------------------------------
1851 
1852   Register argc = a0;
1853   Register arg_array = a2;
1854   Register receiver = a1;
1855   Register this_arg = a5;
1856   Register undefined_value = a3;
1857   Register scratch = a4;
1858 
1859   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1860 
1861   // 1. Load receiver into a1, argArray into a2 (if present), remove all
1862   // arguments from the stack (including the receiver), and push thisArg (if
1863   // present) instead.
1864   {
1865     __ Sub_d(scratch, argc, JSParameterCount(0));
1866     __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
1867     __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
1868     __ Movz(arg_array, undefined_value, scratch);  // if argc == 0
1869     __ Movz(this_arg, undefined_value, scratch);   // if argc == 0
1870     __ Sub_d(scratch, scratch, Operand(1));
1871     __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
1872     __ Ld_d(receiver, MemOperand(sp, 0));
1873     __ DropArgumentsAndPushNewReceiver(argc, this_arg,
1874                                        TurboAssembler::kCountIsInteger,
1875                                        TurboAssembler::kCountIncludesReceiver);
1876   }
1877 
1878   // ----------- S t a t e -------------
1879   //  -- a2    : argArray
1880   //  -- a1    : receiver
1881   //  -- a3    : undefined root value
1882   //  -- sp[0] : thisArg
1883   // -----------------------------------
1884 
1885   // 2. We don't need to check explicitly for callable receiver here,
1886   // since that's the first thing the Call/CallWithArrayLike builtins
1887   // will do.
1888 
1889   // 3. Tail call with no arguments if argArray is null or undefined.
1890   Label no_arguments;
1891   __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
1892   __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
1893 
1894   // 4a. Apply the receiver to the given argArray.
1895   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1896           RelocInfo::CODE_TARGET);
1897 
1898   // 4b. The argArray is either null or undefined, so we tail call without any
1899   // arguments to the receiver.
1900   __ bind(&no_arguments);
1901   {
1902     __ li(a0, JSParameterCount(0));
1903     DCHECK(receiver == a1);
1904     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1905   }
1906 }
1907 
1908 // static
1909 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1910   // 1. Get the callable to call (passed as receiver) from the stack.
1911   { __ Pop(a1); }
1912 
1913   // 2. Make sure we have at least one argument.
1914   // a0: actual number of arguments
1915   {
1916     Label done;
1917     __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
1918     __ PushRoot(RootIndex::kUndefinedValue);
1919     __ Add_d(a0, a0, Operand(1));
1920     __ bind(&done);
1921   }
1922 
1923   // 3. Adjust the actual number of arguments.
1924   __ addi_d(a0, a0, -1);
1925 
1926   // 4. Call the callable.
1927   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1928 }
1929 
1930 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1931   // ----------- S t a t e -------------
1932   //  -- a0     : argc
1933   //  -- sp[0]  : receiver
1934   //  -- sp[8]  : target         (if argc >= 1)
1935   //  -- sp[16] : thisArgument   (if argc >= 2)
1936   //  -- sp[24] : argumentsList  (if argc == 3)
1937   // -----------------------------------
1938 
1939   Register argc = a0;
1940   Register arguments_list = a2;
1941   Register target = a1;
1942   Register this_argument = a5;
1943   Register undefined_value = a3;
1944   Register scratch = a4;
1945 
1946   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1947 
1948   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1949   // remove all arguments from the stack (including the receiver), and push
1950   // thisArgument (if present) instead.
1951   {
1952     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
1953     // consistent state for a simple pop operation.
1954 
1955     __ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
1956     __ Ld_d(target, MemOperand(sp, kPointerSize));
1957     __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
1958     __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
1959     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
1960     __ Movz(this_argument, undefined_value, scratch);   // if argc == 0
1961     __ Movz(target, undefined_value, scratch);          // if argc == 0
1962     __ Sub_d(scratch, scratch, Operand(1));
1963     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
1964     __ Movz(this_argument, undefined_value, scratch);   // if argc == 1
1965     __ Sub_d(scratch, scratch, Operand(1));
1966     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
1967 
1968     __ DropArgumentsAndPushNewReceiver(argc, this_argument,
1969                                        TurboAssembler::kCountIsInteger,
1970                                        TurboAssembler::kCountIncludesReceiver);
1971   }
1972 
1973   // ----------- S t a t e -------------
1974   //  -- a2    : argumentsList
1975   //  -- a1    : target
1976   //  -- a3    : undefined root value
1977   //  -- sp[0] : thisArgument
1978   // -----------------------------------
1979 
1980   // 2. We don't need to check explicitly for callable target here,
1981   // since that's the first thing the Call/CallWithArrayLike builtins
1982   // will do.
1983 
1984   // 3. Apply the target to the given argumentsList.
1985   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1986           RelocInfo::CODE_TARGET);
1987 }
1988 
1989 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1990   // ----------- S t a t e -------------
1991   //  -- a0     : argc
1992   //  -- sp[0]   : receiver
1993   //  -- sp[8]   : target
1994   //  -- sp[16]  : argumentsList
1995   //  -- sp[24]  : new.target (optional)
1996   // -----------------------------------
1997 
1998   Register argc = a0;
1999   Register arguments_list = a2;
2000   Register target = a1;
2001   Register new_target = a3;
2002   Register undefined_value = a4;
2003   Register scratch = a5;
2004 
2005   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2006 
2007   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2008   // new.target into a3 (if present, otherwise use target), remove all
2009   // arguments from the stack (including the receiver), and push thisArgument
2010   // (if present) instead.
2011   {
2012     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2013     // consistent state for a simple pop operation.
2014 
2015     __ Sub_d(scratch, argc, Operand(JSParameterCount(0)));
2016     __ Ld_d(target, MemOperand(sp, kPointerSize));
2017     __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
2018     __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
2019     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
2020     __ Movz(new_target, undefined_value, scratch);      // if argc == 0
2021     __ Movz(target, undefined_value, scratch);          // if argc == 0
2022     __ Sub_d(scratch, scratch, Operand(1));
2023     __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
2024     __ Movz(new_target, target, scratch);               // if argc == 1
2025     __ Sub_d(scratch, scratch, Operand(1));
2026     __ Movz(new_target, target, scratch);  // if argc == 2
2027 
2028     __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
2029                                        TurboAssembler::kCountIsInteger,
2030                                        TurboAssembler::kCountIncludesReceiver);
2031   }
2032 
2033   // ----------- S t a t e -------------
2034   //  -- a2    : argumentsList
2035   //  -- a1    : target
2036   //  -- a3    : new.target
2037   //  -- sp[0] : receiver (undefined)
2038   // -----------------------------------
2039 
2040   // 2. We don't need to check explicitly for constructor target here,
2041   // since that's the first thing the Construct/ConstructWithArrayLike
2042   // builtins will do.
2043 
2044   // 3. We don't need to check explicitly for constructor new.target here,
2045   // since that's the second thing the Construct/ConstructWithArrayLike
2046   // builtins will do.
2047 
2048   // 4. Construct the target with the given new.target and argumentsList.
2049   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2050           RelocInfo::CODE_TARGET);
2051 }
2052 
2053 namespace {
2054 
2055 // Allocate new stack space for |count| arguments and shift all existing
2056 // arguments already on the stack. |pointer_to_new_space_out| points to the
2057 // first free slot on the stack to copy additional arguments to, and
2058 // |argc_in_out| is updated to include |count|.
2059 void Generate_AllocateSpaceAndShiftExistingArguments(
2060     MacroAssembler* masm, Register count, Register argc_in_out,
2061     Register pointer_to_new_space_out, Register scratch1, Register scratch2,
2062     Register scratch3) {
2063   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2064                      scratch2));
2065   Register old_sp = scratch1;
2066   Register new_space = scratch2;
2067   __ mov(old_sp, sp);
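  // Lower sp to reserve room for |count| additional argument slots.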
2068   __ slli_d(new_space, count, kPointerSizeLog2);
2069   __ Sub_d(sp, sp, Operand(new_space));
2070 
2071   Register end = scratch2;
2072   Register value = scratch3;
2073   Register dest = pointer_to_new_space_out;
2074   __ mov(dest, sp);
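  // Copy the |argc_in_out| existing arguments from the old stack area down
  // into the newly reserved area.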
2075   __ Alsl_d(end, argc_in_out, old_sp, kSystemPointerSizeLog2);
2076   Label loop, done;
2077   __ Branch(&done, ge, old_sp, Operand(end));
2078   __ bind(&loop);
2079   __ Ld_d(value, MemOperand(old_sp, 0));
2080   __ St_d(value, MemOperand(dest, 0));
2081   __ Add_d(old_sp, old_sp, Operand(kSystemPointerSize));
2082   __ Add_d(dest, dest, Operand(kSystemPointerSize));
2083   __ Branch(&loop, lt, old_sp, Operand(end));
2084   __ bind(&done);
2085 
2086   // Update total number of arguments.
2087   __ Add_d(argc_in_out, argc_in_out, count);
2088 }
2089 
2090 }  // namespace
2091 
2092 // static
2093 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2094                                                Handle<Code> code) {
2095   // ----------- S t a t e -------------
2096   //  -- a1 : target
2097   //  -- a0 : number of parameters on the stack
2098   //  -- a2 : arguments list (a FixedArray)
2099   //  -- a4 : len (number of elements to push from args)
2100   //  -- a3 : new.target (for [[Construct]])
2101   // -----------------------------------
2102   if (FLAG_debug_code) {
2103     // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2104     Label ok, fail;
2105     __ AssertNotSmi(a2);
2106     __ GetObjectType(a2, t8, t8);
2107     __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
2108     __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2109     __ Branch(&ok, eq, a4, Operand(zero_reg));
2110     // Fall through.
2111     __ bind(&fail);
2112     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2113 
2114     __ bind(&ok);
2115   }
2116 
2117   Register args = a2;
2118   Register len = a4;
2119 
2120   // Check for stack overflow.
2121   Label stack_overflow;
2122   __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2123 
2124   // Move the arguments already in the stack,
2125   // including the receiver and the return address.
2126   // a4: Number of arguments to make room for.
2127   // a0: Number of arguments already on the stack.
2128   // a7: Points to first free slot on the stack after arguments were shifted.
2129   Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1);
2130 
2131   // Push arguments onto the stack (thisArgument is already on the stack).
2132   {
2133     Label done, push, loop;
2134     Register src = a6;
2135     Register scratch = len;
2136 
2137     __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
2138     __ Branch(&done, eq, len, Operand(zero_reg));
2139     __ slli_d(scratch, len, kPointerSizeLog2);
2140     __ Sub_d(scratch, sp, Operand(scratch));
2141     __ LoadRoot(t1, RootIndex::kTheHoleValue);
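    // Holes in the arguments list are replaced with undefined before pushing.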
2142     __ bind(&loop);
2143     __ Ld_d(a5, MemOperand(src, 0));
2144     __ addi_d(src, src, kPointerSize);
2145     __ Branch(&push, ne, a5, Operand(t1));
2146     __ LoadRoot(a5, RootIndex::kUndefinedValue);
2147     __ bind(&push);
2148     __ St_d(a5, MemOperand(a7, 0));
2149     __ Add_d(a7, a7, Operand(kSystemPointerSize));
2150     __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
2151     __ Branch(&loop, ne, scratch, Operand(sp));
2152     __ bind(&done);
2153   }
2154 
2155   // Tail-call to the actual Call or Construct builtin.
2156   __ Jump(code, RelocInfo::CODE_TARGET);
2157 
2158   __ bind(&stack_overflow);
2159   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2160 }
2161 
2162 // static
2163 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2164                                                       CallOrConstructMode mode,
2165                                                       Handle<Code> code) {
2166   // ----------- S t a t e -------------
2167   //  -- a0 : the number of arguments
2168   //  -- a3 : the new.target (for [[Construct]] calls)
2169   //  -- a1 : the target to call (can be any Object)
2170   //  -- a2 : start index (to support rest parameters)
2171   // -----------------------------------
2172 
2173   // Check if new.target has a [[Construct]] internal method.
2174   if (mode == CallOrConstructMode::kConstruct) {
2175     Label new_target_constructor, new_target_not_constructor;
2176     __ JumpIfSmi(a3, &new_target_not_constructor);
2177     __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
2178     __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2179     __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2180     __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2181     __ bind(&new_target_not_constructor);
2182     {
2183       FrameScope scope(masm, StackFrame::MANUAL);
2184       __ EnterFrame(StackFrame::INTERNAL);
2185       __ Push(a3);
2186       __ CallRuntime(Runtime::kThrowNotConstructor);
2187     }
2188     __ bind(&new_target_constructor);
2189   }
2190 
2191   Label stack_done, stack_overflow;
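  // Compute the number of caller arguments to forward: the caller's argument
  // count minus the receiver and the start index.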
2192   __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2193   __ Sub_d(a7, a7, Operand(kJSArgcReceiverSlots));
2194   __ Sub_d(a7, a7, a2);
2195   __ Branch(&stack_done, le, a7, Operand(zero_reg));
2196   {
2197     // Check for stack overflow.
2198     __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2199 
2200     // Forward the arguments from the caller frame.
2201 
2202     // Point to the first argument to copy (skipping the receiver).
2203     __ Add_d(a6, fp,
2204              Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2205                      kSystemPointerSize));
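    // Skip the first a2 (start index) arguments.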
2206     __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2, t7);
2207 
2208     // Move the arguments already in the stack,
2209     // including the receiver and the return address.
2210     // a7: Number of arguments to make room for.
2211     // a0: Number of arguments already on the stack.
2212     // a2: Points to first free slot on the stack after arguments were shifted.
2213     Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1,
2214                                                     t2);
2215 
2216     // Copy arguments from the caller frame.
2217     // TODO(victorgomes): Consider using forward order as potentially more cache
2218     // friendly.
2219     {
2220       Label loop;
2221       __ bind(&loop);
2222       {
2223         __ Sub_w(a7, a7, Operand(1));
2224         __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
2225         __ Ld_d(kScratchReg, MemOperand(t0, 0));
2226         __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
2227         __ St_d(kScratchReg, MemOperand(t0, 0));
2228         __ Branch(&loop, ne, a7, Operand(zero_reg));
2229       }
2230     }
2231   }
2232   __ Branch(&stack_done);
2233   __ bind(&stack_overflow);
2234   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2235   __ bind(&stack_done);
2236 
2237   // Tail-call to the {code} handler.
2238   __ Jump(code, RelocInfo::CODE_TARGET);
2239 }
2240 
2241 // static
2242 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2243                                      ConvertReceiverMode mode) {
2244   // ----------- S t a t e -------------
2245   //  -- a0 : the number of arguments
2246   //  -- a1 : the function to call (checked to be a JSFunction)
2247   // -----------------------------------
2248   __ AssertCallableFunction(a1);
2249 
2250   __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2251 
2252   // Enter the context of the function; ToObject has to run in the function
2253   // context, and we also need to take the global proxy from the function
2254   // context in case of conversion.
2255   __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2256   // We need to convert the receiver for non-native sloppy mode functions.
2257   Label done_convert;
2258   __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2259   __ And(kScratchReg, a3,
2260          Operand(SharedFunctionInfo::IsNativeBit::kMask |
2261                  SharedFunctionInfo::IsStrictBit::kMask));
2262   __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2263   {
2264     // ----------- S t a t e -------------
2265     //  -- a0 : the number of arguments
2266     //  -- a1 : the function to call (checked to be a JSFunction)
2267     //  -- a2 : the shared function info.
2268     //  -- cp : the function context.
2269     // -----------------------------------
2270 
2271     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2272       // Patch receiver to global proxy.
2273       __ LoadGlobalProxy(a3);
2274     } else {
2275       Label convert_to_object, convert_receiver;
2276       __ LoadReceiver(a3, a0);
2277       __ JumpIfSmi(a3, &convert_to_object);
2278       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2279       __ GetObjectType(a3, a4, a4);
2280       __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
2281       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2282         Label convert_global_proxy;
2283         __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2284         __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2285         __ bind(&convert_global_proxy);
2286         {
2287           // Patch receiver to global proxy.
2288           __ LoadGlobalProxy(a3);
2289         }
2290         __ Branch(&convert_receiver);
2291       }
2292       __ bind(&convert_to_object);
2293       {
2294         // Convert receiver using ToObject.
2295         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2296         // in the fast case? (fall back to AllocateInNewSpace?)
2297         FrameScope scope(masm, StackFrame::INTERNAL);
2298         __ SmiTag(a0);
2299         __ Push(a0, a1);
2300         __ mov(a0, a3);
2301         __ Push(cp);
2302         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2303                 RelocInfo::CODE_TARGET);
2304         __ Pop(cp);
2305         __ mov(a3, a0);
2306         __ Pop(a0, a1);
2307         __ SmiUntag(a0);
2308       }
2309       __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2310       __ bind(&convert_receiver);
2311     }
2312     __ StoreReceiver(a3, a0, kScratchReg);
2313   }
2314   __ bind(&done_convert);
2315 
2316   // ----------- S t a t e -------------
2317   //  -- a0 : the number of arguments
2318   //  -- a1 : the function to call (checked to be a JSFunction)
2319   //  -- a2 : the shared function info.
2320   //  -- cp : the function context.
2321   // -----------------------------------
2322 
2323   __ Ld_hu(
2324       a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2325   __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2326 }
2327 
2328 // static
2329 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2330   // ----------- S t a t e -------------
2331   //  -- a0 : the number of arguments
2332   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2333   // -----------------------------------
2334   __ AssertBoundFunction(a1);
2335 
2336   // Patch the receiver to [[BoundThis]].
2337   {
2338     __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2339     __ StoreReceiver(t0, a0, kScratchReg);
2340   }
2341 
2342   // Load [[BoundArguments]] into a2 and its length into a4.
2343   __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2344   __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2345 
2346   // ----------- S t a t e -------------
2347   //  -- a0 : the number of arguments
2348   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2349   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2350   //  -- a4 : the number of [[BoundArguments]]
2351   // -----------------------------------
2352 
2353   // Reserve stack space for the [[BoundArguments]].
2354   {
2355     Label done;
2356     __ slli_d(a5, a4, kPointerSizeLog2);
2357     __ Sub_d(t0, sp, Operand(a5));
2358     // Check the stack for overflow. We are not trying to catch interruptions
2359     // (i.e. debug break and preemption) here, so check the "real stack limit".
2360     __ LoadStackLimit(kScratchReg,
2361                       MacroAssembler::StackLimitKind::kRealStackLimit);
2362     __ Branch(&done, hs, t0, Operand(kScratchReg));
2363     {
2364       FrameScope scope(masm, StackFrame::MANUAL);
2365       __ EnterFrame(StackFrame::INTERNAL);
2366       __ CallRuntime(Runtime::kThrowStackOverflow);
2367     }
2368     __ bind(&done);
2369   }
2370 
2371   // Pop receiver.
2372   __ Pop(t0);
2373 
2374   // Push [[BoundArguments]].
2375   {
2376     Label loop, done_loop;
2377     __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2378     __ Add_d(a0, a0, Operand(a4));
2379     __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
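    // Iterate from the last bound argument down to the first, pushing each one.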
2380     __ bind(&loop);
2381     __ Sub_d(a4, a4, Operand(1));
2382     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2383     __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
2384     __ Ld_d(kScratchReg, MemOperand(a5, 0));
2385     __ Push(kScratchReg);
2386     __ Branch(&loop);
2387     __ bind(&done_loop);
2388   }
2389 
2390   // Push receiver.
2391   __ Push(t0);
2392 
2393   // Call the [[BoundTargetFunction]] via the Call builtin.
2394   __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2395   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2396           RelocInfo::CODE_TARGET);
2397 }
2398 
2399 // static
2400 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2401   // ----------- S t a t e -------------
2402   //  -- a0 : the number of arguments
2403   //  -- a1 : the target to call (can be any Object).
2404   // -----------------------------------
2405 
2406   Register argc = a0;
2407   Register target = a1;
2408   Register map = t1;
2409   Register instance_type = t2;
2410   Register scratch = t8;
2411   DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2412 
2413   Label non_callable, class_constructor;
2414   __ JumpIfSmi(target, &non_callable);
2415   __ LoadMap(map, target);
2416   __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2417                           scratch);
2418   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2419           RelocInfo::CODE_TARGET, ls, scratch,
2420           Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
2421                   FIRST_CALLABLE_JS_FUNCTION_TYPE));
2422   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2423           RelocInfo::CODE_TARGET, eq, instance_type,
2424           Operand(JS_BOUND_FUNCTION_TYPE));
2425 
2426   // Check if target has a [[Call]] internal method.
2427   {
2428     Register flags = t1;
2429     __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2430     map = no_reg;
2431     __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2432     __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2433   }
2434 
2435   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
2436           instance_type, Operand(JS_PROXY_TYPE));
2437 
2438   // Check if target is a wrapped function and call CallWrappedFunction external
2439   // builtin.
2440   __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2441           RelocInfo::CODE_TARGET, eq, instance_type,
2442           Operand(JS_WRAPPED_FUNCTION_TYPE));
2443 
2444   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2445   // Check that the function is not a "classConstructor".
2446   __ Branch(&class_constructor, eq, instance_type,
2447             Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2448 
2449   // 2. Call to something else, which might have a [[Call]] internal method (if
2450   // not we raise an exception).
2451   // Overwrite the original receiver with the (original) target.
2452   __ StoreReceiver(target, argc, kScratchReg);
2453   // Let the "call_as_function_delegate" take care of the rest.
2454   __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2455   __ Jump(masm->isolate()->builtins()->CallFunction(
2456               ConvertReceiverMode::kNotNullOrUndefined),
2457           RelocInfo::CODE_TARGET);
2458 
2459   // 3. Call to something that is not callable.
2460   __ bind(&non_callable);
2461   {
2462     FrameScope scope(masm, StackFrame::INTERNAL);
2463     __ Push(target);
2464     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2465   }
2466 
2467   // 4. The function is a "classConstructor", need to raise an exception.
2468   __ bind(&class_constructor);
2469   {
2470     FrameScope frame(masm, StackFrame::INTERNAL);
2471     __ Push(target);
2472     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2473   }
2474 }
2475 
2476 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2477   // ----------- S t a t e -------------
2478   //  -- a0 : the number of arguments
2479   //  -- a1 : the constructor to call (checked to be a JSFunction)
2480   //  -- a3 : the new target (checked to be a constructor)
2481   // -----------------------------------
2482   __ AssertConstructor(a1);
2483   __ AssertFunction(a1);
2484 
2485   // The calling convention for function-specific ConstructStubs requires
2486   // a2 to contain either an AllocationSite or undefined.
2487   __ LoadRoot(a2, RootIndex::kUndefinedValue);
2488 
2489   Label call_generic_stub;
2490 
2491   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2492   __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2493   __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2494   __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2495   __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2496 
2497   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2498           RelocInfo::CODE_TARGET);
2499 
2500   __ bind(&call_generic_stub);
2501   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2502           RelocInfo::CODE_TARGET);
2503 }
2504 
2505 // static
2506 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2507   // ----------- S t a t e -------------
2508   //  -- a0 : the number of arguments
2509   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2510   //  -- a3 : the new target (checked to be a constructor)
2511   // -----------------------------------
2512   __ AssertConstructor(a1);
2513   __ AssertBoundFunction(a1);
2514 
2515   // Load [[BoundArguments]] into a2 and its length into a4.
2516   __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2517   __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2518 
2519   // ----------- S t a t e -------------
2520   //  -- a0 : the number of arguments
2521   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2522   //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2523   //  -- a3 : the new target (checked to be a constructor)
2524   //  -- a4 : the number of [[BoundArguments]]
2525   // -----------------------------------
2526 
2527   // Reserve stack space for the [[BoundArguments]].
2528   {
2529     Label done;
2530     __ slli_d(a5, a4, kPointerSizeLog2);
2531     __ Sub_d(t0, sp, Operand(a5));
2532     // Check the stack for overflow. We are not trying to catch interruptions
2533     // (i.e. debug break and preemption) here, so check the "real stack limit".
2534     __ LoadStackLimit(kScratchReg,
2535                       MacroAssembler::StackLimitKind::kRealStackLimit);
2536     __ Branch(&done, hs, t0, Operand(kScratchReg));
2537     {
2538       FrameScope scope(masm, StackFrame::MANUAL);
2539       __ EnterFrame(StackFrame::INTERNAL);
2540       __ CallRuntime(Runtime::kThrowStackOverflow);
2541     }
2542     __ bind(&done);
2543   }
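  // The check above is, roughly:
  //   if (sp - num_bound_args * kPointerSize < real_stack_limit)
  //     ThrowStackOverflow();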
2544 
2545   // Pop receiver.
2546   __ Pop(t0);
2547 
2548   // Push [[BoundArguments]].
2549   {
2550     Label loop, done_loop;
2551     __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2552     __ Add_d(a0, a0, Operand(a4));
2553     __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2554     __ bind(&loop);
2555     __ Sub_d(a4, a4, Operand(1));
2556     __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2557     __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
2558     __ Ld_d(kScratchReg, MemOperand(a5, 0));
2559     __ Push(kScratchReg);
2560     __ Branch(&loop);
2561     __ bind(&done_loop);
2562   }
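  // The loop above pushes the bound arguments from the last element down to
  // element 0, so bound argument 0 ends up adjacent to the receiver slot that
  // is re-pushed next.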
2563 
2564   // Push receiver.
2565   __ Push(t0);
2566 
2567   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2568   {
2569     Label skip_load;
2570     __ Branch(&skip_load, ne, a1, Operand(a3));
2571     __ Ld_d(a3,
2572             FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2573     __ bind(&skip_load);
2574   }
2575 
2576   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2577   __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2578   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2579 }
2580 
2581 // static
2582 void Builtins::Generate_Construct(MacroAssembler* masm) {
2583   // ----------- S t a t e -------------
2584   //  -- a0 : the number of arguments
2585   //  -- a1 : the constructor to call (can be any Object)
2586   //  -- a3 : the new target (either the same as the constructor or
2587   //          the JSFunction on which new was invoked initially)
2588   // -----------------------------------
2589 
2590   Register argc = a0;
2591   Register target = a1;
2592   Register map = t1;
2593   Register instance_type = t2;
2594   Register scratch = t8;
2595   DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2596 
2597   // Check if target is a Smi.
2598   Label non_constructor, non_proxy;
2599   __ JumpIfSmi(target, &non_constructor);
2600 
2601   // Check if target has a [[Construct]] internal method.
2602   __ Ld_d(map, FieldMemOperand(target, HeapObject::kMapOffset));
2603   {
2604     Register flags = t3;
2605     __ Ld_bu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2606     __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2607     __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2608   }
2609 
2610   // Dispatch based on instance type.
2611   __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2612   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2613           RelocInfo::CODE_TARGET, ls, scratch,
2614           Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
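  // GetInstanceTypeRange effectively leaves instance_type minus
  // FIRST_JS_FUNCTION_TYPE in scratch, so the unsigned (ls) comparison above
  // covers every JSFunction subtype with a single conditional tail call.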
2615 
2616   // Only dispatch to bound functions after checking whether they are
2617   // constructors.
2618   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2619           RelocInfo::CODE_TARGET, eq, instance_type,
2620           Operand(JS_BOUND_FUNCTION_TYPE));
2621 
2622   // Only dispatch to proxies after checking whether they are constructors.
2623   __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2624   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2625           RelocInfo::CODE_TARGET);
2626 
2627   // Called Construct on an exotic Object with a [[Construct]] internal method.
2628   __ bind(&non_proxy);
2629   {
2630     // Overwrite the original receiver with the (original) target.
2631     __ StoreReceiver(target, argc, kScratchReg);
2632     // Let the "call_as_constructor_delegate" take care of the rest.
2633     __ LoadNativeContextSlot(target,
2634                              Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2635     __ Jump(masm->isolate()->builtins()->CallFunction(),
2636             RelocInfo::CODE_TARGET);
2637   }
2638 
2639   // Called Construct on an Object that doesn't have a [[Construct]] internal
2640   // method.
2641   __ bind(&non_constructor);
2642   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2643           RelocInfo::CODE_TARGET);
2644 }
2645 
2646 #if V8_ENABLE_WEBASSEMBLY
2647 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2648   // The function index was put in t0 by the jump table trampoline.
2649   // Convert to Smi for the runtime call
2650   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2651 
2652   // Compute register lists for parameters to be saved. We save all parameter
2653   // registers (see wasm-linkage.h). They might be overwritten in the runtime
2654   // call below. We don't have any callee-saved registers in wasm, so no need to
2655   // store anything else.
2656   constexpr RegList kSavedGpRegs = ([]() constexpr {
2657     RegList saved_gp_regs;
2658     for (Register gp_param_reg : wasm::kGpParamRegisters) {
2659       saved_gp_regs.set(gp_param_reg);
2660     }
2661 
2662     // All set registers were unique.
2663     CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2664     // The Wasm instance must be part of the saved registers.
2665     CHECK(saved_gp_regs.has(kWasmInstanceRegister));
2666     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2667              saved_gp_regs.Count());
2668     return saved_gp_regs;
2669   })();
2670 
2671   constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2672     DoubleRegList saved_fp_regs;
2673     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2674       saved_fp_regs.set(fp_param_reg);
2675     }
2676 
2677     CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2678     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2679              saved_fp_regs.Count());
2680     return saved_fp_regs;
2681   })();
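  // Both register lists are computed at compile time from the wasm parameter
  // registers declared in wasm-linkage.h; the CHECKs above keep them in sync
  // with the WasmCompileLazyFrameConstants used for the frame layout.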
2682 
2683   {
2684     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2685     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2686 
2687     // Save registers that we need to keep alive across the runtime call.
2688     __ MultiPush(kSavedGpRegs);
2689     __ MultiPushFPU(kSavedFpRegs);
2690 
2691     // kFixedFrameSizeFromFp is hard-coded to include space for SIMD
2692     // registers, so we still need to allocate extra (unused) space on the
2693     // stack as if they were saved.
2694     __ Sub_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2695 
2696     // Pass the instance and the function index as explicit arguments to the
2697     // runtime function.
2698     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2699     // Initialize the JavaScript context with 0. CEntry will use it to
2700     // set the current context on the isolate.
2701     __ Move(kContextRegister, Smi::zero());
2702     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2703 
2704     // Untag the returned Smi into t7 for later use.
2705     static_assert(!kSavedGpRegs.has(t7));
2706     __ SmiUntag(t7, a0);
2707 
2708     __ Add_d(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2709     // Restore registers.
2710     __ MultiPopFPU(kSavedFpRegs);
2711     __ MultiPop(kSavedGpRegs);
2712   }
2713 
2714   // The runtime function returned the jump table slot offset as a Smi (now in
2715   // t7). Use that to compute the jump target.
2716   static_assert(!kSavedGpRegs.has(t8));
2717   __ Ld_d(t8, MemOperand(
2718                   kWasmInstanceRegister,
2719                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
2720   __ Add_d(t7, t8, Operand(t7));
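  // t7 = jump_table_start + jump_table_slot_offset, i.e. the address of this
  // function's slot in the instance's jump table.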
2721 
2722   // Finally, jump to the jump table slot for the function.
2723   __ Jump(t7);
2724 }
2725 
2726 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2727   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2728   {
2729     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2730 
2731     // Save all parameter registers. They might hold live values, we restore
2732     // them after the runtime call.
2733     __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2734     __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2735 
2736     // Initialize the JavaScript context with 0. CEntry will use it to
2737     // set the current context on the isolate.
2738     __ Move(cp, Smi::zero());
2739     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2740 
2741     // Restore registers.
2742     __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2743     __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2744   }
2745   __ Ret();
2746 }
2747 
2748 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2749   __ Trap();
2750 }
2751 
2752 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2753   // TODO(v8:12191): Implement for this platform.
2754   __ Trap();
2755 }
2756 
2757 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2758   // TODO(v8:12191): Implement for this platform.
2759   __ Trap();
2760 }
2761 
2762 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2763   // TODO(v8:12191): Implement for this platform.
2764   __ Trap();
2765 }
2766 
2767 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2768   // Only needed on x64.
2769   __ Trap();
2770 }
2771 
2772 #endif  // V8_ENABLE_WEBASSEMBLY
2773 
2774 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2775                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2776                                bool builtin_exit_frame) {
2777   // Called from JavaScript; parameters are on the stack as if calling a JS function.
2778   // a0: number of arguments including receiver
2779   // a1: pointer to builtin function
2780   // fp: frame pointer    (restored after C call)
2781   // sp: stack pointer    (restored as callee's sp after C call)
2782   // cp: current context  (C callee-saved)
2783   //
2784   // If argv_mode == ArgvMode::kRegister:
2785   // a2: pointer to the first argument
2786 
2787   if (argv_mode == ArgvMode::kRegister) {
2788     // Move argv into the correct register.
2789     __ mov(s1, a2);
2790   } else {
2791     // Compute the argv pointer in a callee-saved register.
2792     __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
2793     __ Sub_d(s1, s1, kPointerSize);
2794   }
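  // In the stack case the computation above is, roughly:
  //   argv = sp + (argc - 1) * kPointerSize;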
2795 
2796   // Enter the exit frame that transitions from JavaScript to C++.
2797   FrameScope scope(masm, StackFrame::MANUAL);
2798   __ EnterExitFrame(
2799       save_doubles == SaveFPRegsMode::kSave, 0,
2800       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2801 
2802   // s0: number of arguments  including receiver (C callee-saved)
2803   // s1: pointer to first argument (C callee-saved)
2804   // s2: pointer to builtin function (C callee-saved)
2805 
2806   // Prepare arguments for C routine.
2807   // a0 = argc
2808   __ mov(s0, a0);
2809   __ mov(s2, a1);
2810 
2811   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
2812   // also need to reserve the 4 argument slots on the stack.
2813 
2814   __ AssertStackIsAligned();
2815 
2816   // a0 = argc, a1 = argv, a2 = isolate
2817   __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2818   __ mov(a1, s1);
2819 
2820   __ StoreReturnAddressAndCall(s2);
2821 
2822   // Result returned in a0 or a1:a0 - do not destroy these registers!
2823 
2824   // Check result for exception sentinel.
2825   Label exception_returned;
2826   __ LoadRoot(a4, RootIndex::kException);
2827   __ Branch(&exception_returned, eq, a4, Operand(a0));
2828 
2829   // Check that there is no pending exception, otherwise we
2830   // should have returned the exception sentinel.
2831   if (FLAG_debug_code) {
2832     Label okay;
2833     ExternalReference pending_exception_address = ExternalReference::Create(
2834         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2835     __ li(a2, pending_exception_address);
2836     __ Ld_d(a2, MemOperand(a2, 0));
2837     __ LoadRoot(a4, RootIndex::kTheHoleValue);
2838     // Cannot use Check here, as it attempts to generate a call into the runtime.
2839     __ Branch(&okay, eq, a4, Operand(a2));
2840     __ stop();
2841     __ bind(&okay);
2842   }
2843 
2844   // Exit C frame and return.
2845   // a0:a1: result
2846   // sp: stack pointer
2847   // fp: frame pointer
2848   Register argc = argv_mode == ArgvMode::kRegister
2849                       // We don't want to pop arguments so set argc to no_reg.
2850                       ? no_reg
2851                       // s0: still holds argc (callee-saved).
2852                       : s0;
2853   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
2854 
2855   // Handling of exception.
2856   __ bind(&exception_returned);
2857 
2858   ExternalReference pending_handler_context_address = ExternalReference::Create(
2859       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2860   ExternalReference pending_handler_entrypoint_address =
2861       ExternalReference::Create(
2862           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2863   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2864       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2865   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2866       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2867 
2868   // Ask the runtime for help to determine the handler. This will set a0 to
2869   // contain the current pending exception; don't clobber it.
2870   ExternalReference find_handler =
2871       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2872   {
2873     FrameScope scope(masm, StackFrame::MANUAL);
2874     __ PrepareCallCFunction(3, 0, a0);
2875     __ mov(a0, zero_reg);
2876     __ mov(a1, zero_reg);
2877     __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2878     __ CallCFunction(find_handler, 3);
2879   }
2880 
2881   // Retrieve the handler context, SP and FP.
2882   __ li(cp, pending_handler_context_address);
2883   __ Ld_d(cp, MemOperand(cp, 0));
2884   __ li(sp, pending_handler_sp_address);
2885   __ Ld_d(sp, MemOperand(sp, 0));
2886   __ li(fp, pending_handler_fp_address);
2887   __ Ld_d(fp, MemOperand(fp, 0));
2888 
2889   // If the handler is a JS frame, restore the context to the frame. Note that
2890   // the context will be set to (cp == 0) for non-JS frames.
2891   Label zero;
2892   __ Branch(&zero, eq, cp, Operand(zero_reg));
2893   __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2894   __ bind(&zero);
2895 
2896   // Clear c_entry_fp, like we do in `LeaveExitFrame`.
2897   {
2898     UseScratchRegisterScope temps(masm);
2899     Register scratch = temps.Acquire();
2900     __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
2901                                              masm->isolate()));
2902     __ St_d(zero_reg, MemOperand(scratch, 0));
2903   }
2904 
2905   // Compute the handler entry address and jump to it.
2906   __ li(t7, pending_handler_entrypoint_address);
2907   __ Ld_d(t7, MemOperand(t7, 0));
2908   __ Jump(t7);
2909 }
2910 
2911 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2912   Label done;
2913   Register result_reg = t0;
2914 
2915   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2916   Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2917   Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2918   DoubleRegister double_scratch = kScratchDoubleReg;
2919 
2920   // Account for saved regs.
2921   const int kArgumentOffset = 4 * kPointerSize;
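  // Four registers (result_reg, scratch, scratch2, scratch3) are pushed below,
  // so once they are saved the double argument lives at sp + kArgumentOffset.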
2922 
2923   __ Push(result_reg);
2924   __ Push(scratch, scratch2, scratch3);
2925 
2926   // Load double input.
2927   __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
2928 
2929   // Try a conversion to a signed integer.
2930   __ ftintrz_w_d(double_scratch, double_scratch);
2931   // Move the converted value into scratch3 (the result if no exception).
2932   __ movfr2gr_s(scratch3, double_scratch);
2933 
2934   // Retrieve the FCSR so we can check it for conversion exceptions.
2935   __ movfcsr2gr(scratch);
2936 
2937   // Check for overflow and NaNs.
2938   __ And(scratch, scratch,
2939          kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
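  // Keep every exception cause bit except divide-by-zero; any remaining set
  // bit means the fast ftintrz conversion did not produce a usable result.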
2940   // If we had no exceptions then set result_reg and we are done.
2941   Label error;
2942   __ Branch(&error, ne, scratch, Operand(zero_reg));
2943   __ Move(result_reg, scratch3);
2944   __ Branch(&done);
2945   __ bind(&error);
2946 
2947   // Load the double value and perform a manual truncation.
2948   Register input_high = scratch2;
2949   Register input_low = scratch3;
2950 
2951   __ Ld_w(input_low,
2952           MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2953   __ Ld_w(input_high,
2954           MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
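  // IEEE-754 layout used below: input_high holds the sign bit, the 11 exponent
  // bits and the top 20 mantissa bits of the double; input_low holds the
  // remaining 32 mantissa bits.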
2955 
2956   Label normal_exponent;
2957   // Extract the biased exponent into result_reg.
2958   __ bstrpick_w(result_reg, input_high,
2959                 HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
2960                 HeapNumber::kExponentShift);
2961 
2962   // Check for Infinity and NaNs, which should return 0.
2963   __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
2964   __ Movz(result_reg, zero_reg, scratch);
2965   __ Branch(&done, eq, scratch, Operand(zero_reg));
2966 
2967   // Express exponent as delta to (number of mantissa bits + 31).
2968   __ Sub_w(result_reg, result_reg,
2969            Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2970 
2971   // If the delta is strictly positive, all bits would be shifted away,
2972   // which means that we can return 0.
2973   __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2974   __ mov(result_reg, zero_reg);
2975   __ Branch(&done);
2976 
2977   __ bind(&normal_exponent);
2978   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2979   // Calculate shift.
2980   __ Add_w(scratch, result_reg,
2981            Operand(kShiftBase + HeapNumber::kMantissaBits));
2982 
2983   // Save the sign.
2984   Register sign = result_reg;
2985   result_reg = no_reg;
2986   __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2987 
2988   // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
2989   // need to check for this specific case.
2990   Label high_shift_needed, high_shift_done;
2991   __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2992   __ mov(input_high, zero_reg);
2993   __ Branch(&high_shift_done);
2994   __ bind(&high_shift_needed);
2995 
2996   // Set the implicit 1 before the mantissa part in input_high.
2997   __ Or(input_high, input_high,
2998         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2999   // Shift the mantissa bits to the correct position.
3000   // We don't need to clear non-mantissa bits as they will be shifted away.
3001     // If they weren't, it would mean that the answer is in the 32-bit range.
3002   __ sll_w(input_high, input_high, scratch);
3003 
3004   __ bind(&high_shift_done);
3005 
3006   // Replace the shifted bits with bits from the lower mantissa word.
3007   Label pos_shift, shift_done;
3008   __ li(kScratchReg, 32);
3009   __ sub_w(scratch, kScratchReg, scratch);
3010   __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
3011 
3012   // Negate scratch.
3013   __ Sub_w(scratch, zero_reg, scratch);
3014   __ sll_w(input_low, input_low, scratch);
3015   __ Branch(&shift_done);
3016 
3017   __ bind(&pos_shift);
3018   __ srl_w(input_low, input_low, scratch);
3019 
3020   __ bind(&shift_done);
3021   __ Or(input_high, input_high, Operand(input_low));
3022   // Restore sign if necessary.
3023   __ mov(scratch, sign);
3024   result_reg = sign;
3025   sign = no_reg;
3026   __ Sub_w(result_reg, zero_reg, input_high);
3027   __ Movz(result_reg, input_high, scratch);
3028 
3029   __ bind(&done);
3030 
3031   __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
3032   __ Pop(scratch, scratch2, scratch3);
3033   __ Pop(result_reg);
3034   __ Ret();
3035 }
3036 
3037 namespace {
3038 
3039 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3040   int64_t offset = (ref0.address() - ref1.address());
3041   DCHECK(static_cast<int>(offset) == offset);
3042   return static_cast<int>(offset);
3043 }
3044 
3045 // Calls an API function. Allocates a HandleScope, extracts the returned value
3046 // from the handle and propagates exceptions. Restores the context. stack_space
3047 // is the space to be unwound on exit (it includes the call's JS arguments
3048 // space and the additional space allocated for the fast call).
3049 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3050                               ExternalReference thunk_ref, int stack_space,
3051                               MemOperand* stack_space_operand,
3052                               MemOperand return_value_operand) {
3053   Isolate* isolate = masm->isolate();
3054   ExternalReference next_address =
3055       ExternalReference::handle_scope_next_address(isolate);
3056   const int kNextOffset = 0;
3057   const int kLimitOffset = AddressOffset(
3058       ExternalReference::handle_scope_limit_address(isolate), next_address);
3059   const int kLevelOffset = AddressOffset(
3060       ExternalReference::handle_scope_level_address(isolate), next_address);
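  // next, limit and level live in the isolate's HandleScope data; expressing
  // limit and level as offsets from next_address lets a single base register
  // (s5, loaded below) address all three fields.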
3061 
3062   DCHECK(function_address == a1 || function_address == a2);
3063 
3064   Label profiler_enabled, end_profiler_check;
3065   __ li(t7, ExternalReference::is_profiling_address(isolate));
3066   __ Ld_b(t7, MemOperand(t7, 0));
3067   __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
3068   __ li(t7, ExternalReference::address_of_runtime_stats_flag());
3069   __ Ld_w(t7, MemOperand(t7, 0));
3070   __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
3071   {
3072     // Call the api function directly.
3073     __ mov(t7, function_address);
3074     __ Branch(&end_profiler_check);
3075   }
3076 
3077   __ bind(&profiler_enabled);
3078   {
3079     // Additional parameter is the address of the actual callback.
3080     __ li(t7, thunk_ref);
3081   }
3082   __ bind(&end_profiler_check);
3083 
3084   // Allocate HandleScope in callee-save registers.
3085   __ li(s5, next_address);
3086   __ Ld_d(s0, MemOperand(s5, kNextOffset));
3087   __ Ld_d(s1, MemOperand(s5, kLimitOffset));
3088   __ Ld_w(s2, MemOperand(s5, kLevelOffset));
3089   __ Add_w(s2, s2, Operand(1));
3090   __ St_w(s2, MemOperand(s5, kLevelOffset));
3091 
3092   __ StoreReturnAddressAndCall(t7);
3093 
3094   Label promote_scheduled_exception;
3095   Label delete_allocated_handles;
3096   Label leave_exit_frame;
3097   Label return_value_loaded;
3098 
3099   // Load value from ReturnValue.
3100   __ Ld_d(a0, return_value_operand);
3101   __ bind(&return_value_loaded);
3102 
3103   // No more valid handles (the result handle was the last one). Restore
3104   // previous handle scope.
3105   __ St_d(s0, MemOperand(s5, kNextOffset));
3106   if (FLAG_debug_code) {
3107     __ Ld_w(a1, MemOperand(s5, kLevelOffset));
3108     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3109              Operand(s2));
3110   }
3111   __ Sub_w(s2, s2, Operand(1));
3112   __ St_w(s2, MemOperand(s5, kLevelOffset));
3113   __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
3114   __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3115 
3116   // Leave the API exit frame.
3117   __ bind(&leave_exit_frame);
3118 
3119   if (stack_space_operand == nullptr) {
3120     DCHECK_NE(stack_space, 0);
3121     __ li(s0, Operand(stack_space));
3122   } else {
3123     DCHECK_EQ(stack_space, 0);
3124     __ Ld_d(s0, *stack_space_operand);
3125   }
3126 
3127   static constexpr bool kDontSaveDoubles = false;
3128   static constexpr bool kRegisterContainsSlotCount = false;
3129   __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
3130                     kRegisterContainsSlotCount);
3131 
3132   // Check if the function scheduled an exception.
3133   __ LoadRoot(a4, RootIndex::kTheHoleValue);
3134   __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3135   __ Ld_d(a5, MemOperand(kScratchReg, 0));
3136   __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
3137 
3138   __ Ret();
3139 
3140   // Re-throw by promoting a scheduled exception.
3141   __ bind(&promote_scheduled_exception);
3142   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3143 
3144   // HandleScope limit has changed. Delete allocated extensions.
3145   __ bind(&delete_allocated_handles);
3146   __ St_d(s1, MemOperand(s5, kLimitOffset));
3147   __ mov(s0, a0);
3148   __ PrepareCallCFunction(1, s1);
3149   __ li(a0, ExternalReference::isolate_address(isolate));
3150   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3151   __ mov(a0, s0);
3152   __ jmp(&leave_exit_frame);
3153 }
3154 
3155 }  // namespace
3156 
3157 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3158   // ----------- S t a t e -------------
3159   //  -- cp                  : context
3160   //  -- a1                  : api function address
3161   //  -- a2                  : arguments count
3162   //  -- a3                  : call data
3163   //  -- a0                  : holder
3164   //  -- sp[0]               : receiver
3165   //  -- sp[8]               : first argument
3166   //  -- ...
3167   //  -- sp[(argc) * 8]      : last argument
3168   // -----------------------------------
3169 
3170   Register api_function_address = a1;
3171   Register argc = a2;
3172   Register call_data = a3;
3173   Register holder = a0;
3174   Register scratch = t0;
3175   Register base = t1;  // For addressing MemOperands on the stack.
3176 
3177   DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
3178                      base));
3179 
3180   using FCA = FunctionCallbackArguments;
3181 
3182   STATIC_ASSERT(FCA::kArgsLength == 6);
3183   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3184   STATIC_ASSERT(FCA::kDataIndex == 4);
3185   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3186   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3187   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3188   STATIC_ASSERT(FCA::kHolderIndex == 0);
3189 
3190   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3191   //
3192   // Target state:
3193   //   sp[0 * kPointerSize]: kHolder
3194   //   sp[1 * kPointerSize]: kIsolate
3195   //   sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3196   //   sp[3 * kPointerSize]: undefined (kReturnValue)
3197   //   sp[4 * kPointerSize]: kData
3198   //   sp[5 * kPointerSize]: undefined (kNewTarget)
3199 
3200   // Set up the base register for addressing through MemOperands. It will point
3201   // at the receiver (located at sp + argc * kPointerSize).
3202   __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
3203 
3204   // Reserve space on the stack.
3205   __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
3206 
3207   // kHolder.
3208   __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
3209 
3210   // kIsolate.
3211   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3212   __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
3213 
3214   // kReturnValueDefaultValue and kReturnValue.
3215   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3216   __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
3217   __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
3218 
3219   // kData.
3220   __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
3221 
3222   // kNewTarget.
3223   __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
3224 
3225   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3226   // We use it below to set up the FunctionCallbackInfo object.
3227   __ mov(scratch, sp);
3228 
3229   // Allocate the v8::Arguments structure in the arguments' space since
3230   // it's not controlled by GC.
3231   static constexpr int kApiStackSpace = 4;
3232   static constexpr bool kDontSaveDoubles = false;
3233   FrameScope frame_scope(masm, StackFrame::MANUAL);
3234   __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3235 
3236   // EnterExitFrame may align the sp.
3237 
3238   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3239   // Arguments are after the return address (pushed by EnterExitFrame()).
3240   __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
3241 
3242   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3243   // on the stack).
3244   __ Add_d(scratch, scratch,
3245            Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3246 
3247   __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
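  // values_ = implicit_args + (kArgsLength + 1) * kSystemPointerSize: this
  // skips the six implicit arguments plus the receiver, landing on the first
  // JS argument on the stack.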
3248 
3249   // FunctionCallbackInfo::length_.
3250   // Stored as an int field; 32-bit integers within a struct on the stack are
3251   // always left-justified by the calling convention.
3252   __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
3253 
3254   // We also store the number of bytes to drop from the stack after returning
3255   // from the API function here.
3256   // Note: Unlike on other architectures, this stores the number of slots to
3257   // drop, not the number of bytes.
3258   __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3259   __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
3260 
3261   // v8::InvocationCallback's argument.
3262   DCHECK(!AreAliased(api_function_address, scratch, a0));
3263   __ Add_d(a0, sp, Operand(1 * kPointerSize));
3264 
3265   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3266 
3267   // There are two stack slots above the arguments we constructed on the stack.
3268   // TODO(jgruber): Document what these arguments are.
3269   static constexpr int kStackSlotsAboveFCA = 2;
3270   MemOperand return_value_operand(
3271       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3272 
3273   static constexpr int kUseStackSpaceOperand = 0;
3274   MemOperand stack_space_operand(sp, 4 * kPointerSize);
3275 
3276   AllowExternalCallThatCantCauseGC scope(masm);
3277   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3278                            kUseStackSpaceOperand, &stack_space_operand,
3279                            return_value_operand);
3280 }
3281 
3282 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3283   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3284   // name below the exit frame to make GC aware of them.
3285   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3286   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3287   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3288   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3289   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3290   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3291   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3292   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3293 
3294   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3295   Register holder = ApiGetterDescriptor::HolderRegister();
3296   Register callback = ApiGetterDescriptor::CallbackRegister();
3297   Register scratch = a4;
3298   DCHECK(!AreAliased(receiver, holder, callback, scratch));
3299 
3300   Register api_function_address = a2;
3301 
3302   // Here and below +1 is for name() pushed after the args_ array.
3303   using PCA = PropertyCallbackArguments;
3304   __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3305   __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3306   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3307   __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3308   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3309   __ St_d(scratch,
3310           MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3311   __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3312                                       kPointerSize));
3313   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3314   __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3315   __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3316   // should_throw_on_error -> false
3317   DCHECK_EQ(0, Smi::zero().ptr());
3318   __ St_d(zero_reg,
3319           MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3320   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3321   __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
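  // The stack now holds, from sp upwards: the property name, then the
  // PropertyCallbackArguments array in index order: ShouldThrowOnError
  // (0 == false), holder, isolate, the two undefined return-value slots,
  // data, and the receiver.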
3322 
3323   // v8::PropertyCallbackInfo::args_ array and name handle.
3324   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3325 
3326   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3327   __ mov(a0, sp);                               // a0 = Handle<Name>
3328   __ Add_d(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
3329 
3330   const int kApiStackSpace = 1;
3331   FrameScope frame_scope(masm, StackFrame::MANUAL);
3332   __ EnterExitFrame(false, kApiStackSpace);
3333 
3334   // Create the v8::PropertyCallbackInfo object on the stack and initialize
3335   // its args_ field.
3336   __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
3337   __ Add_d(a1, sp, Operand(1 * kPointerSize));
3338   // a1 = v8::PropertyCallbackInfo&
3339 
3340   ExternalReference thunk_ref =
3341       ExternalReference::invoke_accessor_getter_callback();
3342 
3343   __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3344   __ Ld_d(api_function_address,
3345           FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3346 
3347   // +3 is to skip prolog, return address and name handle.
3348   MemOperand return_value_operand(
3349       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3350   MemOperand* const kUseStackSpaceConstant = nullptr;
3351   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3352                            kStackUnwindSpace, kUseStackSpaceConstant,
3353                            return_value_operand);
3354 }
3355 
3356 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3357   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3358   // purpose Code object) to be able to call into C functions that may trigger
3359   // GC and thus move the caller.
3360   //
3361   // DirectCEntry places the return address on the stack (updated by the GC),
3362   // making the call GC safe. The irregexp backend relies on this.
3363 
3364   __ St_d(ra, MemOperand(sp, 0));  // Store the return address.
3365   __ Call(t7);                     // Call the C++ function.
3366   __ Ld_d(ra, MemOperand(sp, 0));  // Return to calling code.
3367 
3368   // TODO(LOONG_dev): LOONG64 Check this assert.
3369   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3370     // In case of an error the return address may point to a memory area
3371     // filled with kZapValue by the GC. Dereference the address and check for
3372     // this.
3373     __ Ld_d(a4, MemOperand(ra, 0));
3374     __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3375               Operand(reinterpret_cast<uint64_t>(kZapValue)));
3376   }
3377 
3378   __ Jump(ra);
3379 }
3380 
3381 namespace {
3382 
3383 // This code tries to be close to ia32 code so that any changes can be
3384 // easily ported.
3385 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3386                                   DeoptimizeKind deopt_kind) {
3387   Isolate* isolate = masm->isolate();
3388 
3389   // Unlike on ARM we don't save all the registers, just the useful ones.
3390   // For the rest, there are gaps on the stack, so the offsets remain the same.
3391   const int kNumberOfRegisters = Register::kNumRegisters;
3392 
3393   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3394   RegList saved_regs = restored_regs | sp | ra;
3395 
3396   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3397 
3398   // Save all double FPU registers before messing with them.
3399   __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
3400   const RegisterConfiguration* config = RegisterConfiguration::Default();
3401   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3402     int code = config->GetAllocatableDoubleCode(i);
3403     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3404     int offset = code * kDoubleSize;
3405     __ Fst_d(fpu_reg, MemOperand(sp, offset));
3406   }
3407 
3408   // Push saved_regs (needed to populate FrameDescription::registers_).
3409   // Leave gaps for other registers.
3410   __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
3411   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3412     if ((saved_regs.bits() & (1 << i)) != 0) {
3413       __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
3414     }
3415   }
3416 
3417   __ li(a2,
3418         ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3419   __ St_d(fp, MemOperand(a2, 0));
3420 
3421   const int kSavedRegistersAreaSize =
3422       (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
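  // kSavedRegistersAreaSize covers everything pushed above: one stack slot per
  // general register (including the gaps for registers we did not save) plus
  // one double-sized slot per FPU register.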
3423 
3424   // Get the address of the location in the code object (a2) (return
3425   // address for lazy deoptimization) and compute the fp-to-sp delta in
3426   // register a3.
3427   __ mov(a2, ra);
3428   __ Add_d(a3, sp, Operand(kSavedRegistersAreaSize));
3429 
3430   __ sub_d(a3, fp, a3);
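  // a3 = fp - (sp + kSavedRegistersAreaSize), i.e. the fp-to-sp delta of the
  // deoptimizing frame before the registers were spilled above.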
3431 
3432   // Allocate a new deoptimizer object.
3433   __ PrepareCallCFunction(5, a4);
3434   // Pass five arguments to the C function.
3435   __ mov(a0, zero_reg);
3436   Label context_check;
3437   __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3438   __ JumpIfSmi(a1, &context_check);
3439   __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3440   __ bind(&context_check);
3441   __ li(a1, Operand(static_cast<int>(deopt_kind)));
3442   // a2: code address or 0 already loaded.
3443   // a3: already has fp-to-sp delta.
3444   __ li(a4, ExternalReference::isolate_address(isolate));
3445 
3446   // Call Deoptimizer::New().
3447   {
3448     AllowExternalCallThatCantCauseGC scope(masm);
3449     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3450   }
3451 
3452   // Preserve "deoptimizer" object in register a0 and get the input
3453   // frame descriptor pointer to a1 (deoptimizer->input_);
3454   // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3455   __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
3456 
3457   // Copy core registers into FrameDescription::registers_[kNumRegisters].
3458   DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3459   for (int i = 0; i < kNumberOfRegisters; i++) {
3460     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3461     if ((saved_regs.bits() & (1 << i)) != 0) {
3462       __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
3463       __ St_d(a2, MemOperand(a1, offset));
3464     } else if (FLAG_debug_code) {
3465       __ li(a2, Operand(kDebugZapValue));
3466       __ St_d(a2, MemOperand(a1, offset));
3467     }
3468   }
3469 
3470   int double_regs_offset = FrameDescription::double_registers_offset();
3471   // Copy FPU registers to
3472   // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3473   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3474     int code = config->GetAllocatableDoubleCode(i);
3475     int dst_offset = code * kDoubleSize + double_regs_offset;
3476     int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
3477     __ Fld_d(f0, MemOperand(sp, src_offset));
3478     __ Fst_d(f0, MemOperand(a1, dst_offset));
3479   }
3480 
3481   // Remove the saved registers from the stack.
3482   __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
3483 
3484   // Compute a pointer to the unwinding limit in register a2; that is
3485   // the first stack slot not part of the input frame.
3486   __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3487   __ add_d(a2, a2, sp);
3488 
3489   // Unwind the stack down to - but not including - the unwinding
3490   // limit and copy the contents of the activation frame to the input
3491   // frame description.
3492   __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
3493   Label pop_loop;
3494   Label pop_loop_header;
3495   __ Branch(&pop_loop_header);
3496   __ bind(&pop_loop);
3497   __ Pop(a4);
3498   __ St_d(a4, MemOperand(a3, 0));
3499   __ addi_d(a3, a3, sizeof(uint64_t));
3500   __ bind(&pop_loop_header);
3501   __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3502   // Compute the output frame in the deoptimizer.
3503   __ Push(a0);  // Preserve deoptimizer object across call.
3504   // a0: deoptimizer object; a1: scratch.
3505   __ PrepareCallCFunction(1, a1);
3506   // Call Deoptimizer::ComputeOutputFrames().
3507   {
3508     AllowExternalCallThatCantCauseGC scope(masm);
3509     __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3510   }
3511   __ Pop(a0);  // Restore deoptimizer object (class Deoptimizer).
3512 
3513   __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3514 
3515   // Replace the current (input) frame with the output frames.
3516   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3517   // Outer loop state: a4 = current "FrameDescription** output_",
3518   // a1 = one past the last FrameDescription**.
3519   __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3520   __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
3521   __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
3522   __ Branch(&outer_loop_header);
3523   __ bind(&outer_push_loop);
3524   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3525   __ Ld_d(a2, MemOperand(a4, 0));  // output_[ix]
3526   __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3527   __ Branch(&inner_loop_header);
3528   __ bind(&inner_push_loop);
3529   __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
3530   __ Add_d(a6, a2, Operand(a3));
3531   __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3532   __ Push(a7);
3533   __ bind(&inner_loop_header);
3534   __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
3535 
3536   __ Add_d(a4, a4, Operand(kPointerSize));
3537   __ bind(&outer_loop_header);
3538   __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
3539 
3540   __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
3541   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3542     int code = config->GetAllocatableDoubleCode(i);
3543     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3544     int src_offset = code * kDoubleSize + double_regs_offset;
3545     __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
3546   }
3547 
3548   // Push pc and continuation from the last output frame.
3549   __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
3550   __ Push(a6);
3551   __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3552   __ Push(a6);
3553 
3554   // Technically restoring t7 should work unless zero_reg is also restored,
3555   // but it's safer to check for this.
3556   DCHECK(!(restored_regs.has(t7)));
3557   // Restore the registers from the last output frame.
3558   __ mov(t7, a2);
3559   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3560     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3561     if ((restored_regs.bits() & (1 << i)) != 0) {
3562       __ Ld_d(ToRegister(i), MemOperand(t7, offset));
3563     }
3564   }
3565 
3566   __ Pop(t7);  // Get continuation, leave pc on stack.
3567   __ Pop(ra);
3568   __ Jump(t7);
3569   __ stop();
3570 }
3571 
3572 }  // namespace
3573 
3574 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3575   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3576 }
3577 
3578 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3579   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3580 }
3581 
3582 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3583   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3584 }
3585 
3586 namespace {
3587 
3588 // Restarts execution either at the current or next (in execution order)
3589 // bytecode. If there is baseline code on the shared function info, converts an
3590 // interpreter frame into a baseline frame and continues execution in baseline
3591 // code. Otherwise execution continues with bytecode.
3592 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3593                                          bool next_bytecode,
3594                                          bool is_osr = false) {
3595   Label start;
3596   __ bind(&start);
3597 
3598   // Get function from the frame.
3599   Register closure = a1;
3600   __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3601 
3602   // Get the Code object from the shared function info.
3603   Register code_obj = s1;
3604   __ Ld_d(code_obj,
3605           FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3606   __ Ld_d(code_obj,
3607           FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3608 
3609   // Check if we have baseline code. For OSR entry it is safe to assume we
3610   // always have baseline code.
3611   if (!is_osr) {
3612     Label start_with_baseline;
3613     __ GetObjectType(code_obj, t2, t2);
3614     __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
3615 
3616     // Start with bytecode as there is no baseline code.
3617     Builtin builtin_id = next_bytecode
3618                              ? Builtin::kInterpreterEnterAtNextBytecode
3619                              : Builtin::kInterpreterEnterAtBytecode;
3620     __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3621             RelocInfo::CODE_TARGET);
3622 
3623     // Start with baseline code.
3624     __ bind(&start_with_baseline);
3625   } else if (FLAG_debug_code) {
3626     __ GetObjectType(code_obj, t2, t2);
3627     __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
3628   }
3629 
3630   if (FLAG_debug_code) {
3631     AssertCodeIsBaseline(masm, code_obj, t2);
3632   }
3633 
3634   // Replace BytecodeOffset with the feedback vector.
3635   Register feedback_vector = a2;
3636   __ Ld_d(feedback_vector,
3637           FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3638   __ Ld_d(feedback_vector,
3639           FieldMemOperand(feedback_vector, Cell::kValueOffset));
3640 
3641   Label install_baseline_code;
3642   // Check if feedback vector is valid. If not, call prepare for baseline to
3643   // allocate it.
3644   __ GetObjectType(feedback_vector, t2, t2);
3645   __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
3646 
3647   // Save BytecodeOffset from the stack frame.
3648   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3649               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3650   // Replace BytecodeOffset with the feedback vector.
3651   __ St_d(feedback_vector,
3652           MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3653   feedback_vector = no_reg;
3654 
3655   // Compute baseline pc for bytecode offset.
3656   ExternalReference get_baseline_pc_extref;
3657   if (next_bytecode || is_osr) {
3658     get_baseline_pc_extref =
3659         ExternalReference::baseline_pc_for_next_executed_bytecode();
3660   } else {
3661     get_baseline_pc_extref =
3662         ExternalReference::baseline_pc_for_bytecode_offset();
3663   }
3664 
3665   Register get_baseline_pc = a3;
3666   __ li(get_baseline_pc, get_baseline_pc_extref);
3667 
3668   // If the code deoptimizes during the implicit function entry stack interrupt
3669   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3670   // not a valid bytecode offset.
3671   // TODO(pthier): Investigate if it is feasible to handle this special case
3672   // in TurboFan instead of here.
3673   Label valid_bytecode_offset, function_entry_bytecode;
3674   if (!is_osr) {
3675     __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
3676               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3677                       kFunctionEntryBytecodeOffset));
3678   }
3679 
3680   __ Sub_d(kInterpreterBytecodeOffsetRegister,
3681            kInterpreterBytecodeOffsetRegister,
3682            (BytecodeArray::kHeaderSize - kHeapObjectTag));
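  // Strip the BytecodeArray header size and the heap-object tag so the
  // register holds a plain offset into the bytecode.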
3683 
3684   __ bind(&valid_bytecode_offset);
3685   // Get bytecode array from the stack frame.
3686   __ Ld_d(kInterpreterBytecodeArrayRegister,
3687           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3688   // Save the accumulator register, since it's clobbered by the below call.
3689   __ Push(kInterpreterAccumulatorRegister);
3690   {
3691     Register arg_reg_1 = a0;
3692     Register arg_reg_2 = a1;
3693     Register arg_reg_3 = a2;
3694     __ Move(arg_reg_1, code_obj);
3695     __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3696     __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
3697     FrameScope scope(masm, StackFrame::INTERNAL);
3698     __ PrepareCallCFunction(3, 0, a4);
3699     __ CallCFunction(get_baseline_pc, 3, 0);
3700   }
3701   __ Add_d(code_obj, code_obj, kReturnRegister0);
3702   __ Pop(kInterpreterAccumulatorRegister);
3703 
3704   if (is_osr) {
3705     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
3706     // Sparkplug here.
3707     // TODO(liuyu): Remove Ld as arm64 after register reallocation.
3708     __ Ld_d(kInterpreterBytecodeArrayRegister,
3709             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3710     ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
3711     Generate_OSREntry(masm, code_obj,
3712                       Operand(Code::kHeaderSize - kHeapObjectTag));
3713   } else {
3714     __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
3715     __ Jump(code_obj);
3716   }
3717   __ Trap();  // Unreachable.
3718 
3719   if (!is_osr) {
3720     __ bind(&function_entry_bytecode);
3721     // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3722     // address of the first bytecode.
3723     __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
3724     if (next_bytecode) {
3725       __ li(get_baseline_pc,
3726             ExternalReference::baseline_pc_for_bytecode_offset());
3727     }
3728     __ Branch(&valid_bytecode_offset);
3729   }
3730 
3731   __ bind(&install_baseline_code);
3732   {
3733     FrameScope scope(masm, StackFrame::INTERNAL);
3734     __ Push(kInterpreterAccumulatorRegister);
3735     __ Push(closure);
3736     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3737     __ Pop(kInterpreterAccumulatorRegister);
3738   }
3739   // Retry from the start after installing baseline code.
3740   __ Branch(&start);
3741 }
3742 
3743 }  // namespace
3744 
3745 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3746     MacroAssembler* masm) {
3747   Generate_BaselineOrInterpreterEntry(masm, false);
3748 }
3749 
3750 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3751     MacroAssembler* masm) {
3752   Generate_BaselineOrInterpreterEntry(masm, true);
3753 }
3754 
3755 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3756     MacroAssembler* masm) {
3757   Generate_BaselineOrInterpreterEntry(masm, false, true);
3758 }
3759 
3760 #undef __
3761 
3762 }  // namespace internal
3763 }  // namespace v8
3764 
3765 #endif  // V8_TARGET_ARCH_LOONG64
3766