1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_RISCV64
6 
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer/deoptimizer.h"
12 #include "src/execution/frame-constants.h"
13 #include "src/execution/frames.h"
14 #include "src/logging/counters.h"
15 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
16 #include "src/codegen/macro-assembler-inl.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/codegen/riscv64/constants-riscv64.h"
19 #include "src/heap/heap-inl.h"
20 #include "src/objects/cell.h"
21 #include "src/objects/foreign.h"
22 #include "src/objects/heap-number.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27 #include "src/wasm/wasm-linkage.h"
28 #include "src/wasm/wasm-objects.h"
29 
30 namespace v8 {
31 namespace internal {
32 
33 #define __ ACCESS_MASM(masm)
34 
35 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
36   ASM_CODE_COMMENT(masm);
37   __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
38   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
39           RelocInfo::CODE_TARGET);
40 }
41 
42 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
43                                            Runtime::FunctionId function_id) {
44   // ----------- S t a t e -------------
45   //  -- a0 : actual argument count
46   //  -- a1 : target function (preserved for callee)
47   //  -- a3 : new target (preserved for callee)
48   // -----------------------------------
49   {
50     FrameScope scope(masm, StackFrame::INTERNAL);
51     // Push a copy of the target function, the new target and the actual
52     // argument count.
53     // Push function as parameter to the runtime call.
54     __ SmiTag(kJavaScriptCallArgCountRegister);
55     __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
56             kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
57 
58     __ CallRuntime(function_id, 1);
59     // Use the return value before restoring a0
60     __ Add64(a2, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
61     // Restore target function, new target and actual argument count.
62     __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
63            kJavaScriptCallArgCountRegister);
64     __ SmiUntag(kJavaScriptCallArgCountRegister);
65   }
66 
67   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
68   __ Jump(a2);
69 }
70 
71 namespace {
72 
73 enum class ArgumentsElementType {
74   kRaw,    // Push arguments as they are.
75   kHandle  // Dereference arguments before pushing.
76 };
77 
78 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
79                             Register scratch, Register scratch2,
80                             ArgumentsElementType element_type) {
81   DCHECK(!AreAliased(array, argc, scratch));
82   Label loop, entry;
83   __ Sub64(scratch, argc, Operand(kJSArgcReceiverSlots));
84   __ Branch(&entry);
85   __ bind(&loop);
86   __ CalcScaledAddress(scratch2, array, scratch, kSystemPointerSizeLog2);
87   __ Ld(scratch2, MemOperand(scratch2));
88   if (element_type == ArgumentsElementType::kHandle) {
89     __ Ld(scratch2, MemOperand(scratch2));
90   }
91   __ push(scratch2);
92   __ bind(&entry);
93   __ Add64(scratch, scratch, Operand(-1));
94   __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
95 }
96 
97 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
98   // ----------- S t a t e -------------
99   //  -- a0     : number of arguments
100   //  -- a1     : constructor function
101   //  -- a3     : new target
102   //  -- cp     : context
103   //  -- ra     : return address
104   //  -- sp[...]: constructor arguments
105   // -----------------------------------
106 
107   // Enter a construct frame.
108   {
109     FrameScope scope(masm, StackFrame::CONSTRUCT);
110 
111     // Preserve the incoming parameters on the stack.
112     __ SmiTag(a0);
113     __ Push(cp, a0);
114     __ SmiUntag(a0);
115 
116     // Set up pointer to first argument (skip receiver).
117     __ Add64(
118         t2, fp,
119         Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
120     // t2: Pointer to start of arguments.
121     // a0: Number of arguments.
122     {
123       UseScratchRegisterScope temps(masm);
124       temps.Include(t0);
125       Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(),
126                              ArgumentsElementType::kRaw);
127     }
128     // The receiver for the builtin/api call.
129     __ PushRoot(RootIndex::kTheHoleValue);
130 
131     // Call the function.
132     // a0: number of arguments (untagged)
133     // a1: constructor function
134     // a3: new target
135     __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
136 
137     // Restore context from the frame.
138     __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
139     // Restore smi-tagged arguments count from the frame.
140     __ Ld(kScratchReg, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
141     // Leave construct frame.
142   }
143 
144   // Remove caller arguments from the stack and return.
145   __ DropArguments(kScratchReg, MacroAssembler::kCountIsSmi,
146                    MacroAssembler::kCountIncludesReceiver, kScratchReg);
147   __ Ret();
148 }
149 
150 }  // namespace
151 
152 // The construct stub for ES5 constructor functions and ES6 class constructors.
153 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
154   // ----------- S t a t e -------------
155   //  --      a0: number of arguments (untagged)
156   //  --      a1: constructor function
157   //  --      a3: new target
158   //  --      cp: context
159   //  --      ra: return address
160   //  -- sp[...]: constructor arguments
161   // -----------------------------------
162   UseScratchRegisterScope temps(masm);
163   temps.Include(t0, t1);
164   // Enter a construct frame.
165   FrameScope scope(masm, StackFrame::MANUAL);
166   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
167   __ EnterFrame(StackFrame::CONSTRUCT);
168 
169   // Preserve the incoming parameters on the stack.
170   __ SmiTag(a0);
171   __ Push(cp, a0, a1);
172   __ PushRoot(RootIndex::kUndefinedValue);
173   __ Push(a3);
174 
175   // ----------- S t a t e -------------
176   //  --        sp[0*kSystemPointerSize]: new target
177   //  --        sp[1*kSystemPointerSize]: padding
178   //  -- a1 and sp[2*kSystemPointerSize]: constructor function
179   //  --        sp[3*kSystemPointerSize]: number of arguments (tagged)
180   //  --        sp[4*kSystemPointerSize]: context
181   // -----------------------------------
182   {
183     UseScratchRegisterScope temps(masm);
184     Register func_info = temps.Acquire();
185     __ LoadTaggedPointerField(
186         func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
187     __ Lwu(func_info,
188            FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
189     __ DecodeField<SharedFunctionInfo::FunctionKindBits>(func_info);
190     __ JumpIfIsInRange(
191         func_info,
192         static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
193         static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
194         &not_create_implicit_receiver);
195     Register scratch = func_info;
196     Register scratch2 = temps.Acquire();
197     // If not derived class constructor: Allocate the new receiver object.
198     __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
199                         scratch, scratch2);
200     __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
201             RelocInfo::CODE_TARGET);
202     __ BranchShort(&post_instantiation_deopt_entry);
203 
204     // Else: use TheHoleValue as receiver for constructor call
205     __ bind(&not_create_implicit_receiver);
206     __ LoadRoot(a0, RootIndex::kTheHoleValue);
207   }
208   // ----------- S t a t e -------------
209   //  --                          a0: receiver
210   //  -- Slot 4 / sp[0*kSystemPointerSize]: new target
211   //  -- Slot 3 / sp[1*kSystemPointerSize]: padding
212   //  -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
213   //  -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
214   //  -- Slot 0 / sp[4*kSystemPointerSize]: context
215   // -----------------------------------
216   // Deoptimizer enters here.
217   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
218       masm->pc_offset());
219   __ bind(&post_instantiation_deopt_entry);
220 
221   // Restore new target.
222   __ Pop(a3);
223 
224   // Push the allocated receiver to the stack.
225   __ Push(a0);
226 
227   // We need two copies because we may have to return the original one
228   // and the calling conventions dictate that the called function pops the
229   // receiver. The second copy is pushed after the arguments; we save it in a6
230   // since a0 will hold the return value of the call.
231   __ Move(a6, a0);
232 
233   // Set up pointer to first argument (skip receiver).
234   __ Add64(
235       t2, fp,
236       Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
237 
238   // ----------- S t a t e -------------
239   //  --                 a3: new target
240   //  -- sp[0*kSystemPointerSize]: implicit receiver
241   //  -- sp[1*kSystemPointerSize]: implicit receiver
242   //  -- sp[2*kSystemPointerSize]: padding
243   //  -- sp[3*kSystemPointerSize]: constructor function
244   //  -- sp[4*kSystemPointerSize]: number of arguments (tagged)
245   //  -- sp[5*kSystemPointerSize]: context
246   // -----------------------------------
247 
248   // Restore constructor function and argument count.
249   __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
250   __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
251   __ SmiUntag(a0);
252 
253   Label stack_overflow;
254   {
255     UseScratchRegisterScope temps(masm);
256     __ StackOverflowCheck(a0, temps.Acquire(), temps.Acquire(),
257                           &stack_overflow);
258   }
259   // TODO(victorgomes): When the arguments adaptor is completely removed, we
260   // should get the formal parameter count and copy the arguments in its
261   // correct position (including any undefined), instead of delaying this to
262   // InvokeFunction.
263 
264   // Copy arguments and receiver to the expression stack.
265   // t2: Pointer to start of argument.
266   // a0: Number of arguments.
267   {
268     UseScratchRegisterScope temps(masm);
269     Generate_PushArguments(masm, t2, a0, temps.Acquire(), temps.Acquire(),
270                            ArgumentsElementType::kRaw);
271   }
272   // We need two copies because we may have to return the original one
273   // and the calling conventions dictate that the called function pops the
274   // receiver. The second copy is pushed after the arguments.
275   __ Push(a6);
276 
277   // Call the function.
278   __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
279 
280   // ----------- S t a t e -------------
281   //  --                 a0: constructor result
282   //  -- sp[0*kSystemPointerSize]: implicit receiver
283   //  -- sp[1*kSystemPointerSize]: padding
284   //  -- sp[2*kSystemPointerSize]: constructor function
285   //  -- sp[3*kSystemPointerSize]: number of arguments
286   //  -- sp[4*kSystemPointerSize]: context
287   // -----------------------------------
288 
289   // Store offset of return address for deoptimizer.
290   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
291       masm->pc_offset());
292 
293   // Restore the context from the frame.
294   __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
295 
296   // If the result is an object (in the ECMA sense), we should get rid
297   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
298   // on page 74.
299   Label use_receiver, do_throw, leave_and_return, check_receiver;
300 
301   // If the result is undefined, we jump out to using the implicit receiver.
302   __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
303 
304   // Otherwise we do a smi check and fall through to check if the return value
305   // is a valid receiver.
306 
307   // Throw away the result of the constructor invocation and use the
308   // on-stack receiver as the result.
309   __ bind(&use_receiver);
310   __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize));
311   __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
312 
313   __ bind(&leave_and_return);
314   // Restore smi-tagged arguments count from the frame.
315   __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
316   // Leave construct frame.
317   __ LeaveFrame(StackFrame::CONSTRUCT);
318 
319   // Remove caller arguments from the stack and return.
320   __ DropArguments(a1, MacroAssembler::kCountIsSmi,
321                    MacroAssembler::kCountIncludesReceiver, a4);
322   __ Ret();
323 
324   __ bind(&check_receiver);
325   __ JumpIfSmi(a0, &use_receiver);
326 
327   // If the type of the result (stored in its map) is less than
328   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
329   {
330     UseScratchRegisterScope temps(masm);
331     Register map = temps.Acquire(), type = temps.Acquire();
332     __ GetObjectType(a0, map, type);
333 
334     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
335     __ Branch(&leave_and_return, greater_equal, type,
336               Operand(FIRST_JS_RECEIVER_TYPE));
337     __ Branch(&use_receiver);
338   }
339   __ bind(&do_throw);
340   // Restore the context from the frame.
341   __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
342   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
343   __ break_(0xCC);
344 
345   __ bind(&stack_overflow);
346   // Restore the context from the frame.
347   __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
348   __ CallRuntime(Runtime::kThrowStackOverflow);
349   __ break_(0xCC);
350 }
351 
352 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
353   Generate_JSBuiltinsConstructStubHelper(masm);
354 }
355 
356 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
357                                  Register scratch) {
358   DCHECK(!AreAliased(code, scratch));
359   // Verify that the code kind is baseline code via the CodeKind.
360   __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
361   __ DecodeField<Code::KindField>(scratch);
362   __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
363             Operand(static_cast<int64_t>(CodeKind::BASELINE)));
364 }
365 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
366 // the more general dispatch.
367 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
368                                                     Register sfi_data,
369                                                     Register scratch1,
370                                                     Label* is_baseline) {
371   ASM_CODE_COMMENT(masm);
372   Label done;
373 
374   __ GetObjectType(sfi_data, scratch1, scratch1);
375   __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
376 
377   __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
378             Label::Distance::kNear);
379   __ LoadTaggedPointerField(
380       sfi_data,
381       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
382 
383   __ bind(&done);
384 }
385 
386 // static
387 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
388   // ----------- S t a t e -------------
389   //  -- a0 : the value to pass to the generator
390   //  -- a1 : the JSGeneratorObject to resume
391   //  -- ra : return address
392   // -----------------------------------
393 
394   // Store input value into generator object.
395   __ StoreTaggedField(
396       a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
397   __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
398                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
399   // Check that a1 is still valid, RecordWrite might have clobbered it.
400   __ AssertGeneratorObject(a1);
401 
402   // Load suspended function and context.
403   __ LoadTaggedPointerField(
404       a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
405   __ LoadTaggedPointerField(cp,
406                             FieldMemOperand(a4, JSFunction::kContextOffset));
407 
408   // Flood function if we are stepping.
409   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
410   Label stepping_prepared;
411   ExternalReference debug_hook =
412       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
413   __ li(a5, debug_hook);
414   __ Lb(a5, MemOperand(a5));
415   __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
416 
417   // Flood function if we need to continue stepping in the suspended generator.
418   ExternalReference debug_suspended_generator =
419       ExternalReference::debug_suspended_generator_address(masm->isolate());
420   __ li(a5, debug_suspended_generator);
421   __ Ld(a5, MemOperand(a5));
422   __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
423   __ bind(&stepping_prepared);
424 
425   // Check the stack for overflow. We are not trying to catch interruptions
426   // (i.e. debug break and preemption) here, so check the "real stack limit".
427   Label stack_overflow;
428   __ LoadStackLimit(kScratchReg,
429                     MacroAssembler::StackLimitKind::kRealStackLimit);
430   __ Branch(&stack_overflow, Uless, sp, Operand(kScratchReg));
431 
432   // ----------- S t a t e -------------
433   //  -- a1    : the JSGeneratorObject to resume
434   //  -- a4    : generator function
435   //  -- cp    : generator context
436   //  -- ra    : return address
437   // -----------------------------------
438 
439   // Push holes for arguments to generator function. Since the parser forced
440   // context allocation for any variables in generators, the actual argument
441   // values have already been copied into the context and these dummy values
442   // will never be used.
443   __ LoadTaggedPointerField(
444       a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
445   __ Lhu(a3,
446          FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
447   __ Sub64(a3, a3, Operand(kJSArgcReceiverSlots));
448   __ LoadTaggedPointerField(
449       t1,
450       FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
451   {
452     Label done_loop, loop;
453     __ bind(&loop);
454     __ Sub64(a3, a3, Operand(1));
455     __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
456     __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
457     __ LoadAnyTaggedField(
458         kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
459     __ Push(kScratchReg);
460     __ Branch(&loop);
461     __ bind(&done_loop);
462     // Push receiver.
463     __ LoadAnyTaggedField(
464         kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
465     __ Push(kScratchReg);
466   }
467 
468   // Underlying function needs to have bytecode available.
469   if (FLAG_debug_code) {
470     Label is_baseline;
471     __ LoadTaggedPointerField(
472         a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
473     __ LoadTaggedPointerField(
474         a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
475     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
476     __ GetObjectType(a3, a3, a3);
477     __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
478               Operand(BYTECODE_ARRAY_TYPE));
479     __ bind(&is_baseline);
480   }
481 
482   // Resume (Ignition/TurboFan) generator object.
483   {
484     __ LoadTaggedPointerField(
485         a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
486     __ Lhu(a0, FieldMemOperand(
487                    a0, SharedFunctionInfo::kFormalParameterCountOffset));
488     // We abuse new.target both to indicate that this is a resume call and to
489     // pass in the generator object.  In ordinary calls, new.target is always
490     // undefined because generator functions are non-constructable.
491     __ Move(a3, a1);
492     __ Move(a1, a4);
493     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
494     __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
495     __ JumpCodeObject(a2);
496   }
497 
498   __ bind(&prepare_step_in_if_stepping);
499   {
500     FrameScope scope(masm, StackFrame::INTERNAL);
501     __ Push(a1, a4);
502     // Push hole as receiver since we do not use it for stepping.
503     __ PushRoot(RootIndex::kTheHoleValue);
504     __ CallRuntime(Runtime::kDebugOnFunctionCall);
505     __ Pop(a1);
506   }
507   __ LoadTaggedPointerField(
508       a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
509   __ Branch(&stepping_prepared);
510 
511   __ bind(&prepare_step_in_suspended_generator);
512   {
513     FrameScope scope(masm, StackFrame::INTERNAL);
514     __ Push(a1);
515     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
516     __ Pop(a1);
517   }
518   __ LoadTaggedPointerField(
519       a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
520   __ Branch(&stepping_prepared);
521 
522   __ bind(&stack_overflow);
523   {
524     FrameScope scope(masm, StackFrame::INTERNAL);
525     __ CallRuntime(Runtime::kThrowStackOverflow);
526     __ break_(0xCC);  // This should be unreachable.
527   }
528 }
529 
530 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
531   FrameScope scope(masm, StackFrame::INTERNAL);
532   __ Push(a1);
533   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
534 }
535 
536 // Clobbers scratch1 and scratch2; preserves all other registers.
537 static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
538                                         Register scratch1, Register scratch2) {
539   // Check the stack for overflow. We are not trying to catch
540   // interruptions (e.g. debug break and preemption) here, so the "real stack
541   // limit" is checked.
542   Label okay;
543   __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
544   // Make scratch1 the space we have left. The stack might already be overflowed
545   // here, which will cause scratch1 to become negative.
546   __ Sub64(scratch1, sp, scratch1);
547   // Check if the arguments will overflow the stack.
548   __ Sll64(scratch2, argc, kSystemPointerSizeLog2);
549   __ Branch(&okay, gt, scratch1, Operand(scratch2),
550             Label::Distance::kNear);  // Signed comparison.
551 
552   // Out of stack space.
553   __ CallRuntime(Runtime::kThrowStackOverflow);
554 
555   __ bind(&okay);
556 }
557 
558 namespace {
559 
560 // Called with the native C calling convention. The corresponding function
561 // signature is either:
562 //
563 //   using JSEntryFunction = GeneratedCode<Address(
564 //       Address root_register_value, Address new_target, Address target,
565 //       Address receiver, intptr_t argc, Address** args)>;
566 // or
567 //   using JSEntryFunction = GeneratedCode<Address(
568 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
569 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
570                              Builtin entry_trampoline) {
571   Label invoke, handler_entry, exit;
572 
573   {
574     NoRootArrayScope no_root_array(masm);
575 
576     // TODO(plind): unify the ABI description here.
577     // Registers:
578     //  either
579     //   a0: root register value
580     //   a1: entry address
581     //   a2: function
582     //   a3: receiver
583     //   a4: argc
584     //   a5: argv
585     //  or
586     //   a0: root register value
587     //   a1: microtask_queue
588 
589     // Save callee saved registers on the stack.
590     __ MultiPush(kCalleeSaved | ra);
591 
592     // Save callee-saved FPU registers.
593     __ MultiPushFPU(kCalleeSavedFPU);
594     // Set up the reserved register for 0.0.
595     __ LoadFPRImmediate(kDoubleRegZero, 0.0);
596 
597     // Initialize the root register.
598     // C calling convention. The first argument is passed in a0.
599     __ Move(kRootRegister, a0);
600 
601 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
602     // Initialize the pointer cage base register.
603     __ LoadRootRelative(kPtrComprCageBaseRegister,
604                         IsolateData::cage_base_offset());
605 #endif
606   }
607 
608   // a1: entry address
609   // a2: function
610   // a3: receiver
611   // a4: argc
612   // a5: argv
613 
614   // We build an EntryFrame.
615   __ li(s1, Operand(-1));  // Push a bad frame pointer to fail if it is used.
616   __ li(s2, Operand(StackFrame::TypeToMarker(type)));
617   __ li(s3, Operand(StackFrame::TypeToMarker(type)));
618   ExternalReference c_entry_fp = ExternalReference::Create(
619       IsolateAddressId::kCEntryFPAddress, masm->isolate());
620   __ li(s5, c_entry_fp);
621   __ Ld(s4, MemOperand(s5));
622   __ Push(s1, s2, s3, s4);
623   // Clear c_entry_fp, now we've pushed its previous value to the stack.
624   // If the c_entry_fp is not already zero and we don't clear it, the
625   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
626   // frames on top.
627   __ Sd(zero_reg, MemOperand(s5));
628   // Set up frame pointer for the frame to be pushed.
629   __ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
630   // Registers:
631   //  either
632   //   a1: entry address
633   //   a2: function
634   //   a3: receiver
635   //   a4: argc
636   //   a5: argv
637   //  or
638   //   a1: microtask_queue
639   //
640   // Stack:
641   // caller fp          |
642   // function slot      | entry frame
643   // context slot       |
644   // bad fp (0xFF...F)  |
645   // callee saved registers + ra
646   // [ O32: 4 args slots]
647   // args
648 
649   // If this is the outermost JS call, set js_entry_sp value.
650   Label non_outermost_js;
651   ExternalReference js_entry_sp = ExternalReference::Create(
652       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
653   __ li(s1, js_entry_sp);
654   __ Ld(s2, MemOperand(s1));
655   __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg),
656             Label::Distance::kNear);
657   __ Sd(fp, MemOperand(s1));
658   __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
659   Label cont;
660   __ Branch(&cont);
661   __ bind(&non_outermost_js);
662   __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
663   __ bind(&cont);
664   __ push(s3);
665 
666   // Jump to a faked try block that does the invoke, with a faked catch
667   // block that sets the pending exception.
668   __ BranchShort(&invoke);
669   __ bind(&handler_entry);
670 
671   // Store the current pc as the handler offset. It's used later to create the
672   // handler table.
673   masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
674 
675   // Caught exception: Store result (exception) in the pending exception
676   // field in the JSEnv and return a failure sentinel.  Coming in here the
677   // fp will be invalid because the PushStackHandler below sets it to 0 to
678   // signal the existence of the JSEntry frame.
679   __ li(s1, ExternalReference::Create(
680                 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
681   __ Sd(a0, MemOperand(s1));  // We come back from 'invoke'. result is in a0.
682   __ LoadRoot(a0, RootIndex::kException);
683   __ BranchShort(&exit);
684 
685   // Invoke: Link this frame into the handler chain.
686   __ bind(&invoke);
687   __ PushStackHandler();
688   // If an exception not caught by another handler occurs, this handler
689   // returns control to the code after the branch to &invoke above, which
690   // restores all kCalleeSaved registers (including cp and fp) to their
691   // saved values before returning a failure to C.
692   //
693   // Registers:
694   //  either
695   //   a0: root register value
696   //   a1: entry address
697   //   a2: function
698   //   a3: receiver
699   //   a4: argc
700   //   a5: argv
701   //  or
702   //   a0: root register value
703   //   a1: microtask_queue
704   //
705   // Stack:
706   // handler frame
707   // entry frame
708   // callee saved registers + ra
709   // [ O32: 4 args slots]
710   // args
711   //
712   // Invoke the function by calling through JS entry trampoline builtin and
713   // pop the faked function when we return.
714 
715   Handle<Code> trampoline_code =
716       masm->isolate()->builtins()->code_handle(entry_trampoline);
717   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
718 
719   // Unlink this frame from the handler chain.
720   __ PopStackHandler();
721 
722   __ bind(&exit);  // a0 holds result
723   // Check if the current stack frame is marked as the outermost JS frame.
724   Label non_outermost_js_2;
725   __ pop(a5);
726   __ Branch(&non_outermost_js_2, ne, a5,
727             Operand(StackFrame::OUTERMOST_JSENTRY_FRAME),
728             Label::Distance::kNear);
729   __ li(a5, js_entry_sp);
730   __ Sd(zero_reg, MemOperand(a5));
731   __ bind(&non_outermost_js_2);
732 
733   // Restore the top frame descriptors from the stack.
734   __ pop(a5);
735   __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
736                                       masm->isolate()));
737   __ Sd(a5, MemOperand(a4));
738 
739   // Reset the stack to the callee saved registers.
740   __ Add64(sp, sp, -EntryFrameConstants::kCallerFPOffset);
741 
742   // Restore callee-saved fpu registers.
743   __ MultiPopFPU(kCalleeSavedFPU);
744 
745   // Restore callee saved registers from the stack.
746   __ MultiPop(kCalleeSaved | ra);
747   // Return.
748   __ Jump(ra);
749 }
750 
751 }  // namespace
752 
753 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
754   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
755 }
756 
757 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
758   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
759                           Builtin::kJSConstructEntryTrampoline);
760 }
761 
762 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
763   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
764                           Builtin::kRunMicrotasksTrampoline);
765 }
766 
767 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
768                                              bool is_construct) {
769   // ----------- S t a t e -------------
770   //  -- a1: new.target
771   //  -- a2: function
772   //  -- a3: receiver_pointer
773   //  -- a4: argc
774   //  -- a5: argv
775   // -----------------------------------
776 
777   // Enter an internal frame.
778   {
779     FrameScope scope(masm, StackFrame::INTERNAL);
780 
781     // Set up the context (we need to use the caller context from the isolate).
782     ExternalReference context_address = ExternalReference::Create(
783         IsolateAddressId::kContextAddress, masm->isolate());
784     __ li(cp, context_address);
785     __ Ld(cp, MemOperand(cp));
786 
787     // Push the function onto the stack.
788     __ Push(a2);
789 
790     // Check if we have enough stack space to push all arguments.
791     __ mv(a6, a4);
792     Generate_CheckStackOverflow(masm, a6, a0, s2);
793 
794     // Copy arguments to the stack.
795     // a4: argc
796     // a5: argv, i.e. points to first arg
797     {
798       UseScratchRegisterScope temps(masm);
799       Generate_PushArguments(masm, a5, a4, temps.Acquire(), temps.Acquire(),
800                              ArgumentsElementType::kHandle);
801     }
802     // Push the receiver.
803     __ Push(a3);
804 
805     // a0: argc
806     // a1: function
807     // a3: new.target
808     __ Move(a3, a1);
809     __ Move(a1, a2);
810     __ Move(a0, a4);
811 
812     // Initialize all JavaScript callee-saved registers, since they will be seen
813     // by the garbage collector as part of handlers.
814     __ LoadRoot(a4, RootIndex::kUndefinedValue);
815     __ Move(a5, a4);
816     __ Move(s1, a4);
817     __ Move(s2, a4);
818     __ Move(s3, a4);
819     __ Move(s4, a4);
820     __ Move(s5, a4);
821 #ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
822     __ Move(s11, a4);
823 #endif
824     // s6 holds the root address. Do not clobber.
825     // s7 is cp. Do not init.
826 
827     // Invoke the code.
828     Handle<Code> builtin = is_construct
829                                ? BUILTIN_CODE(masm->isolate(), Construct)
830                                : masm->isolate()->builtins()->Call();
831     __ Call(builtin, RelocInfo::CODE_TARGET);
832 
833     // Leave internal frame.
834   }
835   __ Jump(ra);
836 }
837 
838 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
839   Generate_JSEntryTrampolineHelper(masm, false);
840 }
841 
842 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
843   Generate_JSEntryTrampolineHelper(masm, true);
844 }
845 
846 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
847   // a1: microtask_queue
848   __ Move(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
849   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
850 }
851 
852 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
853                                                 Register optimized_code,
854                                                 Register closure,
855                                                 Register scratch1,
856                                                 Register scratch2) {
857   ASM_CODE_COMMENT(masm);
858   DCHECK(!AreAliased(optimized_code, closure));
859   // Store code entry in the closure.
860   __ StoreTaggedField(optimized_code,
861                       FieldMemOperand(closure, JSFunction::kCodeOffset));
862   __ Move(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
863   __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1,
864                       kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
865                       RememberedSetAction::kOmit, SmiCheck::kOmit);
866 }
867 
868 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
869                                   Register scratch2) {
870   ASM_CODE_COMMENT(masm);
871   Register params_size = scratch1;
872 
873   // Get the size of the formal parameters + receiver (in bytes).
874   __ Ld(params_size,
875         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
876   __ Lw(params_size,
877         FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
878 
879   Register actual_params_size = scratch2;
880   Label L1;
881   // Compute the size of the actual parameters + receiver (in bytes).
882   __ Ld(actual_params_size,
883         MemOperand(fp, StandardFrameConstants::kArgCOffset));
884   __ Sll64(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
885   // If actual is bigger than formal, then we should use it to free up the stack
886   // arguments.
887   __ Branch(&L1, le, actual_params_size, Operand(params_size),
888             Label::Distance::kNear);
889   __ Move(params_size, actual_params_size);
890   __ bind(&L1);
891 
892   // Leave the frame (also dropping the register file).
893   __ LeaveFrame(StackFrame::INTERPRETED);
894 
895   // Drop receiver + arguments.
896   __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
897                    MacroAssembler::kCountIncludesReceiver);
898 }
899 
900 // Tail-call |function_id| if |actual_state| == |expected_state|
901 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
902                                          Register actual_state,
903                                          TieringState expected_state,
904                                          Runtime::FunctionId function_id) {
905   ASM_CODE_COMMENT(masm);
906   Label no_match;
907   __ Branch(&no_match, ne, actual_state,
908             Operand(static_cast<int>(expected_state)), Label::Distance::kNear);
909   GenerateTailCallToReturnedCode(masm, function_id);
910   __ bind(&no_match);
911 }
912 
913 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
914                                       Register optimized_code_entry,
915                                       Register scratch1, Register scratch2) {
916   // ----------- S t a t e -------------
917   //  -- a0 : actual argument count
918   //  -- a3 : new target (preserved for callee if needed, and caller)
919   //  -- a1 : target function (preserved for callee if needed, and caller)
920   // -----------------------------------
921   ASM_CODE_COMMENT(masm);
922   DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
923 
924   Register closure = a1;
925   Label heal_optimized_code_slot;
926 
927   // If the optimized code is cleared, go to runtime to update the optimization
928   // marker field.
929   __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
930                    &heal_optimized_code_slot);
931 
932   // Check if the optimized code is marked for deopt. If it is, call the
933   // runtime to clear it.
934   __ LoadTaggedPointerField(
935       a5,
936       FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
937   __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
938   __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
939   __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg),
940             Label::Distance::kNear);
941 
942   // Optimized code is good, get it into the closure and link the closure into
943   // the optimized functions list, then tail call the optimized code.
944   // The feedback vector is no longer used, so re-use it as a scratch
945   // register.
946   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
947                                       scratch1, scratch2);
948 
949   static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
950   __ LoadCodeObjectEntry(a2, optimized_code_entry);
951   __ Jump(a2);
952 
953   // Optimized code slot contains deoptimized code or code is cleared and
954   // optimized code marker isn't updated. Evict the code, update the marker
955   // and re-enter the closure's code.
956   __ bind(&heal_optimized_code_slot);
957   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
958 }
959 
960 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
961                               Register tiering_state) {
962   // ----------- S t a t e -------------
963   //  -- a0 : actual argument count
964   //  -- a3 : new target (preserved for callee if needed, and caller)
965   //  -- a1 : target function (preserved for callee if needed, and caller)
966   //  -- feedback vector (preserved for caller if needed)
967   //  -- tiering_state : an int32 containing a non-zero optimization
968   //  marker.
969   // -----------------------------------
970   ASM_CODE_COMMENT(masm);
971   DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
972 
973   // TODO(v8:8394): The logging of first execution will break if
974   // feedback vectors are not allocated. We need to find a different way of
975   // logging these events if required.
976   TailCallRuntimeIfStateEquals(masm, tiering_state,
977                                TieringState::kRequestTurbofan_Synchronous,
978                                Runtime::kCompileTurbofan_Synchronous);
979   TailCallRuntimeIfStateEquals(masm, tiering_state,
980                                TieringState::kRequestTurbofan_Concurrent,
981                                Runtime::kCompileTurbofan_Concurrent);
982 
983   __ stop();
984 }
985 
986 // Advance the current bytecode offset. This simulates what all bytecode
987 // handlers do upon completion of the underlying operation. Will bail out to a
988 // label if the bytecode (without prefix) is a return bytecode. Will not advance
989 // the bytecode offset if the current bytecode is a JumpLoop, instead just
990 // re-executing the JumpLoop to jump to the correct bytecode.
991 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
992                                           Register bytecode_array,
993                                           Register bytecode_offset,
994                                           Register bytecode, Register scratch1,
995                                           Register scratch2, Register scratch3,
996                                           Label* if_return) {
997   ASM_CODE_COMMENT(masm);
998   Register bytecode_size_table = scratch1;
999 
1000   // The bytecode offset value will be increased by one in wide and extra wide
1001   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
1002   // will restore the original bytecode offset. In order to simplify the code,
1003   // we keep a backup of it.
1004   Register original_bytecode_offset = scratch3;
1005   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
1006                      bytecode_size_table, original_bytecode_offset));
1007   __ Move(original_bytecode_offset, bytecode_offset);
1008   __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
1009 
1010   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
1011   Label process_bytecode, extra_wide;
1012   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
1013   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
1014   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
1015   STATIC_ASSERT(3 ==
1016                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
1017   __ Branch(&process_bytecode, Ugreater, bytecode, Operand(3),
1018             Label::Distance::kNear);
1019   __ And(scratch2, bytecode, Operand(1));
1020   __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg),
1021             Label::Distance::kNear);
1022 
1023   // Load the next bytecode and update table to the wide scaled table.
1024   __ Add64(bytecode_offset, bytecode_offset, Operand(1));
1025   __ Add64(scratch2, bytecode_array, bytecode_offset);
1026   __ Lbu(bytecode, MemOperand(scratch2));
1027   __ Add64(bytecode_size_table, bytecode_size_table,
1028            Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
1029   __ BranchShort(&process_bytecode);
1030 
1031   __ bind(&extra_wide);
1032   // Load the next bytecode and update table to the extra wide scaled table.
1033   __ Add64(bytecode_offset, bytecode_offset, Operand(1));
1034   __ Add64(scratch2, bytecode_array, bytecode_offset);
1035   __ Lbu(bytecode, MemOperand(scratch2));
1036   __ Add64(bytecode_size_table, bytecode_size_table,
1037            Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
1038 
1039   __ bind(&process_bytecode);
1040 
1041 // Bail out to the return label if this is a return bytecode.
1042 #define JUMP_IF_EQUAL(NAME)          \
1043   __ Branch(if_return, eq, bytecode, \
1044             Operand(static_cast<int64_t>(interpreter::Bytecode::k##NAME)));
1045   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1046 #undef JUMP_IF_EQUAL
1047 
1048   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1049   // of the loop.
1050   Label end, not_jump_loop;
1051   __ Branch(&not_jump_loop, ne, bytecode,
1052             Operand(static_cast<int64_t>(interpreter::Bytecode::kJumpLoop)),
1053             Label::Distance::kNear);
1054   // We need to restore the original bytecode_offset since we might have
1055   // increased it to skip the wide / extra-wide prefix bytecode.
1056   __ Move(bytecode_offset, original_bytecode_offset);
1057   __ BranchShort(&end);
1058 
1059   __ bind(&not_jump_loop);
1060   // Otherwise, load the size of the current bytecode and advance the offset.
1061   __ Add64(scratch2, bytecode_size_table, bytecode);
1062   __ Lb(scratch2, MemOperand(scratch2));
1063   __ Add64(bytecode_offset, bytecode_offset, scratch2);
1064 
1065   __ bind(&end);
1066 }
1067 
1068 // Read off the optimization state in the feedback vector and check if there
1069 // is optimized code or a tiering state that needs to be processed.
1070 static void LoadTieringStateAndJumpIfNeedsProcessing(
1071     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1072     Label* has_optimized_code_or_state) {
1073   ASM_CODE_COMMENT(masm);
1074   DCHECK(!AreAliased(optimization_state, feedback_vector));
1075   UseScratchRegisterScope temps(masm);
1076   Register scratch = temps.Acquire();
1077   __ Lw(optimization_state,
1078         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1079   __ And(
1080       scratch, optimization_state,
1081       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1082   __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
1083 }
1084 
1085 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1086     MacroAssembler* masm, Register optimization_state,
1087     Register feedback_vector) {
1088   ASM_CODE_COMMENT(masm);
1089   DCHECK(!AreAliased(optimization_state, feedback_vector));
1090   UseScratchRegisterScope temps(masm);
1091   temps.Include(t0, t1);
1092   Label maybe_has_optimized_code;
1093   // Check if the optimized code marker is available.
1094   {
1095     UseScratchRegisterScope temps(masm);
1096     Register scratch = temps.Acquire();
1097     __ And(scratch, optimization_state,
1098            Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
1099     __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg),
1100               Label::Distance::kNear);
1101   }
1102   Register tiering_state = optimization_state;
1103   __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1104   MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1105 
1106   __ bind(&maybe_has_optimized_code);
1107   Register optimized_code_entry = optimization_state;
1108   __ LoadAnyTaggedField(
1109       optimized_code_entry,
1110       FieldMemOperand(feedback_vector,
1111                       FeedbackVector::kMaybeOptimizedCodeOffset));
1112   TailCallOptimizedCodeSlot(masm, optimized_code_entry, temps.Acquire(),
1113                             temps.Acquire());
1114 }
1115 
1116 namespace {
1117 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1118                                  Register bytecode_array) {
1119   // Reset code age and the OSR state (optimized to a single write).
1120   static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1121   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1122   __ Sw(zero_reg,
1123         FieldMemOperand(bytecode_array,
1124                         BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1125 }
1126 
1127 }  // namespace
1128 
1129 // static
1130 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1131   UseScratchRegisterScope temps(masm);
1132   temps.Include({kScratchReg, kScratchReg2});
1133   auto descriptor =
1134       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1135   Register closure = descriptor.GetRegisterParameter(
1136       BaselineOutOfLinePrologueDescriptor::kClosure);
1137   // Load the feedback vector from the closure.
1138   Register feedback_vector = temps.Acquire();
1139   __ Ld(feedback_vector,
1140         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1141   __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1142   if (FLAG_debug_code) {
1143     UseScratchRegisterScope temps(masm);
1144     Register type = temps.Acquire();
1145     __ GetObjectType(feedback_vector, type, type);
1146     __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
1147               Operand(FEEDBACK_VECTOR_TYPE));
1148   }
1149 
1150   // Check for a tiering state.
1151   Label has_optimized_code_or_state;
1152   Register optimization_state = temps.Acquire();
1153   LoadTieringStateAndJumpIfNeedsProcessing(
1154       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1155 
1156   // Increment invocation count for the function.
1157   {
1158     UseScratchRegisterScope temps(masm);
1159     Register invocation_count = temps.Acquire();
1160     __ Lw(invocation_count,
1161           FieldMemOperand(feedback_vector,
1162                           FeedbackVector::kInvocationCountOffset));
1163     __ Add32(invocation_count, invocation_count, Operand(1));
1164     __ Sw(invocation_count,
1165           FieldMemOperand(feedback_vector,
1166                           FeedbackVector::kInvocationCountOffset));
1167   }
1168 
1169   FrameScope frame_scope(masm, StackFrame::MANUAL);
1170   {
1171     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1172     // Normally the first thing we'd do here is Push(ra, fp), but we already
1173     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1174     // value of ra before the call to this BaselineOutOfLinePrologue builtin.
1175 
1176     Register callee_context = descriptor.GetRegisterParameter(
1177         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1178     Register callee_js_function = descriptor.GetRegisterParameter(
1179         BaselineOutOfLinePrologueDescriptor::kClosure);
1180     __ Push(callee_context, callee_js_function);
1181     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1182     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1183 
1184     Register argc = descriptor.GetRegisterParameter(
1185         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1186     // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1187     // the frame, so load it into a register.
1188     Register bytecode_array = descriptor.GetRegisterParameter(
1189         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1190     ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1191     __ Push(argc, bytecode_array);
1192 
1193     // Baseline code frames store the feedback vector where the interpreter
1194     // would store the bytecode offset.
1195     if (FLAG_debug_code) {
1196       UseScratchRegisterScope temps(masm);
1197       Register invocation_count = temps.Acquire();
1198       __ GetObjectType(feedback_vector, invocation_count, invocation_count);
1199       __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
1200                 Operand(FEEDBACK_VECTOR_TYPE));
1201     }
1202     // Our stack is currently aligned. We have to push something along with
1203     // the feedback vector to keep it that way -- we may as well start
1204     // initialising the register frame.
1205     // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
1206     // `undefined` in the accumulator register, to skip the load in the baseline
1207     // code.
1208     __ Push(feedback_vector);
1209   }
1210 
1211   Label call_stack_guard;
1212   Register frame_size = descriptor.GetRegisterParameter(
1213       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1214   {
1215     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1216     // Stack check. This folds the checks for both the interrupt stack limit
1217     // check and the real stack limit into one by just checking for the
1218     // interrupt limit. The interrupt limit is either equal to the real stack
1219     // limit or tighter. By ensuring we have space until that limit after
1220     // building the frame we can quickly precheck both at once.
1221     UseScratchRegisterScope temps(masm);
1222     Register sp_minus_frame_size = temps.Acquire();
1223     __ Sub64(sp_minus_frame_size, sp, frame_size);
1224     Register interrupt_limit = temps.Acquire();
1225     __ LoadStackLimit(interrupt_limit,
1226                       MacroAssembler::StackLimitKind::kInterruptStackLimit);
1227     __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1228               Operand(interrupt_limit));
1229   }
1230 
1231   // Do "fast" return to the caller pc in ra.
1232   // TODO(v8:11429): Document this frame setup better.
1233   __ Ret();
1234 
1235   __ bind(&has_optimized_code_or_state);
1236   {
1237     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1238     // Drop the frame created by the baseline call.
1239     __ Pop(ra, fp);
1240     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1241                                                  feedback_vector);
1242     __ Trap();
1243   }
1244 
1245   __ bind(&call_stack_guard);
1246   {
1247     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1248     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1249     // Save incoming new target or generator
1250     __ Push(kJavaScriptCallNewTargetRegister);
1251     __ SmiTag(frame_size);
1252     __ Push(frame_size);
1253     __ CallRuntime(Runtime::kStackGuardWithGap);
1254     __ Pop(kJavaScriptCallNewTargetRegister);
1255   }
1256   __ Ret();
1257   temps.Exclude({kScratchReg, kScratchReg2});
1258 }
1259 
1260 // Generate code for entering a JS function with the interpreter.
1261 // On entry to the function the receiver and arguments have been pushed on the
1262 // stack left to right.
1263 //
1264 // The live registers are:
1265 //   o a0 : actual argument count
1266 //   o a1: the JS function object being called.
1267 //   o a3: the incoming new target or generator object
1268 //   o cp: our context
1269 //   o fp: the caller's frame pointer
1270 //   o sp: stack pointer
1271 //   o ra: return address
1272 //
1273 // The function builds an interpreter frame.  See InterpreterFrameConstants in
1274 // frames-constants.h for its layout.
1275 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1276   Register closure = a1;
1277   Register feedback_vector = a2;
1278   UseScratchRegisterScope temps(masm);
1279   temps.Include(t0, t1);
1280   Register scratch = temps.Acquire();
1281   Register scratch2 = temps.Acquire();
1282   // Get the bytecode array from the function object and load it into
1283   // kInterpreterBytecodeArrayRegister.
1284   __ LoadTaggedPointerField(
1285       kScratchReg,
1286       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1287   __ LoadTaggedPointerField(
1288       kInterpreterBytecodeArrayRegister,
1289       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
1290   Label is_baseline;
1291   GetSharedFunctionInfoBytecodeOrBaseline(
1292       masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
1293 
1294   // The bytecode array could have been flushed from the shared function info,
1295   // if so, call into CompileLazy.
1296   Label compile_lazy;
1297   __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1298   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1299 
1300   // Load the feedback vector from the closure.
1301   __ LoadTaggedPointerField(
1302       feedback_vector,
1303       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1304   __ LoadTaggedPointerField(
1305       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1306 
1307   Label push_stack_frame;
1308   // Check if feedback vector is valid. If valid, check for optimized code
1309   // and update the invocation count. Otherwise, set up the stack frame.
1310   __ LoadTaggedPointerField(
1311       a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1312   __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
1313   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
1314             Label::Distance::kNear);
1315 
1316   // Read off the optimization state in the feedback vector, and if there
1317   // is optimized code or a tiering state, call that instead.
1318   Register optimization_state = a4;
1319   __ Lw(optimization_state,
1320         FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1321 
1322   // Check if the optimized code slot is not empty or has a tiering state.
1323   Label has_optimized_code_or_state;
1324 
1325   __ And(scratch, optimization_state,
1326          FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
1327   __ Branch(&has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
1328 
1329   Label not_optimized;
1330   __ bind(&not_optimized);
1331 
1332   // Increment invocation count for the function.
1333   __ Lw(a4, FieldMemOperand(feedback_vector,
1334                             FeedbackVector::kInvocationCountOffset));
1335   __ Add32(a4, a4, Operand(1));
1336   __ Sw(a4, FieldMemOperand(feedback_vector,
1337                             FeedbackVector::kInvocationCountOffset));
1338 
1339   // Open a frame scope to indicate that there is a frame on the stack.  The
1340   // MANUAL indicates that the scope shouldn't actually generate code to set up
1341   // the frame (that is done below).
1342   __ bind(&push_stack_frame);
1343   FrameScope frame_scope(masm, StackFrame::MANUAL);
1344   __ PushStandardFrame(closure);
1345 
1346   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1347 
1348   // Load initial bytecode offset.
1349   __ li(kInterpreterBytecodeOffsetRegister,
1350         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1351 
1352   // Push bytecode array and Smi tagged bytecode array offset.
1353   __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
1354   __ Push(kInterpreterBytecodeArrayRegister, a4);
1355 
1356   // Allocate the local and temporary register file on the stack.
1357   Label stack_overflow;
1358   {
1359     // Load frame size (word) from the BytecodeArray object.
1360     __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1361                               BytecodeArray::kFrameSizeOffset));
1362 
1363     // Do a stack check to ensure we don't go over the limit.
1364     __ Sub64(a5, sp, Operand(a4));
1365     __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1366     __ Branch(&stack_overflow, Uless, a5, Operand(a2));
1367 
1368     // If ok, push undefined as the initial value for all register file entries.
1369     Label loop_header;
1370     Label loop_check;
1371     __ LoadRoot(a5, RootIndex::kUndefinedValue);
1372     __ BranchShort(&loop_check);
1373     __ bind(&loop_header);
1374     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1375     __ push(a5);
1376     // Continue loop if not done.
1377     __ bind(&loop_check);
1378     __ Sub64(a4, a4, Operand(kSystemPointerSize));
1379     __ Branch(&loop_header, ge, a4, Operand(zero_reg));
1380   }
1381 
1382   // If the bytecode array has a valid incoming new target or generator object
1383   // register, initialize it with the incoming value passed in a3.
1384   Label no_incoming_new_target_or_generator_register;
1385   __ Lw(a5, FieldMemOperand(
1386                 kInterpreterBytecodeArrayRegister,
1387                 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1388   __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1389             Operand(zero_reg), Label::Distance::kNear);
1390   __ CalcScaledAddress(a5, fp, a5, kSystemPointerSizeLog2);
1391   __ Sd(a3, MemOperand(a5));
1392   __ bind(&no_incoming_new_target_or_generator_register);
1393 
1394   // Perform interrupt stack check.
1395   // TODO(solanes): Merge with the real stack limit check above.
1396   Label stack_check_interrupt, after_stack_check_interrupt;
1397   __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1398   __ Branch(&stack_check_interrupt, Uless, sp, Operand(a5),
1399             Label::Distance::kNear);
1400   __ bind(&after_stack_check_interrupt);
1401 
1402   // Load accumulator as undefined.
1403   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1404 
1405   // Load the dispatch table into a register and dispatch to the bytecode
1406   // handler at the current bytecode offset.
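  // Roughly, the dispatch sequence below amounts to this pseudocode sketch:
  //   bytecode = bytecode_array[bytecode_offset];
  //   handler  = dispatch_table[bytecode];
  //   call handler;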
1407   Label do_dispatch;
1408   __ bind(&do_dispatch);
1409   __ li(kInterpreterDispatchTableRegister,
1410         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1411   __ Add64(a1, kInterpreterBytecodeArrayRegister,
1412            kInterpreterBytecodeOffsetRegister);
1413   __ Lbu(a7, MemOperand(a1));
1414   __ CalcScaledAddress(kScratchReg, kInterpreterDispatchTableRegister, a7,
1415                        kSystemPointerSizeLog2);
1416   __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
1417   __ Call(kJavaScriptCallCodeStartRegister);
1418   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1419 
1420   // Any returns to the entry trampoline are either due to the return bytecode
1421   // or the interpreter tail calling a builtin and then a dispatch.
1422 
1423   // Get bytecode array and bytecode offset from the stack frame.
1424   __ Ld(kInterpreterBytecodeArrayRegister,
1425         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1426   __ Ld(kInterpreterBytecodeOffsetRegister,
1427         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1428   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1429 
1430   // Either return, or advance to the next bytecode and dispatch.
1431   Label do_return;
1432   __ Add64(a1, kInterpreterBytecodeArrayRegister,
1433            kInterpreterBytecodeOffsetRegister);
1434   __ Lbu(a1, MemOperand(a1));
1435   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1436                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1437                                 a4, &do_return);
1438   __ Branch(&do_dispatch);
1439 
1440   __ bind(&do_return);
1441   // The return value is in a0.
1442   LeaveInterpreterFrame(masm, scratch, scratch2);
1443   __ Jump(ra);
1444 
1445   __ bind(&stack_check_interrupt);
1446   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1447   // for the call to the StackGuard.
1448   __ li(kInterpreterBytecodeOffsetRegister,
1449         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1450                              kFunctionEntryBytecodeOffset)));
1451   __ Sd(kInterpreterBytecodeOffsetRegister,
1452         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1453   __ CallRuntime(Runtime::kStackGuard);
1454 
1455   // After the call, restore the bytecode array, bytecode offset and accumulator
1456   // registers again. Also, restore the bytecode offset in the stack to its
1457   // previous value.
1458   __ Ld(kInterpreterBytecodeArrayRegister,
1459         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1460   __ li(kInterpreterBytecodeOffsetRegister,
1461         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1462   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1463 
1464   __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
1465   __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1466 
1467   __ Branch(&after_stack_check_interrupt);
1468 
1469   __ bind(&has_optimized_code_or_state);
1470   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1471                                                feedback_vector);
1472   __ bind(&is_baseline);
1473   {
1474     // Load the feedback vector from the closure.
1475     __ LoadTaggedPointerField(
1476         feedback_vector,
1477         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1478     __ LoadTaggedPointerField(
1479         feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1480 
1481     Label install_baseline_code;
1482     // Check if feedback vector is valid. If not, call prepare for baseline to
1483     // allocate it.
1484     __ LoadTaggedPointerField(
1485         scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1486     __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1487     __ Branch(&install_baseline_code, ne, scratch,
1488               Operand(FEEDBACK_VECTOR_TYPE));
1489 
1490     // Check for a tiering state.
1491     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1492                                              feedback_vector,
1493                                              &has_optimized_code_or_state);
1494 
1495     // Load the baseline code into the closure.
1496     __ Move(a2, kInterpreterBytecodeArrayRegister);
1497     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1498     ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2);
1499     __ JumpCodeObject(a2);
1500 
1501     __ bind(&install_baseline_code);
1502     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1503   }
1504 
1505   __ bind(&compile_lazy);
1506   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1507   // Unreachable code.
1508   __ break_(0xCC);
1509 
1510   __ bind(&stack_overflow);
1511   __ CallRuntime(Runtime::kThrowStackOverflow);
1512   // Unreachable code.
1513   __ break_(0xCC);
1514 }
1515 
1516 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1517                                         Register start_address,
1518                                         Register scratch) {
1519   ASM_CODE_COMMENT(masm);
1520   // Find the address of the last argument.
1521   __ Sub64(scratch, num_args, Operand(1));
1522   __ Sll64(scratch, scratch, kSystemPointerSizeLog2);
1523   __ Sub64(start_address, start_address, scratch);
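  // i.e. start_address -= (num_args - 1) * kSystemPointerSize, so start_address
  // now points at the lowest-addressed (last) argument.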
1524 
1525   // Push the arguments.
1526   __ PushArray(start_address, num_args,
1527                TurboAssembler::PushArrayOrder::kReverse);
1528 }
1529 
1530 // static
1531 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1532     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1533     InterpreterPushArgsMode mode) {
1534   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1535   // ----------- S t a t e -------------
1536   //  -- a0 : the number of arguments
1537   //  -- a2 : the address of the first argument to be pushed. Subsequent
1538   //          arguments should be consecutive above this, in the same order as
1539   //          they are to be pushed onto the stack.
1540   //  -- a1 : the target to call (can be any Object).
1541   // -----------------------------------
1542   Label stack_overflow;
1543   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1544     // The spread argument should not be pushed.
1545     __ Sub64(a0, a0, Operand(1));
1546   }
1547 
1548   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1549     __ Sub64(a3, a0, Operand(kJSArgcReceiverSlots));
1550   } else {
1551     __ Move(a3, a0);
1552   }
1553   __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1554 
1555   // This function modifies a2 and a4.
1556   GenerateInterpreterPushArgs(masm, a3, a2, a4);
1557   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1558     __ PushRoot(RootIndex::kUndefinedValue);
1559   }
1560 
1561   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1562     // Pass the spread in the register a2.
1563     // a2 already points to the penultimate argument; the spread
1564     // is below that.
1565     __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
1566   }
1567 
1568   // Call the target.
1569   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1570     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1571             RelocInfo::CODE_TARGET);
1572   } else {
1573     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1574             RelocInfo::CODE_TARGET);
1575   }
1576 
1577   __ bind(&stack_overflow);
1578   {
1579     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1580     // Unreachable code.
1581     __ break_(0xCC);
1582   }
1583 }
1584 
1585 // static
1586 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1587     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1588   // ----------- S t a t e -------------
1589   // -- a0 : argument count
1590   // -- a3 : new target
1591   // -- a1 : constructor to call
1592   // -- a2 : allocation site feedback if available, undefined otherwise.
1593   // -- a4 : address of the first argument
1594   // -----------------------------------
1595   Label stack_overflow;
1596   __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1597 
1598   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1599     // The spread argument should not be pushed.
1600     __ Sub64(a0, a0, Operand(1));
1601   }
1602   Register argc_without_receiver = a6;
1603   __ Sub64(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1604   // Push the arguments. This function modifies a4 and a5.
1605   GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5);
1606 
1607   // Push a slot for the receiver.
1608   __ push(zero_reg);
1609 
1610   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1611     // Pass the spread in the register a2.
1612     // a4 already points to the penultimate argument, the spread
1613     // lies in the next interpreter register.
1614     __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
1615   } else {
1616     __ AssertUndefinedOrAllocationSite(a2, t0);
1617   }
1618 
1619   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1620     __ AssertFunction(a1);
1621 
1622     // Tail call to the function-specific construct stub (still in the caller
1623     // context at this point).
1624     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1625             RelocInfo::CODE_TARGET);
1626   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1627     // Call the constructor with a0, a1, and a3 unmodified.
1628     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1629             RelocInfo::CODE_TARGET);
1630   } else {
1631     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1632     // Call the constructor with a0, a1, and a3 unmodified.
1633     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1634   }
1635 
1636   __ bind(&stack_overflow);
1637   {
1638     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1639     // Unreachable code.
1640     __ break_(0xCC);
1641   }
1642 }
1643 
1644 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1645   // Set the return address to the correct point in the interpreter entry
1646   // trampoline.
1647   Label builtin_trampoline, trampoline_loaded;
1648   Smi interpreter_entry_return_pc_offset(
1649       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1650   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1651 
1652   // If the SFI function_data is an InterpreterData, the function will have a
1653   // custom copy of the interpreter entry trampoline for profiling. If so,
1654   // get the custom trampoline, otherwise grab the entry address of the global
1655   // trampoline.
1656   __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1657   __ LoadTaggedPointerField(
1658       t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1659   __ LoadTaggedPointerField(
1660       t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1661   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1662                    kInterpreterDispatchTableRegister);
1663   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1664             Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);
1665 
1666   __ LoadTaggedPointerField(
1667       t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1668   __ Add64(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1669   __ BranchShort(&trampoline_loaded);
1670 
1671   __ bind(&builtin_trampoline);
1672   __ li(t0, ExternalReference::
1673                 address_of_interpreter_entry_trampoline_instruction_start(
1674                     masm->isolate()));
1675   __ Ld(t0, MemOperand(t0));
1676 
1677   __ bind(&trampoline_loaded);
1678   __ Add64(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
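  // With ra set up this way, when the bytecode handler jumped to below
  // eventually returns, execution resumes inside the interpreter entry
  // trampoline right after its dispatch call, as if the trampoline itself had
  // performed the dispatch.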
1679 
1680   // Initialize the dispatch table register.
1681   __ li(kInterpreterDispatchTableRegister,
1682         ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1683 
1684   // Get the bytecode array pointer from the frame.
1685   __ Ld(kInterpreterBytecodeArrayRegister,
1686         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1687 
1688   if (FLAG_debug_code) {
1689     // Check that the function data field is actually a BytecodeArray object.
1690     __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1691     __ Assert(ne,
1692               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1693               kScratchReg, Operand(zero_reg));
1694     __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1695     __ Assert(eq,
1696               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1697               a1, Operand(BYTECODE_ARRAY_TYPE));
1698   }
1699 
1700   // Get the target bytecode offset from the frame.
1701   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1702               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1703 
1704   if (FLAG_debug_code) {
1705     Label okay;
1706     __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1707               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag),
1708               Label::Distance::kNear);
1709     // Unreachable code.
1710     __ break_(0xCC);
1711     __ bind(&okay);
1712   }
1713 
1714   // Dispatch to the target bytecode.
1715   __ Add64(a1, kInterpreterBytecodeArrayRegister,
1716            kInterpreterBytecodeOffsetRegister);
1717   __ Lbu(a7, MemOperand(a1));
1718   __ CalcScaledAddress(a1, kInterpreterDispatchTableRegister, a7,
1719                        kSystemPointerSizeLog2);
1720   __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1721   __ Jump(kJavaScriptCallCodeStartRegister);
1722 }
1723 
1724 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1725   // Advance the current bytecode offset stored within the given interpreter
1726   // stack frame. This simulates what all bytecode handlers do upon completion
1727   // of the underlying operation.
1728   __ Ld(kInterpreterBytecodeArrayRegister,
1729         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1730   __ Ld(kInterpreterBytecodeOffsetRegister,
1731         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1732   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1733 
1734   Label enter_bytecode, function_entry_bytecode;
1735   __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1736             Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1737                     kFunctionEntryBytecodeOffset));
1738 
1739   // Load the current bytecode.
1740   __ Add64(a1, kInterpreterBytecodeArrayRegister,
1741            kInterpreterBytecodeOffsetRegister);
1742   __ Lbu(a1, MemOperand(a1));
1743 
1744   // Advance to the next bytecode.
1745   Label if_return;
1746   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1747                                 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1748                                 a4, &if_return);
1749 
1750   __ bind(&enter_bytecode);
1751   // Convert new bytecode offset to a Smi and save in the stackframe.
1752   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1753   __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1754 
1755   Generate_InterpreterEnterBytecode(masm);
1756 
1757   __ bind(&function_entry_bytecode);
1758   // If the code deoptimizes during the implicit function entry stack interrupt
1759   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1760   // not a valid bytecode offset. Detect this case and advance to the first
1761   // actual bytecode.
1762   __ li(kInterpreterBytecodeOffsetRegister,
1763         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1764   __ Branch(&enter_bytecode);
1765 
1766   // We should never take the if_return path.
1767   __ bind(&if_return);
1768   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1769 }
1770 
1771 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1772   Generate_InterpreterEnterBytecode(masm);
1773 }
1774 
1775 namespace {
1776 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1777                                       bool java_script_builtin,
1778                                       bool with_result) {
1779   const RegisterConfiguration* config(RegisterConfiguration::Default());
1780   int allocatable_register_count = config->num_allocatable_general_registers();
1781   UseScratchRegisterScope temp(masm);
1782   Register scratch = temp.Acquire();
1783   if (with_result) {
1784     if (java_script_builtin) {
1785       __ Move(scratch, a0);
1786     } else {
1787       // Overwrite the hole inserted by the deoptimizer with the return value
1788       // from the LAZY deopt point.
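      // The result slot lies past the allocatable registers saved on the stack
      // and the fixed part of the builtin continuation frame, hence the offset
      // computed below.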
1789       __ Sd(a0,
1790             MemOperand(sp,
1791                        config->num_allocatable_general_registers() *
1792                                kSystemPointerSize +
1793                            BuiltinContinuationFrameConstants::kFixedFrameSize));
1794     }
1795   }
1796   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1797     int code = config->GetAllocatableGeneralCode(i);
1798     __ Pop(Register::from_code(code));
1799     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1800       __ SmiUntag(Register::from_code(code));
1801     }
1802   }
1803 
1804   if (with_result && java_script_builtin) {
1805     // Overwrite the hole inserted by the deoptimizer with the return value from
1806     // the LAZY deopt point. a0 contains the arguments count; the return value
1807     // from LAZY is always the last argument.
1808     constexpr int return_value_offset =
1809         BuiltinContinuationFrameConstants::kFixedSlotCount -
1810         kJSArgcReceiverSlots;
1811     __ Add64(a0, a0, Operand(return_value_offset));
1812     __ CalcScaledAddress(t0, sp, a0, kSystemPointerSizeLog2);
1813     __ Sd(scratch, MemOperand(t0));
1814     // Recover arguments count.
1815     __ Sub64(a0, a0, Operand(return_value_offset));
1816   }
1817 
1818   __ Ld(fp, MemOperand(
1819                 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1820   // Load builtin index (stored as a Smi) and use it to get the builtin start
1821   // address from the builtins table.
1822   __ Pop(t6);
1823   __ Add64(sp, sp,
1824            Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1825   __ Pop(ra);
1826   __ LoadEntryFromBuiltinIndex(t6);
1827   __ Jump(t6);
1828 }
1829 }  // namespace
1830 
1831 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1832   Generate_ContinueToBuiltinHelper(masm, false, false);
1833 }
1834 
1835 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1836     MacroAssembler* masm) {
1837   Generate_ContinueToBuiltinHelper(masm, false, true);
1838 }
1839 
1840 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1841   Generate_ContinueToBuiltinHelper(masm, true, false);
1842 }
1843 
1844 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1845     MacroAssembler* masm) {
1846   Generate_ContinueToBuiltinHelper(masm, true, true);
1847 }
1848 
1849 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1850   {
1851     FrameScope scope(masm, StackFrame::INTERNAL);
1852     __ CallRuntime(Runtime::kNotifyDeoptimized);
1853   }
1854 
1855   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
1856   __ Ld(a0, MemOperand(sp, 0 * kSystemPointerSize));
1857   __ Add64(sp, sp, Operand(1 * kSystemPointerSize));  // Remove state.
1858   __ Ret();
1859 }
1860 
1861 namespace {
1862 
1863 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1864                        Operand offset = Operand(int64_t(0))) {
1865   __ Add64(ra, entry_address, offset);
1866   // And "return" to the OSR entry point of the function.
1867   __ Ret();
1868 }
1869 
1870 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1871   ASM_CODE_COMMENT(masm);
1872   {
1873     FrameScope scope(masm, StackFrame::INTERNAL);
1874     __ CallRuntime(Runtime::kCompileOptimizedOSR);
1875   }
1876 
1877   // If the code object is null, just return to the caller.
1878   __ Ret(eq, a0, Operand(Smi::zero()));
1879   if (is_interpreter) {
1880     // Drop the handler frame that is sitting on top of the actual
1881     // JavaScript frame. This is the case when OSR is triggered from bytecode.
1882     __ LeaveFrame(StackFrame::STUB);
1883   }
1884   // Load deoptimization data from the code object.
1885   // <deopt_data> = <code>[#deoptimization_data_offset]
1886   __ LoadTaggedPointerField(
1887       a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
1888                              kHeapObjectTag));
1889 
1890   // Load the OSR entrypoint offset from the deoptimization data.
1891   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1892   __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1893                                      DeoptimizationData::kOsrPcOffsetIndex) -
1894                                      kHeapObjectTag));
1895 
1896   // Compute the target address = code_obj + header_size + osr_offset
1897   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1898   __ Add64(a0, a0, a1);
1899   Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
1900 }
1901 }  // namespace
1902 
1903 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1904   return OnStackReplacement(masm, true);
1905 }
1906 
1907 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1908   __ Ld(kContextRegister,
1909         MemOperand(fp, StandardFrameConstants::kContextOffset));
1910   return OnStackReplacement(masm, false);
1911 }
1912 
1913 // static
1914 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1915   // ----------- S t a t e -------------
1916   //  -- a0    : argc
1917   //  -- sp[0] : receiver
1918   //  -- sp[4] : thisArg
1919   //  -- sp[8] : argArray
1920   // -----------------------------------
1921 
1922   Register argc = a0;
1923   Register arg_array = a2;
1924   Register receiver = a1;
1925   Register this_arg = a5;
1926   Register undefined_value = a3;
1927 
1928   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1929 
1930   // 1. Load receiver into a1, argArray into a2 (if present), remove all
1931   // arguments from the stack (including the receiver), and push thisArg (if
1932   // present) instead.
1933   {
1934     // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
1935     // consistent state for a simple pop operation.
1936 
1937     __ Ld(this_arg, MemOperand(sp, kSystemPointerSize));
1938     __ Ld(arg_array, MemOperand(sp, 2 * kSystemPointerSize));
1939 
1940     Label done0, done1;
1941     UseScratchRegisterScope temps(masm);
1942     Register scratch = temps.Acquire();
1943     __ Sub64(scratch, argc, JSParameterCount(0));
1944     __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
1945     __ Move(arg_array, undefined_value);  // if argc == 0
1946     __ Move(this_arg, undefined_value);   // if argc == 0
1947     __ bind(&done0);                      // else (i.e., argc > 0)
1948 
1949     __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
1950     __ Move(arg_array, undefined_value);  // if argc == 1
1951     __ bind(&done1);                      // else (i.e., argc > 1)
1952 
1953     __ Ld(receiver, MemOperand(sp));
1954     __ DropArgumentsAndPushNewReceiver(argc, this_arg,
1955                                        MacroAssembler::kCountIsInteger,
1956                                        MacroAssembler::kCountIncludesReceiver);
1957   }
1958 
1959   // ----------- S t a t e -------------
1960   //  -- a2    : argArray
1961   //  -- a1    : receiver
1962   //  -- a3    : undefined root value
1963   //  -- sp[0] : thisArg
1964   // -----------------------------------
1965 
1966   // 2. We don't need to check explicitly for callable receiver here,
1967   // since that's the first thing the Call/CallWithArrayLike builtins
1968   // will do.
1969 
1970   // 3. Tail call with no arguments if argArray is null or undefined.
1971   Label no_arguments;
1972   __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
1973   __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value),
1974             Label::Distance::kNear);
1975 
1976   // 4a. Apply the receiver to the given argArray.
1977   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1978           RelocInfo::CODE_TARGET);
1979 
1980   // 4b. The argArray is either null or undefined, so we tail call without any
1981   // arguments to the receiver.
1982   __ bind(&no_arguments);
1983   {
1984     __ li(a0, JSParameterCount(0));
1985     DCHECK(receiver == a1);
1986     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1987   }
1988 }
1989 
1990 // static
1991 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1992   // 1. Get the callable to call (passed as receiver) from the stack.
1993   { __ Pop(a1); }
1994 
1995   // 2. Make sure we have at least one argument.
1996   // a0: actual number of arguments
1997   {
1998     Label done;
1999     __ Branch(&done, ne, a0, Operand(JSParameterCount(0)),
2000               Label::Distance::kNear);
2001     __ PushRoot(RootIndex::kUndefinedValue);
2002     __ Add64(a0, a0, Operand(1));
2003     __ bind(&done);
2004   }
2005 
2006   // 3. Adjust the actual number of arguments.
2007   __ Add64(a0, a0, -1);
2008 
2009   // 4. Call the callable.
2010   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2011 }
2012 
2013 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2014   // ----------- S t a t e -------------
2015   //  -- a0     : argc
2016   //  -- sp[0]  : receiver
2017   //  -- sp[8]  : target         (if argc >= 1)
2018   //  -- sp[16] : thisArgument   (if argc >= 2)
2019   //  -- sp[24] : argumentsList  (if argc == 3)
2020   // -----------------------------------
2021 
2022   Register argc = a0;
2023   Register arguments_list = a2;
2024   Register target = a1;
2025   Register this_argument = a5;
2026   Register undefined_value = a3;
2027 
2028   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2029 
2030   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2031   // remove all arguments from the stack (including the receiver), and push
2032   // thisArgument (if present) instead.
2033   {
2034     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2035     // consistent state for a simple pop operation.
2036 
2037     __ Ld(target, MemOperand(sp, kSystemPointerSize));
2038     __ Ld(this_argument, MemOperand(sp, 2 * kSystemPointerSize));
2039     __ Ld(arguments_list, MemOperand(sp, 3 * kSystemPointerSize));
2040 
2041     Label done0, done1, done2;
2042     UseScratchRegisterScope temps(masm);
2043     Register scratch = temps.Acquire();
2044     __ Sub64(scratch, argc, Operand(JSParameterCount(0)));
2045     __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2046     __ Move(arguments_list, undefined_value);  // if argc == 0
2047     __ Move(this_argument, undefined_value);   // if argc == 0
2048     __ Move(target, undefined_value);          // if argc == 0
2049     __ bind(&done0);                           // argc != 0
2050 
2051     __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
2052     __ Move(arguments_list, undefined_value);  // if argc == 1
2053     __ Move(this_argument, undefined_value);   // if argc == 1
2054     __ bind(&done1);                           // argc > 1
2055 
2056     __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
2057     __ Move(arguments_list, undefined_value);  // if argc == 2
2058     __ bind(&done2);                           // argc > 2
2059 
2060     __ DropArgumentsAndPushNewReceiver(argc, this_argument,
2061                                        MacroAssembler::kCountIsInteger,
2062                                        MacroAssembler::kCountIncludesReceiver);
2063   }
2064 
2065   // ----------- S t a t e -------------
2066   //  -- a2    : argumentsList
2067   //  -- a1    : target
2068   //  -- a3    : undefined root value
2069   //  -- sp[0] : thisArgument
2070   // -----------------------------------
2071 
2072   // 2. We don't need to check explicitly for callable target here,
2073   // since that's the first thing the Call/CallWithArrayLike builtins
2074   // will do.
2075 
2076   // 3. Apply the target to the given argumentsList.
2077   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2078           RelocInfo::CODE_TARGET);
2079 }
2080 
2081 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2082   // ----------- S t a t e -------------
2083   //  -- a0     : argc
2084   //  -- sp[0]   : receiver
2085   //  -- sp[8]   : target
2086   //  -- sp[16]  : argumentsList
2087   //  -- sp[24]  : new.target (optional)
2088   // -----------------------------------
2089   Register argc = a0;
2090   Register arguments_list = a2;
2091   Register target = a1;
2092   Register new_target = a3;
2093   Register undefined_value = a4;
2094 
2095   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2096 
2097   // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2098   // new.target into a3 (if present, otherwise use target), remove all
2099   // arguments from the stack (including the receiver), and push thisArgument
2100   // (if present) instead.
2101   {
2102     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2103     // consistent state for a simple pop operation.
2104     __ Ld(target, MemOperand(sp, kSystemPointerSize));
2105     __ Ld(arguments_list, MemOperand(sp, 2 * kSystemPointerSize));
2106     __ Ld(new_target, MemOperand(sp, 3 * kSystemPointerSize));
2107 
2108     Label done0, done1, done2;
2109     UseScratchRegisterScope temps(masm);
2110     Register scratch = temps.Acquire();
2111     __ Sub64(scratch, argc, Operand(JSParameterCount(0)));
2112     __ Branch(&done0, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2113     __ Move(arguments_list, undefined_value);  // if argc == 0
2114     __ Move(new_target, undefined_value);      // if argc == 0
2115     __ Move(target, undefined_value);          // if argc == 0
2116     __ bind(&done0);
2117 
2118     __ Branch(&done1, ne, scratch, Operand(1), Label::Distance::kNear);
2119     __ Move(arguments_list, undefined_value);  // if argc == 1
2120     __ Move(new_target, target);               // if argc == 1
2121     __ bind(&done1);
2122 
2123     __ Branch(&done2, ne, scratch, Operand(2), Label::Distance::kNear);
2124     __ Move(new_target, target);  // if argc == 2
2125     __ bind(&done2);
2126 
2127     __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
2128                                        MacroAssembler::kCountIsInteger,
2129                                        MacroAssembler::kCountIncludesReceiver);
2130   }
2131 
2132   // ----------- S t a t e -------------
2133   //  -- a2    : argumentsList
2134   //  -- a1    : target
2135   //  -- a3    : new.target
2136   //  -- sp[0] : receiver (undefined)
2137   // -----------------------------------
2138 
2139   // 2. We don't need to check explicitly for constructor target here,
2140   // since that's the first thing the Construct/ConstructWithArrayLike
2141   // builtins will do.
2142 
2143   // 3. We don't need to check explicitly for constructor new.target here,
2144   // since that's the second thing the Construct/ConstructWithArrayLike
2145   // builtins will do.
2146 
2147   // 4. Construct the target with the given new.target and argumentsList.
2148   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2149           RelocInfo::CODE_TARGET);
2150 }
2151 
2152 namespace {
2153 
2154 // Allocate new stack space for |count| arguments and shift all existing
2155 // arguments already on the stack. |pointer_to_new_space_out| points to the
2156 // first free slot on the stack to copy additional arguments to and
2157 // |argc_in_out| is updated to include |count|.
2158 void Generate_AllocateSpaceAndShiftExistingArguments(
2159     MacroAssembler* masm, Register count, Register argc_in_out,
2160     Register pointer_to_new_space_out) {
2161   UseScratchRegisterScope temps(masm);
2162   Register scratch1 = temps.Acquire();
2163   Register scratch2 = temps.Acquire();
2164   Register scratch3 = temps.Acquire();
2165   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2166                      scratch2));
2167   Register old_sp = scratch1;
2168   Register new_space = scratch2;
2169   __ mv(old_sp, sp);
2170   __ slli(new_space, count, kPointerSizeLog2);
2171   __ Sub64(sp, sp, Operand(new_space));
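  // Copy the arguments already on the stack from the old region (starting at
  // old_sp) down into the newly reserved region, walking from the lowest
  // address upwards so their relative order is preserved.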
2172 
2173   Register end = scratch2;
2174   Register value = scratch3;
2175   Register dest = pointer_to_new_space_out;
2176   __ mv(dest, sp);
2177   __ CalcScaledAddress(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
2178   Label loop, done;
2179   __ Branch(&done, ge, old_sp, Operand(end));
2180   __ bind(&loop);
2181   __ Ld(value, MemOperand(old_sp, 0));
2182   __ Sd(value, MemOperand(dest, 0));
2183   __ Add64(old_sp, old_sp, Operand(kSystemPointerSize));
2184   __ Add64(dest, dest, Operand(kSystemPointerSize));
2185   __ Branch(&loop, lt, old_sp, Operand(end));
2186   __ bind(&done);
2187 
2188   // Update total number of arguments.
2189   __ Add64(argc_in_out, argc_in_out, count);
2190 }
2191 
2192 }  // namespace
2193 
2194 // static
2195 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2196                                                Handle<Code> code) {
2197   UseScratchRegisterScope temps(masm);
2198   temps.Include(t1, t0);
2199   // ----------- S t a t e -------------
2200   //  -- a1 : target
2201   //  -- a0 : number of parameters on the stack
2202   //  -- a2 : arguments list (a FixedArray)
2203   //  -- a4 : len (number of elements to push from args)
2204   //  -- a3 : new.target (for [[Construct]])
2205   // -----------------------------------
2206   if (FLAG_debug_code) {
2207     // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2208     Label ok, fail;
2209     __ AssertNotSmi(a2);
2210     __ GetObjectType(a2, kScratchReg, kScratchReg);
2211     __ Branch(&ok, eq, kScratchReg, Operand(FIXED_ARRAY_TYPE),
2212               Label::Distance::kNear);
2213     __ Branch(&fail, ne, kScratchReg, Operand(FIXED_DOUBLE_ARRAY_TYPE),
2214               Label::Distance::kNear);
2215     __ Branch(&ok, eq, a4, Operand(zero_reg), Label::Distance::kNear);
2216     // Fall through.
2217     __ bind(&fail);
2218     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2219 
2220     __ bind(&ok);
2221   }
2222 
2223   Register args = a2;
2224   Register len = a4;
2225 
2226   // Check for stack overflow.
2227   Label stack_overflow;
2228   __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2229 
2230   // Move the arguments already in the stack,
2231   // including the receiver and the return address.
2232   // a4: Number of arguments to make room for.
2233   // a0: Number of arguments already on the stack.
2234   // a7: Points to first free slot on the stack after arguments were shifted.
2235   Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7);
2236 
2237   // Push arguments onto the stack (thisArgument is already on the stack).
2238   {
2239     Label done, push, loop;
2240     Register src = a6;
2241     Register scratch = len;
2242     UseScratchRegisterScope temps(masm);
2243     Register hole_value = temps.Acquire();
2244     __ Add64(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
2245     __ Branch(&done, eq, len, Operand(zero_reg), Label::Distance::kNear);
2246     __ Sll64(scratch, len, kTaggedSizeLog2);
2247     __ Sub64(scratch, sp, Operand(scratch));
2248     __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
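    // Walk the FixedArray, replacing any hole with undefined before storing
    // the element into the next free stack slot (a7). scratch starts at
    // sp - len * kTaggedSize and is bumped by kTaggedSize each iteration, so
    // the loop stops once it reaches sp.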
2249     __ bind(&loop);
2250     __ LoadTaggedPointerField(a5, MemOperand(src));
2251     __ Add64(src, src, kTaggedSize);
2252     __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
2253     __ LoadRoot(a5, RootIndex::kUndefinedValue);
2254     __ bind(&push);
2255     __ Sd(a5, MemOperand(a7, 0));
2256     __ Add64(a7, a7, Operand(kSystemPointerSize));
2257     __ Add64(scratch, scratch, Operand(kTaggedSize));
2258     __ Branch(&loop, ne, scratch, Operand(sp));
2259     __ bind(&done);
2260   }
2261 
2262   // Tail-call to the actual Call or Construct builtin.
2263   __ Jump(code, RelocInfo::CODE_TARGET);
2264 
2265   __ bind(&stack_overflow);
2266   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2267 }
2268 
2269 // static
2270 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2271                                                       CallOrConstructMode mode,
2272                                                       Handle<Code> code) {
2273   // ----------- S t a t e -------------
2274   //  -- a0 : the number of arguments
2275   //  -- a3 : the new.target (for [[Construct]] calls)
2276   //  -- a1 : the target to call (can be any Object)
2277   //  -- a2 : start index (to support rest parameters)
2278   // -----------------------------------
2279   UseScratchRegisterScope temps(masm);
2280   temps.Include(t0, t1);
2281   temps.Include(t2);
2282   // Check if new.target has a [[Construct]] internal method.
2283   if (mode == CallOrConstructMode::kConstruct) {
2284     Label new_target_constructor, new_target_not_constructor;
2285     UseScratchRegisterScope temps(masm);
2286     Register scratch = temps.Acquire();
2287     __ JumpIfSmi(a3, &new_target_not_constructor);
2288     __ LoadTaggedPointerField(scratch,
2289                               FieldMemOperand(a3, HeapObject::kMapOffset));
2290     __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2291     __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
2292     __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
2293               Label::Distance::kNear);
2294     __ bind(&new_target_not_constructor);
2295     {
2296       FrameScope scope(masm, StackFrame::MANUAL);
2297       __ EnterFrame(StackFrame::INTERNAL);
2298       __ Push(a3);
2299       __ CallRuntime(Runtime::kThrowNotConstructor);
2300     }
2301     __ bind(&new_target_constructor);
2302   }
2303 
2304   // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
2305   // code is erased.
2306   __ Move(a6, fp);
2307   __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2308 
2309   Label stack_done, stack_overflow;
2310   __ Sub64(a7, a7, Operand(kJSArgcReceiverSlots));
2311   __ Sub64(a7, a7, a2);
2312   __ Branch(&stack_done, le, a7, Operand(zero_reg));
2313   {
2314     // Check for stack overflow.
2315     __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2316 
2317     // Forward the arguments from the caller frame.
2318 
2319     // Point to the first argument to copy (skipping the receiver).
2320     __ Add64(a6, a6,
2321              Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2322                      kSystemPointerSize));
2323     __ CalcScaledAddress(a6, a6, a2, kSystemPointerSizeLog2);
2324 
2325     // Move the arguments already in the stack,
2326     // including the receiver and the return address.
2327     // a7: Number of arguments to make room for.
2328     // a0: Number of arguments already on the stack.
2329     // a2: Points to first free slot on the stack after arguments were shifted.
2330     Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2);
2331 
2332     // Copy arguments from the caller frame.
2333     // TODO(victorgomes): Consider using forward order as potentially more cache
2334     // friendly.
2335     {
2336       Label loop;
2337       __ bind(&loop);
2338       {
2339         UseScratchRegisterScope temps(masm);
2340         Register scratch = temps.Acquire(), addr = temps.Acquire();
2341         __ Sub32(a7, a7, Operand(1));
2342         __ CalcScaledAddress(addr, a6, a7, kSystemPointerSizeLog2);
2343         __ Ld(scratch, MemOperand(addr));
2344         __ CalcScaledAddress(addr, a2, a7, kSystemPointerSizeLog2);
2345         __ Sd(scratch, MemOperand(addr));
2346         __ Branch(&loop, ne, a7, Operand(zero_reg));
2347       }
2348     }
2349   }
2350   __ BranchShort(&stack_done);
2351   __ bind(&stack_overflow);
2352   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2353   __ bind(&stack_done);
2354 
2355   // Tail-call to the {code} handler.
2356   __ Jump(code, RelocInfo::CODE_TARGET);
2357 }
2358 
2359 // static
2360 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2361                                      ConvertReceiverMode mode) {
2362   // ----------- S t a t e -------------
2363   //  -- a0 : the number of arguments
2364   //  -- a1 : the function to call (checked to be a JSFunction)
2365   // -----------------------------------
2366   __ AssertCallableFunction(a1);
2367 
2368   Label class_constructor;
2369   __ LoadTaggedPointerField(
2370       a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2371   __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2372   __ And(kScratchReg, a3,
2373          Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
2374   __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
2375 
2376   // Enter the context of the function; ToObject has to run in the function
2377   // context, and we also need to take the global proxy from the function
2378   // context in case of conversion.
2379   __ LoadTaggedPointerField(cp,
2380                             FieldMemOperand(a1, JSFunction::kContextOffset));
2381   // We need to convert the receiver for non-native sloppy mode functions.
2382   Label done_convert;
2383   __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2384   __ And(kScratchReg, a3,
2385          Operand(SharedFunctionInfo::IsNativeBit::kMask |
2386                  SharedFunctionInfo::IsStrictBit::kMask));
2387   __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2388   {
2389     // ----------- S t a t e -------------
2390     //  -- a0 : the number of arguments
2391     //  -- a1 : the function to call (checked to be a JSFunction)
2392     //  -- a2 : the shared function info.
2393     //  -- cp : the function context.
2394     // -----------------------------------
2395 
2396     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2397       // Patch receiver to global proxy.
2398       __ LoadGlobalProxy(a3);
2399     } else {
2400       Label convert_to_object, convert_receiver;
2401       __ LoadReceiver(a3, a0);
2402       __ JumpIfSmi(a3, &convert_to_object);
2403       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2404       __ GetObjectType(a3, a4, a4);
2405       __ Branch(&done_convert, Ugreater_equal, a4,
2406                 Operand(FIRST_JS_RECEIVER_TYPE));
2407       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2408         Label convert_global_proxy;
2409         __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2410         __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2411         __ bind(&convert_global_proxy);
2412         {
2413           // Patch receiver to global proxy.
2414           __ LoadGlobalProxy(a3);
2415         }
2416         __ Branch(&convert_receiver);
2417       }
2418       __ bind(&convert_to_object);
2419       {
2420         // Convert receiver using ToObject.
2421         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2422         // in the fast case? (fall back to AllocateInNewSpace?)
2423         FrameScope scope(masm, StackFrame::INTERNAL);
2424         __ SmiTag(a0);
2425         __ Push(a0, a1);
2426         __ Move(a0, a3);
2427         __ Push(cp);
2428         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2429                 RelocInfo::CODE_TARGET);
2430         __ Pop(cp);
2431         __ Move(a3, a0);
2432         __ Pop(a0, a1);
2433         __ SmiUntag(a0);
2434       }
2435       __ LoadTaggedPointerField(
2436           a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2437       __ bind(&convert_receiver);
2438     }
2439     __ StoreReceiver(a3, a0, kScratchReg);
2440   }
2441   __ bind(&done_convert);
2442 
2443   // ----------- S t a t e -------------
2444   //  -- a0 : the number of arguments
2445   //  -- a1 : the function to call (checked to be a JSFunction)
2446   //  -- a2 : the shared function info.
2447   //  -- cp : the function context.
2448   // -----------------------------------
2449 
2450   __ Lhu(a2,
2451          FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2452   __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2453 
2454   // The function is a "classConstructor", need to raise an exception.
2455   __ bind(&class_constructor);
2456   {
2457     FrameScope frame(masm, StackFrame::INTERNAL);
2458     __ Push(a1);
2459     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2460   }
2461 }
2462 
2463 namespace {
2464 
2465 void Generate_PushBoundArguments(MacroAssembler* masm) {
2466   // ----------- S t a t e -------------
2467   //  -- a0 : the number of arguments
2468   //  -- a1 : target (checked to be a JSBoundFunction)
2469   //  -- a3 : new.target (only in case of [[Construct]])
2470   // -----------------------------------
2471   UseScratchRegisterScope temps(masm);
2472   temps.Include(t0, t1);
2473   Register bound_argc = a4;
2474   Register bound_argv = a2;
2475   // Load [[BoundArguments]] into a2 and length of that into a4.
2476   Label no_bound_arguments;
2477   __ LoadTaggedPointerField(
2478       bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2479   __ SmiUntagField(bound_argc,
2480                    FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
2481   __ Branch(&no_bound_arguments, eq, bound_argc, Operand(zero_reg));
2482   {
2483     // ----------- S t a t e -------------
2484     //  -- a0 : the number of arguments
2485     //  -- a1 : target (checked to be a JSBoundFunction)
2486     //  -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2487     //  -- a3 : new.target (only in case of [[Construct]])
2488     //  -- a4: the number of [[BoundArguments]]
2489     // -----------------------------------
2490     UseScratchRegisterScope temps(masm);
2491     Register scratch = temps.Acquire();
2492     Label done;
2493     // Reserve stack space for the [[BoundArguments]].
2494     {
2495       // Check the stack for overflow. We are not trying to catch interruptions
2496       // (i.e. debug break and preemption) here, so check the "real stack
2497       // limit".
2498       __ StackOverflowCheck(a4, temps.Acquire(), temps.Acquire(), nullptr,
2499                             &done);
2500       {
2501         FrameScope scope(masm, StackFrame::MANUAL);
2502         __ EnterFrame(StackFrame::INTERNAL);
2503         __ CallRuntime(Runtime::kThrowStackOverflow);
2504       }
2505       __ bind(&done);
2506     }
2507 
2508     // Pop receiver.
2509     __ Pop(scratch);
2510 
2511     // Push [[BoundArguments]].
2512     {
2513       Label loop, done_loop;
2514       __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2515       __ Add64(a0, a0, Operand(a4));
2516       __ Add64(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2517       __ bind(&loop);
2518       __ Sub64(a4, a4, Operand(1));
2519       __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
2520       __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
2521       __ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
2522       __ Push(kScratchReg);
2523       __ Branch(&loop);
2524       __ bind(&done_loop);
2525     }
2526 
2527     // Push receiver.
2528     __ Push(scratch);
2529   }
2530   __ bind(&no_bound_arguments);
2531 }
2532 
2533 }  // namespace
2534 
2535 // static
2536 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2537   // ----------- S t a t e -------------
2538   //  -- a0 : the number of arguments
2539   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2540   // -----------------------------------
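  // JavaScript-level behaviour implemented here (illustrative sketch only):
  //   const bound = target.bind(thisArg, x, y);
  //   bound(z);  // calls target with receiver thisArg and arguments (x, y, z)
  // [[BoundThis]] supplies the receiver, [[BoundArguments]] supplies (x, y),
  // and [[BoundTargetFunction]] is the function that is eventually invoked.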
2541   __ AssertBoundFunction(a1);
2542 
2543   // Patch the receiver to [[BoundThis]].
2544   {
2545     UseScratchRegisterScope temps(masm);
2546     Register scratch = temps.Acquire();
2547     __ LoadAnyTaggedField(
2548         scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2549     __ StoreReceiver(scratch, a0, kScratchReg);
2550   }
2551 
2552   // Push the [[BoundArguments]] onto the stack.
2553   Generate_PushBoundArguments(masm);
2554 
2555   // Call the [[BoundTargetFunction]] via the Call builtin.
2556   __ LoadTaggedPointerField(
2557       a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2558   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2559           RelocInfo::CODE_TARGET);
2560 }
2561 
2562 // static
2563 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2564   // ----------- S t a t e -------------
2565   //  -- a0 : the number of arguments
2566   //  -- a1 : the target to call (can be any Object).
2567   // -----------------------------------
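  // Dispatch order implemented below: callable JSFunctions go to CallFunction,
  // JSBoundFunctions to CallBoundFunction, JSProxies to CallProxy and wrapped
  // functions to CallWrappedFunction; any other object with a [[Call]]
  // internal method is routed through the call_as_function_delegate, while
  // class constructors and non-callables throw.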
2568 
2569   Label non_callable, class_constructor;
2570   UseScratchRegisterScope temps(masm);
2571   temps.Include(t1, t2);
2572   temps.Include(t4);
2573   Register map = temps.Acquire(), type = temps.Acquire(),
2574            range = temps.Acquire();
2575   __ JumpIfSmi(a1, &non_callable);
2576   __ LoadMap(map, a1);
2577   __ GetInstanceTypeRange(map, type, FIRST_CALLABLE_JS_FUNCTION_TYPE, range);
2578   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2579           RelocInfo::CODE_TARGET, Uless_equal, range,
2580           Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
2581                   FIRST_CALLABLE_JS_FUNCTION_TYPE));
2582   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2583           RelocInfo::CODE_TARGET, eq, type, Operand(JS_BOUND_FUNCTION_TYPE));
2584   Register scratch = map;
2585   // Check if target has a [[Call]] internal method.
2586   __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
2587   __ And(scratch, scratch, Operand(Map::Bits1::IsCallableBit::kMask));
2588   __ Branch(&non_callable, eq, scratch, Operand(zero_reg),
2589             Label::Distance::kNear);
2590 
2591   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
2592           type, Operand(JS_PROXY_TYPE));
2593 
2594   // Check if the target is a wrapped function and, if so, dispatch to the
2595   // CallWrappedFunction builtin.
2596   __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2597           RelocInfo::CODE_TARGET, eq, type, Operand(JS_WRAPPED_FUNCTION_TYPE));
2598 
2599   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2600   // Check that the function is not a "classConstructor".
2601   __ Branch(&class_constructor, eq, type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2602 
2603   // 2. Call to something else, which might have a [[Call]] internal method (if
2604   // not we raise an exception).
2605   // Overwrite the original receiver with the (original) target.
2606   __ StoreReceiver(a1, a0, kScratchReg);
2607   // Let the "call_as_function_delegate" take care of the rest.
2608   __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2609   __ Jump(masm->isolate()->builtins()->CallFunction(
2610               ConvertReceiverMode::kNotNullOrUndefined),
2611           RelocInfo::CODE_TARGET);
2612 
2613   // 3. Call to something that is not callable.
2614   __ bind(&non_callable);
2615   {
2616     FrameScope scope(masm, StackFrame::INTERNAL);
2617     __ Push(a1);
2618     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2619   }
2620   // 4. The function is a "classConstructor", so we need to raise an exception.
2621   __ bind(&class_constructor);
2622   {
2623     FrameScope frame(masm, StackFrame::INTERNAL);
2624     __ Push(a1);
2625     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2626     __ ebreak();
2627   }
2628 }
2629 
2630 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2631   // ----------- S t a t e -------------
2632   //  -- a0 : the number of arguments
2633   //  -- a1 : the constructor to call (checked to be a JSFunction)
2634   //  -- a3 : the new target (checked to be a constructor)
2635   // -----------------------------------
2636   __ AssertConstructor(a1);
2637   __ AssertFunction(a1);
2638 
2639   // The calling convention for function-specific ConstructStubs requires
2640   // a2 to contain either an AllocationSite or undefined.
2641   __ LoadRoot(a2, RootIndex::kUndefinedValue);
2642 
2643   Label call_generic_stub;
2644 
2645   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2646   __ LoadTaggedPointerField(
2647       a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2648   __ Lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2649   __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2650   __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg),
2651             Label::Distance::kNear);
2652 
2653   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2654           RelocInfo::CODE_TARGET);
2655 
2656   __ bind(&call_generic_stub);
2657   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2658           RelocInfo::CODE_TARGET);
2659 }
2660 
2661 // static
2662 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2663   // ----------- S t a t e -------------
2664   //  -- a0 : the number of arguments
2665   //  -- a1 : the function to call (checked to be a JSBoundFunction)
2666   //  -- a3 : the new target (checked to be a constructor)
2667   // -----------------------------------
2668   __ AssertBoundFunction(a1);
2669 
2670   // Push the [[BoundArguments]] onto the stack.
2671   Generate_PushBoundArguments(masm);
2672 
2673   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2674   Label skip;
2675   {
2676     UseScratchRegisterScope temps(masm);
2677     Register scratch = temps.Acquire();
2678     __ CmpTagged(scratch, a1, a3);
2679     __ Branch(&skip, ne, scratch, Operand(zero_reg), Label::Distance::kNear);
2680   }
2681   __ LoadTaggedPointerField(
2682       a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2683   __ bind(&skip);
2684 
2685   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2686   __ LoadTaggedPointerField(
2687       a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2688   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2689 }
2690 
2691 // static
2692 void Builtins::Generate_Construct(MacroAssembler* masm) {
2693   // ----------- S t a t e -------------
2694   //  -- a0 : the number of arguments
2695   //  -- a1 : the constructor to call (can be any Object)
2696   //  -- a3 : the new target (either the same as the constructor or
2697   //          the JSFunction on which new was invoked initially)
2698   // -----------------------------------
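  // Dispatch order implemented below: JSFunctions go to ConstructFunction,
  // JSBoundFunctions to ConstructBoundFunction, JSProxies to ConstructProxy;
  // any other object whose map has the IsConstructor bit is routed through the
  // call_as_constructor_delegate, and everything else lands in
  // ConstructedNonConstructable.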
2699 
2700   // Check if target is a Smi.
2701   Label non_constructor, non_proxy;
2702   __ JumpIfSmi(a1, &non_constructor);
2703 
2704   // Check if target has a [[Construct]] internal method.
2705   UseScratchRegisterScope temps(masm);
2706   temps.Include(t0, t1);
2707   Register map = temps.Acquire();
2708   Register scratch = temps.Acquire();
2709   __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
2710   __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
2711   __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
2712   __ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
2713   Register range = temps.Acquire();
2714   // Dispatch based on instance type.
2715   __ GetInstanceTypeRange(map, scratch, FIRST_JS_FUNCTION_TYPE, range);
2716   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2717           RelocInfo::CODE_TARGET, Uless_equal, range,
2718           Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2719 
2720   // Only dispatch to bound functions after checking whether they are
2721   // constructors.
2722   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2723           RelocInfo::CODE_TARGET, eq, scratch, Operand(JS_BOUND_FUNCTION_TYPE));
2724 
2725   // Only dispatch to proxies after checking whether they are constructors.
2726   __ Branch(&non_proxy, ne, scratch, Operand(JS_PROXY_TYPE),
2727             Label::Distance::kNear);
2728   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2729           RelocInfo::CODE_TARGET);
2730 
2731   // Called Construct on an exotic Object with a [[Construct]] internal method.
2732   __ bind(&non_proxy);
2733   {
2734     // Overwrite the original receiver with the (original) target.
2735     __ StoreReceiver(a1, a0, kScratchReg);
2736     // Let the "call_as_constructor_delegate" take care of the rest.
2737     __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2738     __ Jump(masm->isolate()->builtins()->CallFunction(),
2739             RelocInfo::CODE_TARGET);
2740   }
2741 
2742   // Called Construct on an Object that doesn't have a [[Construct]] internal
2743   // method.
2744   __ bind(&non_constructor);
2745   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2746           RelocInfo::CODE_TARGET);
2747 }
2748 
2749 #if V8_ENABLE_WEBASSEMBLY
2750 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2751   // The function index was put in t0 by the jump table trampoline.
2752   // Convert it to a Smi for the runtime call.
2753   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
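  // Overall flow: save the wasm parameter registers, call
  // Runtime::kWasmCompileLazy to compile the function, then jump to the
  // returned jump-table slot so the now-compiled function is entered with its
  // original arguments still in place.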
2754 
2755   RegList kSavedGpRegs = ([]() constexpr {
2756     RegList saved_gp_regs;
2757     for (Register gp_param_reg : wasm::kGpParamRegisters) {
2758       saved_gp_regs.set(gp_param_reg);
2759     }
2760 
2761     // All set registers were unique.
2762     CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2763     // The Wasm instance must be part of the saved registers.
2764     CHECK(saved_gp_regs.has(kWasmInstanceRegister));
2765     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2766              saved_gp_regs.Count());
2767     return saved_gp_regs;
2768   })();
2769 
2770   DoubleRegList kSavedFpRegs = ([]() constexpr {
2771     DoubleRegList saved_fp_regs;
2772     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2773       saved_fp_regs.set(fp_param_reg);
2774     }
2775 
2776     CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2777     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2778              saved_fp_regs.Count());
2779     return saved_fp_regs;
2780   })();
2781 
2782   {
2783     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2784     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2785 
2786     __ MultiPush(kSavedGpRegs);
2787     __ MultiPushFPU(kSavedFpRegs);
2788 
2789     // Pass the instance and the function index as explicit arguments to the
2790     // runtime function.
2791     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2792     // Initialize the JavaScript context with 0. CEntry will use it to
2793     // set the current context on the isolate.
2794     __ Move(kContextRegister, Smi::zero());
2795     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2796 
2797     __ SmiUntag(s1, a0);  // move return value to s1 since a0 will be restored
2798                           // to the value before the call
2799     CHECK(!kSavedGpRegs.has(s1));
2800 
2801     // Restore registers.
2802     __ MultiPopFPU(kSavedFpRegs);
2803     __ MultiPop(kSavedGpRegs);
2804   }
2805 
2806   // The runtime function returned the jump table slot offset as a Smi; it was
2807   // untagged into s1 above. Use it to compute the jump target.
2808   __ Ld(kScratchReg,
2809         MemOperand(kWasmInstanceRegister,
2810                    WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
2811   __ Add64(s1, s1, Operand(kScratchReg));
2812   // Finally, jump to the entrypoint.
2813   __ Jump(s1);
2814 }
2815 
2816 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2817   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2818   {
2819     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2820 
2821     // Save all parameter registers. They might hold live values, we restore
2822     // them after the runtime call.
2823     __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2824     __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2825 
2826     // Initialize the JavaScript context with 0. CEntry will use it to
2827     // set the current context on the isolate.
2828     __ Move(cp, Smi::zero());
2829     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2830 
2831     // Restore registers.
2832     __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2833     __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2834   }
2835   __ Ret();
2836 }
2837 #endif  // V8_ENABLE_WEBASSEMBLY
2838 
2839 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2840                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2841                                bool builtin_exit_frame) {
2842   // Called from JavaScript; parameters are on stack as if calling JS function
2843   // a0: number of arguments including receiver
2844   // a1: pointer to builtin function
2845   // fp: frame pointer    (restored after C call)
2846   // sp: stack pointer    (restored as callee's sp after C call)
2847   // cp: current context  (C callee-saved)
2848   //
2849   // If argv_mode == ArgvMode::kRegister:
2850   // a2: pointer to the first argument
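  // In C terms the code below does roughly the following (illustrative sketch
  // only, not a literal signature):
  //   result = ((Object (*)(int, Address*, Isolate*))builtin)(argc, argv, isolate);
  //   if (result == exception_sentinel) goto exception_returned;
  // all wrapped in an exit frame so the stack walker sees the JS->C transition.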
2851 
2852   if (argv_mode == ArgvMode::kRegister) {
2853     // Move argv into the correct register.
2854     __ Move(s1, a2);
2855   } else {
2856     // Compute the argv pointer in a callee-saved register.
2857     __ CalcScaledAddress(s1, sp, a0, kSystemPointerSizeLog2);
2858     __ Sub64(s1, s1, kSystemPointerSize);
2859   }
2860 
2861   // Enter the exit frame that transitions from JavaScript to C++.
2862   FrameScope scope(masm, StackFrame::MANUAL);
2863   __ EnterExitFrame(
2864       save_doubles == SaveFPRegsMode::kSave, 0,
2865       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2866 
2867   // s3: number of arguments  including receiver (C callee-saved)
2868   // s1: pointer to first argument (C callee-saved)
2869   // s2: pointer to builtin function (C callee-saved)
2870 
2871   // Prepare arguments for C routine.
2872   // a0 = argc
2873   __ Move(s3, a0);
2874   __ Move(s2, a1);
2875 
2876   // We are calling compiled C/C++ code: a0, a1 and a2 will hold the argc, argv
2877   // and isolate arguments. No C argument slots are reserved on RISC-V.
2878 
2879   __ AssertStackIsAligned();
2880 
2881   // a0 = argc, a1 = argv, a2 = isolate
2882   __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2883   __ Move(a1, s1);
2884 
2885   __ StoreReturnAddressAndCall(s2);
2886 
2887   // Result returned in a0 or a1:a0 - do not destroy these registers!
2888 
2889   // Check result for exception sentinel.
2890   Label exception_returned;
2891   __ LoadRoot(a4, RootIndex::kException);
2892   __ Branch(&exception_returned, eq, a4, Operand(a0));
2893 
2894   // Check that there is no pending exception, otherwise we
2895   // should have returned the exception sentinel.
2896   if (FLAG_debug_code) {
2897     Label okay;
2898     ExternalReference pending_exception_address = ExternalReference::Create(
2899         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2900     __ li(a2, pending_exception_address);
2901     __ Ld(a2, MemOperand(a2));
2902     __ LoadRoot(a4, RootIndex::kTheHoleValue);
2903     // Cannot use Check here as it attempts to generate a call into the runtime.
2904     __ Branch(&okay, eq, a4, Operand(a2), Label::Distance::kNear);
2905     __ stop();
2906     __ bind(&okay);
2907   }
2908 
2909   // Exit C frame and return.
2910   // a0:a1: result
2911   // sp: stack pointer
2912   // fp: frame pointer
2913   Register argc = argv_mode == ArgvMode::kRegister
2914                       // We don't want to pop arguments so set argc to no_reg.
2915                       ? no_reg
2916                       // s3: still holds argc (callee-saved).
2917                       : s3;
2918   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
2919 
2920   // Handling of exception.
2921   __ bind(&exception_returned);
2922 
2923   ExternalReference pending_handler_context_address = ExternalReference::Create(
2924       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2925   ExternalReference pending_handler_entrypoint_address =
2926       ExternalReference::Create(
2927           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2928   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2929       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2930   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2931       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2932 
2933   // Ask the runtime for help to determine the handler. This will set a0 to
2934   // contain the current pending exception, don't clobber it.
2935   ExternalReference find_handler =
2936       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2937   {
2938     FrameScope scope(masm, StackFrame::MANUAL);
2939     __ PrepareCallCFunction(3, 0, a0);
2940     __ Move(a0, zero_reg);
2941     __ Move(a1, zero_reg);
2942     __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2943     __ CallCFunction(find_handler, 3);
2944   }
2945 
2946   // Retrieve the handler context, SP and FP.
2947   __ li(cp, pending_handler_context_address);
2948   __ Ld(cp, MemOperand(cp));
2949   __ li(sp, pending_handler_sp_address);
2950   __ Ld(sp, MemOperand(sp));
2951   __ li(fp, pending_handler_fp_address);
2952   __ Ld(fp, MemOperand(fp));
2953 
2954   // If the handler is a JS frame, restore the context to the frame. Note that
2955   // the context will be set to (cp == 0) for non-JS frames.
2956   Label zero;
2957   __ Branch(&zero, eq, cp, Operand(zero_reg), Label::Distance::kNear);
2958   __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2959   __ bind(&zero);
2960 
2961   // Compute the handler entry address and jump to it.
2962   UseScratchRegisterScope temp(masm);
2963   Register scratch = temp.Acquire();
2964   __ li(scratch, pending_handler_entrypoint_address);
2965   __ Ld(scratch, MemOperand(scratch));
2966   __ Jump(scratch);
2967 }
2968 
2969 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2970   Label done;
2971   Register result_reg = t0;
2972 
2973   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2974   Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2975   Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2976   DoubleRegister double_scratch = kScratchDoubleReg;
2977 
2978   // Account for saved regs.
2979   const int kArgumentOffset = 4 * kSystemPointerSize;
2980 
2981   __ Push(result_reg);
2982   __ Push(scratch, scratch2, scratch3);
2983 
2984   // Load double input.
2985   __ LoadDouble(double_scratch, MemOperand(sp, kArgumentOffset));
2986 
2987   // Try a conversion to a signed integer; if the conversion fails, scratch is
2988   // set to 0.
2989   __ Trunc_w_d(scratch3, double_scratch, scratch);
2990 
2991   // If we had no exceptions then set result_reg and we are done.
2992   Label error;
2993   __ Branch(&error, eq, scratch, Operand(zero_reg), Label::Distance::kNear);
2994   __ Move(result_reg, scratch3);
2995   __ Branch(&done);
2996   __ bind(&error);
2997 
2998   // Load the double value and perform a manual truncation.
2999   Register input_high = scratch2;
3000   Register input_low = scratch3;
3001 
3002   __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
3003   __ Lw(input_high,
3004         MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
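  // An IEEE-754 double is laid out as 1 sign bit, 11 exponent bits and 52
  // mantissa bits; input_high holds the sign, exponent and top 20 mantissa
  // bits, input_low holds the remaining 32 mantissa bits. The manual
  // truncation below rebuilds the integer by shifting the implicit-1 mantissa
  // according to the unbiased exponent and reapplying the sign.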
3005 
3006   Label normal_exponent;
3007   // Extract the biased exponent in result.
3008   __ ExtractBits(result_reg, input_high, HeapNumber::kExponentShift,
3009                  HeapNumber::kExponentBits);
3010 
3011   // Check for Infinity and NaNs, which should return 0.
3012   __ Sub32(scratch, result_reg, HeapNumber::kExponentMask);
3013   __ LoadZeroIfConditionZero(
3014       result_reg,
3015       scratch);  // result_reg = scratch == 0 ? 0 : result_reg
3016   __ Branch(&done, eq, scratch, Operand(zero_reg));
3017 
3018   // Express exponent as delta to (number of mantissa bits + 31).
3019   __ Sub32(result_reg, result_reg,
3020            Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
3021 
3022   // If the delta is strictly positive, all bits would be shifted away,
3023   // which means that we can return 0.
3024   __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg),
3025             Label::Distance::kNear);
3026   __ Move(result_reg, zero_reg);
3027   __ Branch(&done);
3028 
3029   __ bind(&normal_exponent);
3030   const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
3031   // Calculate shift.
3032   __ Add32(scratch, result_reg,
3033            Operand(kShiftBase + HeapNumber::kMantissaBits));
3034 
3035   // Save the sign.
3036   Register sign = result_reg;
3037   result_reg = no_reg;
3038   __ And(sign, input_high, Operand(HeapNumber::kSignMask));
3039 
3040   // We must specially handle shifts greater than 31.
3041   Label high_shift_needed, high_shift_done;
3042   __ Branch(&high_shift_needed, lt, scratch, Operand(32),
3043             Label::Distance::kNear);
3044   __ Move(input_high, zero_reg);
3045   __ BranchShort(&high_shift_done);
3046   __ bind(&high_shift_needed);
3047 
3048   // Set the implicit 1 before the mantissa part in input_high.
3049   __ Or(input_high, input_high,
3050         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
3051   // Shift the mantissa bits to the correct position.
3052   // We don't need to clear non-mantissa bits as they will be shifted away.
3053   // If they weren't, it would mean that the answer is in the 32-bit range.
3054   __ Sll32(input_high, input_high, scratch);
3055 
3056   __ bind(&high_shift_done);
3057 
3058   // Replace the shifted bits with bits from the lower mantissa word.
3059   Label pos_shift, shift_done, sign_negative;
3060   __ li(kScratchReg, 32);
3061   __ subw(scratch, kScratchReg, scratch);
3062   __ Branch(&pos_shift, ge, scratch, Operand(zero_reg), Label::Distance::kNear);
3063 
3064   // Negate scratch.
3065   __ Sub32(scratch, zero_reg, scratch);
3066   __ Sll32(input_low, input_low, scratch);
3067   __ BranchShort(&shift_done);
3068 
3069   __ bind(&pos_shift);
3070   __ srlw(input_low, input_low, scratch);
3071 
3072   __ bind(&shift_done);
3073   __ Or(input_high, input_high, Operand(input_low));
3074   // Restore sign if necessary.
3075   __ Move(scratch, sign);
3076   result_reg = sign;
3077   sign = no_reg;
3078   __ Sub32(result_reg, zero_reg, input_high);
3079   __ Branch(&sign_negative, ne, scratch, Operand(zero_reg),
3080             Label::Distance::kNear);
3081   __ Move(result_reg, input_high);
3082   __ bind(&sign_negative);
3083 
3084   __ bind(&done);
3085 
3086   __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
3087   __ Pop(scratch, scratch2, scratch3);
3088   __ Pop(result_reg);
3089   __ Ret();
3090 }
3091 
3092 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
3093   // TODO(v8:10701): Implement for this platform.
3094   __ Trap();
3095 }
3096 
3097 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
3098   // TODO(v8:12191): Implement for this platform.
3099   __ Trap();
3100 }
3101 
3102 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3103   // TODO(v8:12191): Implement for this platform.
3104   __ Trap();
3105 }
3106 
3107 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
3108   // TODO(v8:12191): Implement for this platform.
3109   __ Trap();
3110 }
3111 
3112 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3113   // Only needed on x64.
3114   __ Trap();
3115 }
3116 namespace {
3117 
3118 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3119   int64_t offset = (ref0.address() - ref1.address());
3120   DCHECK(static_cast<int>(offset) == offset);
3121   return static_cast<int>(offset);
3122 }
3123 
3124 // Calls an API function.  Allocates HandleScope, extracts returned value
3125 // from handle and propagates exceptions.  Restores context.  stack_space
3126 // - space to be unwound on exit (includes the call JS arguments space and
3127 // the additional space allocated for the fast call).
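// In pseudocode, the sequence generated below is roughly:
//   prev_next = handle_scope.next; prev_limit = handle_scope.limit;
//   handle_scope.level++;
//   result = call api function (directly, or via the profiling thunk);
//   handle_scope.next = prev_next; handle_scope.level--;
//   if (handle_scope.limit != prev_limit) DeleteExtensions(isolate);
//   LeaveExitFrame(); if (scheduled_exception) PromoteScheduledException();
//   return result;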
3128 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3129                               ExternalReference thunk_ref, int stack_space,
3130                               MemOperand* stack_space_operand,
3131                               MemOperand return_value_operand) {
3132   ASM_CODE_COMMENT(masm);
3133   Isolate* isolate = masm->isolate();
3134   ExternalReference next_address =
3135       ExternalReference::handle_scope_next_address(isolate);
3136   const int kNextOffset = 0;
3137   const int kLimitOffset = AddressOffset(
3138       ExternalReference::handle_scope_limit_address(isolate), next_address);
3139   const int kLevelOffset = AddressOffset(
3140       ExternalReference::handle_scope_level_address(isolate), next_address);
3141 
3142   DCHECK(function_address == a1 || function_address == a2);
3143 
3144   Label profiler_enabled, end_profiler_check;
3145   {
3146     UseScratchRegisterScope temp(masm);
3147     Register scratch = temp.Acquire();
3148     __ li(scratch, ExternalReference::is_profiling_address(isolate));
3149     __ Lb(scratch, MemOperand(scratch, 0));
3150     __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
3151               Label::Distance::kNear);
3152     __ li(scratch, ExternalReference::address_of_runtime_stats_flag());
3153     __ Lw(scratch, MemOperand(scratch, 0));
3154     __ Branch(&profiler_enabled, ne, scratch, Operand(zero_reg),
3155               Label::Distance::kNear);
3156     {
3157       // Call the api function directly.
3158       __ Move(scratch, function_address);
3159       __ BranchShort(&end_profiler_check);
3160     }
3161 
3162     __ bind(&profiler_enabled);
3163     {
3164       // Additional parameter is the address of the actual callback.
3165       __ li(scratch, thunk_ref);
3166     }
3167     __ bind(&end_profiler_check);
3168 
3169     // Allocate HandleScope in callee-save registers.
3170     __ li(s5, next_address);
3171     __ Ld(s3, MemOperand(s5, kNextOffset));
3172     __ Ld(s1, MemOperand(s5, kLimitOffset));
3173     __ Lw(s2, MemOperand(s5, kLevelOffset));
3174     __ Add32(s2, s2, Operand(1));
3175     __ Sw(s2, MemOperand(s5, kLevelOffset));
3176 
3177     __ StoreReturnAddressAndCall(scratch);
3178   }
3179 
3180   Label promote_scheduled_exception;
3181   Label delete_allocated_handles;
3182   Label leave_exit_frame;
3183   Label return_value_loaded;
3184 
3185   // Load value from ReturnValue.
3186   __ Ld(a0, return_value_operand);
3187   __ bind(&return_value_loaded);
3188 
3189   // No more valid handles (the result handle was the last one). Restore
3190   // previous handle scope.
3191   __ Sd(s3, MemOperand(s5, kNextOffset));
3192   if (FLAG_debug_code) {
3193     __ Lw(a1, MemOperand(s5, kLevelOffset));
3194     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3195              Operand(s2));
3196   }
3197   __ Sub32(s2, s2, Operand(1));
3198   __ Sw(s2, MemOperand(s5, kLevelOffset));
3199   __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
3200   __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3201 
3202   // Leave the API exit frame.
3203   __ bind(&leave_exit_frame);
3204 
3205   if (stack_space_operand == nullptr) {
3206     DCHECK_NE(stack_space, 0);
3207     __ li(s3, Operand(stack_space));
3208   } else {
3209     DCHECK_EQ(stack_space, 0);
3210     STATIC_ASSERT(kCArgSlotCount == 0);
3211     __ Ld(s3, *stack_space_operand);
3212   }
3213 
3214   static constexpr bool kDontSaveDoubles = false;
3215   static constexpr bool kRegisterContainsSlotCount = false;
3216   __ LeaveExitFrame(kDontSaveDoubles, s3, NO_EMIT_RETURN,
3217                     kRegisterContainsSlotCount);
3218 
3219   // Check if the function scheduled an exception.
3220   __ LoadRoot(a4, RootIndex::kTheHoleValue);
3221   __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3222   __ Ld(a5, MemOperand(kScratchReg));
3223   __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5),
3224             Label::Distance::kNear);
3225 
3226   __ Ret();
3227 
3228   // Re-throw by promoting a scheduled exception.
3229   __ bind(&promote_scheduled_exception);
3230   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3231 
3232   // HandleScope limit has changed. Delete allocated extensions.
3233   __ bind(&delete_allocated_handles);
3234   __ Sd(s1, MemOperand(s5, kLimitOffset));
3235   __ Move(s3, a0);
3236   __ PrepareCallCFunction(1, s1);
3237   __ li(a0, ExternalReference::isolate_address(isolate));
3238   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3239   __ Move(a0, s3);
3240   __ Branch(&leave_exit_frame);
3241 }
3242 
3243 }  // namespace
3244 
3245 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3246   // ----------- S t a t e -------------
3247   //  -- cp                  : context
3248   //  -- a1                  : api function address
3249   //  -- a2                  : arguments count
3250   //  -- a3                  : call data
3251   //  -- a0                  : holder
3252   //  --
3253   //  -- sp[0]               : receiver
3254   //  -- sp[8]               : first argument
3255   //  -- ...
3256   //  -- sp[(argc) * 8]      : last argument
3257   // -----------------------------------
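  // The api function address points at an embedder callback with the standard
  // signature (illustrative sketch of the embedder side, not part of this
  // file):
  //   void Callback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  //     info.GetReturnValue().Set(/* some value */);
  //   }
  // This builtin builds the FunctionCallbackInfo (implicit_args, values_ and
  // length_) on the stack before invoking it via CallApiFunctionAndReturn.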
3258   UseScratchRegisterScope temps(masm);
3259   temps.Include(t0, t1);
3260   Register api_function_address = a1;
3261   Register argc = a2;
3262   Register call_data = a3;
3263   Register holder = a0;
3264   Register scratch = temps.Acquire();
3265   Register base = temps.Acquire();  // For addressing MemOperands on the stack.
3266 
3267   DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
3268                      base));
3269 
3270   using FCA = FunctionCallbackArguments;
3271 
3272   STATIC_ASSERT(FCA::kArgsLength == 6);
3273   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3274   STATIC_ASSERT(FCA::kDataIndex == 4);
3275   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3276   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3277   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3278   STATIC_ASSERT(FCA::kHolderIndex == 0);
3279 
3280   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3281   //
3282   // Target state:
3283   //   sp[0 * kSystemPointerSize]: kHolder
3284   //   sp[1 * kSystemPointerSize]: kIsolate
3285   //   sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3286   //   sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3287   //   sp[4 * kSystemPointerSize]: kData
3288   //   sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3289 
3290   // Set up the base register for addressing through MemOperands. It will point
3291   // at the receiver (located at sp + argc * kSystemPointerSize).
3292   __ CalcScaledAddress(base, sp, argc, kSystemPointerSizeLog2);
3293 
3294   // Reserve space on the stack.
3295   __ Sub64(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
3296 
3297   // kHolder.
3298   __ Sd(holder, MemOperand(sp, 0 * kSystemPointerSize));
3299 
3300   // kIsolate.
3301   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3302   __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3303 
3304   // kReturnValueDefaultValue and kReturnValue.
3305   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3306   __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3307   __ Sd(scratch, MemOperand(sp, 3 * kSystemPointerSize));
3308 
3309   // kData.
3310   __ Sd(call_data, MemOperand(sp, 4 * kSystemPointerSize));
3311 
3312   // kNewTarget.
3313   __ Sd(scratch, MemOperand(sp, 5 * kSystemPointerSize));
3314 
3315   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3316   // We use it below to set up the FunctionCallbackInfo object.
3317   __ Move(scratch, sp);
3318 
3319   // Allocate the FunctionCallbackInfo structure in the arguments' space since
3320   // it's not controlled by GC.
3321   static constexpr int kApiStackSpace = 4;
3322   static constexpr bool kDontSaveDoubles = false;
3323   FrameScope frame_scope(masm, StackFrame::MANUAL);
3324   __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3325 
3326   // EnterExitFrame may align the sp.
3327 
3328   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3329   // Arguments are after the return address (pushed by EnterExitFrame()).
3330   __ Sd(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3331 
3332   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3333   // on the stack).
3334   __ Add64(scratch, scratch,
3335            Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3336   __ Sd(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3337 
3338   // FunctionCallbackInfo::length_.
3339   // Stored as an int field; the 32-bit value is written to the low half of the
3340   // 64-bit stack slot.
3341   __ Sw(argc, MemOperand(sp, 3 * kSystemPointerSize));
3342 
3343   // We also store the number of stack slots to drop after returning from the
3344   // API function here.
3345   // Note: Unlike on other architectures, this stores the number of slots to
3346   // drop, not the number of bytes.
3347   __ Add64(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3348   __ Sd(scratch, MemOperand(sp, 4 * kSystemPointerSize));
3349 
3350   // v8::InvocationCallback's argument.
3351   DCHECK(!AreAliased(api_function_address, scratch, a0));
3352   __ Add64(a0, sp, Operand(1 * kSystemPointerSize));
3353 
3354   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3355 
3356   // There are two stack slots above the arguments we constructed on the stack.
3357   // TODO(jgruber): Document what these arguments are.
3358   static constexpr int kStackSlotsAboveFCA = 2;
3359   MemOperand return_value_operand(
3360       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3361 
3362   static constexpr int kUseStackSpaceOperand = 0;
3363   MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
3364 
3365   AllowExternalCallThatCantCauseGC scope(masm);
3366   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3367                            kUseStackSpaceOperand, &stack_space_operand,
3368                            return_value_operand);
3369 }
3370 
3371 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3372   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3373   // name below the exit frame to make GC aware of them.
3374   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3375   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3376   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3377   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3378   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3379   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3380   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3381   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
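  // The callback invoked here is an embedder accessor getter with the standard
  // signature (illustrative sketch, not part of this file):
  //   void Getter(v8::Local<v8::Name> name,
  //               const v8::PropertyCallbackInfo<v8::Value>& info);
  // The args_ array built below backs the PropertyCallbackInfo passed to it.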
3382 
3383   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3384   Register holder = ApiGetterDescriptor::HolderRegister();
3385   Register callback = ApiGetterDescriptor::CallbackRegister();
3386   Register scratch = a4;
3387   DCHECK(!AreAliased(receiver, holder, callback, scratch));
3388 
3389   Register api_function_address = a2;
3390 
3391   // Here and below +1 is for name() pushed after the args_ array.
3392   using PCA = PropertyCallbackArguments;
3393   __ Sub64(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
3394   __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
3395   __ LoadAnyTaggedField(scratch,
3396                         FieldMemOperand(callback, AccessorInfo::kDataOffset));
3397   __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
3398   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3399   __ Sd(scratch,
3400         MemOperand(sp, (PCA::kReturnValueOffset + 1) * kSystemPointerSize));
3401   __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3402                                     kSystemPointerSize));
3403   __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3404   __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kSystemPointerSize));
3405   __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kSystemPointerSize));
3406   // should_throw_on_error -> false
3407   DCHECK_EQ(0, Smi::zero().ptr());
3408   __ Sd(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
3409                                      kSystemPointerSize));
3410   __ LoadTaggedPointerField(
3411       scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3412   __ Sd(scratch, MemOperand(sp, 0 * kSystemPointerSize));
3413 
3414   // v8::PropertyCallbackInfo::args_ array and name handle.
3415   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3416 
3417   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3418   __ Move(a0, sp);                                    // a0 = Handle<Name>
3419   __ Add64(a1, a0, Operand(1 * kSystemPointerSize));  // a1 = v8::PCI::args_
3420 
3421   const int kApiStackSpace = 1;
3422   FrameScope frame_scope(masm, StackFrame::MANUAL);
3423   __ EnterExitFrame(false, kApiStackSpace);
3424 
3425   // Create v8::PropertyCallbackInfo object on the stack and initialize
3426   // its args_ field.
3427   __ Sd(a1, MemOperand(sp, 1 * kSystemPointerSize));
3428   __ Add64(a1, sp, Operand(1 * kSystemPointerSize));
3429   // a1 = v8::PropertyCallbackInfo&
3430 
3431   ExternalReference thunk_ref =
3432       ExternalReference::invoke_accessor_getter_callback();
3433 
3434   __ LoadTaggedPointerField(
3435       scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3436   __ Ld(api_function_address,
3437         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3438 
3439   // +3 is to skip the prologue, return address and name handle.
3440   MemOperand return_value_operand(
3441       fp,
3442       (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3443   MemOperand* const kUseStackSpaceConstant = nullptr;
3444   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3445                            kStackUnwindSpace, kUseStackSpaceConstant,
3446                            return_value_operand);
3447 }
3448 
3449 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3450   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3451   // purpose Code object) to be able to call into C functions that may trigger
3452   // GC and thus move the caller.
3453   //
3454   // DirectCEntry places the return address on the stack (updated by the GC),
3455   // making the call GC safe. The irregexp backend relies on this.
3456 
3457   // Make room for arguments to fit the C calling convention. Callers use
3458   // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3459   // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3460   // after the call.
3461   __ Add64(sp, sp, -kCArgsSlotsSize);
3462 
3463   __ Sd(ra, MemOperand(sp, kCArgsSlotsSize));  // Store the return address.
3464   __ Call(t6);                                 // Call the C++ function.
3465   __ Ld(t6, MemOperand(sp, kCArgsSlotsSize));  // Return to calling code.
3466 
3467   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3468     // In case of an error the return address may point to a memory area
3469     // filled with kZapValue by the GC. Dereference the address and check for
3470     // this.
3471     __ Uld(a4, MemOperand(t6));
3472     __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3473               Operand(reinterpret_cast<uint64_t>(kZapValue)));
3474   }
3475 
3476   __ Jump(t6);
3477 }
3478 
3479 namespace {
3480 
3481 // This code tries to be close to ia32 code so that any changes can be
3482 // easily ported.
3483 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3484                                   DeoptimizeKind deopt_kind) {
3485   Isolate* isolate = masm->isolate();
3486 
3487   // Unlike on ARM we don't save all the registers, just the useful ones.
3488   // For the rest, there are gaps on the stack, so the offsets remain the same.
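  // The code below saves the FPU and GP registers to the stack, allocates a
  // Deoptimizer, copies the saved registers and the current frame contents
  // into its input FrameDescription, asks the Deoptimizer to compute the
  // output frames, then materializes those frames on the stack and resumes at
  // the continuation of the last one.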
3489   const int kNumberOfRegisters = Register::kNumRegisters;
3490 
3491   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3492   RegList saved_regs = restored_regs | sp | ra;
3493 
3494   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3495 
3496   // Save all double FPU registers before messing with them.
3497   __ Sub64(sp, sp, Operand(kDoubleRegsSize));
3498   const RegisterConfiguration* config = RegisterConfiguration::Default();
3499   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3500     int code = config->GetAllocatableDoubleCode(i);
3501     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3502     int offset = code * kDoubleSize;
3503     __ StoreDouble(fpu_reg, MemOperand(sp, offset));
3504   }
3505 
3506   // Push saved_regs (needed to populate FrameDescription::registers_).
3507   // Leave gaps for other registers.
3508   __ Sub64(sp, sp, kNumberOfRegisters * kSystemPointerSize);
3509   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3510     if ((saved_regs.bits() & (1 << i)) != 0) {
3511       __ Sd(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
3512     }
3513   }
3514 
3515   __ li(a2,
3516         ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3517   __ Sd(fp, MemOperand(a2));
3518 
3519   const int kSavedRegistersAreaSize =
3520       (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
3521 
3522   // Get the return address (the address of the location in the code object,
3523   // used for lazy deoptimization) into a2 and compute the fp-to-sp delta in
3524   // register a3.
3525   __ Move(a2, ra);
3526   __ Add64(a3, sp, Operand(kSavedRegistersAreaSize));
3527 
3528   __ Sub64(a3, fp, a3);
3529 
3530   // Allocate a new deoptimizer object.
3531   __ PrepareCallCFunction(5, a4);
3532   // Pass five arguments, all of which fit in argument registers.
3533   __ Move(a0, zero_reg);
3534   Label context_check;
3535   __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3536   __ JumpIfSmi(a1, &context_check);
3537   __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3538   __ bind(&context_check);
3539   __ li(a1, Operand(static_cast<int64_t>(deopt_kind)));
3540   // a2: code object address
3541   // a3: fp-to-sp delta
3542   __ li(a4, ExternalReference::isolate_address(isolate));
3543 
3544   // Call Deoptimizer::New().
3545   {
3546     AllowExternalCallThatCantCauseGC scope(masm);
3547     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3548   }
3549 
3550   // Preserve "deoptimizer" object in register a0 and get the input
3551   // frame descriptor pointer to a1 (deoptimizer->input_);
3552   __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3553 
3554   // Copy core registers into FrameDescription::registers_[kNumRegisters].
3555   DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3556   for (int i = 0; i < kNumberOfRegisters; i++) {
3557     int offset =
3558         (i * kSystemPointerSize) + FrameDescription::registers_offset();
3559     if ((saved_regs.bits() & (1 << i)) != 0) {
3560       __ Ld(a2, MemOperand(sp, i * kSystemPointerSize));
3561       __ Sd(a2, MemOperand(a1, offset));
3562     } else if (FLAG_debug_code) {
3563       __ li(a2, kDebugZapValue);
3564       __ Sd(a2, MemOperand(a1, offset));
3565     }
3566   }
3567 
3568   int double_regs_offset = FrameDescription::double_registers_offset();
3569   // Copy FPU registers to
3570   // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3571   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3572     int code = config->GetAllocatableDoubleCode(i);
3573     int dst_offset = code * kDoubleSize + double_regs_offset;
3574     int src_offset =
3575         code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
3576     __ LoadDouble(ft0, MemOperand(sp, src_offset));
3577     __ StoreDouble(ft0, MemOperand(a1, dst_offset));
3578   }
3579 
3580   // Remove the saved registers from the stack.
3581   __ Add64(sp, sp, Operand(kSavedRegistersAreaSize));
3582 
3583   // Compute a pointer to the unwinding limit in register a2; that is
3584   // the first stack slot not part of the input frame.
3585   __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3586   __ Add64(a2, a2, sp);
3587 
3588   // Unwind the stack down to - but not including - the unwinding
3589   // limit and copy the contents of the activation frame to the input
3590   // frame description.
3591   __ Add64(a3, a1, Operand(FrameDescription::frame_content_offset()));
3592   Label pop_loop;
3593   Label pop_loop_header;
3594   __ BranchShort(&pop_loop_header);
3595   __ bind(&pop_loop);
3596   __ pop(a4);
3597   __ Sd(a4, MemOperand(a3, 0));
3598   __ Add64(a3, a3, sizeof(uint64_t));
3599   __ bind(&pop_loop_header);
3600   __ Branch(&pop_loop, ne, a2, Operand(sp), Label::Distance::kNear);
3601   // Compute the output frame in the deoptimizer.
3602   __ push(a0);  // Preserve deoptimizer object across call.
3603   // a0: deoptimizer object; a1: scratch.
3604   __ PrepareCallCFunction(1, a1);
3605   // Call Deoptimizer::ComputeOutputFrames().
3606   {
3607     AllowExternalCallThatCantCauseGC scope(masm);
3608     __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3609   }
3610   __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
3611 
3612   __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3613 
3614   // Replace the current (input) frame with the output frames.
3615   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3616   // Outer loop state: a4 = current "FrameDescription** output_",
3617   // a1 = one past the last FrameDescription**.
3618   __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3619   __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
3620   __ CalcScaledAddress(a1, a4, a1, kSystemPointerSizeLog2);
3621   __ BranchShort(&outer_loop_header);
3622   __ bind(&outer_push_loop);
3623   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3624   __ Ld(a2, MemOperand(a4, 0));  // output_[ix]
3625   __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3626   __ BranchShort(&inner_loop_header);
3627   __ bind(&inner_push_loop);
3628   __ Sub64(a3, a3, Operand(sizeof(uint64_t)));
3629   __ Add64(a6, a2, Operand(a3));
3630   __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3631   __ push(a7);
3632   __ bind(&inner_loop_header);
3633   __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
3634 
3635   __ Add64(a4, a4, Operand(kSystemPointerSize));
3636   __ bind(&outer_loop_header);
3637   __ Branch(&outer_push_loop, lt, a4, Operand(a1));
3638 
3639   __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3640   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3641     int code = config->GetAllocatableDoubleCode(i);
3642     const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3643     int src_offset = code * kDoubleSize + double_regs_offset;
3644     __ LoadDouble(fpu_reg, MemOperand(a1, src_offset));
3645   }
3646 
3647   // Push pc and continuation from the last output frame.
3648   __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
3649   __ push(a6);
3650   __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3651   __ push(a6);
3652 
3653   // t3 is used below as a scratch to address the FrameDescription, so it must
3654   // not itself be part of the restored registers.
3655   DCHECK(!(restored_regs.has(t3)));
3656   // Restore the registers from the last output frame.
3657   __ Move(t3, a2);
3658   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3659     int offset =
3660         (i * kSystemPointerSize) + FrameDescription::registers_offset();
3661     if ((restored_regs.bits() & (1 << i)) != 0) {
3662       __ Ld(ToRegister(i), MemOperand(t3, offset));
3663     }
3664   }
3665 
3666   __ pop(t6);  // Get continuation, leave pc on stack.
3667   __ pop(ra);
3668   __ Jump(t6);
3669   __ stop();
3670 }
3671 
3672 }  // namespace
3673 
3674 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3675   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3676 }
3677 
3678 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3679   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3680 }
3681 
3682 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3683   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3684 }
3685 
3686 namespace {
3687 
3688 // Restarts execution either at the current or next (in execution order)
3689 // bytecode. If there is baseline code on the shared function info, converts an
3690 // interpreter frame into a baseline frame and continues execution in baseline
3691 // code. Otherwise execution continues with bytecode.
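// In pseudocode (derived from the code below):
//   code = closure.shared.function_data
//   if (!is_osr && code is not baseline code) tail-call the interpreter entry
//   if (the feedback vector is missing) install baseline code via the runtime
//     and retry from the start
//   pc = baseline_pc_for_(next_executed_)bytecode_offset(code, offset, bytecodes)
//   replace the bytecode offset slot with the feedback vector and jump to pc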
3692 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3693                                          bool next_bytecode,
3694                                          bool is_osr = false) {
3695   Label start;
3696   __ bind(&start);
3697 
3698   // Get function from the frame.
3699   Register closure = a1;
3700   __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3701 
3702   // Get the Code object from the shared function info.
3703   Register code_obj = s1;
3704   __ LoadTaggedPointerField(
3705       code_obj,
3706       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3707   __ LoadTaggedPointerField(
3708       code_obj,
3709       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3710 
3711   // Check if we have baseline code. For OSR entry it is safe to assume we
3712   // always have baseline code.
3713   if (!is_osr) {
3714     Label start_with_baseline;
3715     UseScratchRegisterScope temps(masm);
3716     Register scratch = temps.Acquire();
3717     __ GetObjectType(code_obj, scratch, scratch);
3718     __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
3719 
3720     // Start with bytecode as there is no baseline code.
3721     Builtin builtin_id = next_bytecode
3722                              ? Builtin::kInterpreterEnterAtNextBytecode
3723                              : Builtin::kInterpreterEnterAtBytecode;
3724     __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3725             RelocInfo::CODE_TARGET);
3726 
3727     // Start with baseline code.
3728     __ bind(&start_with_baseline);
3729   } else if (FLAG_debug_code) {
3730     UseScratchRegisterScope temps(masm);
3731     Register scratch = temps.Acquire();
3732     __ GetObjectType(code_obj, scratch, scratch);
3733     __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
3734               Operand(CODET_TYPE));
3735   }
3736   if (FLAG_debug_code) {
3737     UseScratchRegisterScope temps(masm);
3738     Register scratch = temps.Acquire();
3739     AssertCodeIsBaseline(masm, code_obj, scratch);
3740   }
3741   // Replace BytecodeOffset with the feedback vector.
3742   Register feedback_vector = a2;
3743   __ LoadTaggedPointerField(
3744       feedback_vector,
3745       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3746   __ LoadTaggedPointerField(
3747       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
3748   Label install_baseline_code;
3749   // Check if feedback vector is valid. If not, call prepare for baseline to
3750   // allocate it.
3751   UseScratchRegisterScope temps(masm);
3752   Register type = temps.Acquire();
3753   __ GetObjectType(feedback_vector, type, type);
3754   __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
3755   // Save BytecodeOffset from the stack frame.
3756   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3757               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3758   // Replace BytecodeOffset with the feedback vector.
3759   __ Sd(feedback_vector,
3760         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3761   feedback_vector = no_reg;
3762 
3763   // Compute baseline pc for bytecode offset.
3764   ExternalReference get_baseline_pc_extref;
3765   if (next_bytecode || is_osr) {
3766     get_baseline_pc_extref =
3767         ExternalReference::baseline_pc_for_next_executed_bytecode();
3768   } else {
3769     get_baseline_pc_extref =
3770         ExternalReference::baseline_pc_for_bytecode_offset();
3771   }
3772 
3773   Register get_baseline_pc = a3;
3774   __ li(get_baseline_pc, get_baseline_pc_extref);
3775 
3776   // If the code deoptimizes during the implicit function entry stack interrupt
3777   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3778   // not a valid bytecode offset.
3779   // TODO(pthier): Investigate if it is feasible to handle this special case
3780   // in TurboFan instead of here.
3781   Label valid_bytecode_offset, function_entry_bytecode;
3782   if (!is_osr) {
3783     __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
3784               Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3785                       kFunctionEntryBytecodeOffset));
3786   }
3787 
3788   __ Sub64(kInterpreterBytecodeOffsetRegister,
3789            kInterpreterBytecodeOffsetRegister,
3790            (BytecodeArray::kHeaderSize - kHeapObjectTag));
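  // kInterpreterBytecodeOffsetRegister now holds the plain bytecode offset,
  // with the BytecodeArray header stripped, which is what is passed to the
  // baseline-PC helper as its second argument below.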
3791 
3792   __ bind(&valid_bytecode_offset);
3793   // Get bytecode array from the stack frame.
3794   __ Ld(kInterpreterBytecodeArrayRegister,
3795         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3796   __ Push(kInterpreterAccumulatorRegister);
3797   {
3798     Register arg_reg_1 = a0;
3799     Register arg_reg_2 = a1;
3800     Register arg_reg_3 = a2;
3801     __ Move(arg_reg_1, code_obj);
3802     __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3803     __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
3804     FrameScope scope(masm, StackFrame::INTERNAL);
3805     __ CallCFunction(get_baseline_pc, 3, 0);
3806   }
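  // The helper's result comes back in kReturnRegister0 (a0): the offset of
  // the matching baseline PC within the code object. Adding it here, plus the
  // Code header adjustment below, yields the address to jump to.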
3807   __ Add64(code_obj, code_obj, kReturnRegister0);
3808   __ Pop(kInterpreterAccumulatorRegister);
3809 
3810   if (is_osr) {
3811     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
3812     // Sparkplug here.
3813     __ Ld(kInterpreterBytecodeArrayRegister,
3814           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3815     ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
3816     Generate_OSREntry(masm, code_obj,
3817                       Operand(Code::kHeaderSize - kHeapObjectTag));
3818   } else {
3819     __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
3820     __ Jump(code_obj);
3821   }
3822   __ Trap();  // Unreachable.
3823 
3824   if (!is_osr) {
3825     __ bind(&function_entry_bytecode);
3826     // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3827     // address of the first bytecode.
3828     __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
3829     if (next_bytecode) {
3830       __ li(get_baseline_pc,
3831             ExternalReference::baseline_pc_for_bytecode_offset());
3832     }
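    // With the offset reset to 0, the plain baseline_pc_for_bytecode_offset
    // helper yields the baseline PC of the first bytecode, which is the
    // desired entry point even in the "next bytecode" case.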
3833     __ Branch(&valid_bytecode_offset);
3834   }
3835 
3836   __ bind(&install_baseline_code);
3837   {
3838     FrameScope scope(masm, StackFrame::INTERNAL);
3839     __ Push(kInterpreterAccumulatorRegister);
3840     __ Push(closure);
3841     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3842     __ Pop(kInterpreterAccumulatorRegister);
3843   }
3844   // Retry from the start after installing baseline code.
3845   __ Branch(&start);
3846 }
3847 
3848 }  // namespace
3849 
3850 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3851     MacroAssembler* masm) {
3852   Generate_BaselineOrInterpreterEntry(masm, false);
3853 }
3854 
3855 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3856     MacroAssembler* masm) {
3857   Generate_BaselineOrInterpreterEntry(masm, true);
3858 }
3859 
3860 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3861     MacroAssembler* masm) {
3862   Generate_BaselineOrInterpreterEntry(masm, false, true);
3863 }
3864 
3865 #undef __
3866 
3867 }  // namespace internal
3868 }  // namespace v8
3869 
3870 #endif  // V8_TARGET_ARCH_RISCV64
3871