1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_ARM64
6 
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
11 #include "src/codegen/macro-assembler-inl.h"
12 #include "src/codegen/register-configuration.h"
13 #include "src/debug/debug.h"
14 #include "src/deoptimizer/deoptimizer.h"
15 #include "src/execution/frame-constants.h"
16 #include "src/execution/frames.h"
17 #include "src/heap/heap-inl.h"
18 #include "src/logging/counters.h"
19 #include "src/objects/cell.h"
20 #include "src/objects/foreign.h"
21 #include "src/objects/heap-number.h"
22 #include "src/objects/instance-type.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27 
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-linkage.h"
30 #include "src/wasm/wasm-objects.h"
31 #endif  // V8_ENABLE_WEBASSEMBLY
32 
33 #if defined(V8_OS_WIN)
34 #include "src/diagnostics/unwinding-info-win64.h"
35 #endif  // V8_OS_WIN
36 
37 namespace v8 {
38 namespace internal {
39 
40 #define __ ACCESS_MASM(masm)
41 
42 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
43   __ CodeEntry();
44 
45   __ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
46   __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
47           RelocInfo::CODE_TARGET);
48 }
49 
50 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
51                                            Runtime::FunctionId function_id) {
52   ASM_CODE_COMMENT(masm);
53   // ----------- S t a t e -------------
54   //  -- x0 : actual argument count
55   //  -- x1 : target function (preserved for callee)
56   //  -- x3 : new target (preserved for callee)
57   // -----------------------------------
58   {
59     FrameScope scope(masm, StackFrame::INTERNAL);
60     // Push a copy of the target function, the new target and the actual
61     // argument count.
62     __ SmiTag(kJavaScriptCallArgCountRegister);
63     __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
64             kJavaScriptCallArgCountRegister, padreg);
65     // Push another copy as a parameter to the runtime call.
66     __ PushArgument(kJavaScriptCallTargetRegister);
67 
68     __ CallRuntime(function_id, 1);
69     __ Mov(x2, x0);
70 
71     // Restore target function, new target and actual argument count.
72     __ Pop(padreg, kJavaScriptCallArgCountRegister,
73            kJavaScriptCallNewTargetRegister, kJavaScriptCallTargetRegister);
74     __ SmiUntag(kJavaScriptCallArgCountRegister);
75   }
76 
77   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
78   __ JumpCodeTObject(x2);
79 }
80 
81 namespace {
82 
83 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
84   // ----------- S t a t e -------------
85   //  -- x0     : number of arguments
86   //  -- x1     : constructor function
87   //  -- x3     : new target
88   //  -- cp     : context
89   //  -- lr     : return address
90   //  -- sp[...]: constructor arguments
91   // -----------------------------------
92 
93   ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
94   Label stack_overflow;
95 
96   __ StackOverflowCheck(x0, &stack_overflow);
97 
98   // Enter a construct frame.
99   {
100     FrameScope scope(masm, StackFrame::CONSTRUCT);
101     Label already_aligned;
102     Register argc = x0;
103 
104     if (FLAG_debug_code) {
105       // Check that FrameScope pushed the context onto the stack already.
106       __ Peek(x2, 0);
107       __ Cmp(x2, cp);
108       __ Check(eq, AbortReason::kUnexpectedValue);
109     }
110 
111     // Push number of arguments.
112     __ SmiTag(x11, argc);
113     __ Push(x11, padreg);
114 
115     // Round up to maintain alignment.
116     Register slot_count = x2;
117     Register slot_count_without_rounding = x12;
118     __ Add(slot_count_without_rounding, argc, 1);
119     __ Bic(slot_count, slot_count_without_rounding, 1);
120     __ Claim(slot_count);
121 
122     // Preserve the incoming parameters on the stack.
123     __ LoadRoot(x4, RootIndex::kTheHoleValue);
124 
125     // Compute a pointer to the slot immediately above the location on the
126     // stack to which arguments will be later copied.
127     __ SlotAddress(x2, argc);
128 
129     // Store padding, if needed.
130     __ Tbnz(slot_count_without_rounding, 0, &already_aligned);
131     __ Str(padreg, MemOperand(x2));
132     __ Bind(&already_aligned);
133 
134     // TODO(victorgomes): When the arguments adaptor is completely removed, we
135     // should get the formal parameter count and copy the arguments in its
136     // correct position (including any undefined), instead of delaying this to
137     // InvokeFunction.
138 
139     // Copy arguments to the expression stack.
140     {
141       Register count = x2;
142       Register dst = x10;
143       Register src = x11;
144       __ SlotAddress(dst, 0);
145       // Poke the hole (receiver).
146       __ Str(x4, MemOperand(dst));
147       __ Add(dst, dst, kSystemPointerSize);  // Skip receiver.
148       __ Add(src, fp,
149              StandardFrameConstants::kCallerSPOffset +
150                  kSystemPointerSize);  // Skip receiver.
151       __ Sub(count, argc, kJSArgcReceiverSlots);
152       __ CopyDoubleWords(dst, src, count);
153     }
154 
155     // ----------- S t a t e -------------
156     //  --                           x0: number of arguments (untagged)
157     //  --                           x1: constructor function
158     //  --                           x3: new target
159     // If argc is odd:
160     //  --     sp[0*kSystemPointerSize]: the hole (receiver)
161     //  --     sp[1*kSystemPointerSize]: argument 1
162     //  --             ...
163     //  -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
164     //  -- sp[(n+0)*kSystemPointerSize]: argument n
165     //  -- sp[(n+1)*kSystemPointerSize]: padding
166     //  -- sp[(n+2)*kSystemPointerSize]: padding
167     //  -- sp[(n+3)*kSystemPointerSize]: number of arguments (tagged)
168     //  -- sp[(n+4)*kSystemPointerSize]: context (pushed by FrameScope)
169     // If argc is even:
170     //  --     sp[0*kSystemPointerSize]: the hole (receiver)
171     //  --     sp[1*kSystemPointerSize]: argument 1
172     //  --             ...
173     //  -- sp[(n-1)*kSystemPointerSize]: argument (n - 1)
174     //  -- sp[(n+0)*kSystemPointerSize]: argument n
175     //  -- sp[(n+1)*kSystemPointerSize]: padding
176     //  -- sp[(n+2)*kSystemPointerSize]: number of arguments (tagged)
177     //  -- sp[(n+3)*kSystemPointerSize]: context (pushed by FrameScope)
178     // -----------------------------------
179 
180     // Call the function.
181     __ InvokeFunctionWithNewTarget(x1, x3, argc, InvokeType::kCall);
182 
183     // Restore the context from the frame.
184     __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
185     // Restore smi-tagged arguments count from the frame. Use fp relative
186     // addressing to avoid the circular dependency between padding existence and
187     // argc parity.
188     __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
189     // Leave construct frame.
190   }
191 
192   // Remove caller arguments from the stack and return.
193   __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
194   __ Ret();
195 
196   __ Bind(&stack_overflow);
197   {
198     FrameScope scope(masm, StackFrame::INTERNAL);
199     __ CallRuntime(Runtime::kThrowStackOverflow);
200     __ Unreachable();
201   }
202 }
203 
204 }  // namespace
205 
206 // The construct stub for ES5 constructor functions and ES6 class constructors.
207 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
208   // ----------- S t a t e -------------
209   //  -- x0     : number of arguments
210   //  -- x1     : constructor function
211   //  -- x3     : new target
212   //  -- lr     : return address
213   //  -- cp     : context pointer
214   //  -- sp[...]: constructor arguments
215   // -----------------------------------
216 
217   ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");
218 
219   FrameScope scope(masm, StackFrame::MANUAL);
220   // Enter a construct frame.
221   __ EnterFrame(StackFrame::CONSTRUCT);
222   Label post_instantiation_deopt_entry, not_create_implicit_receiver;
223 
224   if (FLAG_debug_code) {
225     // Check that FrameScope pushed the context onto the stack already.
226     __ Peek(x2, 0);
227     __ Cmp(x2, cp);
228     __ Check(eq, AbortReason::kUnexpectedValue);
229   }
230 
231   // Preserve the incoming parameters on the stack.
232   __ SmiTag(x0);
233   __ Push(x0, x1, padreg, x3);
234 
235   // ----------- S t a t e -------------
236   //  --        sp[0*kSystemPointerSize]: new target
237   //  --        sp[1*kSystemPointerSize]: padding
238   //  -- x1 and sp[2*kSystemPointerSize]: constructor function
239   //  --        sp[3*kSystemPointerSize]: number of arguments (tagged)
240   //  --        sp[4*kSystemPointerSize]: context (pushed by FrameScope)
241   // -----------------------------------
242 
243   __ LoadTaggedPointerField(
244       x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
245   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
246   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
247   __ JumpIfIsInRange(
248       w4, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
249       static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
250       &not_create_implicit_receiver);
251 
252   // If not derived class constructor: Allocate the new receiver object.
253   __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, x4,
254                       x5);
255 
256   __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
257 
258   __ B(&post_instantiation_deopt_entry);
259 
260   // Else: use TheHoleValue as receiver for constructor call
261   __ Bind(&not_create_implicit_receiver);
262   __ LoadRoot(x0, RootIndex::kTheHoleValue);
263 
264   // ----------- S t a t e -------------
265   //  --                                x0: receiver
266   //  -- Slot 4 / sp[0*kSystemPointerSize]: new target
267   //  -- Slot 3 / sp[1*kSystemPointerSize]: padding
268   //  -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
269   //  -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
270   //  -- Slot 0 / sp[4*kSystemPointerSize]: context
271   // -----------------------------------
272   // Deoptimizer enters here.
273   masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
274       masm->pc_offset());
275 
276   __ Bind(&post_instantiation_deopt_entry);
277 
278   // Restore new target from the top of the stack.
279   __ Peek(x3, 0 * kSystemPointerSize);
280 
281   // Restore constructor function and argument count.
282   __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
283   __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
284 
285   // Copy arguments to the expression stack. The called function pops the
286   // receiver along with its arguments, so we need an extra receiver on the
287   // stack, in case we have to return it later.
288 
289   // Overwrite the new target with a receiver.
290   __ Poke(x0, 0);
291 
292   // Push two further copies of the receiver. One will be popped by the called
293   // function. The second acts as padding if the number of arguments plus
294   // receiver is odd - pushing receiver twice avoids branching. It also means
295   // that we don't have to handle the even and odd cases specially on
296   // InvokeFunction's return, as top of stack will be the receiver in either
297   // case.
298   __ Push(x0, x0);
299 
300   // ----------- S t a t e -------------
301   //  --                              x3: new target
302   //  --                             x12: number of arguments (untagged)
303   //  --        sp[0*kSystemPointerSize]: implicit receiver (overwrite if argc
304   //  odd)
305   //  --        sp[1*kSystemPointerSize]: implicit receiver
306   //  --        sp[2*kSystemPointerSize]: implicit receiver
307   //  --        sp[3*kSystemPointerSize]: padding
308   //  -- x1 and sp[4*kSystemPointerSize]: constructor function
309   //  --        sp[5*kSystemPointerSize]: number of arguments (tagged)
310   //  --        sp[6*kSystemPointerSize]: context
311   // -----------------------------------
312 
313   // Round the number of arguments down to the next even number, and claim
314   // slots for the arguments. If the number of arguments was odd, the last
315   // argument will overwrite one of the receivers pushed above.
316   Register argc_without_receiver = x11;
317   __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
318   __ Bic(x10, x12, 1);
319 
320   // Check if we have enough stack space to push all arguments.
321   Label stack_overflow;
322   __ StackOverflowCheck(x10, &stack_overflow);
323   __ Claim(x10);
324 
325   // TODO(victorgomes): When the arguments adaptor is completely removed, we
326   // should get the formal parameter count and copy the arguments in its
327   // correct position (including any undefined), instead of delaying this to
328   // InvokeFunction.
329 
330   // Copy the arguments.
331   {
332     Register count = x2;
333     Register dst = x10;
334     Register src = x11;
335     __ Mov(count, argc_without_receiver);
336     __ Poke(x0, 0);          // Add the receiver.
337     __ SlotAddress(dst, 1);  // Skip receiver.
338     __ Add(src, fp,
339            StandardFrameConstants::kCallerSPOffset + kSystemPointerSize);
340     __ CopyDoubleWords(dst, src, count);
341   }
342 
343   // Call the function.
344   __ Mov(x0, x12);
345   __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
346 
347   // ----------- S t a t e -------------
348   //  -- sp[0*kSystemPointerSize]: implicit receiver
349   //  -- sp[1*kSystemPointerSize]: padding
350   //  -- sp[2*kSystemPointerSize]: constructor function
351   //  -- sp[3*kSystemPointerSize]: number of arguments
352   //  -- sp[4*kSystemPointerSize]: context
353   // -----------------------------------
354 
355   // Store offset of return address for deoptimizer.
356   masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
357       masm->pc_offset());
358 
359   // If the result is an object (in the ECMA sense), we should get rid
360   // of the receiver and use the result; see ECMA-262 section 13.2.2-7
361   // on page 74.
362   Label use_receiver, do_throw, leave_and_return, check_receiver;
363 
364   // If the result is undefined, we jump out to using the implicit receiver.
365   __ CompareRoot(x0, RootIndex::kUndefinedValue);
366   __ B(ne, &check_receiver);
367 
368   // Throw away the result of the constructor invocation and use the
369   // on-stack receiver as the result.
370   __ Bind(&use_receiver);
371   __ Peek(x0, 0 * kSystemPointerSize);
372   __ CompareRoot(x0, RootIndex::kTheHoleValue);
373   __ B(eq, &do_throw);
374 
375   __ Bind(&leave_and_return);
376   // Restore smi-tagged arguments count from the frame.
377   __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
378   // Leave construct frame.
379   __ LeaveFrame(StackFrame::CONSTRUCT);
380   // Remove caller arguments from the stack and return.
381   __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
382   __ Ret();
383 
384   // Otherwise we do a smi check and fall through to check if the return value
385   // is a valid receiver.
386   __ bind(&check_receiver);
387 
388   // If the result is a smi, it is *not* an object in the ECMA sense.
389   __ JumpIfSmi(x0, &use_receiver);
390 
391   // If the type of the result (stored in its map) is less than
392   // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
393   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
394   __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_and_return,
395                       ge);
396   __ B(&use_receiver);
397 
398   __ Bind(&do_throw);
399   // Restore the context from the frame.
400   __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
401   __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
402   __ Unreachable();
403 
404   __ Bind(&stack_overflow);
405   // Restore the context from the frame.
406   __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
407   __ CallRuntime(Runtime::kThrowStackOverflow);
408   __ Unreachable();
409 }
410 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
411   Generate_JSBuiltinsConstructStubHelper(masm);
412 }
413 
414 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
415   FrameScope scope(masm, StackFrame::INTERNAL);
416   __ PushArgument(x1);
417   __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
418   __ Unreachable();
419 }
420 
421 static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
422                                               Register code, Register scratch) {
423   // Verify that the code kind is baseline code via the CodeKind.
424   __ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
425   __ DecodeField<CodeT::KindField>(scratch);
426   __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
427   __ Assert(eq, AbortReason::kExpectedBaselineData);
428 }
429 
430 static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
431                                   Register scratch) {
432   DCHECK(!AreAliased(code, scratch));
433   return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
434 }
435 
436 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
437 // the more general dispatch.
438 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
439                                                     Register sfi_data,
440                                                     Register scratch1,
441                                                     Label* is_baseline) {
442   ASM_CODE_COMMENT(masm);
443   Label done;
444   __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
445   if (FLAG_debug_code) {
446     Label not_baseline;
447     __ B(ne, &not_baseline);
448     AssertCodeTIsBaseline(masm, sfi_data, scratch1);
449     __ B(eq, is_baseline);
450     __ Bind(&not_baseline);
451   } else {
452     __ B(eq, is_baseline);
453   }
454   __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
455   __ B(ne, &done);
456   __ LoadTaggedPointerField(
457       sfi_data,
458       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
459   __ Bind(&done);
460 }
461 
462 // static
463 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
464   // ----------- S t a t e -------------
465   //  -- x0 : the value to pass to the generator
466   //  -- x1 : the JSGeneratorObject to resume
467   //  -- lr : return address
468   // -----------------------------------
469 
470   // Store input value into generator object.
471   __ StoreTaggedField(
472       x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
473   __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
474                       kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
475   // Check that x1 is still valid, RecordWrite might have clobbered it.
476   __ AssertGeneratorObject(x1);
477 
478   // Load suspended function and context.
479   __ LoadTaggedPointerField(
480       x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
481   __ LoadTaggedPointerField(cp,
482                             FieldMemOperand(x4, JSFunction::kContextOffset));
483 
484   // Flood function if we are stepping.
485   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
486   Label stepping_prepared;
487   ExternalReference debug_hook =
488       ExternalReference::debug_hook_on_function_call_address(masm->isolate());
489   __ Mov(x10, debug_hook);
490   __ Ldrsb(x10, MemOperand(x10));
491   __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
492 
493   // Flood function if we need to continue stepping in the suspended generator.
494   ExternalReference debug_suspended_generator =
495       ExternalReference::debug_suspended_generator_address(masm->isolate());
496   __ Mov(x10, debug_suspended_generator);
497   __ Ldr(x10, MemOperand(x10));
498   __ CompareAndBranch(x10, Operand(x1), eq,
499                       &prepare_step_in_suspended_generator);
500   __ Bind(&stepping_prepared);
501 
502   // Check the stack for overflow. We are not trying to catch interruptions
503   // (i.e. debug break and preemption) here, so check the "real stack limit".
504   Label stack_overflow;
505   __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
506   __ Cmp(sp, x10);
507   __ B(lo, &stack_overflow);
508 
509   // Get number of arguments for generator function.
510   __ LoadTaggedPointerField(
511       x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
512   __ Ldrh(w10, FieldMemOperand(
513                    x10, SharedFunctionInfo::kFormalParameterCountOffset));
514 
515   __ Sub(x10, x10, kJSArgcReceiverSlots);
516   // Claim slots for arguments and receiver (rounded up to a multiple of two).
517   __ Add(x11, x10, 2);
518   __ Bic(x11, x11, 1);
519   __ Claim(x11);
520 
521   // Store padding (which might be replaced by the last argument).
522   __ Sub(x11, x11, 1);
523   __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
524 
525   // Poke receiver into highest claimed slot.
526   __ LoadTaggedPointerField(
527       x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
528   __ Poke(x5, __ ReceiverOperand(x10));
529 
530   // ----------- S t a t e -------------
531   //  -- x1                       : the JSGeneratorObject to resume
532   //  -- x4                       : generator function
533   //  -- x10                      : argument count
534   //  -- cp                       : generator context
535   //  -- lr                       : return address
536   //  -- sp[0 .. arg count]       : claimed for receiver and args
537   // -----------------------------------
538 
539   // Copy the function arguments from the generator object's register file.
540   __ LoadTaggedPointerField(
541       x5,
542       FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
543   {
544     Label loop, done;
545     __ Cbz(x10, &done);
546     __ SlotAddress(x12, x10);
547     __ Add(x5, x5, Operand(x10, LSL, kTaggedSizeLog2));
548     __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
549     __ Bind(&loop);
550     __ Sub(x10, x10, 1);
551     __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
552     __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
553     __ Cbnz(x10, &loop);
554     __ Bind(&done);
555   }
556 
557   // Underlying function needs to have bytecode available.
558   if (FLAG_debug_code) {
559     Label is_baseline;
560     __ LoadTaggedPointerField(
561         x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
562     __ LoadTaggedPointerField(
563         x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
564     GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
565     __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
566     __ Assert(eq, AbortReason::kMissingBytecodeArray);
567     __ bind(&is_baseline);
568   }
569 
570   // Resume (Ignition/TurboFan) generator object.
571   {
572     __ LoadTaggedPointerField(
573         x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
574     __ Ldrh(w0, FieldMemOperand(
575                     x0, SharedFunctionInfo::kFormalParameterCountOffset));
576     // We abuse new.target both to indicate that this is a resume call and to
577     // pass in the generator object.  In ordinary calls, new.target is always
578     // undefined because generator functions are non-constructable.
579     __ Mov(x3, x1);
580     __ Mov(x1, x4);
581     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
582     __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
583     __ JumpCodeTObject(x2);
584   }
585 
586   __ Bind(&prepare_step_in_if_stepping);
587   {
588     FrameScope scope(masm, StackFrame::INTERNAL);
589     // Push hole as receiver since we do not use it for stepping.
590     __ LoadRoot(x5, RootIndex::kTheHoleValue);
591     __ Push(x1, padreg, x4, x5);
592     __ CallRuntime(Runtime::kDebugOnFunctionCall);
593     __ Pop(padreg, x1);
594     __ LoadTaggedPointerField(
595         x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
596   }
597   __ B(&stepping_prepared);
598 
599   __ Bind(&prepare_step_in_suspended_generator);
600   {
601     FrameScope scope(masm, StackFrame::INTERNAL);
602     __ Push(x1, padreg);
603     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
604     __ Pop(padreg, x1);
605     __ LoadTaggedPointerField(
606         x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
607   }
608   __ B(&stepping_prepared);
609 
610   __ bind(&stack_overflow);
611   {
612     FrameScope scope(masm, StackFrame::INTERNAL);
613     __ CallRuntime(Runtime::kThrowStackOverflow);
614     __ Unreachable();  // This should be unreachable.
615   }
616 }
617 
618 namespace {
619 
620 // Called with the native C calling convention. The corresponding function
621 // signature is either:
622 //
623 //   using JSEntryFunction = GeneratedCode<Address(
624 //       Address root_register_value, Address new_target, Address target,
625 //       Address receiver, intptr_t argc, Address** argv)>;
626 // or
627 //   using JSEntryFunction = GeneratedCode<Address(
628 //       Address root_register_value, MicrotaskQueue* microtask_queue)>;
629 //
630 // Input is either:
631 //   x0: root_register_value.
632 //   x1: new_target.
633 //   x2: target.
634 //   x3: receiver.
635 //   x4: argc.
636 //   x5: argv.
637 // or
638 //   x0: root_register_value.
639 //   x1: microtask_queue.
640 // Output:
641 //   x0: result.
642 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
643                              Builtin entry_trampoline) {
644   Label invoke, handler_entry, exit;
645 
646   {
647     NoRootArrayScope no_root_array(masm);
648 
649 #if defined(V8_OS_WIN)
650     // In order to allow Windows debugging tools to reconstruct a call stack, we
651     // must generate information describing how to recover at least fp, sp, and
652     // pc for the calling frame. Here, JSEntry registers offsets to
653     // xdata_encoder which then emits the offset values as part of the unwind
654     // data accordingly.
655     win64_unwindinfo::XdataEncoder* xdata_encoder = masm->GetXdataEncoder();
656     if (xdata_encoder) {
657       xdata_encoder->onFramePointerAdjustment(
658           EntryFrameConstants::kDirectCallerFPOffset,
659           EntryFrameConstants::kDirectCallerSPOffset);
660     }
661 #endif
662 
663     __ PushCalleeSavedRegisters();
664 
665     // Set up the reserved register for 0.0.
666     __ Fmov(fp_zero, 0.0);
667 
668     // Initialize the root register.
669     // C calling convention. The first argument is passed in x0.
670     __ Mov(kRootRegister, x0);
671 
672 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
673     // Initialize the pointer cage base register.
674     __ LoadRootRelative(kPtrComprCageBaseRegister,
675                         IsolateData::cage_base_offset());
676 #endif
677   }
678 
679   // Set up fp. It points to the {fp, lr} pair pushed as the last step in
680   // PushCalleeSavedRegisters.
681   STATIC_ASSERT(
682       EntryFrameConstants::kCalleeSavedRegisterBytesPushedAfterFpLrPair == 0);
683   STATIC_ASSERT(EntryFrameConstants::kOffsetToCalleeSavedRegisters == 0);
684   __ Mov(fp, sp);
685 
686   // Build an entry frame (see layout below).
687 
688   // Push frame type markers.
689   __ Mov(x12, StackFrame::TypeToMarker(type));
690   __ Push(x12, xzr);
691 
692   __ Mov(x11, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
693                                         masm->isolate()));
694   __ Ldr(x10, MemOperand(x11));  // x10 = C entry FP.
695 
696   // Clear c_entry_fp, now we've loaded its value to be pushed on the stack.
697   // If the c_entry_fp is not already zero and we don't clear it, the
698   // SafeStackFrameIterator will assume we are executing C++ and miss the JS
699   // frames on top.
700   __ Str(xzr, MemOperand(x11));
701 
702   // Set js_entry_sp if this is the outermost JS call.
703   Label done;
704   ExternalReference js_entry_sp = ExternalReference::Create(
705       IsolateAddressId::kJSEntrySPAddress, masm->isolate());
706   __ Mov(x12, js_entry_sp);
707   __ Ldr(x11, MemOperand(x12));  // x11 = previous JS entry SP.
708 
709   // Select between the inner and outermost frame marker, based on the JS entry
710   // sp. We assert that the inner marker is zero, so we can use xzr to save a
711   // move instruction.
712   DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
713   __ Cmp(x11, 0);  // If x11 is zero, this is the outermost frame.
714   // x11 = JS entry frame marker.
715   __ Csel(x11, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
716   __ B(ne, &done);
717   __ Str(fp, MemOperand(x12));
718 
719   __ Bind(&done);
720 
721   __ Push(x10, x11);
722 
723   // The frame set up looks like this:
724   // sp[0] : JS entry frame marker.
725   // sp[1] : C entry FP.
726   // sp[2] : stack frame marker (0).
727   // sp[3] : stack frame marker (type).
728   // sp[4] : saved fp   <- fp points here.
729   // sp[5] : saved lr
730   // sp[6,24) : other saved registers
731 
732   // Jump to a faked try block that does the invoke, with a faked catch
733   // block that sets the pending exception.
734   __ B(&invoke);
735 
736   // Prevent the constant pool from being emitted between the record of the
737   // handler_entry position and the first instruction of the sequence here.
738   // There is no risk because Assembler::Emit() emits the instruction before
739   // checking for constant pool emission, but we do not want to depend on
740   // that.
741   {
742     Assembler::BlockPoolsScope block_pools(masm);
743 
744     // Store the current pc as the handler offset. It's used later to create the
745     // handler table.
746     __ BindExceptionHandler(&handler_entry);
747     masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
748 
749     // Caught exception: Store result (exception) in the pending exception
750     // field in the JSEnv and return a failure sentinel. Coming in here the
751     // fp will be invalid because UnwindAndFindHandler sets it to 0 to
752     // signal the existence of the JSEntry frame.
753     __ Mov(x10,
754            ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
755                                      masm->isolate()));
756   }
757   __ Str(x0, MemOperand(x10));
758   __ LoadRoot(x0, RootIndex::kException);
759   __ B(&exit);
760 
761   // Invoke: Link this frame into the handler chain.
762   __ Bind(&invoke);
763 
764   // Push new stack handler.
765   static_assert(StackHandlerConstants::kSize == 2 * kSystemPointerSize,
766                 "Unexpected offset for StackHandlerConstants::kSize");
767   static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
768                 "Unexpected offset for StackHandlerConstants::kNextOffset");
769 
770   // Link the current handler as the next handler.
771   __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
772                                         masm->isolate()));
773   __ Ldr(x10, MemOperand(x11));
774   __ Push(padreg, x10);
775 
776   // Set this new handler as the current one.
777   {
778     UseScratchRegisterScope temps(masm);
779     Register scratch = temps.AcquireX();
780     __ Mov(scratch, sp);
781     __ Str(scratch, MemOperand(x11));
782   }
783 
784   // If an exception not caught by another handler occurs, this handler
785   // returns control to the code after the B(&invoke) above, which
786   // restores all callee-saved registers (including cp and fp) to their
787   // saved values before returning a failure to C.
788   //
789   // Invoke the function by calling through JS entry trampoline builtin and
790   // pop the faked function when we return.
791   Handle<CodeT> trampoline_code =
792       masm->isolate()->builtins()->code_handle(entry_trampoline);
793   __ Call(trampoline_code, RelocInfo::CODE_TARGET);
794 
795   // Pop the stack handler and unlink this frame from the handler chain.
796   static_assert(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize,
797                 "Unexpected offset for StackHandlerConstants::kNextOffset");
798   __ Pop(x10, padreg);
799   __ Mov(x11, ExternalReference::Create(IsolateAddressId::kHandlerAddress,
800                                         masm->isolate()));
801   __ Drop(StackHandlerConstants::kSlotCount - 2);
802   __ Str(x10, MemOperand(x11));
803 
804   __ Bind(&exit);
805   // x0 holds the result.
806   // The stack pointer points to the top of the entry frame pushed on entry from
807   // C++ (at the beginning of this stub):
808   // sp[0] : JS entry frame marker.
809   // sp[1] : C entry FP.
810   // sp[2] : stack frame marker (0).
811   // sp[3] : stack frame marker (type).
812   // sp[4] : saved fp   <- fp might point here, or might be zero.
813   // sp[5] : saved lr
814   // sp[6,24) : other saved registers
815 
816   // Check if the current stack frame is marked as the outermost JS frame.
817   Label non_outermost_js_2;
818   {
819     Register c_entry_fp = x11;
820     __ PeekPair(x10, c_entry_fp, 0);
821     __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
822     __ B(ne, &non_outermost_js_2);
823     __ Mov(x12, js_entry_sp);
824     __ Str(xzr, MemOperand(x12));
825     __ Bind(&non_outermost_js_2);
826 
827     // Restore the top frame descriptors from the stack.
828     __ Mov(x12, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
829                                           masm->isolate()));
830     __ Str(c_entry_fp, MemOperand(x12));
831   }
832 
833   // Reset the stack to the callee saved registers.
834   static_assert(
835       EntryFrameConstants::kFixedFrameSize % (2 * kSystemPointerSize) == 0,
836       "Size of entry frame is not a multiple of 16 bytes");
837   __ Drop(EntryFrameConstants::kFixedFrameSize / kSystemPointerSize);
838   // Restore the callee-saved registers and return.
839   __ PopCalleeSavedRegisters();
840   __ Ret();
841 }
842 
843 }  // namespace
844 
845 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
846   Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
847 }
848 
849 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
850   Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
851                           Builtin::kJSConstructEntryTrampoline);
852 }
853 
854 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
855   Generate_JSEntryVariant(masm, StackFrame::ENTRY,
856                           Builtin::kRunMicrotasksTrampoline);
857 }
858 
859 // Input:
860 //   x1: new.target.
861 //   x2: function.
862 //   x3: receiver.
863 //   x4: argc.
864 //   x5: argv.
865 // Output:
866 //   x0: result.
867 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
868                                              bool is_construct) {
869   Register new_target = x1;
870   Register function = x2;
871   Register receiver = x3;
872   Register argc = x4;
873   Register argv = x5;
874   Register scratch = x10;
875   Register slots_to_claim = x11;
876 
877   {
878     // Enter an internal frame.
879     FrameScope scope(masm, StackFrame::INTERNAL);
880 
881     // Setup the context (we need to use the caller context from the isolate).
882     __ Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
883                                               masm->isolate()));
884     __ Ldr(cp, MemOperand(scratch));
885 
886     // Claim enough space for the arguments and the function, including an
887     // optional slot of padding.
888     constexpr int additional_slots = 2;
889     __ Add(slots_to_claim, argc, additional_slots);
890     __ Bic(slots_to_claim, slots_to_claim, 1);
891 
892     // Check if we have enough stack space to push all arguments.
893     Label enough_stack_space, stack_overflow;
894     __ StackOverflowCheck(slots_to_claim, &stack_overflow);
895     __ B(&enough_stack_space);
896 
897     __ Bind(&stack_overflow);
898     __ CallRuntime(Runtime::kThrowStackOverflow);
899     __ Unreachable();
900 
901     __ Bind(&enough_stack_space);
902     __ Claim(slots_to_claim);
903 
904     // Store padding (which might be overwritten).
905     __ SlotAddress(scratch, slots_to_claim);
906     __ Str(padreg, MemOperand(scratch, -kSystemPointerSize));
907 
908     // Store receiver on the stack.
909     __ Poke(receiver, 0);
910     // Store function on the stack.
911     __ SlotAddress(scratch, argc);
912     __ Str(function, MemOperand(scratch));
913 
914     // Copy arguments to the stack in a loop, in reverse order.
915     // x4: argc.
916     // x5: argv.
917     Label loop, done;
918 
919     // Skip the argument set up if we have no arguments.
920     __ Cmp(argc, JSParameterCount(0));
921     __ B(eq, &done);
922 
923     // scratch has been set to point to the location of the function, which
924     // marks the end of the argument copy.
925     __ SlotAddress(x0, 1);  // Skips receiver.
926     __ Bind(&loop);
927     // Load the handle.
928     __ Ldr(x11, MemOperand(argv, kSystemPointerSize, PostIndex));
929     // Dereference the handle.
930     __ Ldr(x11, MemOperand(x11));
931     // Poke the result into the stack.
932     __ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
933     // Loop if we've not reached the end of copy marker.
934     __ Cmp(x0, scratch);
935     __ B(lt, &loop);
936 
937     __ Bind(&done);
938 
939     __ Mov(x0, argc);
940     __ Mov(x3, new_target);
941     __ Mov(x1, function);
942     // x0: argc.
943     // x1: function.
944     // x3: new.target.
945 
946     // Initialize all JavaScript callee-saved registers, since they will be seen
947     // by the garbage collector as part of handlers.
948     // The original values have been saved in JSEntry.
949     __ LoadRoot(x19, RootIndex::kUndefinedValue);
950     __ Mov(x20, x19);
951     __ Mov(x21, x19);
952     __ Mov(x22, x19);
953     __ Mov(x23, x19);
954     __ Mov(x24, x19);
955     __ Mov(x25, x19);
956 #ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
957     __ Mov(x28, x19);
958 #endif
959     // Don't initialize the reserved registers.
960     // x26 : root register (kRootRegister).
961     // x27 : context pointer (cp).
962     // x28 : pointer cage base register (kPtrComprCageBaseRegister).
963     // x29 : frame pointer (fp).
964 
965     Handle<CodeT> builtin = is_construct
966                                 ? BUILTIN_CODE(masm->isolate(), Construct)
967                                 : masm->isolate()->builtins()->Call();
968     __ Call(builtin, RelocInfo::CODE_TARGET);
969 
970     // Exit the JS internal frame and remove the parameters (except function),
971     // and return.
972   }
973 
974   // Result is in x0. Return.
975   __ Ret();
976 }
977 
978 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
979   Generate_JSEntryTrampolineHelper(masm, false);
980 }
981 
982 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
983   Generate_JSEntryTrampolineHelper(masm, true);
984 }
985 
986 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
987   // This expects two C++ function parameters passed by Invoke() in
988   // execution.cc.
989   //   x0: root_register_value
990   //   x1: microtask_queue
991 
992   __ Mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), x1);
993   __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
994 }
995 
996 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
997                                                 Register optimized_code,
998                                                 Register closure) {
999   ASM_CODE_COMMENT(masm);
1000   DCHECK(!AreAliased(optimized_code, closure));
1001   // Store code entry in the closure.
1002   __ AssertCodeT(optimized_code);
1003   __ StoreTaggedField(optimized_code,
1004                       FieldMemOperand(closure, JSFunction::kCodeOffset));
1005   __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
1006                       kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
1007                       RememberedSetAction::kOmit, SmiCheck::kOmit);
1008 }
1009 
1010 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
1011                                   Register scratch2) {
1012   ASM_CODE_COMMENT(masm);
1013   Register params_size = scratch1;
1014   // Get the size of the formal parameters + receiver (in bytes).
1015   __ Ldr(params_size,
1016          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1017   __ Ldr(params_size.W(),
1018          FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
1019 
1020   Register actual_params_size = scratch2;
1021   // Compute the size of the actual parameters + receiver (in bytes).
1022   __ Ldr(actual_params_size,
1023          MemOperand(fp, StandardFrameConstants::kArgCOffset));
1024   __ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
1025 
1026   // If actual is bigger than formal, then we should use it to free up the stack
1027   // arguments.
1028   Label corrected_args_count;
1029   __ Cmp(params_size, actual_params_size);
1030   __ B(ge, &corrected_args_count);
1031   __ Mov(params_size, actual_params_size);
1032   __ Bind(&corrected_args_count);
1033 
1034   // Leave the frame (also dropping the register file).
1035   __ LeaveFrame(StackFrame::INTERPRETED);
1036 
1037   // Drop receiver + arguments.
1038   if (FLAG_debug_code) {
1039     __ Tst(params_size, kSystemPointerSize - 1);
1040     __ Check(eq, AbortReason::kUnexpectedValue);
1041   }
1042   __ Lsr(params_size, params_size, kSystemPointerSizeLog2);
1043   __ DropArguments(params_size);
1044 }
1045 
1046 // Tail-call |function_id| if |actual_state| == |expected_state|
1047 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
1048                                          Register actual_state,
1049                                          TieringState expected_state,
1050                                          Runtime::FunctionId function_id) {
1051   ASM_CODE_COMMENT(masm);
1052   Label no_match;
1053   __ CompareAndBranch(actual_state, Operand(static_cast<int>(expected_state)),
1054                       ne, &no_match);
1055   GenerateTailCallToReturnedCode(masm, function_id);
1056   __ bind(&no_match);
1057 }
1058 
1059 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
1060                                       Register optimized_code_entry,
1061                                       Register scratch) {
1062   // ----------- S t a t e -------------
1063   //  -- x0 : actual argument count
1064   //  -- x3 : new target (preserved for callee if needed, and caller)
1065   //  -- x1 : target function (preserved for callee if needed, and caller)
1066   // -----------------------------------
1067   ASM_CODE_COMMENT(masm);
1068   DCHECK(!AreAliased(x1, x3, optimized_code_entry, scratch));
1069 
1070   Register closure = x1;
1071   Label heal_optimized_code_slot;
1072 
1073   // If the optimized code is cleared, go to runtime to update the optimization
1074   // marker field.
1075   __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
1076                    &heal_optimized_code_slot);
1077 
1078   // Check if the optimized code is marked for deopt. If it is, call the
1079   // runtime to clear it.
1080   __ AssertCodeT(optimized_code_entry);
1081   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
1082     __ Ldr(scratch.W(),
1083            FieldMemOperand(optimized_code_entry,
1084                            CodeDataContainer::kKindSpecificFlagsOffset));
1085     __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
1086             &heal_optimized_code_slot);
1087 
1088   } else {
1089     __ LoadTaggedPointerField(
1090         scratch,
1091         FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
1092     __ Ldr(
1093         scratch.W(),
1094         FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
1095     __ Tbnz(scratch.W(), Code::kMarkedForDeoptimizationBit,
1096             &heal_optimized_code_slot);
1097   }
1098 
1099   // Optimized code is good, get it into the closure and link the closure into
1100   // the optimized functions list, then tail call the optimized code.
1101   ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
1102   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
1103   __ Move(x2, optimized_code_entry);
1104   __ JumpCodeTObject(x2);
1105 
1106   // Optimized code slot contains deoptimized code or code is cleared and
1107   // optimized code marker isn't updated. Evict the code, update the marker
1108   // and re-enter the closure's code.
1109   __ bind(&heal_optimized_code_slot);
1110   GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
1111 }
1112 
1113 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
1114                               Register tiering_state) {
1115   // ----------- S t a t e -------------
1116   //  -- x0 : actual argument count
1117   //  -- x3 : new target (preserved for callee if needed, and caller)
1118   //  -- x1 : target function (preserved for callee if needed, and caller)
1119   //  -- feedback vector (preserved for caller if needed)
1120   //  -- tiering_state : int32 containing non-zero tiering state.
1121   // -----------------------------------
1122   ASM_CODE_COMMENT(masm);
1123   DCHECK(!AreAliased(feedback_vector, x1, x3, tiering_state));
1124 
1125   TailCallRuntimeIfStateEquals(masm, tiering_state,
1126                                TieringState::kRequestTurbofan_Synchronous,
1127                                Runtime::kCompileTurbofan_Synchronous);
1128   TailCallRuntimeIfStateEquals(masm, tiering_state,
1129                                TieringState::kRequestTurbofan_Concurrent,
1130                                Runtime::kCompileTurbofan_Concurrent);
1131 
1132   __ Unreachable();
1133 }
1134 
1135 // Advance the current bytecode offset. This simulates what all bytecode
1136 // handlers do upon completion of the underlying operation. Will bail out to a
1137 // label if the bytecode (without prefix) is a return bytecode. Will not advance
1138 // the bytecode offset if the current bytecode is a JumpLoop, instead just
1139 // re-executing the JumpLoop to jump to the correct bytecode.
1140 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
1141                                           Register bytecode_array,
1142                                           Register bytecode_offset,
1143                                           Register bytecode, Register scratch1,
1144                                           Register scratch2, Label* if_return) {
1145   ASM_CODE_COMMENT(masm);
1146   Register bytecode_size_table = scratch1;
1147 
1148   // The bytecode offset value will be increased by one in wide and extra wide
1149   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
1150   // will restore the original bytecode. In order to simplify the code, we have
1151   // a backup of it.
1152   Register original_bytecode_offset = scratch2;
1153   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
1154                      bytecode, original_bytecode_offset));
1155 
1156   __ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());
1157   __ Mov(original_bytecode_offset, bytecode_offset);
1158 
1159   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
1160   Label process_bytecode, extra_wide;
1161   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
1162   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
1163   STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
1164   STATIC_ASSERT(3 ==
1165                 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
1166   __ Cmp(bytecode, Operand(0x3));
1167   __ B(hi, &process_bytecode);
1168   __ Tst(bytecode, Operand(0x1));
1169   // The code to load the next bytecode is common to both wide and extra wide.
1170   // We can hoist them up here since they do not modify the flags after Tst.
1171   __ Add(bytecode_offset, bytecode_offset, Operand(1));
1172   __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
1173   __ B(ne, &extra_wide);
1174 
1175   // Update table to the wide scaled table.
1176   __ Add(bytecode_size_table, bytecode_size_table,
1177          Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
1178   __ B(&process_bytecode);
1179 
1180   __ Bind(&extra_wide);
1181   // Update table to the extra wide scaled table.
1182   __ Add(bytecode_size_table, bytecode_size_table,
1183          Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
1184 
1185   __ Bind(&process_bytecode);
1186 
1187 // Bailout to the return label if this is a return bytecode.
1188 #define JUMP_IF_EQUAL(NAME)                                              \
1189   __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
1190   __ B(if_return, eq);
1191   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1192 #undef JUMP_IF_EQUAL
1193 
1194   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1195   // of the loop.
1196   Label end, not_jump_loop;
1197   __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1198   __ B(ne, &not_jump_loop);
1199   // We need to restore the original bytecode_offset since we might have
1200   // increased it to skip the wide / extra-wide prefix bytecode.
1201   __ Mov(bytecode_offset, original_bytecode_offset);
1202   __ B(&end);
1203 
1204   __ bind(&not_jump_loop);
1205   // Otherwise, load the size of the current bytecode and advance the offset.
1206   __ Ldrb(scratch1.W(), MemOperand(bytecode_size_table, bytecode));
1207   __ Add(bytecode_offset, bytecode_offset, scratch1);
1208 
1209   __ Bind(&end);
1210 }
1211 
1212 // Read off the optimization state in the feedback vector and check if there
1213 // is optimized code or a tiering state that needs to be processed.
1214 static void LoadTieringStateAndJumpIfNeedsProcessing(
1215     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1216     Label* has_optimized_code_or_state) {
1217   ASM_CODE_COMMENT(masm);
1218   DCHECK(!AreAliased(optimization_state, feedback_vector));
1219   __ Ldr(optimization_state,
1220          FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1221   __ TestAndBranchIfAnySet(
1222       optimization_state,
1223       FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask,
1224       has_optimized_code_or_state);
1225 }
1226 
1227 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1228     MacroAssembler* masm, Register optimization_state,
1229     Register feedback_vector) {
1230   ASM_CODE_COMMENT(masm);
1231   DCHECK(!AreAliased(optimization_state, feedback_vector));
1232   Label maybe_has_optimized_code;
1233   // Check if optimized code is available
1234   __ TestAndBranchIfAllClear(optimization_state,
1235                              FeedbackVector::kTieringStateIsAnyRequestMask,
1236                              &maybe_has_optimized_code);
1237 
1238   Register tiering_state = optimization_state;
1239   __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1240   MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1241 
1242   __ bind(&maybe_has_optimized_code);
1243   Register optimized_code_entry = x7;
1244   __ LoadAnyTaggedField(
1245       optimized_code_entry,
1246       FieldMemOperand(feedback_vector,
1247                       FeedbackVector::kMaybeOptimizedCodeOffset));
1248   TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
1249 }
1250 
1251 namespace {
1252 
1253 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1254                                  Register bytecode_array) {
1255   // Reset the bytecode age and OSR state (optimized to a single write).
1256   static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1257   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1258   __ Str(wzr,
1259          FieldMemOperand(bytecode_array,
1260                          BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1261 }
1262 
1263 }  // namespace
1264 
1265 // static
1266 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1267   UseScratchRegisterScope temps(masm);
1268   // Need a few extra registers
1269   temps.Include(x14, x15);
1270 
1271   auto descriptor =
1272       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1273   Register closure = descriptor.GetRegisterParameter(
1274       BaselineOutOfLinePrologueDescriptor::kClosure);
1275   // Load the feedback vector from the closure.
1276   Register feedback_vector = temps.AcquireX();
1277   __ LoadTaggedPointerField(
1278       feedback_vector,
1279       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1280   __ LoadTaggedPointerField(
1281       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1282   if (FLAG_debug_code) {
1283     __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
1284     __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1285   }
1286 
1287   // Check the tiering state.
1288   Label has_optimized_code_or_state;
1289   Register optimization_state = temps.AcquireW();
1290   LoadTieringStateAndJumpIfNeedsProcessing(
1291       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1292 
1293   // Increment invocation count for the function.
1294   {
1295     UseScratchRegisterScope temps(masm);
1296     Register invocation_count = temps.AcquireW();
1297     __ Ldr(invocation_count,
1298            FieldMemOperand(feedback_vector,
1299                            FeedbackVector::kInvocationCountOffset));
1300     __ Add(invocation_count, invocation_count, Operand(1));
1301     __ Str(invocation_count,
1302            FieldMemOperand(feedback_vector,
1303                            FeedbackVector::kInvocationCountOffset));
1304   }
1305 
1306   FrameScope frame_scope(masm, StackFrame::MANUAL);
1307   {
1308     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1309     // Normally the first thing we'd do here is Push(lr, fp), but we already
1310     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1311     // value lr before the call to this BaselineOutOfLinePrologue builtin.
1312 
1313     Register callee_context = descriptor.GetRegisterParameter(
1314         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1315     Register callee_js_function = descriptor.GetRegisterParameter(
1316         BaselineOutOfLinePrologueDescriptor::kClosure);
1317     __ Push(callee_context, callee_js_function);
1318     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1319     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1320 
1321     Register argc = descriptor.GetRegisterParameter(
1322         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1323     // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1324     // the frame, so load it into a register.
1325     Register bytecode_array = descriptor.GetRegisterParameter(
1326         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1327     ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1328     __ Push(argc, bytecode_array);
1329 
1330     // Baseline code frames store the feedback vector where the interpreter
1331     // would store the bytecode offset.
1332     if (FLAG_debug_code) {
1333       __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
1334       __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1335     }
1336     // Our stack is currently aligned. We have to push something along with
1337     // the feedback vector to keep it that way -- we may as well start
1338     // initialising the register frame.
1339     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1340     __ Push(feedback_vector, kInterpreterAccumulatorRegister);
1341   }
1342 
1343   Label call_stack_guard;
1344   Register frame_size = descriptor.GetRegisterParameter(
1345       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1346   {
1347     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1348     // Stack check. This folds the checks for both the interrupt stack limit
1349     // check and the real stack limit into one by just checking for the
1350     // interrupt limit. The interrupt limit is either equal to the real stack
1351     // limit or tighter. By ensuring we have space until that limit after
1352     // building the frame we can quickly precheck both at once.
1353     UseScratchRegisterScope temps(masm);
1354 
1355     Register sp_minus_frame_size = temps.AcquireX();
1356     __ Sub(sp_minus_frame_size, sp, frame_size);
1357     Register interrupt_limit = temps.AcquireX();
1358     __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1359     __ Cmp(sp_minus_frame_size, interrupt_limit);
1360     __ B(lo, &call_stack_guard);
1361   }
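    // Informally, the check above is:
    //   if (sp - frame_size < interrupt_limit) goto call_stack_guard;
    // and since the interrupt limit is at or above the real stack limit,
    // passing it also proves the new frame fits under the real limit.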
1362 
1363   // Do "fast" return to the caller pc in lr.
1364   if (FLAG_debug_code) {
1365     // The accumulator should already be "undefined", we don't have to load it.
1366     __ CompareRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1367     __ Assert(eq, AbortReason::kUnexpectedValue);
1368   }
1369   __ Ret();
1370 
1371   __ bind(&has_optimized_code_or_state);
1372   {
1373     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1374     // Drop the frame created by the baseline call.
1375     __ Pop<TurboAssembler::kAuthLR>(fp, lr);
1376     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1377                                                  feedback_vector);
1378     __ Trap();
1379   }
1380 
1381   __ bind(&call_stack_guard);
1382   {
1383     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1384     Register new_target = descriptor.GetRegisterParameter(
1385         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
1386 
1387     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1388     // Save incoming new target or generator
1389     __ Push(padreg, new_target);
1390     __ SmiTag(frame_size);
1391     __ PushArgument(frame_size);
1392     __ CallRuntime(Runtime::kStackGuardWithGap);
1393     __ Pop(new_target, padreg);
1394   }
1395   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1396   __ Ret();
1397 }
1398 
1399 // Generate code for entering a JS function with the interpreter.
1400 // On entry to the function the receiver and arguments have been pushed on the
1401 // stack left to right.
1402 //
1403 // The live registers are:
1404 //   - x0: actual argument count
1405 //   - x1: the JS function object being called.
1406 //   - x3: the incoming new target or generator object
1407 //   - cp: our context.
1408 //   - fp: our caller's frame pointer.
1409 //   - lr: return address.
1410 //
1411 // The function builds an interpreter frame. See InterpreterFrameConstants in
1412 // frame-constants.h for its layout.
1413 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1414   Register closure = x1;
1415   Register feedback_vector = x2;
1416 
1417   // Get the bytecode array from the function object and load it into
1418   // kInterpreterBytecodeArrayRegister.
1419   __ LoadTaggedPointerField(
1420       x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1421   __ LoadTaggedPointerField(
1422       kInterpreterBytecodeArrayRegister,
1423       FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
1424 
1425   Label is_baseline;
1426   GetSharedFunctionInfoBytecodeOrBaseline(
1427       masm, kInterpreterBytecodeArrayRegister, x11, &is_baseline);
1428 
1429   // The bytecode array could have been flushed from the shared function info;
1430   // if so, call into CompileLazy.
1431   Label compile_lazy;
1432   __ CompareObjectType(kInterpreterBytecodeArrayRegister, x4, x4,
1433                        BYTECODE_ARRAY_TYPE);
1434   __ B(ne, &compile_lazy);
1435 
1436   // Load the feedback vector from the closure.
1437   __ LoadTaggedPointerField(
1438       feedback_vector,
1439       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1440   __ LoadTaggedPointerField(
1441       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1442 
1443   Label push_stack_frame;
1444   // Check if feedback vector is valid. If valid, check for optimized code
1445   // and update invocation count. Otherwise, set up the stack frame.
1446   __ LoadTaggedPointerField(
1447       x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1448   __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
1449   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
1450   __ B(ne, &push_stack_frame);
1451 
1452   // Check the tiering state.
1453   Label has_optimized_code_or_state;
1454   Register optimization_state = w7;
1455   LoadTieringStateAndJumpIfNeedsProcessing(
1456       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1457 
1458   Label not_optimized;
1459   __ bind(&not_optimized);
1460 
1461   // Increment invocation count for the function.
1462   __ Ldr(w10, FieldMemOperand(feedback_vector,
1463                               FeedbackVector::kInvocationCountOffset));
1464   __ Add(w10, w10, Operand(1));
1465   __ Str(w10, FieldMemOperand(feedback_vector,
1466                               FeedbackVector::kInvocationCountOffset));
1467 
1468   // Open a frame scope to indicate that there is a frame on the stack.  The
1469   // MANUAL indicates that the scope shouldn't actually generate code to set up
1470   // the frame (that is done below).
1471   __ Bind(&push_stack_frame);
1472   FrameScope frame_scope(masm, StackFrame::MANUAL);
1473   __ Push<TurboAssembler::kSignLR>(lr, fp);
1474   __ mov(fp, sp);
1475   __ Push(cp, closure);
1476 
1477   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1478 
1479   // Load the initial bytecode offset.
1480   __ Mov(kInterpreterBytecodeOffsetRegister,
1481          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1482 
1483   // Push actual argument count, bytecode array, Smi tagged bytecode array
1484   // offset and an undefined (to properly align the stack pointer).
1485   STATIC_ASSERT(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
1486   __ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
1487   __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
1488   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1489   __ Push(x6, kInterpreterAccumulatorRegister);
1490 
1491   // Allocate the local and temporary register file on the stack.
1492   Label stack_overflow;
1493   {
1494     // Load frame size from the BytecodeArray object.
1495     __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1496                                 BytecodeArray::kFrameSizeOffset));
1497 
1498     // Do a stack check to ensure we don't go over the limit.
1499     __ Sub(x10, sp, Operand(x11));
1500     {
1501       UseScratchRegisterScope temps(masm);
1502       Register scratch = temps.AcquireX();
1503       __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
1504       __ Cmp(x10, scratch);
1505     }
1506     __ B(lo, &stack_overflow);
1507 
1508     // If ok, push undefined as the initial value for all register file entries.
1509     // Note: there should always be at least one stack slot for the return
1510     // register in the register file.
1511     Label loop_header;
1512     __ Lsr(x11, x11, kSystemPointerSizeLog2);
1513     // Round down (since we already have an undefined in the stack) the number
1514     // of registers to a multiple of 2, to align the stack to 16 bytes.
1515     __ Bic(x11, x11, 1);
1516     __ PushMultipleTimes(kInterpreterAccumulatorRegister, x11);
1517     __ Bind(&loop_header);
1518   }
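    // Illustrative example: a frame size of 40 bytes is 5 slots; Bic clears
    // the low bit, so 4 copies of undefined are pushed here, and together
    // with the accumulator slot pushed above the stack stays 16-byte aligned.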
1519 
1520   // If the bytecode array has a valid incoming new target or generator object
1521   // register, initialize it with incoming value which was passed in x3.
1522   Label no_incoming_new_target_or_generator_register;
1523   __ Ldrsw(x10,
1524            FieldMemOperand(
1525                kInterpreterBytecodeArrayRegister,
1526                BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1527   __ Cbz(x10, &no_incoming_new_target_or_generator_register);
1528   __ Str(x3, MemOperand(fp, x10, LSL, kSystemPointerSizeLog2));
1529   __ Bind(&no_incoming_new_target_or_generator_register);
1530 
1531   // Perform interrupt stack check.
1532   // TODO(solanes): Merge with the real stack limit check above.
1533   Label stack_check_interrupt, after_stack_check_interrupt;
1534   __ LoadStackLimit(x10, StackLimitKind::kInterruptStackLimit);
1535   __ Cmp(sp, x10);
1536   __ B(lo, &stack_check_interrupt);
1537   __ Bind(&after_stack_check_interrupt);
1538 
1539   // The accumulator is already loaded with undefined.
1540 
1541   // Load the dispatch table into a register and dispatch to the bytecode
1542   // handler at the current bytecode offset.
1543   Label do_dispatch;
1544   __ bind(&do_dispatch);
1545   __ Mov(
1546       kInterpreterDispatchTableRegister,
1547       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1548   __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
1549                           kInterpreterBytecodeOffsetRegister));
1550   __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
1551   __ Ldr(kJavaScriptCallCodeStartRegister,
1552          MemOperand(kInterpreterDispatchTableRegister, x1));
1553   __ Call(kJavaScriptCallCodeStartRegister);
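  // Informally, the dispatch above computes:
  //   opcode  = bytecode_array[bytecode_offset];   // Ldrb
  //   handler = dispatch_table[opcode];            // Ldr with scaled index
  //   call handler;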
1554 
1555   // Any returns to the entry trampoline are either due to the return bytecode
1556   // or the interpreter tail calling a builtin and then a dispatch.
1557   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1558   __ JumpTarget();
1559 
1560   // Get bytecode array and bytecode offset from the stack frame.
1561   __ Ldr(kInterpreterBytecodeArrayRegister,
1562          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1563   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1564               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1565 
1566   // Either return, or advance to the next bytecode and dispatch.
1567   Label do_return;
1568   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
1569                          kInterpreterBytecodeOffsetRegister));
1570   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1571                                 kInterpreterBytecodeOffsetRegister, x1, x2, x3,
1572                                 &do_return);
1573   __ B(&do_dispatch);
1574 
1575   __ bind(&do_return);
1576   // The return value is in x0.
1577   LeaveInterpreterFrame(masm, x2, x4);
1578   __ Ret();
1579 
1580   __ bind(&stack_check_interrupt);
1581   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1582   // for the call to the StackGuard.
1583   __ Mov(kInterpreterBytecodeOffsetRegister,
1584          Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1585                               kFunctionEntryBytecodeOffset)));
1586   __ Str(kInterpreterBytecodeOffsetRegister,
1587          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1588   __ CallRuntime(Runtime::kStackGuard);
1589 
1590   // After the call, restore the bytecode array, bytecode offset and accumulator
1591   // registers again. Also, restore the bytecode offset in the stack to its
1592   // previous value.
1593   __ Ldr(kInterpreterBytecodeArrayRegister,
1594          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1595   __ Mov(kInterpreterBytecodeOffsetRegister,
1596          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1597   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1598 
1599   __ SmiTag(x10, kInterpreterBytecodeOffsetRegister);
1600   __ Str(x10, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1601 
1602   __ jmp(&after_stack_check_interrupt);
1603 
1604   __ bind(&has_optimized_code_or_state);
1605   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1606                                                feedback_vector);
1607 
1608   __ bind(&is_baseline);
1609   {
1610     // Load the feedback vector from the closure.
1611     __ LoadTaggedPointerField(
1612         feedback_vector,
1613         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1614     __ LoadTaggedPointerField(
1615         feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1616 
1617     Label install_baseline_code;
1618     // Check if feedback vector is valid. If not, call prepare for baseline to
1619     // allocate it.
1620     __ LoadTaggedPointerField(
1621         x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1622     __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
1623     __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
1624     __ B(ne, &install_baseline_code);
1625 
1626     // Check the tiering state.
1627     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1628                                              feedback_vector,
1629                                              &has_optimized_code_or_state);
1630 
1631     // Load the baseline code into the closure.
1632     __ Move(x2, kInterpreterBytecodeArrayRegister);
1633     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
1634     ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
1635     __ JumpCodeTObject(x2);
1636 
1637     __ bind(&install_baseline_code);
1638     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1639   }
1640 
1641   __ bind(&compile_lazy);
1642   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1643   __ Unreachable();  // Should not return.
1644 
1645   __ bind(&stack_overflow);
1646   __ CallRuntime(Runtime::kThrowStackOverflow);
1647   __ Unreachable();  // Should not return.
1648 }
1649 
1650 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1651                                         Register first_arg_index,
1652                                         Register spread_arg_out,
1653                                         ConvertReceiverMode receiver_mode,
1654                                         InterpreterPushArgsMode mode) {
1655   ASM_CODE_COMMENT(masm);
1656   Register last_arg_addr = x10;
1657   Register stack_addr = x11;
1658   Register slots_to_claim = x12;
1659   Register slots_to_copy = x13;
1660 
1661   DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
1662                      slots_to_claim, slots_to_copy));
1663   // spread_arg_out may alias with the first_arg_index input.
1664   DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
1665                      slots_to_copy));
1666 
1667   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1668     // Exclude final spread from slots to claim and the number of arguments.
1669     __ Sub(num_args, num_args, 1);
1670   }
1671 
1672   // Round up to an even number of slots.
1673   __ Add(slots_to_claim, num_args, 1);
1674   __ Bic(slots_to_claim, slots_to_claim, 1);
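  // Illustrative example: num_args == 3 gives slots_to_claim == (3 + 1) & ~1
  // == 4, i.e. one extra padding slot, which the Poke of padreg below
  // initialises.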
1675 
1676   // Add a stack check before pushing arguments.
1677   Label stack_overflow, done;
1678   __ StackOverflowCheck(slots_to_claim, &stack_overflow);
1679   __ B(&done);
1680   __ Bind(&stack_overflow);
1681   __ TailCallRuntime(Runtime::kThrowStackOverflow);
1682   __ Unreachable();
1683   __ Bind(&done);
1684 
1685   __ Claim(slots_to_claim);
1686 
1687   {
1688     // Store padding, which may be overwritten.
1689     UseScratchRegisterScope temps(masm);
1690     Register scratch = temps.AcquireX();
1691     __ Sub(scratch, slots_to_claim, 1);
1692     __ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
1693   }
1694 
1695   const bool skip_receiver =
1696       receiver_mode == ConvertReceiverMode::kNullOrUndefined;
1697   if (skip_receiver) {
1698     __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
1699   } else {
1700     __ Mov(slots_to_copy, num_args);
1701   }
1702   __ SlotAddress(stack_addr, skip_receiver ? 1 : 0);
1703 
1704   __ Sub(last_arg_addr, first_arg_index,
1705          Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
1706   __ Add(last_arg_addr, last_arg_addr, kSystemPointerSize);
1707 
1708   // Load the final spread argument into spread_arg_out, if necessary.
1709   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1710     __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kSystemPointerSize));
1711   }
1712 
1713   __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
1714                      TurboAssembler::kDstLessThanSrcAndReverse);
1715 
1716   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1717     // Store "undefined" as the receiver arg if we need to.
1718     Register receiver = x14;
1719     __ LoadRoot(receiver, RootIndex::kUndefinedValue);
1720     __ Poke(receiver, 0);
1721   }
1722 }
1723 
1724 // static
1725 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1726     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1727     InterpreterPushArgsMode mode) {
1728   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1729   // ----------- S t a t e -------------
1730   //  -- x0 : the number of arguments
1731   //  -- x2 : the address of the first argument to be pushed. Subsequent
1732   //          arguments should be consecutive above this, in the same order as
1733   //          they are to be pushed onto the stack.
1734   //  -- x1 : the target to call (can be any Object).
1735   // -----------------------------------
1736 
1737   // Push the arguments. num_args may be updated according to mode.
1738   // spread_arg_out will be updated to contain the last spread argument, when
1739   // mode == InterpreterPushArgsMode::kWithFinalSpread.
1740   Register num_args = x0;
1741   Register first_arg_index = x2;
1742   Register spread_arg_out =
1743       (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
1744   GenerateInterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1745                               receiver_mode, mode);
1746 
1747   // Call the target.
1748   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1749     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1750             RelocInfo::CODE_TARGET);
1751   } else {
1752     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1753             RelocInfo::CODE_TARGET);
1754   }
1755 }
1756 
1757 // static
1758 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1759     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1760   // ----------- S t a t e -------------
1761   // -- x0 : argument count
1762   // -- x3 : new target
1763   // -- x1 : constructor to call
1764   // -- x2 : allocation site feedback if available, undefined otherwise
1765   // -- x4 : address of the first argument
1766   // -----------------------------------
1767   __ AssertUndefinedOrAllocationSite(x2);
1768 
1769   // Push the arguments. num_args may be updated according to mode.
1770   // spread_arg_out will be updated to contain the last spread argument, when
1771   // mode == InterpreterPushArgsMode::kWithFinalSpread.
1772   Register num_args = x0;
1773   Register first_arg_index = x4;
1774   Register spread_arg_out =
1775       (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
1776   GenerateInterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
1777                               ConvertReceiverMode::kNullOrUndefined, mode);
1778 
1779   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1780     __ AssertFunction(x1);
1781 
1782     // Tail call to the array construct stub (still in the caller
1783     // context at this point).
1784     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1785             RelocInfo::CODE_TARGET);
1786   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1787     // Call the constructor with x0, x1, and x3 unmodified.
1788     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1789             RelocInfo::CODE_TARGET);
1790   } else {
1791     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1792     // Call the constructor with x0, x1, and x3 unmodified.
1793     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1794   }
1795 }
1796 
1797 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1798   // Initialize the dispatch table register.
1799   __ Mov(
1800       kInterpreterDispatchTableRegister,
1801       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1802 
1803   // Get the bytecode array pointer from the frame.
1804   __ Ldr(kInterpreterBytecodeArrayRegister,
1805          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1806 
1807   if (FLAG_debug_code) {
1808     // Check function data field is actually a BytecodeArray object.
1809     __ AssertNotSmi(
1810         kInterpreterBytecodeArrayRegister,
1811         AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1812     __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
1813                          BYTECODE_ARRAY_TYPE);
1814     __ Assert(
1815         eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1816   }
1817 
1818   // Get the target bytecode offset from the frame.
1819   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1820               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1821 
1822   if (FLAG_debug_code) {
1823     Label okay;
1824     __ cmp(kInterpreterBytecodeOffsetRegister,
1825            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1826     __ B(ge, &okay);
1827     __ Unreachable();
1828     __ bind(&okay);
1829   }
1830 
1831   // Set up LR to point to code below, so we return there after we're done
1832   // executing the function.
1833   Label return_from_bytecode_dispatch;
1834   __ Adr(lr, &return_from_bytecode_dispatch);
1835 
1836   // Dispatch to the target bytecode.
1837   __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
1838                           kInterpreterBytecodeOffsetRegister));
1839   __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
1840   __ Ldr(kJavaScriptCallCodeStartRegister,
1841          MemOperand(kInterpreterDispatchTableRegister, x1));
1842 
1843   {
1844     UseScratchRegisterScope temps(masm);
1845     temps.Exclude(x17);
1846     __ Mov(x17, kJavaScriptCallCodeStartRegister);
1847     __ Jump(x17);
1848   }
1849 
1850   __ Bind(&return_from_bytecode_dispatch);
1851 
1852   // We return here after having executed the function in the interpreter.
1853   // Now jump to the correct point in the interpreter entry trampoline.
1854   Label builtin_trampoline, trampoline_loaded;
1855   Smi interpreter_entry_return_pc_offset(
1856       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1857   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1858 
1859   // If the SFI function_data is an InterpreterData, the function will have a
1860   // custom copy of the interpreter entry trampoline for profiling. If so,
1861   // get the custom trampoline, otherwise grab the entry address of the global
1862   // trampoline.
1863   __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1864   __ LoadTaggedPointerField(
1865       x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1866   __ LoadTaggedPointerField(
1867       x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
1868   __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
1869                        kInterpreterDispatchTableRegister,
1870                        INTERPRETER_DATA_TYPE);
1871   __ B(ne, &builtin_trampoline);
1872 
1873   __ LoadTaggedPointerField(
1874       x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
1875   __ LoadCodeTEntry(x1, x1);
1876   __ B(&trampoline_loaded);
1877 
1878   __ Bind(&builtin_trampoline);
1879   __ Mov(x1, ExternalReference::
1880                  address_of_interpreter_entry_trampoline_instruction_start(
1881                      masm->isolate()));
1882   __ Ldr(x1, MemOperand(x1));
1883 
1884   __ Bind(&trampoline_loaded);
1885 
1886   {
1887     UseScratchRegisterScope temps(masm);
1888     temps.Exclude(x17);
1889     __ Add(x17, x1, Operand(interpreter_entry_return_pc_offset.value()));
1890     __ Br(x17);
1891   }
1892 }
1893 
1894 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1895   // Get bytecode array and bytecode offset from the stack frame.
1896   __ ldr(kInterpreterBytecodeArrayRegister,
1897          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1898   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1899               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1900 
1901   Label enter_bytecode, function_entry_bytecode;
1902   __ cmp(kInterpreterBytecodeOffsetRegister,
1903          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1904                  kFunctionEntryBytecodeOffset));
1905   __ B(eq, &function_entry_bytecode);
1906 
1907   // Load the current bytecode.
1908   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
1909                          kInterpreterBytecodeOffsetRegister));
1910 
1911   // Advance to the next bytecode.
1912   Label if_return;
1913   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1914                                 kInterpreterBytecodeOffsetRegister, x1, x2, x3,
1915                                 &if_return);
1916 
1917   __ bind(&enter_bytecode);
1918   // Convert new bytecode offset to a Smi and save in the stackframe.
1919   __ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
1920   __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1921 
1922   Generate_InterpreterEnterBytecode(masm);
1923 
1924   __ bind(&function_entry_bytecode);
1925   // If the code deoptimizes during the implicit function entry stack interrupt
1926   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1927   // not a valid bytecode offset. Detect this case and advance to the first
1928   // actual bytecode.
1929   __ Mov(kInterpreterBytecodeOffsetRegister,
1930          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1931   __ B(&enter_bytecode);
1932 
1933   // We should never take the if_return path.
1934   __ bind(&if_return);
1935   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1936 }
1937 
1938 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1939   Generate_InterpreterEnterBytecode(masm);
1940 }
1941 
1942 namespace {
1943 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1944                                       bool java_script_builtin,
1945                                       bool with_result) {
1946   const RegisterConfiguration* config(RegisterConfiguration::Default());
1947   int allocatable_register_count = config->num_allocatable_general_registers();
1948   int frame_size = BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp +
1949                    (allocatable_register_count +
1950                     BuiltinContinuationFrameConstants::PaddingSlotCount(
1951                         allocatable_register_count)) *
1952                        kSystemPointerSize;
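  // Informally: frame_size = the fixed frame portion below fp plus one slot
  // per allocatable register, padded to an even slot count so the
  // continuation frame stays 16-byte aligned; fp is then computed from sp
  // below.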
1953 
1954   UseScratchRegisterScope temps(masm);
1955   Register scratch = temps.AcquireX();  // Temp register is not allocatable.
1956 
1957   // Set up frame pointer.
1958   __ Add(fp, sp, frame_size);
1959 
1960   if (with_result) {
1961     if (java_script_builtin) {
1962       __ mov(scratch, x0);
1963     } else {
1964       // Overwrite the hole inserted by the deoptimizer with the return value
1965       // from the LAZY deopt point.
1966       __ Str(x0, MemOperand(
1967                      fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
1968     }
1969   }
1970 
1971   // Restore registers in pairs.
1972   int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1973                allocatable_register_count * kSystemPointerSize;
1974   for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
1975     int code1 = config->GetAllocatableGeneralCode(i);
1976     int code2 = config->GetAllocatableGeneralCode(i - 1);
1977     Register reg1 = Register::from_code(code1);
1978     Register reg2 = Register::from_code(code2);
1979     __ Ldp(reg1, reg2, MemOperand(fp, offset));
1980     offset += 2 * kSystemPointerSize;
1981   }
1982 
1983   // Restore first register separately, if number of registers is odd.
1984   if (allocatable_register_count % 2 != 0) {
1985     int code = config->GetAllocatableGeneralCode(0);
1986     __ Ldr(Register::from_code(code), MemOperand(fp, offset));
1987   }
1988 
1989   if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
1990 
1991   if (java_script_builtin && with_result) {
1992     // Overwrite the hole inserted by the deoptimizer with the return value from
1993     // the LAZY deopt point. x0 contains the arguments count, the return value
1994     // from LAZY is always the last argument.
1995     constexpr int return_offset =
1996         BuiltinContinuationFrameConstants::kCallerSPOffset /
1997             kSystemPointerSize -
1998         kJSArgcReceiverSlots;
1999     __ add(x0, x0, return_offset);
2000     __ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2));
2001     // Recover argument count.
2002     __ sub(x0, x0, return_offset);
2003   }
2004 
2005   // Load builtin index (stored as a Smi) and use it to get the builtin start
2006   // address from the builtins table.
2007   Register builtin = scratch;
2008   __ Ldr(
2009       builtin,
2010       MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset));
2011 
2012   // Restore fp, lr.
2013   __ Mov(sp, fp);
2014   __ Pop<TurboAssembler::kAuthLR>(fp, lr);
2015 
2016   __ LoadEntryFromBuiltinIndex(builtin);
2017   __ Jump(builtin);
2018 }
2019 }  // namespace
2020 
2021 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
2022   Generate_ContinueToBuiltinHelper(masm, false, false);
2023 }
2024 
2025 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
2026     MacroAssembler* masm) {
2027   Generate_ContinueToBuiltinHelper(masm, false, true);
2028 }
2029 
2030 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
2031   Generate_ContinueToBuiltinHelper(masm, true, false);
2032 }
2033 
2034 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
2035     MacroAssembler* masm) {
2036   Generate_ContinueToBuiltinHelper(masm, true, true);
2037 }
2038 
2039 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
2040   {
2041     FrameScope scope(masm, StackFrame::INTERNAL);
2042     __ CallRuntime(Runtime::kNotifyDeoptimized);
2043   }
2044 
2045   // Pop TOS register and padding.
2046   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
2047   __ Pop(x0, padreg);
2048   __ Ret();
2049 }
2050 
2051 namespace {
2052 
2053 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
2054                        Operand offset = Operand(0)) {
2055   // Pop the return address to this function's caller from the return stack
2056   // buffer, since we'll never return to it.
2057   Label jump;
2058   __ Adr(lr, &jump);
2059   __ Ret();
2060 
2061   __ Bind(&jump);
2062 
2063   UseScratchRegisterScope temps(masm);
2064   temps.Exclude(x17);
2065   if (offset.IsZero()) {
2066     __ Mov(x17, entry_address);
2067   } else {
2068     __ Add(x17, entry_address, offset);
2069   }
2070   __ Br(x17);
2071 }
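// Informally: the Adr/Ret pair above "returns" to the label that immediately
// follows, which pops this function's caller from the return stack buffer
// (we will never really return to it); the actual transfer to
// entry_address + offset then goes through x17.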
2072 
2073 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
2074   ASM_CODE_COMMENT(masm);
2075   {
2076     FrameScope scope(masm, StackFrame::INTERNAL);
2077     __ CallRuntime(Runtime::kCompileOptimizedOSR);
2078   }
2079 
2080   // If the code object is null, just return to the caller.
2081   Label skip;
2082   __ CompareTaggedAndBranch(x0, Smi::zero(), ne, &skip);
2083   __ Ret();
2084 
2085   __ Bind(&skip);
2086 
2087   if (is_interpreter) {
2088     // Drop the handler frame that is sitting on top of the actual
2089     // JavaScript frame. This is the case when OSR is triggered from bytecode.
2090     __ LeaveFrame(StackFrame::STUB);
2091   }
2092 
2093   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2094     __ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
2095   }
2096 
2097   // Load deoptimization data from the code object.
2098   // <deopt_data> = <code>[#deoptimization_data_offset]
2099   __ LoadTaggedPointerField(
2100       x1,
2101       FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
2102 
2103   // Load the OSR entrypoint offset from the deoptimization data.
2104   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
2105   __ SmiUntagField(
2106       x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
2107                                   DeoptimizationData::kOsrPcOffsetIndex)));
2108 
2109   // Compute the target address = code_obj + header_size + osr_offset
2110   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
2111   __ Add(x0, x0, x1);
2112   Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
2113 }
2114 
2115 }  // namespace
2116 
2117 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2118   return OnStackReplacement(masm, true);
2119 }
2120 
2121 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2122   __ ldr(kContextRegister,
2123          MemOperand(fp, BaselineFrameConstants::kContextOffset));
2124   return OnStackReplacement(masm, false);
2125 }
2126 
2127 // static
2128 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2129   // ----------- S t a t e -------------
2130   //  -- x0       : argc
2131   //  -- sp[0]    : receiver
2132   //  -- sp[8]    : thisArg  (if argc >= 1)
2133   //  -- sp[16]   : argArray (if argc == 2)
2134   // -----------------------------------
2135 
2136   ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
2137 
2138   Register argc = x0;
2139   Register receiver = x1;
2140   Register arg_array = x2;
2141   Register this_arg = x3;
2142   Register undefined_value = x4;
2143   Register null_value = x5;
2144 
2145   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2146   __ LoadRoot(null_value, RootIndex::kNullValue);
2147 
2148   // 1. Load receiver into x1, argArray into x2 (if present), remove all
2149   // arguments from the stack (including the receiver), and push thisArg (if
2150   // present) instead.
2151   {
2152     Label done;
2153     __ Mov(this_arg, undefined_value);
2154     __ Mov(arg_array, undefined_value);
2155     __ Peek(receiver, 0);
2156     __ Cmp(argc, Immediate(JSParameterCount(1)));
2157     __ B(lt, &done);
2158     __ Peek(this_arg, kSystemPointerSize);
2159     __ B(eq, &done);
2160     __ Peek(arg_array, 2 * kSystemPointerSize);
2161     __ bind(&done);
2162   }
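  // Illustrative example: for fn.apply(thisArg, argArray) the incoming argc is
  // JSParameterCount(2), so both conditional Peeks above execute, leaving
  // receiver == fn, this_arg == thisArg and arg_array == argArray; with fewer
  // arguments the unread values simply stay undefined.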
2163   __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
2164   __ PushArgument(this_arg);
2165 
2166   // ----------- S t a t e -------------
2167   //  -- x2      : argArray
2168   //  -- x1      : receiver
2169   //  -- sp[0]   : thisArg
2170   // -----------------------------------
2171 
2172   // 2. We don't need to check explicitly for callable receiver here,
2173   // since that's the first thing the Call/CallWithArrayLike builtins
2174   // will do.
2175 
2176   // 3. Tail call with no arguments if argArray is null or undefined.
2177   Label no_arguments;
2178   __ CmpTagged(arg_array, null_value);
2179   __ CcmpTagged(arg_array, undefined_value, ZFlag, ne);
2180   __ B(eq, &no_arguments);
2181 
2182   // 4a. Apply the receiver to the given argArray.
2183   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2184           RelocInfo::CODE_TARGET);
2185 
2186   // 4b. The argArray is either null or undefined, so we tail call without any
2187   // arguments to the receiver.
2188   __ Bind(&no_arguments);
2189   {
2190     __ Mov(x0, JSParameterCount(0));
2191     DCHECK_EQ(receiver, x1);
2192     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2193   }
2194 }
2195 
2196 // static
2197 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2198   Register argc = x0;
2199   Register function = x1;
2200 
2201   ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
2202 
2203   // 1. Get the callable to call (passed as receiver) from the stack.
2204   __ Peek(function, __ ReceiverOperand(argc));
2205 
2206   // 2. Handle case with no arguments.
2207   {
2208     Label non_zero;
2209     Register scratch = x10;
2210     __ Cmp(argc, JSParameterCount(0));
2211     __ B(gt, &non_zero);
2212     __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2213     // Overwrite receiver with undefined, which will be the new receiver.
2214     // We do not need to overwrite the padding slot above it with anything.
2215     __ Poke(scratch, 0);
2216     // Call function. The argument count is already zero.
2217     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2218     __ Bind(&non_zero);
2219   }
2220 
2221   Label arguments_ready;
2222   // 3. Shift arguments. The direction depends on whether the argument count
2223   //    is even or odd, i.e. whether a padding slot exists.
2224   {
2225     Label even;
2226     Register copy_from = x10;
2227     Register copy_to = x11;
2228     Register count = x12;
2229     UseScratchRegisterScope temps(masm);
2230     Register argc_without_receiver = temps.AcquireX();
2231     __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
2232 
2233     // CopyDoubleWords changes the count argument.
2234     __ Mov(count, argc_without_receiver);
2235     __ Tbz(argc_without_receiver, 0, &even);
2236 
2237     // Shift arguments one slot down on the stack (overwriting the original
2238     // receiver).
2239     __ SlotAddress(copy_from, 1);
2240     __ Sub(copy_to, copy_from, kSystemPointerSize);
2241     __ CopyDoubleWords(copy_to, copy_from, count);
2242     // Overwrite the duplicated remaining last argument.
2243     __ Poke(padreg, Operand(argc_without_receiver, LSL, kXRegSizeLog2));
2244     __ B(&arguments_ready);
2245 
2246     // Copy arguments one slot higher in memory, overwriting the original
2247     // receiver and padding.
2248     __ Bind(&even);
2249     __ SlotAddress(copy_from, count);
2250     __ Add(copy_to, copy_from, kSystemPointerSize);
2251     __ CopyDoubleWords(copy_to, copy_from, count,
2252                        TurboAssembler::kSrcLessThanDst);
2253     __ Drop(2);
2254   }
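  // Illustrative example: fn.call(a, b) arrives with argc == 3 (fn, a, b plus
  // one padding slot). The count without the receiver is even, so the path
  // above copied a and b one slot higher (b into the padding slot) and
  // Drop(2) removed the two stale bottom slots; after the decrement below, a
  // is the receiver and b the only remaining argument.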
2255 
2256   // 4. Adjust argument count to make the original first argument the new
2257   //    receiver and call the callable.
2258   __ Bind(&arguments_ready);
2259   __ Sub(argc, argc, 1);
2260   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2261 }
2262 
2263 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2264   // ----------- S t a t e -------------
2265   //  -- x0     : argc
2266   //  -- sp[0]  : receiver
2267   //  -- sp[8]  : target         (if argc >= 1)
2268   //  -- sp[16] : thisArgument   (if argc >= 2)
2269   //  -- sp[24] : argumentsList  (if argc == 3)
2270   // -----------------------------------
2271 
2272   ASM_LOCATION("Builtins::Generate_ReflectApply");
2273 
2274   Register argc = x0;
2275   Register arguments_list = x2;
2276   Register target = x1;
2277   Register this_argument = x4;
2278   Register undefined_value = x3;
2279 
2280   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2281 
2282   // 1. Load target into x1 (if present), argumentsList into x2 (if present),
2283   // remove all arguments from the stack (including the receiver), and push
2284   // thisArgument (if present) instead.
2285   {
2286     Label done;
2287     __ Mov(target, undefined_value);
2288     __ Mov(this_argument, undefined_value);
2289     __ Mov(arguments_list, undefined_value);
2290     __ Cmp(argc, Immediate(JSParameterCount(1)));
2291     __ B(lt, &done);
2292     __ Peek(target, kSystemPointerSize);
2293     __ B(eq, &done);
2294     __ Peek(this_argument, 2 * kSystemPointerSize);
2295     __ Cmp(argc, Immediate(JSParameterCount(3)));
2296     __ B(lt, &done);
2297     __ Peek(arguments_list, 3 * kSystemPointerSize);
2298     __ bind(&done);
2299   }
2300   __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
2301   __ PushArgument(this_argument);
2302 
2303   // ----------- S t a t e -------------
2304   //  -- x2      : argumentsList
2305   //  -- x1      : target
2306   //  -- sp[0]   : thisArgument
2307   // -----------------------------------
2308 
2309   // 2. We don't need to check explicitly for callable target here,
2310   // since that's the first thing the Call/CallWithArrayLike builtins
2311   // will do.
2312 
2313   // 3. Apply the target to the given argumentsList.
2314   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2315           RelocInfo::CODE_TARGET);
2316 }
2317 
2318 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2319   // ----------- S t a t e -------------
2320   //  -- x0       : argc
2321   //  -- sp[0]   : receiver
2322   //  -- sp[8]   : target
2323   //  -- sp[16]  : argumentsList
2324   //  -- sp[24]  : new.target (optional)
2325   // -----------------------------------
2326 
2327   ASM_LOCATION("Builtins::Generate_ReflectConstruct");
2328 
2329   Register argc = x0;
2330   Register arguments_list = x2;
2331   Register target = x1;
2332   Register new_target = x3;
2333   Register undefined_value = x4;
2334 
2335   __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2336 
2337   // 1. Load target into x1 (if present), argumentsList into x2 (if present),
2338   // new.target into x3 (if present, otherwise use target), remove all
2339   // arguments from the stack (including the receiver), and push thisArgument
2340   // (if present) instead.
2341   {
2342     Label done;
2343     __ Mov(target, undefined_value);
2344     __ Mov(arguments_list, undefined_value);
2345     __ Mov(new_target, undefined_value);
2346     __ Cmp(argc, Immediate(JSParameterCount(1)));
2347     __ B(lt, &done);
2348     __ Peek(target, kSystemPointerSize);
2349     __ B(eq, &done);
2350     __ Peek(arguments_list, 2 * kSystemPointerSize);
2351     __ Mov(new_target, target);  // new.target defaults to target
2352     __ Cmp(argc, Immediate(JSParameterCount(3)));
2353     __ B(lt, &done);
2354     __ Peek(new_target, 3 * kSystemPointerSize);
2355     __ bind(&done);
2356   }
2357 
2358   __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
2359 
2360   // Push receiver (undefined).
2361   __ PushArgument(undefined_value);
2362 
2363   // ----------- S t a t e -------------
2364   //  -- x2      : argumentsList
2365   //  -- x1      : target
2366   //  -- x3      : new.target
2367   //  -- sp[0]   : receiver (undefined)
2368   // -----------------------------------
2369 
2370   // 2. We don't need to check explicitly for constructor target here,
2371   // since that's the first thing the Construct/ConstructWithArrayLike
2372   // builtins will do.
2373 
2374   // 3. We don't need to check explicitly for constructor new.target here,
2375   // since that's the second thing the Construct/ConstructWithArrayLike
2376   // builtins will do.
2377 
2378   // 4. Construct the target with the given new.target and argumentsList.
2379   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2380           RelocInfo::CODE_TARGET);
2381 }
2382 
2383 namespace {
2384 
2385 // Prepares the stack for copying the varargs. First we claim the necessary
2386 // slots, taking care of potential padding. Then we copy the existing arguments
2387 // one slot up or one slot down, as needed.
2388 void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
2389                                        Register len) {
2390   Label exit, even;
2391   Register slots_to_copy = x10;
2392   Register slots_to_claim = x12;
2393 
2394   __ Mov(slots_to_copy, argc);
2395   __ Mov(slots_to_claim, len);
2396   __ Tbz(slots_to_claim, 0, &even);
2397 
2398   // Claim the space we need. If argc (including the receiver) is even, the
2399   // arguments already on the stack have no padding slot, so slots_to_claim =
2400   // len + 1 to add one. If argc is odd, the existing arguments have a padding
2401   // slot we can reuse (since len is odd here), so slots_to_claim = len - 1.
2402   {
2403     Register scratch = x11;
2404     __ Add(slots_to_claim, len, 1);
2405     __ And(scratch, argc, 1);
2406     __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
2407   }
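  // Illustrative example: len == 5 and argc == 3 give
  // slots_to_claim == 5 + 1 - 2 == 4 (the existing padding slot is reused);
  // with argc == 4 it would be 5 + 1 == 6.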
2408 
2409   __ Bind(&even);
2410   __ Cbz(slots_to_claim, &exit);
2411   __ Claim(slots_to_claim);
2412 
2413   // Move the arguments already in the stack including the receiver.
2414   {
2415     Register src = x11;
2416     Register dst = x12;
2417     __ SlotAddress(src, slots_to_claim);
2418     __ SlotAddress(dst, 0);
2419     __ CopyDoubleWords(dst, src, slots_to_copy);
2420   }
2421   __ Bind(&exit);
2422 }
2423 
2424 }  // namespace
2425 
2426 // static
2427 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2428 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2429                                                Handle<CodeT> code) {
2430   // ----------- S t a t e -------------
2431   //  -- x1 : target
2432   //  -- x0 : number of parameters on the stack
2433   //  -- x2 : arguments list (a FixedArray)
2434   //  -- x4 : len (number of elements to push from args)
2435   //  -- x3 : new.target (for [[Construct]])
2436   // -----------------------------------
2437   if (FLAG_debug_code) {
2438     // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
2439     Label ok, fail;
2440     __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
2441     __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
2442     __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2443     __ Cmp(x13, FIXED_ARRAY_TYPE);
2444     __ B(eq, &ok);
2445     __ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
2446     __ B(ne, &fail);
2447     __ Cmp(x4, 0);
2448     __ B(eq, &ok);
2449     // Fall through.
2450     __ bind(&fail);
2451     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2452 
2453     __ bind(&ok);
2454   }
2455 
2456   Register arguments_list = x2;
2457   Register argc = x0;
2458   Register len = x4;
2459 
2460   Label stack_overflow;
2461   __ StackOverflowCheck(len, &stack_overflow);
2462 
2463   // Skip argument setup if we don't need to push any varargs.
2464   Label done;
2465   __ Cbz(len, &done);
2466 
2467   Generate_PrepareForCopyingVarargs(masm, argc, len);
2468 
2469   // Push varargs.
2470   {
2471     Label loop;
2472     Register src = x10;
2473     Register the_hole_value = x11;
2474     Register undefined_value = x12;
2475     Register scratch = x13;
2476     __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
2477     __ LoadRoot(the_hole_value, RootIndex::kTheHoleValue);
2478     __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2479     // We do not use the CompareRoot macro as it would do a LoadRoot behind the
2480     // scenes and we want to avoid that in a loop.
2481     // TODO(all): Consider using Ldp and Stp.
2482     Register dst = x16;
2483     __ SlotAddress(dst, argc);
2484     __ Add(argc, argc, len);  // Update new argc.
2485     __ Bind(&loop);
2486     __ Sub(len, len, 1);
2487     __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
2488     __ CmpTagged(scratch, the_hole_value);
2489     __ Csel(scratch, scratch, undefined_value, ne);
2490     __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
2491     __ Cbnz(len, &loop);
2492   }
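  // Informally, the loop above performs, for each element:
  //   value = args[i]; push(value == the_hole ? undefined : value);
  // which is why TheHole and Undefined were loaded outside the loop.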
2493   __ Bind(&done);
2494   // Tail-call to the actual Call or Construct builtin.
2495   __ Jump(code, RelocInfo::CODE_TARGET);
2496 
2497   __ bind(&stack_overflow);
2498   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2499 }
2500 
2501 // static
2502 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2503                                                       CallOrConstructMode mode,
2504                                                       Handle<CodeT> code) {
2505   // ----------- S t a t e -------------
2506   //  -- x0 : the number of arguments
2507   //  -- x3 : the new.target (for [[Construct]] calls)
2508   //  -- x1 : the target to call (can be any Object)
2509   //  -- x2 : start index (to support rest parameters)
2510   // -----------------------------------
2511 
2512   Register argc = x0;
2513   Register start_index = x2;
2514 
2515   // Check if new.target has a [[Construct]] internal method.
2516   if (mode == CallOrConstructMode::kConstruct) {
2517     Label new_target_constructor, new_target_not_constructor;
2518     __ JumpIfSmi(x3, &new_target_not_constructor);
2519     __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
2520     __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
2521     __ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
2522                              &new_target_constructor);
2523     __ Bind(&new_target_not_constructor);
2524     {
2525       FrameScope scope(masm, StackFrame::MANUAL);
2526       __ EnterFrame(StackFrame::INTERNAL);
2527       __ PushArgument(x3);
2528       __ CallRuntime(Runtime::kThrowNotConstructor);
2529       __ Unreachable();
2530     }
2531     __ Bind(&new_target_constructor);
2532   }
2533 
2534   Register len = x6;
2535   Label stack_done, stack_overflow;
2536   __ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2537   __ Subs(len, len, kJSArgcReceiverSlots);
2538   __ Subs(len, len, start_index);
2539   __ B(le, &stack_done);
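  // At this point len == caller's argc - receiver slot - start_index, i.e. the
  // number of trailing caller arguments to forward; if none remain we skip
  // straight to the tail call below.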
2540   // Check for stack overflow.
2541   __ StackOverflowCheck(len, &stack_overflow);
2542 
2543   Generate_PrepareForCopyingVarargs(masm, argc, len);
2544 
2545   // Push varargs.
2546   {
2547     Register args_fp = x5;
2548     Register dst = x13;
2549     // Point to the first argument to copy from (skipping receiver).
2550     __ Add(args_fp, fp,
2551            CommonFrameConstants::kFixedFrameSizeAboveFp + kSystemPointerSize);
2552     __ lsl(start_index, start_index, kSystemPointerSizeLog2);
2553     __ Add(args_fp, args_fp, start_index);
2554     // Point to the position to copy to.
2555     __ SlotAddress(dst, argc);
2556     // Update total number of arguments.
2557     __ Add(argc, argc, len);
2558     __ CopyDoubleWords(dst, args_fp, len);
2559   }
2560   __ B(&stack_done);
2561 
2562   __ Bind(&stack_overflow);
2563   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2564   __ Bind(&stack_done);
2565 
2566   __ Jump(code, RelocInfo::CODE_TARGET);
2567 }
2568 
2569 // static
2570 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2571                                      ConvertReceiverMode mode) {
2572   ASM_LOCATION("Builtins::Generate_CallFunction");
2573   // ----------- S t a t e -------------
2574   //  -- x0 : the number of arguments
2575   //  -- x1 : the function to call (checked to be a JSFunction)
2576   // -----------------------------------
2577   __ AssertCallableFunction(x1);
2578 
2579   __ LoadTaggedPointerField(
2580       x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2581 
2582   // Enter the context of the function; ToObject has to run in the function
2583   // context, and we also need to take the global proxy from the function
2584   // context in case of conversion.
2585   __ LoadTaggedPointerField(cp,
2586                             FieldMemOperand(x1, JSFunction::kContextOffset));
2587   // We need to convert the receiver for non-native sloppy mode functions.
2588   Label done_convert;
2589   __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
2590   __ TestAndBranchIfAnySet(w3,
2591                            SharedFunctionInfo::IsNativeBit::kMask |
2592                                SharedFunctionInfo::IsStrictBit::kMask,
2593                            &done_convert);
2594   {
2595     // ----------- S t a t e -------------
2596     //  -- x0 : the number of arguments
2597     //  -- x1 : the function to call (checked to be a JSFunction)
2598     //  -- x2 : the shared function info.
2599     //  -- cp : the function context.
2600     // -----------------------------------
2601 
2602     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2603       // Patch receiver to global proxy.
2604       __ LoadGlobalProxy(x3);
2605     } else {
2606       Label convert_to_object, convert_receiver;
2607       __ Peek(x3, __ ReceiverOperand(x0));
2608       __ JumpIfSmi(x3, &convert_to_object);
2609       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2610       __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
2611       __ B(hs, &done_convert);
2612       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2613         Label convert_global_proxy;
2614         __ JumpIfRoot(x3, RootIndex::kUndefinedValue, &convert_global_proxy);
2615         __ JumpIfNotRoot(x3, RootIndex::kNullValue, &convert_to_object);
2616         __ Bind(&convert_global_proxy);
2617         {
2618           // Patch receiver to global proxy.
2619           __ LoadGlobalProxy(x3);
2620         }
2621         __ B(&convert_receiver);
2622       }
2623       __ Bind(&convert_to_object);
2624       {
2625         // Convert receiver using ToObject.
2626         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2627         // in the fast case? (fall back to AllocateInNewSpace?)
2628         FrameScope scope(masm, StackFrame::INTERNAL);
2629         __ SmiTag(x0);
2630         __ Push(padreg, x0, x1, cp);
2631         __ Mov(x0, x3);
2632         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2633                 RelocInfo::CODE_TARGET);
2634         __ Mov(x3, x0);
2635         __ Pop(cp, x1, x0, padreg);
2636         __ SmiUntag(x0);
2637       }
2638       __ LoadTaggedPointerField(
2639           x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2640       __ Bind(&convert_receiver);
2641     }
2642     __ Poke(x3, __ ReceiverOperand(x0));
2643   }
2644   __ Bind(&done_convert);
2645 
2646   // ----------- S t a t e -------------
2647   //  -- x0 : the number of arguments
2648   //  -- x1 : the function to call (checked to be a JSFunction)
2649   //  -- x2 : the shared function info.
2650   //  -- cp : the function context.
2651   // -----------------------------------
2652 
2653   __ Ldrh(x2,
2654           FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
2655   __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
2656 }
2657 
2658 namespace {
2659 
2660 void Generate_PushBoundArguments(MacroAssembler* masm) {
2661   // ----------- S t a t e -------------
2662   //  -- x0 : the number of arguments
2663   //  -- x1 : target (checked to be a JSBoundFunction)
2664   //  -- x3 : new.target (only in case of [[Construct]])
2665   // -----------------------------------
2666 
2667   Register bound_argc = x4;
2668   Register bound_argv = x2;
2669 
2670   // Load [[BoundArguments]] into x2 and length of that into x4.
2671   Label no_bound_arguments;
2672   __ LoadTaggedPointerField(
2673       bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
2674   __ SmiUntagField(bound_argc,
2675                    FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
2676   __ Cbz(bound_argc, &no_bound_arguments);
2677   {
2678     // ----------- S t a t e -------------
2679     //  -- x0 : the number of arguments
2680     //  -- x1 : target (checked to be a JSBoundFunction)
2681     //  -- x2 : the [[BoundArguments]] (implemented as FixedArray)
2682     //  -- x3 : new.target (only in case of [[Construct]])
2683     //  -- x4 : the number of [[BoundArguments]]
2684     // -----------------------------------
2685 
2686     Register argc = x0;
2687 
2688     // Check for stack overflow.
2689     {
2690       // Check the stack for overflow. We are not trying to catch interruptions
2691       // (i.e. debug break and preemption) here, so check the "real stack
2692       // limit".
2693       Label done;
2694       __ LoadStackLimit(x10, StackLimitKind::kRealStackLimit);
2695       // Make x10 the space we have left. The stack might already be overflowed
2696       // here which will cause x10 to become negative.
2697       __ Sub(x10, sp, x10);
2698       // Check if the arguments will overflow the stack.
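      // (bound_argc << kSystemPointerSizeLog2 is the number of bytes the
      // bound arguments will occupy on the stack.)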
2699       __ Cmp(x10, Operand(bound_argc, LSL, kSystemPointerSizeLog2));
2700       __ B(gt, &done);
2701       __ TailCallRuntime(Runtime::kThrowStackOverflow);
2702       __ Bind(&done);
2703     }
2704 
2705     Label copy_bound_args;
2706     Register total_argc = x15;
2707     Register slots_to_claim = x12;
2708     Register scratch = x10;
2709     Register receiver = x14;
2710 
2711     __ Sub(argc, argc, kJSArgcReceiverSlots);
2712     __ Add(total_argc, argc, bound_argc);
2713     __ Peek(receiver, 0);
2714 
2715     // Round up slots_to_claim to an even number if it is odd.
2716     __ Add(slots_to_claim, bound_argc, 1);
2717     __ Bic(slots_to_claim, slots_to_claim, 1);
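    // (Illustrative: bound_argc == 3 claims 4 slots, bound_argc == 4 claims 4,
    // bound_argc == 5 claims 6; the Add/Bic pair rounds up to an even count.)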
2718     __ Claim(slots_to_claim, kSystemPointerSize);
2719 
2720     __ Tbz(bound_argc, 0, &copy_bound_args);
2721     {
2722       Label argc_even;
2723       __ Tbz(argc, 0, &argc_even);
2724       // Arguments count is odd (with the receiver it's even), so there's no
2725       // alignment padding above the arguments and we have to "add" it. We
2726       // claimed bound_argc + 1, since it is odd and it was rounded up. +1 here
2727       // is for stack alignment padding.
2728       // 1. Shift args one slot down.
2729       {
2730         Register copy_from = x11;
2731         Register copy_to = x12;
2732         __ SlotAddress(copy_to, slots_to_claim);
2733         __ Add(copy_from, copy_to, kSystemPointerSize);
2734         __ CopyDoubleWords(copy_to, copy_from, argc);
2735       }
2736       // 2. Write a padding in the last slot.
2737       __ Add(scratch, total_argc, 1);
2738       __ Str(padreg, MemOperand(sp, scratch, LSL, kSystemPointerSizeLog2));
2739       __ B(&copy_bound_args);
2740 
2741       __ Bind(&argc_even);
2742       // Arguments count is even (with the receiver it's odd), so there's an
2743       // alignment padding above the arguments and we can reuse it. We need to
2744       // claim bound_argc - 1, but we claimed bound_argc + 1, since it is odd
2745       // and it was rounded up.
2746       // 1. Drop 2.
2747       __ Drop(2);
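      // (Illustrative: with bound_argc == 3 we claimed 4 slots above, but only
      // bound_argc - 1 == 2 are needed in this branch, hence the Drop(2).)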
2748       // 2. Shift args one slot up.
2749       {
2750         Register copy_from = x11;
2751         Register copy_to = x12;
2752         __ SlotAddress(copy_to, total_argc);
2753         __ Sub(copy_from, copy_to, kSystemPointerSize);
2754         __ CopyDoubleWords(copy_to, copy_from, argc,
2755                            TurboAssembler::kSrcLessThanDst);
2756       }
2757     }
2758 
2759     // If bound_argc is even, there is no alignment massage to do, and we have
2760     // already claimed the correct number of slots (bound_argc).
2761     __ Bind(&copy_bound_args);
2762 
2763     // Copy the receiver back.
2764     __ Poke(receiver, 0);
2765     // Copy [[BoundArguments]] to the stack (below the receiver).
2766     {
2767       Label loop;
2768       Register counter = bound_argc;
2769       Register copy_to = x12;
2770       __ Add(bound_argv, bound_argv, FixedArray::kHeaderSize - kHeapObjectTag);
2771       __ SlotAddress(copy_to, 1);
2772       __ Bind(&loop);
2773       __ Sub(counter, counter, 1);
2774       __ LoadAnyTaggedField(scratch,
2775                             MemOperand(bound_argv, kTaggedSize, PostIndex));
2776       __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
2777       __ Cbnz(counter, &loop);
2778     }
2779     // Update argc.
2780     __ Add(argc, total_argc, kJSArgcReceiverSlots);
2781   }
2782   __ Bind(&no_bound_arguments);
2783 }
2784 
2785 }  // namespace
2786 
2787 // static
2788 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2789   // ----------- S t a t e -------------
2790   //  -- x0 : the number of arguments
2791   //  -- x1 : the function to call (checked to be a JSBoundFunction)
2792   // -----------------------------------
2793   __ AssertBoundFunction(x1);
2794 
2795   // Patch the receiver to [[BoundThis]].
2796   __ LoadAnyTaggedField(x10,
2797                         FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
2798   __ Poke(x10, __ ReceiverOperand(x0));
2799 
2800   // Push the [[BoundArguments]] onto the stack.
2801   Generate_PushBoundArguments(masm);
2802 
2803   // Call the [[BoundTargetFunction]] via the Call builtin.
2804   __ LoadTaggedPointerField(
2805       x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2806   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2807           RelocInfo::CODE_TARGET);
2808 }
2809 
2810 // static
2811 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2812   // ----------- S t a t e -------------
2813   //  -- x0 : the number of arguments
2814   //  -- x1 : the target to call (can be any Object).
2815   // -----------------------------------
2816   Register argc = x0;
2817   Register target = x1;
2818   Register map = x4;
2819   Register instance_type = x5;
2820   DCHECK(!AreAliased(argc, target, map, instance_type));
2821 
2822   Label non_callable, class_constructor;
2823   __ JumpIfSmi(target, &non_callable);
2824   __ LoadMap(map, target);
2825   __ CompareInstanceTypeRange(map, instance_type,
2826                               FIRST_CALLABLE_JS_FUNCTION_TYPE,
2827                               LAST_CALLABLE_JS_FUNCTION_TYPE);
2828   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2829           RelocInfo::CODE_TARGET, ls);
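  // ('ls' is taken when the instance type falls within the callable JSFunction
  // range compared above, so ordinary callable functions dispatch directly to
  // the CallFunction builtin.)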
2830   __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
2831   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2832           RelocInfo::CODE_TARGET, eq);
2833 
2834   // Check if target has a [[Call]] internal method.
2835   {
2836     Register flags = x4;
2837     __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2838     map = no_reg;
2839     __ TestAndBranchIfAllClear(flags, Map::Bits1::IsCallableBit::kMask,
2840                                &non_callable);
2841   }
2842 
2843   // Check if target is a proxy and call the CallProxy external builtin.
2844   __ Cmp(instance_type, JS_PROXY_TYPE);
2845   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
2846 
2847   // Check if target is a wrapped function and call the CallWrappedFunction
2848   // external builtin.
2849   __ Cmp(instance_type, JS_WRAPPED_FUNCTION_TYPE);
2850   __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2851           RelocInfo::CODE_TARGET, eq);
2852 
2853   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2854   // Check that the function is not a "classConstructor".
2855   __ Cmp(instance_type, JS_CLASS_CONSTRUCTOR_TYPE);
2856   __ B(eq, &class_constructor);
2857 
2858   // 2. Call to something else, which might have a [[Call]] internal method (if
2859   // not we raise an exception).
2860   // Overwrite the original receiver with the (original) target.
2861   __ Poke(target, __ ReceiverOperand(argc));
2862 
2863   // Let the "call_as_function_delegate" take care of the rest.
2864   __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2865   __ Jump(masm->isolate()->builtins()->CallFunction(
2866               ConvertReceiverMode::kNotNullOrUndefined),
2867           RelocInfo::CODE_TARGET);
2868 
2869   // 3. Call to something that is not callable.
2870   __ bind(&non_callable);
2871   {
2872     FrameScope scope(masm, StackFrame::INTERNAL);
2873     __ PushArgument(target);
2874     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2875     __ Unreachable();
2876   }
2877 
2878   // 4. The function is a "classConstructor", need to raise an exception.
2879   __ bind(&class_constructor);
2880   {
2881     FrameScope frame(masm, StackFrame::INTERNAL);
2882     __ PushArgument(target);
2883     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2884     __ Unreachable();
2885   }
2886 }
2887 
2888 // static
2889 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2890   // ----------- S t a t e -------------
2891   //  -- x0 : the number of arguments
2892   //  -- x1 : the constructor to call (checked to be a JSFunction)
2893   //  -- x3 : the new target (checked to be a constructor)
2894   // -----------------------------------
2895   __ AssertConstructor(x1);
2896   __ AssertFunction(x1);
2897 
2898   // The calling convention for function-specific ConstructStubs requires
2899   // x2 to contain either an AllocationSite or undefined.
2900   __ LoadRoot(x2, RootIndex::kUndefinedValue);
2901 
2902   Label call_generic_stub;
2903 
2904   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2905   __ LoadTaggedPointerField(
2906       x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2907   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
2908   __ TestAndBranchIfAllClear(
2909       w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
2910 
2911   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2912           RelocInfo::CODE_TARGET);
2913 
2914   __ bind(&call_generic_stub);
2915   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2916           RelocInfo::CODE_TARGET);
2917 }
2918 
2919 // static
2920 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2921   // ----------- S t a t e -------------
2922   //  -- x0 : the number of arguments
2923   //  -- x1 : the function to call (checked to be a JSBoundFunction)
2924   //  -- x3 : the new target (checked to be a constructor)
2925   // -----------------------------------
2926   __ AssertConstructor(x1);
2927   __ AssertBoundFunction(x1);
2928 
2929   // Push the [[BoundArguments]] onto the stack.
2930   Generate_PushBoundArguments(masm);
2931 
2932   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2933   {
2934     Label done;
2935     __ CmpTagged(x1, x3);
2936     __ B(ne, &done);
2937     __ LoadTaggedPointerField(
2938         x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2939     __ Bind(&done);
2940   }
2941 
2942   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2943   __ LoadTaggedPointerField(
2944       x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2945   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2946 }
2947 
2948 // static
2949 void Builtins::Generate_Construct(MacroAssembler* masm) {
2950   // ----------- S t a t e -------------
2951   //  -- x0 : the number of arguments
2952   //  -- x1 : the constructor to call (can be any Object)
2953   //  -- x3 : the new target (either the same as the constructor or
2954   //          the JSFunction on which new was invoked initially)
2955   // -----------------------------------
2956   Register argc = x0;
2957   Register target = x1;
2958   Register map = x4;
2959   Register instance_type = x5;
2960   DCHECK(!AreAliased(argc, target, map, instance_type));
2961 
2962   // Check if target is a Smi.
2963   Label non_constructor, non_proxy;
2964   __ JumpIfSmi(target, &non_constructor);
2965 
2966   // Check if target has a [[Construct]] internal method.
2967   __ LoadTaggedPointerField(map,
2968                             FieldMemOperand(target, HeapObject::kMapOffset));
2969   {
2970     Register flags = x2;
2971     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
2972     __ Ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2973     __ TestAndBranchIfAllClear(flags, Map::Bits1::IsConstructorBit::kMask,
2974                                &non_constructor);
2975   }
2976 
2977   // Dispatch based on instance type.
2978   __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
2979                               LAST_JS_FUNCTION_TYPE);
2980   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2981           RelocInfo::CODE_TARGET, ls);
2982 
2983   // Only dispatch to bound functions after checking whether they are
2984   // constructors.
2985   __ Cmp(instance_type, JS_BOUND_FUNCTION_TYPE);
2986   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2987           RelocInfo::CODE_TARGET, eq);
2988 
2989   // Only dispatch to proxies after checking whether they are constructors.
2990   __ Cmp(instance_type, JS_PROXY_TYPE);
2991   __ B(ne, &non_proxy);
2992   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2993           RelocInfo::CODE_TARGET);
2994 
2995   // Called Construct on an exotic Object with a [[Construct]] internal method.
2996   __ bind(&non_proxy);
2997   {
2998     // Overwrite the original receiver with the (original) target.
2999     __ Poke(target, __ ReceiverOperand(argc));
3000 
3001     // Let the "call_as_constructor_delegate" take care of the rest.
3002     __ LoadNativeContextSlot(target,
3003                              Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
3004     __ Jump(masm->isolate()->builtins()->CallFunction(),
3005             RelocInfo::CODE_TARGET);
3006   }
3007 
3008   // Called Construct on an Object that doesn't have a [[Construct]] internal
3009   // method.
3010   __ bind(&non_constructor);
3011   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
3012           RelocInfo::CODE_TARGET);
3013 }
3014 
3015 #if V8_ENABLE_WEBASSEMBLY
3016 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
3017   // The function index was put in w8 by the jump table trampoline.
3018   // Sign extend and convert to Smi for the runtime call.
3019   __ sxtw(kWasmCompileLazyFuncIndexRegister,
3020           kWasmCompileLazyFuncIndexRegister.W());
3021   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
3022 
3023   // Compute register lists for parameters to be saved. We save all parameter
3024   // registers (see wasm-linkage.h). They might be overwritten in the runtime
3025   // call below. We don't have any callee-saved registers in wasm, so no need to
3026   // store anything else.
3027   constexpr RegList kSavedGpRegs = ([]() constexpr {
3028     RegList saved_gp_regs;
3029     for (Register gp_param_reg : wasm::kGpParamRegisters) {
3030       saved_gp_regs.set(gp_param_reg);
3031     }
3032     // Also push x1, because we must push multiples of 16 bytes (see
3033     // {TurboAssembler::PushCPURegList}).
3034     saved_gp_regs.set(x1);
3035     // All set registers were unique.
3036     CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters) + 1);
3037     // We push a multiple of 16 bytes.
3038     CHECK_EQ(0, saved_gp_regs.Count() % 2);
3039     // The Wasm instance must be part of the saved registers.
3040     CHECK(saved_gp_regs.has(kWasmInstanceRegister));
3041     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
3042              saved_gp_regs.Count());
3043     return saved_gp_regs;
3044   })();
3045 
3046   constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
3047     DoubleRegList saved_fp_regs;
3048     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
3049       saved_fp_regs.set(fp_param_reg);
3050     }
3051 
3052     CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
3053     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
3054              saved_fp_regs.Count());
3055     return saved_fp_regs;
3056   })();
3057 
3058   {
3059     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
3060     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
3061 
3062     // Save registers that we need to keep alive across the runtime call.
3063     __ PushXRegList(kSavedGpRegs);
3064     __ PushQRegList(kSavedFpRegs);
3065 
3066     // Pass instance and function index as explicit arguments to the runtime
3067     // function.
3068     __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
3069     // Initialize the JavaScript context with 0. CEntry will use it to
3070     // set the current context on the isolate.
3071     __ Mov(cp, Smi::zero());
3072     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
3073 
3074     // Untag the returned Smi into x17, for later use.
3075     static_assert(!kSavedGpRegs.has(x17));
3076     __ SmiUntag(x17, kReturnRegister0);
3077 
3078     // Restore registers.
3079     __ PopQRegList(kSavedFpRegs);
3080     __ PopXRegList(kSavedGpRegs);
3081   }
3082 
3083   // The runtime function returned the jump table slot offset as a Smi (now in
3084   // x17). Use that to compute the jump target.
3085   static_assert(!kSavedGpRegs.has(x18));
3086   __ ldr(x18, MemOperand(
3087                   kWasmInstanceRegister,
3088                   WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
3089   __ add(x17, x18, Operand(x17));
3090   // Finally, jump to the jump table slot for the function.
3091   __ Jump(x17);
3092 }
3093 
3094 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
3095   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
3096   {
3097     FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
3098 
3099     // Save all parameter registers. They might hold live values, we restore
3100     // them after the runtime call.
3101     __ PushXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
3102     __ PushQRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
3103 
3104     // Initialize the JavaScript context with 0. CEntry will use it to
3105     // set the current context on the isolate.
3106     __ Move(cp, Smi::zero());
3107     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
3108 
3109     // Restore registers.
3110     __ PopQRegList(WasmDebugBreakFrameConstants::kPushedFpRegs);
3111     __ PopXRegList(WasmDebugBreakFrameConstants::kPushedGpRegs);
3112   }
3113   __ Ret();
3114 }
3115 
3116 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
3117   // TODO(v8:10701): Implement for this platform.
3118   __ Trap();
3119 }
3120 
3121 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
3122   // TODO(v8:12191): Implement for this platform.
3123   __ Trap();
3124 }
3125 
3126 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
3127   // TODO(v8:12191): Implement for this platform.
3128   __ Trap();
3129 }
3130 
3131 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
3132   // TODO(v8:12191): Implement for this platform.
3133   __ Trap();
3134 }
3135 
3136 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
3137   // Only needed on x64.
3138   __ Trap();
3139 }
3140 #endif  // V8_ENABLE_WEBASSEMBLY
3141 
3142 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
3143                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
3144                                bool builtin_exit_frame) {
3145   // The Abort mechanism relies on CallRuntime, which in turn relies on
3146   // CEntry, so until this stub has been generated, we have to use a
3147   // fall-back Abort mechanism.
3148   //
3149   // Note that this stub must be generated before any use of Abort.
3150   HardAbortScope hard_aborts(masm);
3151 
3152   ASM_LOCATION("CEntry::Generate entry");
3153 
3154   // Register parameters:
3155   //    x0: argc (including receiver, untagged)
3156   //    x1: target
3157   // If argv_mode == ArgvMode::kRegister:
3158   //    x11: argv (pointer to first argument)
3159   //
3160   // The stack on entry holds the arguments and the receiver, with the receiver
3161   // at the highest address:
3162   //
3163   //    sp[argc-1]: receiver
3164   //    sp[argc-2]: arg[argc-2]
3165   //    ...           ...
3166   //    sp[1]:      arg[1]
3167   //    sp[0]:      arg[0]
3168   //
3169   // The arguments are in reverse order, so that arg[argc-2] is actually the
3170   // first argument to the target function and arg[0] is the last.
3171   const Register& argc_input = x0;
3172   const Register& target_input = x1;
3173 
3174   // Calculate argv, argc and the target address, and store them in
3175   // callee-saved registers so we can retry the call without having to reload
3176   // these arguments.
3177   // TODO(jbramley): If the first call attempt succeeds in the common case (as
3178   // it should), then we might be better off putting these parameters directly
3179   // into their argument registers, rather than using callee-saved registers and
3180   // preserving them on the stack.
3181   const Register& argv = x21;
3182   const Register& argc = x22;
3183   const Register& target = x23;
3184 
3185   // Derive argv from the stack pointer so that it points to the first argument
3186   // (arg[argc-2]), or just below the receiver in case there are no arguments.
3187   //  - Adjust for the arg[] array.
3188   Register temp_argv = x11;
3189   if (argv_mode == ArgvMode::kStack) {
3190     __ SlotAddress(temp_argv, x0);
3191     //  - Adjust for the receiver.
3192     __ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize);
3193   }
3194 
3195   // Reserve three slots to preserve x21-x23 callee-saved registers.
3196   int extra_stack_space = 3;
3197   // Enter the exit frame.
3198   FrameScope scope(masm, StackFrame::MANUAL);
3199   __ EnterExitFrame(
3200       save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space,
3201       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3202 
3203   // Poke callee-saved registers into reserved space.
3204   __ Poke(argv, 1 * kSystemPointerSize);
3205   __ Poke(argc, 2 * kSystemPointerSize);
3206   __ Poke(target, 3 * kSystemPointerSize);
3207 
3208   // We normally only keep tagged values in callee-saved registers, as they
3209   // could be pushed onto the stack by called stubs and functions, and on the
3210   // stack they can confuse the GC. However, we're only calling C functions
3211   // which can push arbitrary data onto the stack anyway, and so the GC won't
3212   // examine that part of the stack.
3213   __ Mov(argc, argc_input);
3214   __ Mov(target, target_input);
3215   __ Mov(argv, temp_argv);
3216 
3217   // x21 : argv
3218   // x22 : argc
3219   // x23 : call target
3220   //
3221   // The stack (on entry) holds the arguments and the receiver, with the
3222   // receiver at the highest address:
3223   //
3224   //         argv[8]:     receiver
3225   // argv -> argv[0]:     arg[argc-2]
3226   //         ...          ...
3227   //         argv[...]:   arg[1]
3228   //         argv[...]:   arg[0]
3229   //
3230   // Immediately below (after) this is the exit frame, as constructed by
3231   // EnterExitFrame:
3232   //         fp[8]:    CallerPC (lr)
3233   //   fp -> fp[0]:    CallerFP (old fp)
3234   //         fp[-8]:   Space reserved for SPOffset.
3235   //         fp[-16]:  CodeObject()
3236   //         sp[...]:  Saved doubles, if saved_doubles is true.
3237   //         sp[32]:   Alignment padding, if necessary.
3238   //         sp[24]:   Preserved x23 (used for target).
3239   //         sp[16]:   Preserved x22 (used for argc).
3240   //         sp[8]:    Preserved x21 (used for argv).
3241   //   sp -> sp[0]:    Space reserved for the return address.
3242   //
3243   // After a successful call, the exit frame, preserved registers (x21-x23) and
3244   // the arguments (including the receiver) are dropped or popped as
3245   // appropriate. The stub then returns.
3246   //
3247   // After an unsuccessful call, the exit frame and suchlike are left
3248   // untouched, and the stub throws an exception by jumping to the
3249   // exception_returned label.
3250 
3251   // Prepare AAPCS64 arguments to pass to the builtin.
3252   __ Mov(x0, argc);
3253   __ Mov(x1, argv);
3254   __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
3255 
3256   __ StoreReturnAddressAndCall(target);
3257 
3258   // Result returned in x0 or x1:x0 - do not destroy these registers!
3259 
3260   //  x0    result0      The return code from the call.
3261   //  x1    result1      For calls which return ObjectPair.
3262   //  x21   argv
3263   //  x22   argc
3264   //  x23   target
3265   const Register& result = x0;
3266 
3267   // Check result for exception sentinel.
3268   Label exception_returned;
3269   __ CompareRoot(result, RootIndex::kException);
3270   __ B(eq, &exception_returned);
3271 
3272   // The call succeeded, so unwind the stack and return.
3273 
3274   // Restore callee-saved registers x21-x23.
3275   __ Mov(x11, argc);
3276 
3277   __ Peek(argv, 1 * kSystemPointerSize);
3278   __ Peek(argc, 2 * kSystemPointerSize);
3279   __ Peek(target, 3 * kSystemPointerSize);
3280 
3281   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9);
3282   if (argv_mode == ArgvMode::kStack) {
3283     // Drop the remaining stack slots and return from the stub.
3284     __ DropArguments(x11);
3285   }
3286   __ AssertFPCRState();
3287   __ Ret();
3288 
3289   // Handling of exception.
3290   __ Bind(&exception_returned);
3291 
3292   ExternalReference pending_handler_context_address = ExternalReference::Create(
3293       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3294   ExternalReference pending_handler_entrypoint_address =
3295       ExternalReference::Create(
3296           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3297   ExternalReference pending_handler_fp_address = ExternalReference::Create(
3298       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3299   ExternalReference pending_handler_sp_address = ExternalReference::Create(
3300       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3301 
3302   // Ask the runtime for help to determine the handler. This will set x0 to
3303   // contain the current pending exception, don't clobber it.
3304   ExternalReference find_handler =
3305       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
3306   {
3307     FrameScope scope(masm, StackFrame::MANUAL);
3308     __ Mov(x0, 0);  // argc.
3309     __ Mov(x1, 0);  // argv.
3310     __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
3311     __ CallCFunction(find_handler, 3);
3312   }
3313 
3314   // Retrieve the handler context, SP and FP.
3315   __ Mov(cp, pending_handler_context_address);
3316   __ Ldr(cp, MemOperand(cp));
3317   {
3318     UseScratchRegisterScope temps(masm);
3319     Register scratch = temps.AcquireX();
3320     __ Mov(scratch, pending_handler_sp_address);
3321     __ Ldr(scratch, MemOperand(scratch));
3322     __ Mov(sp, scratch);
3323   }
3324   __ Mov(fp, pending_handler_fp_address);
3325   __ Ldr(fp, MemOperand(fp));
3326 
3327   // If the handler is a JS frame, restore the context to the frame. Note that
3328   // the context will be set to (cp == 0) for non-JS frames.
3329   Label not_js_frame;
3330   __ Cbz(cp, &not_js_frame);
3331   __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3332   __ Bind(&not_js_frame);
3333 
3334   {
3335     // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3336     UseScratchRegisterScope temps(masm);
3337     Register scratch = temps.AcquireX();
3338     __ Mov(scratch, ExternalReference::Create(
3339                         IsolateAddressId::kCEntryFPAddress, masm->isolate()));
3340     __ Str(xzr, MemOperand(scratch));
3341   }
3342 
3343   // Compute the handler entry address and jump to it. We use x17 here for the
3344   // jump target, as this jump can occasionally end up at the start of
3345   // InterpreterEnterAtBytecode, which when CFI is enabled starts with
3346   // a "BTI c".
3347   UseScratchRegisterScope temps(masm);
3348   temps.Exclude(x17);
3349   __ Mov(x17, pending_handler_entrypoint_address);
3350   __ Ldr(x17, MemOperand(x17));
3351   __ Br(x17);
3352 }
3353 
3354 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3355   Label done;
3356   Register result = x7;
3357 
3358   DCHECK(result.Is64Bits());
3359 
3360   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
3361   UseScratchRegisterScope temps(masm);
3362   Register scratch1 = temps.AcquireX();
3363   Register scratch2 = temps.AcquireX();
3364   DoubleRegister double_scratch = temps.AcquireD();
3365 
3366   // Account for saved regs.
3367   const int kArgumentOffset = 2 * kSystemPointerSize;
3368 
3369   __ Push(result, scratch1);  // scratch1 is also pushed to preserve alignment.
3370   __ Peek(double_scratch, kArgumentOffset);
3371 
3372   // Try to convert with a FPU convert instruction.  This handles all
3373   // non-saturating cases.
3374   __ TryConvertDoubleToInt64(result, double_scratch, &done);
3375   __ Fmov(result, double_scratch);
3376 
3377   // If we reach here we need to manually convert the input to an int32.
3378 
3379   // Extract the exponent.
3380   Register exponent = scratch1;
3381   __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
3382           HeapNumber::kExponentBits);
3383 
3384   // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
3385   // the mantissa gets shifted completely out of the int32_t result.
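  // (Illustrative: the value is mantissa * 2^(exponent - kMantissaBits), so
  // once exponent - 52 >= 32 every mantissa bit lands above bit 31 and the
  // low 32 bits of the result are zero.)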
3386   __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
3387   __ CzeroX(result, ge);
3388   __ B(ge, &done);
3389 
3390   // The Fcvtzs sequence handles all cases except where the conversion causes
3391   // signed overflow in the int64_t target. Since we've already handled
3392   // exponents >= 84, we can guarantee that 63 <= exponent < 84.
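  // (Signed int64 overflow implies |value| >= 2^63, which is why the exponent
  // is known to be at least 63 at this point.)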
3393 
3394   if (FLAG_debug_code) {
3395     __ Cmp(exponent, HeapNumber::kExponentBias + 63);
3396     // Exponents less than this should have been handled by the Fcvt case.
3397     __ Check(ge, AbortReason::kUnexpectedValue);
3398   }
3399 
3400   // Isolate the mantissa bits, and set the implicit '1'.
3401   Register mantissa = scratch2;
3402   __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
3403   __ Orr(mantissa, mantissa, 1ULL << HeapNumber::kMantissaBits);
3404 
3405   // Negate the mantissa if necessary.
3406   __ Tst(result, kXSignMask);
3407   __ Cneg(mantissa, mantissa, ne);
3408 
3409   // Shift the mantissa bits in the correct place. We know that we have to shift
3410   // it left here, because exponent >= 63 >= kMantissaBits.
3411   __ Sub(exponent, exponent,
3412          HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
3413   __ Lsl(result, mantissa, exponent);
3414 
3415   __ Bind(&done);
3416   __ Poke(result, kArgumentOffset);
3417   __ Pop(scratch1, result);
3418   __ Ret();
3419 }
3420 
3421 namespace {
3422 
3423 // The number of registers that CallApiFunctionAndReturn will need to save on
3424 // the stack. The space for these registers needs to be allocated in the
3425 // ExitFrame before calling CallApiFunctionAndReturn.
3426 constexpr int kCallApiFunctionSpillSpace = 4;
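// (These four slots are used by CallApiFunctionAndReturn below to spill the
// callee-saved registers x19-x22 around the API call.)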
3427 
3428 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3429   return static_cast<int>(ref0.address() - ref1.address());
3430 }
3431 
3432 // Calls an API function. Allocates HandleScope, extracts returned value
3433 // from handle and propagates exceptions.
3434 // 'stack_space' is the space to be unwound on exit (includes the call JS
3435 // arguments space and the additional space allocated for the fast call).
3436 // 'spill_offset' is the offset from the stack pointer where
3437 // CallApiFunctionAndReturn can spill registers.
3438 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3439                               ExternalReference thunk_ref, int stack_space,
3440                               MemOperand* stack_space_operand, int spill_offset,
3441                               MemOperand return_value_operand) {
3442   ASM_CODE_COMMENT(masm);
3443   ASM_LOCATION("CallApiFunctionAndReturn");
3444   Isolate* isolate = masm->isolate();
3445   ExternalReference next_address =
3446       ExternalReference::handle_scope_next_address(isolate);
3447   const int kNextOffset = 0;
3448   const int kLimitOffset = AddressOffset(
3449       ExternalReference::handle_scope_limit_address(isolate), next_address);
3450   const int kLevelOffset = AddressOffset(
3451       ExternalReference::handle_scope_level_address(isolate), next_address);
3452 
3453   DCHECK(function_address == x1 || function_address == x2);
3454 
3455   Label profiler_enabled, end_profiler_check;
3456   __ Mov(x10, ExternalReference::is_profiling_address(isolate));
3457   __ Ldrb(w10, MemOperand(x10));
3458   __ Cbnz(w10, &profiler_enabled);
3459   __ Mov(x10, ExternalReference::address_of_runtime_stats_flag());
3460   __ Ldrsw(w10, MemOperand(x10));
3461   __ Cbnz(w10, &profiler_enabled);
3462   {
3463     // Call the api function directly.
3464     __ Mov(x3, function_address);
3465     __ B(&end_profiler_check);
3466   }
3467   __ Bind(&profiler_enabled);
3468   {
3469     // Additional parameter is the address of the actual callback.
3470     __ Mov(x3, thunk_ref);
3471   }
3472   __ Bind(&end_profiler_check);
3473 
3474   // Save the callee-save registers we are going to use.
3475   // TODO(all): Is this necessary? ARM doesn't do it.
3476   STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
3477   __ Poke(x19, (spill_offset + 0) * kXRegSize);
3478   __ Poke(x20, (spill_offset + 1) * kXRegSize);
3479   __ Poke(x21, (spill_offset + 2) * kXRegSize);
3480   __ Poke(x22, (spill_offset + 3) * kXRegSize);
3481 
3482   // Allocate HandleScope in callee-save registers.
3483   // We will need to restore the HandleScope after the call to the API function;
3484   // by allocating it in callee-saved registers it will be preserved by C code.
3485   Register handle_scope_base = x22;
3486   Register next_address_reg = x19;
3487   Register limit_reg = x20;
3488   Register level_reg = w21;
3489 
3490   __ Mov(handle_scope_base, next_address);
3491   __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
3492   __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
3493   __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3494   __ Add(level_reg, level_reg, 1);
3495   __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3496 
3497   __ Mov(x10, x3);  // TODO(arm64): Load target into x10 directly.
3498   __ StoreReturnAddressAndCall(x10);
3499 
3500   Label promote_scheduled_exception;
3501   Label delete_allocated_handles;
3502   Label leave_exit_frame;
3503   Label return_value_loaded;
3504 
3505   // Load value from ReturnValue.
3506   __ Ldr(x0, return_value_operand);
3507   __ Bind(&return_value_loaded);
3508   // No more valid handles (the result handle was the last one). Restore
3509   // previous handle scope.
3510   __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
3511   if (FLAG_debug_code) {
3512     __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
3513     __ Cmp(w1, level_reg);
3514     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
3515   }
3516   __ Sub(level_reg, level_reg, 1);
3517   __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3518   __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
3519   __ Cmp(limit_reg, x1);
3520   __ B(ne, &delete_allocated_handles);
3521 
3522   // Leave the API exit frame.
3523   __ Bind(&leave_exit_frame);
3524   // Restore callee-saved registers.
3525   __ Peek(x19, (spill_offset + 0) * kXRegSize);
3526   __ Peek(x20, (spill_offset + 1) * kXRegSize);
3527   __ Peek(x21, (spill_offset + 2) * kXRegSize);
3528   __ Peek(x22, (spill_offset + 3) * kXRegSize);
3529 
3530   if (stack_space_operand != nullptr) {
3531     DCHECK_EQ(stack_space, 0);
3532     // Load the number of stack slots to drop before LeaveExitFrame modifies sp.
3533     __ Ldr(x19, *stack_space_operand);
3534   }
3535 
3536   __ LeaveExitFrame(false, x1, x5);
3537 
3538   // Check if the function scheduled an exception.
3539   __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
3540   __ Ldr(x5, MemOperand(x5));
3541   __ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception);
3542 
3543   if (stack_space_operand == nullptr) {
3544     DCHECK_NE(stack_space, 0);
3545     __ DropSlots(stack_space);
3546   } else {
3547     DCHECK_EQ(stack_space, 0);
3548     __ DropArguments(x19);
3549   }
3550 
3551   __ Ret();
3552 
3553   // Re-throw by promoting a scheduled exception.
3554   __ Bind(&promote_scheduled_exception);
3555   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3556 
3557   // HandleScope limit has changed. Delete allocated extensions.
3558   __ Bind(&delete_allocated_handles);
3559   __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
3560   // Save the return value in a callee-save register.
3561   Register saved_result = x19;
3562   __ Mov(saved_result, x0);
3563   __ Mov(x0, ExternalReference::isolate_address(isolate));
3564   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3565   __ Mov(x0, saved_result);
3566   __ B(&leave_exit_frame);
3567 }
3568 
3569 }  // namespace
3570 
3571 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3572   // ----------- S t a t e -------------
3573   //  -- cp                  : context
3574   //  -- x1                  : api function address
3575   //  -- x2                  : arguments count (not including the receiver)
3576   //  -- x3                  : call data
3577   //  -- x0                  : holder
3578   //  -- sp[0]               : receiver
3579   //  -- sp[8]               : first argument
3580   //  -- ...
3581   //  -- sp[(argc) * 8]      : last argument
3582   // -----------------------------------
3583 
3584   Register api_function_address = x1;
3585   Register argc = x2;
3586   Register call_data = x3;
3587   Register holder = x0;
3588   Register scratch = x4;
3589 
3590   DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
3591 
3592   using FCA = FunctionCallbackArguments;
3593 
3594   STATIC_ASSERT(FCA::kArgsLength == 6);
3595   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3596   STATIC_ASSERT(FCA::kDataIndex == 4);
3597   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3598   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3599   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3600   STATIC_ASSERT(FCA::kHolderIndex == 0);
3601 
3602   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3603   //
3604   // Target state:
3605   //   sp[0 * kSystemPointerSize]: kHolder
3606   //   sp[1 * kSystemPointerSize]: kIsolate
3607   //   sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3608   //   sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3609   //   sp[4 * kSystemPointerSize]: kData
3610   //   sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3611 
3612   // Reserve space on the stack.
3613   __ Claim(FCA::kArgsLength, kSystemPointerSize);
3614 
3615   // kHolder.
3616   __ Str(holder, MemOperand(sp, 0 * kSystemPointerSize));
3617 
3618   // kIsolate.
3619   __ Mov(scratch, ExternalReference::isolate_address(masm->isolate()));
3620   __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3621 
3622   // kReturnValueDefaultValue and kReturnValue.
3623   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3624   __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3625   __ Str(scratch, MemOperand(sp, 3 * kSystemPointerSize));
3626 
3627   // kData.
3628   __ Str(call_data, MemOperand(sp, 4 * kSystemPointerSize));
3629 
3630   // kNewTarget.
3631   __ Str(scratch, MemOperand(sp, 5 * kSystemPointerSize));
3632 
3633   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3634   // We use it below to set up the FunctionCallbackInfo object.
3635   __ Mov(scratch, sp);
3636 
3637   // Allocate the v8::Arguments structure in the arguments' space, since it's
3638   // not controlled by GC.
3639   static constexpr int kApiStackSpace = 4;
3640   static constexpr bool kDontSaveDoubles = false;
3641 
3642   FrameScope frame_scope(masm, StackFrame::MANUAL);
3643   __ EnterExitFrame(kDontSaveDoubles, x10,
3644                     kApiStackSpace + kCallApiFunctionSpillSpace);
3645 
3646   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3647   // Arguments are after the return address (pushed by EnterExitFrame()).
3648   __ Str(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3649 
3650   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3651   // on the stack).
3652   __ Add(scratch, scratch,
3653          Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3654   __ Str(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3655 
3656   // FunctionCallbackInfo::length_.
3657   __ Str(argc, MemOperand(sp, 3 * kSystemPointerSize));
3658 
3659   // We also store the number of slots to drop from the stack after returning
3660   // from the API function here.
3661   // Note: Unlike on other architectures, this stores the number of slots to
3662   // drop, not the number of bytes. arm64 must always drop a slot count that is
3663   // a multiple of two, and related helper functions (DropArguments) expect a
3664   // register containing the slot count.
3665   __ Add(scratch, argc, Operand(FCA::kArgsLength + 1 /*receiver*/));
3666   __ Str(scratch, MemOperand(sp, 4 * kSystemPointerSize));
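  // (Illustrative: with three JS arguments the stored count is
  // 3 + FCA::kArgsLength + 1 == 10 slots.)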
3667 
3668   // v8::InvocationCallback's argument.
3669   DCHECK(!AreAliased(x0, api_function_address));
3670   __ add(x0, sp, Operand(1 * kSystemPointerSize));
3671 
3672   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3673 
3674   // The current frame needs to be aligned.
3675   DCHECK_EQ(FCA::kArgsLength % 2, 0);
3676 
3677   // There are two stack slots above the arguments we constructed on the stack.
3678   // TODO(jgruber): Document what these arguments are.
3679   static constexpr int kStackSlotsAboveFCA = 2;
3680   MemOperand return_value_operand(
3681       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3682 
3683   static constexpr int kSpillOffset = 1 + kApiStackSpace;
3684   static constexpr int kUseStackSpaceOperand = 0;
3685   MemOperand stack_space_operand(sp, 4 * kSystemPointerSize);
3686 
3687   AllowExternalCallThatCantCauseGC scope(masm);
3688   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3689                            kUseStackSpaceOperand, &stack_space_operand,
3690                            kSpillOffset, return_value_operand);
3691 }
3692 
3693 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3694   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3695   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3696   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3697   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3698   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3699   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3700   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3701   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3702 
3703   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3704   Register holder = ApiGetterDescriptor::HolderRegister();
3705   Register callback = ApiGetterDescriptor::CallbackRegister();
3706   Register data = x4;
3707   Register undef = x5;
3708   Register isolate_address = x6;
3709   Register name = x7;
3710   DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
3711                      name));
3712 
3713   __ LoadAnyTaggedField(data,
3714                         FieldMemOperand(callback, AccessorInfo::kDataOffset));
3715   __ LoadRoot(undef, RootIndex::kUndefinedValue);
3716   __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
3717   __ LoadTaggedPointerField(
3718       name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3719 
3720   // PropertyCallbackArguments:
3721   //   receiver, data, return value, return value default, isolate, holder,
3722   //   should_throw_on_error
3723   // These are followed by the property name, which is also pushed below the
3724   // exit frame to make the GC aware of it.
3725   __ Push(receiver, data, undef, undef, isolate_address, holder, xzr, name);
3726 
3727   // v8::PropertyCallbackInfo::args_ array and name handle.
3728   static const int kStackUnwindSpace =
3729       PropertyCallbackArguments::kArgsLength + 1;
3730   static_assert(kStackUnwindSpace % 2 == 0,
3731                 "slots must be a multiple of 2 for stack pointer alignment");
3732 
3733   // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3734   __ Mov(x0, sp);                          // x0 = Handle<Name>
3735   __ Add(x1, x0, 1 * kSystemPointerSize);  // x1 = v8::PCI::args_
3736 
3737   const int kApiStackSpace = 1;
3738 
3739   FrameScope frame_scope(masm, StackFrame::MANUAL);
3740   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
3741 
3742   // Create v8::PropertyCallbackInfo object on the stack and initialize
3743   // its args_ field.
3744   __ Poke(x1, 1 * kSystemPointerSize);
3745   __ SlotAddress(x1, 1);
3746   // x1 = v8::PropertyCallbackInfo&
3747 
3748   ExternalReference thunk_ref =
3749       ExternalReference::invoke_accessor_getter_callback();
3750 
3751   Register api_function_address = x2;
3752   Register js_getter = x4;
3753   __ LoadTaggedPointerField(
3754       js_getter, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3755 
3756   __ LoadExternalPointerField(
3757       api_function_address,
3758       FieldMemOperand(js_getter, Foreign::kForeignAddressOffset),
3759       kForeignForeignAddressTag);
3760 
3761   const int spill_offset = 1 + kApiStackSpace;
3762   // +3 is to skip prolog, return address and name handle.
3763   MemOperand return_value_operand(
3764       fp,
3765       (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3766   MemOperand* const kUseStackSpaceConstant = nullptr;
3767   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3768                            kStackUnwindSpace, kUseStackSpaceConstant,
3769                            spill_offset, return_value_operand);
3770 }
3771 
3772 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3773   // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3774   // purpose Code object) to be able to call into C functions that may trigger
3775   // GC and thus move the caller.
3776   //
3777   // DirectCEntry places the return address on the stack (updated by the GC),
3778   // making the call GC safe. The irregexp backend relies on this.
3779 
3780   __ Poke<TurboAssembler::kSignLR>(lr, 0);  // Store the return address.
3781   __ Blr(x10);                              // Call the C++ function.
3782   __ Peek<TurboAssembler::kAuthLR>(lr, 0);  // Return to calling code.
3783   __ AssertFPCRState();
3784   __ Ret();
3785 }
3786 
3787 namespace {
3788 
3789 void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
3790                         int dst_offset, const CPURegList& reg_list,
3791                         const Register& temp0, const Register& temp1,
3792                         int src_offset = 0) {
3793   ASM_CODE_COMMENT(masm);
3794   DCHECK_EQ(reg_list.Count() % 2, 0);
3795   UseScratchRegisterScope temps(masm);
3796   CPURegList copy_to_input = reg_list;
3797   int reg_size = reg_list.RegisterSizeInBytes();
3798   DCHECK_EQ(temp0.SizeInBytes(), reg_size);
3799   DCHECK_EQ(temp1.SizeInBytes(), reg_size);
3800 
3801   // Compute some temporary addresses to avoid having the macro assembler set
3802   // up a temp with an offset for accesses out of the range of the addressing
3803   // mode.
3804   Register src = temps.AcquireX();
3805   masm->Add(src, sp, src_offset);
3806   masm->Add(dst, dst, dst_offset);
3807 
3808   // Write reg_list into the frame pointed to by dst.
3809   for (int i = 0; i < reg_list.Count(); i += 2) {
3810     masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
3811 
3812     CPURegister reg0 = copy_to_input.PopLowestIndex();
3813     CPURegister reg1 = copy_to_input.PopLowestIndex();
3814     int offset0 = reg0.code() * reg_size;
3815     int offset1 = reg1.code() * reg_size;
3816 
3817     // Pair up adjacent stores, otherwise write them separately.
3818     if (offset1 == offset0 + reg_size) {
3819       masm->Stp(temp0, temp1, MemOperand(dst, offset0));
3820     } else {
3821       masm->Str(temp0, MemOperand(dst, offset0));
3822       masm->Str(temp1, MemOperand(dst, offset1));
3823     }
3824   }
3825   masm->Sub(dst, dst, dst_offset);
3826 }
3827 
3828 void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
3829                     const Register& src_base, int src_offset) {
3830   ASM_CODE_COMMENT(masm);
3831   DCHECK_EQ(reg_list.Count() % 2, 0);
3832   UseScratchRegisterScope temps(masm);
3833   CPURegList restore_list = reg_list;
3834   int reg_size = restore_list.RegisterSizeInBytes();
3835 
3836   // Compute a temporary address to avoid having the macro assembler set
3837   // up a temp with an offset for accesses out of the range of the addressing
3838   // mode.
3839   Register src = temps.AcquireX();
3840   masm->Add(src, src_base, src_offset);
3841 
3842   // No need to restore padreg.
3843   restore_list.Remove(padreg);
3844 
3845   // Restore every register in restore_list from src.
3846   while (!restore_list.IsEmpty()) {
3847     CPURegister reg0 = restore_list.PopLowestIndex();
3848     CPURegister reg1 = restore_list.PopLowestIndex();
3849     int offset0 = reg0.code() * reg_size;
3850 
3851     if (reg1 == NoCPUReg) {
3852       masm->Ldr(reg0, MemOperand(src, offset0));
3853       break;
3854     }
3855 
3856     int offset1 = reg1.code() * reg_size;
3857 
3858     // Pair up adjacent loads, otherwise read them separately.
3859     if (offset1 == offset0 + reg_size) {
3860       masm->Ldp(reg0, reg1, MemOperand(src, offset0));
3861     } else {
3862       masm->Ldr(reg0, MemOperand(src, offset0));
3863       masm->Ldr(reg1, MemOperand(src, offset1));
3864     }
3865   }
3866 }
3867 
3868 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3869                                   DeoptimizeKind deopt_kind) {
3870   Isolate* isolate = masm->isolate();
3871 
3872   // TODO(all): This code needs to be revisited. We probably only need to save
3873   // caller-saved registers here. Callee-saved registers can be stored directly
3874   // in the input frame.
3875 
3876   // Save all allocatable double registers.
3877   CPURegList saved_double_registers(
3878       kDRegSizeInBits,
3879       DoubleRegList::FromBits(
3880           RegisterConfiguration::Default()->allocatable_double_codes_mask()));
3881   DCHECK_EQ(saved_double_registers.Count() % 2, 0);
3882   __ PushCPURegList(saved_double_registers);
3883 
3884   // We save all the registers except sp, lr, the platform register (x18) and
3885   // the masm scratches.
3886   CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
3887   saved_registers.Remove(ip0);
3888   saved_registers.Remove(ip1);
3889   saved_registers.Remove(x18);
3890   saved_registers.Combine(fp);
3891   saved_registers.Align();
3892   DCHECK_EQ(saved_registers.Count() % 2, 0);
3893   __ PushCPURegList(saved_registers);
3894 
3895   __ Mov(x3, Operand(ExternalReference::Create(
3896                  IsolateAddressId::kCEntryFPAddress, isolate)));
3897   __ Str(fp, MemOperand(x3));
3898 
3899   const int kSavedRegistersAreaSize =
3900       (saved_registers.Count() * kXRegSize) +
3901       (saved_double_registers.Count() * kDRegSize);
3902 
3903   // Floating point registers are saved on the stack above core registers.
3904   const int kDoubleRegistersOffset = saved_registers.Count() * kXRegSize;
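
  // Illustrative stack layout at this point (higher addresses first):
  //
  //   sp + kSavedRegistersAreaSize : frame being deoptimized
  //   sp + kDoubleRegistersOffset  : saved double registers (pushed first)
  //   sp                           : saved core registers (pushed last)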
3905 
3906   Register code_object = x2;
3907   Register fp_to_sp = x3;
3908   // Get the address of the location in the code object. This is the return
3909   // address for lazy deoptimization.
3910   __ Mov(code_object, lr);
3911   // Compute the fp-to-sp delta.
3912   __ Add(fp_to_sp, sp, kSavedRegistersAreaSize);
3913   __ Sub(fp_to_sp, fp, fp_to_sp);
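  // That is, fp_to_sp = fp - (sp + kSavedRegistersAreaSize): the distance from
  // the frame pointer down to the stack pointer as it was before the registers
  // above were pushed.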
3914 
3915   // Allocate a new deoptimizer object.
3916   __ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3917 
3918   // Ensure we can safely load from below fp.
3919   DCHECK_GT(kSavedRegistersAreaSize, -StandardFrameConstants::kFunctionOffset);
3920   __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3921 
3922   // If x1 is a smi, zero x0.
3923   __ Tst(x1, kSmiTagMask);
3924   __ CzeroX(x0, eq);
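  // Explanatory note: if that slot holds a Smi it is a frame type marker
  // rather than a context, in which case there is presumably no JSFunction in
  // this frame and a null function argument is passed to the runtime instead.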
3925 
3926   __ Mov(x1, static_cast<int>(deopt_kind));
3927   // The following arguments are already loaded:
3928   //  - x2: code object address
3929   //  - x3: fp-to-sp delta
3930   __ Mov(x4, ExternalReference::isolate_address(isolate));
3931 
3932   {
3933     // Call Deoptimizer::New().
3934     AllowExternalCallThatCantCauseGC scope(masm);
3935     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3936   }
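  // For reference, the five C arguments set up above are passed in x0-x4 per
  // the AAPCS64:
  //   x0: JSFunction being deoptimized (or 0 if there is none)
  //   x1: deoptimization kind
  //   x2: address inside the code object (the return address for lazy deopts)
  //   x3: fp-to-sp delta
  //   x4: isolate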
3937 
3938   // Preserve "deoptimizer" object in register x0.
3939   Register deoptimizer = x0;
3940 
3941   // Get the input frame descriptor pointer.
3942   __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
3943 
3944   // Copy core registers into the input frame.
3945   CopyRegListToFrame(masm, x1, FrameDescription::registers_offset(),
3946                      saved_registers, x2, x3);
3947 
3948   // Copy double registers to the input frame.
3949   CopyRegListToFrame(masm, x1, FrameDescription::double_registers_offset(),
3950                      saved_double_registers, x2, x3, kDoubleRegistersOffset);
3951 
3952   // Mark the stack as not iterable for the CPU profiler, which won't be able
3953   // to walk the stack without the return address.
3954   {
3955     UseScratchRegisterScope temps(masm);
3956     Register is_iterable = temps.AcquireX();
3957     __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3958     __ strb(xzr, MemOperand(is_iterable));
3959   }
3960 
3961   // Remove the saved registers from the stack.
3962   DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
3963   __ Drop(kSavedRegistersAreaSize / kXRegSize);
3964 
3965   // Load the size of the input frame into register x2; the unwinding limit
3966   // is the first stack slot not part of the input frame.
3967   Register unwind_limit = x2;
3968   __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
3969 
3970   // Unwind the stack down to - but not including - the unwinding
3971   // limit and copy the contents of the activation frame to the input
3972   // frame description.
3973   __ Add(x3, x1, FrameDescription::frame_content_offset());
3974   __ SlotAddress(x1, 0);
3975   __ Lsr(unwind_limit, unwind_limit, kSystemPointerSizeLog2);
3976   __ Mov(x5, unwind_limit);
3977   __ CopyDoubleWords(x3, x1, x5);
3978   // Since {unwind_limit} is the frame size up to the parameter count, we might
3979   // end up with an unaligned stack pointer. This is later recovered when
3980   // setting the stack pointer to {caller_frame_top_offset}.
3981   __ Bic(unwind_limit, unwind_limit, 1);
3982   __ Drop(unwind_limit);
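  // Roughly equivalent, in pseudo-C (illustrative only; accessor names are
  // approximate):
  //   n = input->frame_size() / kSystemPointerSize;
  //   memcpy(input->frame_content(), sp, n * kSystemPointerSize);
  //   sp += RoundDown(n, 2) * kSystemPointerSize;  // keeps sp 16-byte aligned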
3983 
3984   // Compute the output frame in the deoptimizer.
3985   __ Push(padreg, x0);  // Preserve deoptimizer object across call.
3986   {
3987     // Call Deoptimizer::ComputeOutputFrames().
3988     AllowExternalCallThatCantCauseGC scope(masm);
3989     __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3990   }
3991   __ Pop(x4, padreg);  // Restore deoptimizer object (class Deoptimizer).
3992 
3993   {
3994     UseScratchRegisterScope temps(masm);
3995     Register scratch = temps.AcquireX();
3996     __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
3997     __ Mov(sp, scratch);
3998   }
3999 
4000   // Replace the current (input) frame with the output frames.
4001   Label outer_push_loop, outer_loop_header;
4002   __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
4003   __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
4004   __ Add(x1, x0, Operand(x1, LSL, kSystemPointerSizeLog2));
4005   __ B(&outer_loop_header);
4006 
4007   __ Bind(&outer_push_loop);
4008   Register current_frame = x2;
4009   Register frame_size = x3;
4010   __ Ldr(current_frame, MemOperand(x0, kSystemPointerSize, PostIndex));
4011   __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
4012   __ Lsr(frame_size, x3, kSystemPointerSizeLog2);
4013   __ Claim(frame_size);
4014 
4015   __ Add(x7, current_frame, FrameDescription::frame_content_offset());
4016   __ SlotAddress(x6, 0);
4017   __ CopyDoubleWords(x6, x7, frame_size);
4018 
4019   __ Bind(&outer_loop_header);
4020   __ Cmp(x0, x1);
4021   __ B(lt, &outer_push_loop);
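  // The loop above is roughly, in pseudo-C (illustrative only):
  //   for (FrameDescription** f = output; f < output + output_count; f++) {
  //     claim (*f)->frame_size() bytes of stack;
  //     copy (*f)'s frame contents onto the newly claimed area;
  //   }
  // Here x0 walks the output frame array, x1 holds its end, x2 points at the
  // current FrameDescription and x3 holds its size in slots.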
4022 
4023   __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
4024   RestoreRegList(masm, saved_double_registers, x1,
4025                  FrameDescription::double_registers_offset());
4026 
4027   {
4028     UseScratchRegisterScope temps(masm);
4029     Register is_iterable = temps.AcquireX();
4030     Register one = x4;
4031     __ Mov(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
4032     __ Mov(one, Operand(1));
4033     __ strb(one, MemOperand(is_iterable));
4034   }
4035 
4036   // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
4037   // stack, then pops it all into registers. Here, we try to load it directly
4038   // into the relevant registers. Is this correct? If so, we should improve the
4039   // ARM code.
4040 
4041   // Restore registers from the last output frame.
4042   // Note that lr is not in the list of saved_registers and will be restored
4043   // later. We can use it to hold the address of the last output frame while
4044   // reloading the other registers.
4045   DCHECK(!saved_registers.IncludesAliasOf(lr));
4046   Register last_output_frame = lr;
4047   __ Mov(last_output_frame, current_frame);
4048 
4049   RestoreRegList(masm, saved_registers, last_output_frame,
4050                  FrameDescription::registers_offset());
4051 
4052   UseScratchRegisterScope temps(masm);
4053   temps.Exclude(x17);
4054   Register continuation = x17;
4055   __ Ldr(continuation, MemOperand(last_output_frame,
4056                                   FrameDescription::continuation_offset()));
4057   __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
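  // lr now holds the pc recorded for the last output frame. With control flow
  // integrity enabled that value was presumably signed when the frame
  // description was filled in, so it is authenticated against sp below before
  // execution continues.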
4058 #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
4059   __ Autibsp();
4060 #endif
4061   __ Br(continuation);
4062 }
4063 
4064 }  // namespace
4065 
4066 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
4067   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
4068 }
4069 
4070 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
4071   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
4072 }
4073 
4074 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
4075   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
4076 }
4077 
4078 namespace {
4079 
4080 // Restarts execution either at the current or next (in execution order)
4081 // bytecode. If there is baseline code on the shared function info, converts an
4082 // interpreter frame into a baseline frame and continues execution in baseline
4083 // code. Otherwise execution continues with bytecode.
4084 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
4085                                          bool next_bytecode,
4086                                          bool is_osr = false) {
4087   Label start;
4088   __ bind(&start);
4089 
4090   // Get function from the frame.
4091   Register closure = x1;
4092   __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
4093 
4094   // Get the Code object from the shared function info.
4095   Register code_obj = x22;
4096   __ LoadTaggedPointerField(
4097       code_obj,
4098       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
4099   __ LoadTaggedPointerField(
4100       code_obj,
4101       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
4102 
4103   // Check if we have baseline code. For OSR entry it is safe to assume we
4104   // always have baseline code.
4105   if (!is_osr) {
4106     Label start_with_baseline;
4107     __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
4108     __ B(eq, &start_with_baseline);
4109 
4110     // Start with bytecode as there is no baseline code.
4111     Builtin builtin_id = next_bytecode
4112                              ? Builtin::kInterpreterEnterAtNextBytecode
4113                              : Builtin::kInterpreterEnterAtBytecode;
4114     __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
4115             RelocInfo::CODE_TARGET);
4116 
4117     // Start with baseline code.
4118     __ bind(&start_with_baseline);
4119   } else if (FLAG_debug_code) {
4120     __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
4121     __ Assert(eq, AbortReason::kExpectedBaselineData);
4122   }
4123 
4124   if (FLAG_debug_code) {
4125     AssertCodeTIsBaseline(masm, code_obj, x3);
4126   }
4127   if (V8_EXTERNAL_CODE_SPACE_BOOL) {
4128     __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
4129   }
4130 
4131   // Load the feedback vector.
4132   Register feedback_vector = x2;
4133   __ LoadTaggedPointerField(
4134       feedback_vector,
4135       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
4136   __ LoadTaggedPointerField(
4137       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
4138 
4139   Label install_baseline_code;
4140   // Check if the feedback vector is valid. If not, call the runtime to
4141   // install baseline code, which allocates it.
4142   __ CompareObjectType(feedback_vector, x3, x3, FEEDBACK_VECTOR_TYPE);
4143   __ B(ne, &install_baseline_code);
4144 
4145   // Save BytecodeOffset from the stack frame.
4146   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
4147               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
4148   // Replace BytecodeOffset with the feedback vector.
4149   __ Str(feedback_vector,
4150          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
4151   feedback_vector = no_reg;
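  // Note: the interpreter frame's bytecode-offset slot is reused in place; in
  // a baseline frame that slot is expected to hold the feedback vector, which
  // is what effectively turns this frame into a baseline frame.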
4152 
4153   // Compute baseline pc for bytecode offset.
4154   ExternalReference get_baseline_pc_extref;
4155   if (next_bytecode || is_osr) {
4156     get_baseline_pc_extref =
4157         ExternalReference::baseline_pc_for_next_executed_bytecode();
4158   } else {
4159     get_baseline_pc_extref =
4160         ExternalReference::baseline_pc_for_bytecode_offset();
4161   }
4162   Register get_baseline_pc = x3;
4163   __ Mov(get_baseline_pc, get_baseline_pc_extref);
4164 
4165   // If the code deoptimizes during the implicit function entry stack interrupt
4166   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
4167   // not a valid bytecode offset.
4168   // TODO(pthier): Investigate if it is feasible to handle this special case
4169   // in TurboFan instead of here.
4170   Label valid_bytecode_offset, function_entry_bytecode;
4171   if (!is_osr) {
4172     __ cmp(kInterpreterBytecodeOffsetRegister,
4173            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
4174                    kFunctionEntryBytecodeOffset));
4175     __ B(eq, &function_entry_bytecode);
4176   }
4177 
4178   __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
4179          (BytecodeArray::kHeaderSize - kHeapObjectTag));
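  // The offset stored in the frame is relative to the tagged BytecodeArray
  // object, so the header size (minus the heap object tag) is subtracted to
  // obtain a plain bytecode offset before computing the baseline pc.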
4180 
4181   __ bind(&valid_bytecode_offset);
4182   // Get bytecode array from the stack frame.
4183   __ ldr(kInterpreterBytecodeArrayRegister,
4184          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
4185   // Save the accumulator register, since it's clobbered by the call below.
4186   __ Push(padreg, kInterpreterAccumulatorRegister);
4187   {
4188     Register arg_reg_1 = x0;
4189     Register arg_reg_2 = x1;
4190     Register arg_reg_3 = x2;
4191     __ Mov(arg_reg_1, code_obj);
4192     __ Mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
4193     __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
4194     FrameScope scope(masm, StackFrame::INTERNAL);
4195     __ CallCFunction(get_baseline_pc, 3, 0);
4196   }
4197   __ Add(code_obj, code_obj, kReturnRegister0);
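  // The C helper appears to return an offset relative to the start of the
  // baseline Code object; adding it to code_obj here (and Code::kHeaderSize -
  // kHeapObjectTag further down) produces the absolute address to jump to.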
4198   __ Pop(kInterpreterAccumulatorRegister, padreg);
4199 
4200   if (is_osr) {
4201     // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
4202     // disarm Sparkplug here.
4203     ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
4204     Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
4205   } else {
4206     __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
4207     __ Jump(code_obj);
4208   }
4209   __ Trap();  // Unreachable.
4210 
4211   if (!is_osr) {
4212     __ bind(&function_entry_bytecode);
4213     // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
4214     // address of the first bytecode.
4215     __ Mov(kInterpreterBytecodeOffsetRegister, Operand(0));
4216     if (next_bytecode) {
4217       __ Mov(get_baseline_pc,
4218              ExternalReference::baseline_pc_for_bytecode_offset());
4219     }
4220     __ B(&valid_bytecode_offset);
4221   }
4222 
4223   __ bind(&install_baseline_code);
4224   {
4225     FrameScope scope(masm, StackFrame::INTERNAL);
4226     __ Push(padreg, kInterpreterAccumulatorRegister);
4227     __ PushArgument(closure);
4228     __ CallRuntime(Runtime::kInstallBaselineCode, 1);
4229     __ Pop(kInterpreterAccumulatorRegister, padreg);
4230   }
4231   // Retry from the start after installing baseline code.
4232   __ B(&start);
4233 }
4234 
4235 }  // namespace
4236 
4237 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
4238     MacroAssembler* masm) {
4239   Generate_BaselineOrInterpreterEntry(masm, false);
4240 }
4241 
4242 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
4243     MacroAssembler* masm) {
4244   Generate_BaselineOrInterpreterEntry(masm, true);
4245 }
4246 
4247 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
4248     MacroAssembler* masm) {
4249   Generate_BaselineOrInterpreterEntry(masm, false, true);
4250 }
4251 
4252 #undef __
4253 
4254 }  // namespace internal
4255 }  // namespace v8
4256 
4257 #endif  // V8_TARGET_ARCH_ARM64
4258