// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/objects-inl.h"
#include "src/objects/js-generator.h"
#include "src/runtime/runtime.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
                                ExitFrameType exit_frame_type) {
  __ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
  if (exit_frame_type == BUILTIN_EXIT) {
    __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK(exit_frame_type == EXIT);
    __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithExitFrame),
            RelocInfo::CODE_TARGET);
  }
}

void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0     : number of arguments
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------
  ASM_LOCATION("Builtins::Generate_InternalArrayConstructor");
  Label generic_array_code;

  if (FLAG_debug_code) {
    // The initial map for the builtin InternalArray function should be a map.
    __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
    __ Tst(x10, kSmiTagMask);
    __ Assert(ne, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
    __ CompareObjectType(x10, x11, x12, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedInitialMapForInternalArrayFunction);
  }

  // Run the native code for the InternalArray function called as a normal
  // function.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
  __ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
          RelocInfo::CODE_TARGET);
}

static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- x0 : argument count (preserved for callee)
  //  -- x1 : target function (preserved for callee)
  //  -- x3 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function and the new target.
    // Push another copy as a parameter to the runtime call.
    __ SmiTag(x0);
    __ Push(x0, x1, x3, padreg);
    __ PushArgument(x1);

    __ CallRuntime(function_id, 1);
    __ Mov(x2, x0);

    // Restore target function and new target.
    __ Pop(padreg, x3, x1, x0);
    __ SmiUntag(x0);
  }

  static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
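  // The runtime call above returns the Code object in x0 (copied to x2).
  // Skipping the Code header and clearing the heap-object tag yields the
  // address of the first instruction, which Br then enters directly.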
  __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
  __ Br(x2);
}

namespace {

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  Label post_instantiation_deopt_entry;

  // ----------- S t a t e -------------
  //  -- x0     : number of arguments
  //  -- x1     : constructor function
  //  -- x3     : new target
  //  -- cp     : context
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");

  // Enter a construct frame.
  {
    FrameScope scope(masm, StackFrame::CONSTRUCT);
    Label already_aligned;
    Register argc = x0;

    if (__ emit_debug_code()) {
      // Check that FrameScope pushed the context onto the stack already.
      __ Peek(x2, 0);
      __ Cmp(x2, cp);
      __ Check(eq, AbortReason::kUnexpectedValue);
    }

    // Push number of arguments.
    __ SmiTag(x11, argc);
    __ Push(x11, padreg);

    // Add a slot for the receiver, and round up to maintain alignment.
    Register slot_count = x2;
    Register slot_count_without_rounding = x12;
    __ Add(slot_count_without_rounding, argc, 2);
    __ Bic(slot_count, slot_count_without_rounding, 1);
    __ Claim(slot_count);
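    // For example, argc == 3 claims 4 slots (3 arguments + receiver, already
    // an even count), while argc == 4 claims 6 slots (4 arguments + receiver
    // + one padding slot), keeping sp 16-byte aligned in both cases.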

    // Preserve the incoming parameters on the stack.
    __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);

    // Compute a pointer to the slot immediately above the location on the
    // stack to which arguments will be later copied.
    __ SlotAddress(x2, argc);

    // Poke the hole (receiver) in the highest slot.
    __ Str(x10, MemOperand(x2));
    __ Tbnz(slot_count_without_rounding, 0, &already_aligned);

    // Store padding, if needed.
    __ Str(padreg, MemOperand(x2, 1 * kPointerSize));
    __ Bind(&already_aligned);

    // Copy arguments to the expression stack.
    {
      Register count = x2;
      Register dst = x10;
      Register src = x11;
      __ Mov(count, argc);
      __ SlotAddress(dst, 0);
      __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
      __ CopyDoubleWords(dst, src, count);
    }

    // ----------- S t a t e -------------
    //  --                     x0: number of arguments (untagged)
    //  --                     x1: constructor function
    //  --                     x3: new target
    // If argc is odd:
    //  --     sp[0*kPointerSize]: argument n - 1
    //  --             ...
    //  -- sp[(n-1)*kPointerSize]: argument 0
    //  -- sp[(n+0)*kPointerSize]: the hole (receiver)
    //  -- sp[(n+1)*kPointerSize]: padding
    //  -- sp[(n+2)*kPointerSize]: number of arguments (tagged)
    //  -- sp[(n+3)*kPointerSize]: context (pushed by FrameScope)
    // If argc is even:
    //  --     sp[0*kPointerSize]: argument n - 1
    //  --             ...
    //  -- sp[(n-1)*kPointerSize]: argument 0
    //  -- sp[(n+0)*kPointerSize]: the hole (receiver)
    //  -- sp[(n+1)*kPointerSize]: padding
    //  -- sp[(n+2)*kPointerSize]: padding
    //  -- sp[(n+3)*kPointerSize]: number of arguments (tagged)
    //  -- sp[(n+4)*kPointerSize]: context (pushed by FrameScope)
    // -----------------------------------

    // Call the function.
    ParameterCount actual(argc);
    __ InvokeFunction(x1, x3, actual, CALL_FUNCTION);

    // Restore the context from the frame.
    __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame. Use fp relative
    // addressing to avoid the circular dependency between padding existence and
    // argc parity.
    __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }

  // Remove caller arguments from the stack and return.
  __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
  __ Ret();
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0     : number of arguments
  //  -- x1     : constructor function
  //  -- x3     : new target
  //  -- lr     : return address
  //  -- cp     : context pointer
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  ASM_LOCATION("Builtins::Generate_JSConstructStubGeneric");

  // Enter a construct frame.
  {
    FrameScope scope(masm, StackFrame::CONSTRUCT);
    Label post_instantiation_deopt_entry, not_create_implicit_receiver;

    if (__ emit_debug_code()) {
      // Check that FrameScope pushed the context onto the stack already.
      __ Peek(x2, 0);
      __ Cmp(x2, cp);
      __ Check(eq, AbortReason::kUnexpectedValue);
    }

    // Preserve the incoming parameters on the stack.
    __ SmiTag(x0);
    __ Push(x0, x1, padreg, x3);

    // ----------- S t a t e -------------
    //  --        sp[0*kPointerSize]: new target
    //  --        sp[1*kPointerSize]: padding
    //  -- x1 and sp[2*kPointerSize]: constructor function
    //  --        sp[3*kPointerSize]: number of arguments (tagged)
    //  --        sp[4*kPointerSize]: context (pushed by FrameScope)
    // -----------------------------------

    __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
    __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
    __ TestAndBranchIfAnySet(w4,
                             SharedFunctionInfo::IsDerivedConstructorBit::kMask,
                             &not_create_implicit_receiver);

    // If not derived class constructor: Allocate the new receiver object.
    __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
                        x4, x5);
    __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
            RelocInfo::CODE_TARGET);
    __ B(&post_instantiation_deopt_entry);

    // Else: use TheHoleValue as receiver for the constructor call.
    __ Bind(&not_create_implicit_receiver);
    __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);

    // ----------- S t a t e -------------
    //  --                          x0: receiver
    //  -- Slot 4 / sp[0*kPointerSize]: new target
    //  -- Slot 3 / sp[1*kPointerSize]: padding
    //  -- Slot 2 / sp[2*kPointerSize]: constructor function
    //  -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
    //  -- Slot 0 / sp[4*kPointerSize]: context
    // -----------------------------------
    // Deoptimizer enters here.
    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
        masm->pc_offset());
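    // Recording this pc offset lets the deoptimizer re-enter the stub at
    // post_instantiation_deopt_entry below, after the receiver has been
    // materialized.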

    __ Bind(&post_instantiation_deopt_entry);

    // Restore new target from the top of the stack.
    __ Peek(x3, 0 * kPointerSize);

    // Restore constructor function and argument count.
    __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
    __ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));

    // Copy arguments to the expression stack. The called function pops the
    // receiver along with its arguments, so we need an extra receiver on the
    // stack, in case we have to return it later.

    // Overwrite the new target with a receiver.
    __ Poke(x0, 0);

    // Push two further copies of the receiver. One will be popped by the called
    // function. The second acts as padding if the number of arguments plus
    // receiver is odd - pushing receiver twice avoids branching. It also means
    // that we don't have to handle the even and odd cases specially on
    // InvokeFunction's return, as top of stack will be the receiver in either
    // case.
    __ Push(x0, x0);

    // ----------- S t a t e -------------
    //  --                        x3: new target
    //  --                       x12: number of arguments (untagged)
    //  --        sp[0*kPointerSize]: implicit receiver (overwrite if argc odd)
    //  --        sp[1*kPointerSize]: implicit receiver
    //  --        sp[2*kPointerSize]: implicit receiver
    //  --        sp[3*kPointerSize]: padding
    //  -- x1 and sp[4*kPointerSize]: constructor function
    //  --        sp[5*kPointerSize]: number of arguments (tagged)
    //  --        sp[6*kPointerSize]: context
    // -----------------------------------

    // Round the number of arguments down to the next even number, and claim
    // slots for the arguments. If the number of arguments was odd, the last
    // argument will overwrite one of the receivers pushed above.
    __ Bic(x10, x12, 1);
    __ Claim(x10);

    // Copy the arguments.
    {
      Register count = x2;
      Register dst = x10;
      Register src = x11;
      __ Mov(count, x12);
      __ SlotAddress(dst, 0);
      __ Add(src, fp, StandardFrameConstants::kCallerSPOffset);
      __ CopyDoubleWords(dst, src, count);
    }

    // Call the function.
    __ Mov(x0, x12);
    ParameterCount actual(x0);
    __ InvokeFunction(x1, x3, actual, CALL_FUNCTION);

    // ----------- S t a t e -------------
    //  -- sp[0*kPointerSize]: implicit receiver
    //  -- sp[1*kPointerSize]: padding
    //  -- sp[2*kPointerSize]: constructor function
    //  -- sp[3*kPointerSize]: number of arguments
    //  -- sp[4*kPointerSize]: context
    // -----------------------------------

    // Store offset of return address for deoptimizer.
    masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
        masm->pc_offset());

    // Restore the context from the frame.
    __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));

    // If the result is an object (in the ECMA sense), we should get rid
    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
    // on page 74.
    Label use_receiver, do_throw, leave_frame;

    // If the result is undefined, we jump out to using the implicit receiver.
    __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
    __ B(eq, &use_receiver);

    // Otherwise we do a smi check and fall through to check if the return value
    // is a valid receiver.

    // If the result is a smi, it is *not* an object in the ECMA sense.
    __ JumpIfSmi(x0, &use_receiver);

    // If the type of the result (stored in its map) is less than
    // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
    __ B(&use_receiver);

    __ Bind(&do_throw);
    __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);

    // Throw away the result of the constructor invocation and use the
    // on-stack receiver as the result.
    __ Bind(&use_receiver);
    __ Peek(x0, 0 * kPointerSize);
    __ CompareRoot(x0, Heap::kTheHoleValueRootIndex);
    __ B(eq, &do_throw);

    __ Bind(&leave_frame);
    // Restore smi-tagged arguments count from the frame.
    __ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
    // Leave construct frame.
  }
  // Remove caller arguments from the stack and return.
  __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
  __ Ret();
}
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);
  __ PushArgument(x1);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0 : the value to pass to the generator
  //  -- x1 : the JSGeneratorObject to resume
  //  -- lr : return address
  // -----------------------------------
  __ AssertGeneratorObject(x1);

  // Store input value into generator object.
  __ Str(x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
  __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, x3,
                      kLRHasNotBeenSaved, kDontSaveFPRegs);

  // Load suspended function and context.
  __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ Mov(x10, debug_hook);
  __ Ldrsb(x10, MemOperand(x10));
  __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended generator.
  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());
  __ Mov(x10, debug_suspended_generator);
  __ Ldr(x10, MemOperand(x10));
  __ CompareAndBranch(x10, Operand(x1), eq,
                      &prepare_step_in_suspended_generator);
  __ Bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ CompareRoot(sp, Heap::kRealStackLimitRootIndex);
  __ B(lo, &stack_overflow);

  // Get number of arguments for generator function.
  __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
  __ Ldrh(w10, FieldMemOperand(
                   x10, SharedFunctionInfo::kFormalParameterCountOffset));

  // Claim slots for arguments and receiver (rounded up to a multiple of two).
  __ Add(x11, x10, 2);
  __ Bic(x11, x11, 1);
  __ Claim(x11);

  // Store padding (which might be replaced by the receiver).
  __ Sub(x11, x11, 1);
  __ Poke(padreg, Operand(x11, LSL, kPointerSizeLog2));

  // Poke receiver into highest claimed slot.
  __ Ldr(x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
  __ Poke(x5, Operand(x10, LSL, kPointerSizeLog2));

  // ----------- S t a t e -------------
  //  -- x1                       : the JSGeneratorObject to resume
  //  -- x4                       : generator function
  //  -- x10                      : argument count
  //  -- cp                       : generator context
  //  -- lr                       : return address
  //  -- sp[arg count]            : generator receiver
  //  -- sp[0 .. arg count - 1]   : claimed for args
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.

  __ Ldr(x5,
         FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
  {
    Label loop, done;
    __ Cbz(x10, &done);
    __ Mov(x12, 0);

    __ Bind(&loop);
    __ Sub(x10, x10, 1);
    __ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
    __ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
    __ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
    __ Add(x12, x12, 1);
    __ Cbnz(x10, &loop);
    __ Bind(&done);
  }
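  // In the loop above, x12 walks the parameters array upwards from element 0
  // while x10 counts down, so argument 0 is poked into the highest argument
  // slot (just below the receiver) and the last argument ends up at sp[0].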

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label check_has_bytecode_array;
    __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
    __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
    __ CompareObjectType(x3, x0, x0, INTERPRETER_DATA_TYPE);
    __ B(ne, &check_has_bytecode_array);
    __ Ldr(x3, FieldMemOperand(x3, InterpreterData::kBytecodeArrayOffset));
    __ Bind(&check_has_bytecode_array);
    __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
    __ Ldrh(w0, FieldMemOperand(
                    x0, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object.  In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ Mov(x3, x1);
    __ Mov(x1, x4);
    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
    __ Ldr(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
    __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag);
    __ Jump(x2);
  }

  __ Bind(&prepare_step_in_if_stepping);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Push hole as receiver since we do not use it for stepping.
    __ LoadRoot(x5, Heap::kTheHoleValueRootIndex);
    __ Push(x1, padreg, x4, x5);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(padreg, x1);
    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  }
  __ B(&stepping_prepared);

  __ Bind(&prepare_step_in_suspended_generator);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(x1, padreg);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(padreg, x1);
    __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
  }
  __ B(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ Unreachable();
  }
}

static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
                                        Label* stack_overflow) {
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.AcquireX();

  // Check the stack for overflow.
  // We are not trying to catch interruptions (e.g. debug break and
  // preemption) here, so the "real stack limit" is checked.
  Label enough_stack_space;
  __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
  // Make scratch the space we have left. The stack might already be overflowed
  // here which will cause scratch to become negative.
  __ Sub(scratch, sp, scratch);
  // Check if the arguments will overflow the stack.
  __ Cmp(scratch, Operand(num_args, LSL, kPointerSizeLog2));
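  // The signed le condition matters here: if the stack has already overflowed,
  // scratch is negative, and an unsigned comparison would incorrectly pass.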
  __ B(le, stack_overflow);
}

// Input:
//   x0: new.target.
//   x1: function.
//   x2: receiver.
//   x3: argc.
//   x4: argv.
// Output:
//   x0: result.
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  Register new_target = x0;
  Register function = x1;
  Register receiver = x2;
  Register argc = x3;
  Register argv = x4;
  Register scratch = x10;
  Register slots_to_claim = x11;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  {
    // Enter an internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Set up the context (we need to use the caller context from the isolate).
    __ Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
                                              masm->isolate()));
    __ Ldr(cp, MemOperand(scratch));

    // Claim enough space for the arguments, the receiver and the function,
    // including an optional slot of padding.
    __ Add(slots_to_claim, argc, 3);
    __ Bic(slots_to_claim, slots_to_claim, 1);

    // Check if we have enough stack space to push all arguments.
    Label enough_stack_space, stack_overflow;
    Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
    __ B(&enough_stack_space);

    __ Bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ Unreachable();

    __ Bind(&enough_stack_space);
    __ Claim(slots_to_claim);

    // Store padding (which might be overwritten).
    __ SlotAddress(scratch, slots_to_claim);
    __ Str(padreg, MemOperand(scratch, -kPointerSize));

    // Store receiver and function on the stack.
    __ SlotAddress(scratch, argc);
    __ Stp(receiver, function, MemOperand(scratch));

    // Copy arguments to the stack in a loop, in reverse order.
    // x3: argc.
    // x4: argv.
    Label loop, done;

    // Skip the argument set up if we have no arguments.
    __ Cbz(argc, &done);

    // scratch has been set to point to the location of the receiver, which
    // marks the end of the argument copy.

    __ Bind(&loop);
    // Load the handle.
    __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex));
    // Dereference the handle.
    __ Ldr(x11, MemOperand(x11));
    // Poke the result into the stack.
    __ Str(x11, MemOperand(scratch, -kPointerSize, PreIndex));
    // Loop if we've not reached the end of copy marker.
    __ Cmp(sp, scratch);
    __ B(lt, &loop);

    __ Bind(&done);

    __ Mov(scratch, argc);
    __ Mov(argc, new_target);
    __ Mov(new_target, scratch);
    // x0: argc.
    // x3: new.target.
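    // The three moves above swap x0 and x3 via scratch, so that the argument
    // count and new.target end up in the registers the Call/Construct
    // builtins expect.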

    // Initialize all JavaScript callee-saved registers, since they will be seen
    // by the garbage collector as part of handlers.
    // The original values have been saved in JSEntryStub::GenerateBody().
    __ LoadRoot(x19, Heap::kUndefinedValueRootIndex);
    __ Mov(x20, x19);
    __ Mov(x21, x19);
    __ Mov(x22, x19);
    __ Mov(x23, x19);
    __ Mov(x24, x19);
    __ Mov(x25, x19);
    __ Mov(x28, x19);
    // Don't initialize the reserved registers.
    // x26 : root register (kRootRegister).
    // x27 : context pointer (cp).
    // x29 : frame pointer (fp).

    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the JS internal frame and remove the parameters (except function),
    // and return.
  }

  // Result is in x0. Return.
  __ Ret();
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

static void ReplaceClosureCodeWithOptimizedCode(
    MacroAssembler* masm, Register optimized_code, Register closure,
    Register scratch1, Register scratch2, Register scratch3) {
  // Store code entry in the closure.
  __ Str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
  __ Mov(scratch1, optimized_code);  // Write barrier clobbers scratch1 below.
  __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
  Register args_size = scratch;

  // Get the arguments + receiver count.
  __ Ldr(args_size,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ Ldr(args_size.W(),
         FieldMemOperand(args_size, BytecodeArray::kParameterSizeOffset));
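  // args_size is a byte count at this point (the parameter area size,
  // including the receiver); the Lsr below converts it to a slot count
  // before the arguments are dropped.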

  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  // Drop receiver + arguments.
  if (__ emit_debug_code()) {
    __ Tst(args_size, kPointerSize - 1);
    __ Check(eq, AbortReason::kUnexpectedValue);
  }
  __ Lsr(args_size, args_size, kPointerSizeLog2);
  __ DropArguments(args_size);
}

// Tail-call |function_id| if |smi_entry| == |marker|.
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register smi_entry,
                                          OptimizationMarker marker,
                                          Runtime::FunctionId function_id) {
  Label no_match;
  __ CompareAndBranch(smi_entry, Operand(Smi::FromEnum(marker)), ne, &no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
                                           Register feedback_vector,
                                           Register scratch1, Register scratch2,
                                           Register scratch3) {
  // ----------- S t a t e -------------
  //  -- x0 : argument count (preserved for callee if needed, and caller)
  //  -- x3 : new target (preserved for callee if needed, and caller)
  //  -- x1 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  // -----------------------------------
  DCHECK(
      !AreAliased(feedback_vector, x0, x1, x3, scratch1, scratch2, scratch3));

  Label optimized_code_slot_is_weak_ref, fallthrough;

  Register closure = x1;
  Register optimized_code_entry = scratch1;

  __ Ldr(
      optimized_code_entry,
      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));

  // Check if the code entry is a Smi. If yes, we interpret it as an
  // optimization marker. Otherwise, interpret it as a weak reference to a
  // code object.
  __ JumpIfNotSmi(optimized_code_entry, &optimized_code_slot_is_weak_ref);

  {
    // Optimized code slot is a Smi optimization marker.

    // Fall through if no optimization trigger.
    __ CompareAndBranch(optimized_code_entry,
                        Operand(Smi::FromEnum(OptimizationMarker::kNone)), eq,
                        &fallthrough);

    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                  OptimizationMarker::kLogFirstExecution,
                                  Runtime::kFunctionFirstExecution);
    TailCallRuntimeIfMarkerEquals(masm, optimized_code_entry,
                                  OptimizationMarker::kCompileOptimized,
                                  Runtime::kCompileOptimized_NotConcurrent);
    TailCallRuntimeIfMarkerEquals(
        masm, optimized_code_entry,
        OptimizationMarker::kCompileOptimizedConcurrent,
        Runtime::kCompileOptimized_Concurrent);
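    // Each helper above tail-calls into the runtime only when its marker
    // matches and otherwise falls through, so the markers are tested one
    // by one.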

    {
      // Otherwise, the marker is InOptimizationQueue, so fall through hoping
      // that an interrupt will eventually update the slot with optimized code.
      if (FLAG_debug_code) {
        __ Cmp(
            optimized_code_entry,
            Operand(Smi::FromEnum(OptimizationMarker::kInOptimizationQueue)));
        __ Assert(eq, AbortReason::kExpectedOptimizationSentinel);
      }
      __ B(&fallthrough);
    }
  }

  {
    // Optimized code slot is a weak reference.
    __ bind(&optimized_code_slot_is_weak_ref);

    __ LoadWeakValue(optimized_code_entry, optimized_code_entry, &fallthrough);

    // Check if the optimized code is marked for deopt. If it is, call the
    // runtime to clear it.
    Label found_deoptimized_code;
    __ Ldr(scratch2, FieldMemOperand(optimized_code_entry,
                                     Code::kCodeDataContainerOffset));
    __ Ldr(
        scratch2,
        FieldMemOperand(scratch2, CodeDataContainer::kKindSpecificFlagsOffset));
    __ TestAndBranchIfAnySet(scratch2, 1 << Code::kMarkedForDeoptimizationBit,
                             &found_deoptimized_code);

    // Optimized code is good, get it into the closure and link the closure into
    // the optimized functions list, then tail call the optimized code.
    // The feedback vector is no longer used, so re-use it as a scratch
    // register.
    ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                        scratch2, scratch3, feedback_vector);
    static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
    __ Add(x2, optimized_code_entry,
           Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(x2);

    // Optimized code slot contains deoptimized code, evict it and re-enter the
    // closure's code.
    __ bind(&found_deoptimized_code);
    GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
  }

  // Fall-through if the optimized code cell is clear and there is no
  // optimization marker.
  __ bind(&fallthrough);
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Label* if_return) {
  Register bytecode_size_table = scratch1;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                     bytecode));

  __ Mov(bytecode_size_table, ExternalReference::bytecode_size_table_address());

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  __ Cmp(bytecode, Operand(0x3));
  __ B(hi, &process_bytecode);
  __ Tst(bytecode, Operand(0x1));
  __ B(ne, &extra_wide);
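  // Bytecodes 0-3 are the prefix bytecodes asserted above: an even value
  // (kWide, kDebugBreakWide) selects the wide size table, an odd value
  // (kExtraWide, kDebugBreakExtraWide) the extra-wide one.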

  // Load the next bytecode and update table to the wide scaled table.
  __ Add(bytecode_offset, bytecode_offset, Operand(1));
  __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ Add(bytecode_size_table, bytecode_size_table,
         Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
  __ B(&process_bytecode);

  __ Bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
  __ Add(bytecode_offset, bytecode_offset, Operand(1));
  __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ Add(bytecode_size_table, bytecode_size_table,
         Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));

  __ Bind(&process_bytecode);

// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME)                                                    \
  __ Cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
  __ B(if_return, eq);
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // Otherwise, load the size of the current bytecode and advance the offset.
  __ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
  __ Add(bytecode_offset, bytecode_offset, scratch1);
}

// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.  The actual argument count matches the formal parameter
// count expected by the function.
//
// The live registers are:
//   - x1: the JS function object being called.
//   - x3: the incoming new target or generator object
//   - cp: our context.
//   - fp: our caller's frame pointer.
//   - lr: return address.
//
// The function builds an interpreter frame.  See InterpreterFrameConstants in
// frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  Register closure = x1;
  Register feedback_vector = x2;

  // Load the feedback vector from the closure.
  __ Ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  // Read off the optimized code slot in the feedback vector, and if there
  // is optimized code or an optimization marker, call that instead.
  MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done below).
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ Push(lr, fp, cp, closure);
  __ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);

  // Get the bytecode array from the function object and load it into
  // kInterpreterBytecodeArrayRegister.
  Label has_bytecode_array;
  __ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(kInterpreterBytecodeArrayRegister, x11, x11,
                       INTERPRETER_DATA_TYPE);
  __ B(ne, &has_bytecode_array);
  __ Ldr(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(kInterpreterBytecodeArrayRegister,
                         InterpreterData::kBytecodeArrayOffset));
  __ Bind(&has_bytecode_array);

  // Increment invocation count for the function.
  __ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
  __ Ldr(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));
  __ Add(w10, w10, Operand(1));
  __ Str(w10, FieldMemOperand(x11, FeedbackVector::kInvocationCountOffset));

  // Check function data field is actually a BytecodeArray object.
  if (FLAG_debug_code) {
    __ AssertNotSmi(
        kInterpreterBytecodeArrayRegister,
        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, x0, x0,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Reset code age.
  __ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
  __ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                               BytecodeArray::kBytecodeAgeOffset));

  // Load the initial bytecode offset.
  __ Mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  // Push bytecode array and Smi tagged bytecode array offset.
  __ SmiTag(x0, kInterpreterBytecodeOffsetRegister);
  __ Push(kInterpreterBytecodeArrayRegister, x0);

  // Allocate the local and temporary register file on the stack.
  {
    // Load frame size from the BytecodeArray object.
    __ Ldr(w11, FieldMemOperand(kInterpreterBytecodeArrayRegister,
                                BytecodeArray::kFrameSizeOffset));

    // Do a stack check to ensure we don't go over the limit.
    Label ok;
    __ Sub(x10, sp, Operand(x11));
    __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
    __ B(hs, &ok);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ Bind(&ok);

    // If ok, push undefined as the initial value for all register file entries.
    // Note: there should always be at least one stack slot for the return
    // register in the register file.
    Label loop_header;
    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
    __ Lsr(x11, x11, kPointerSizeLog2);
    // Round up the number of registers to a multiple of 2, to align the stack
    // to 16 bytes.
    __ Add(x11, x11, 1);
    __ Bic(x11, x11, 1);
    __ PushMultipleTimes(x10, x11);
    __ Bind(&loop_header);
  }

  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with incoming value which was passed in x3.
  Label no_incoming_new_target_or_generator_register;
  __ Ldrsw(x10,
           FieldMemOperand(
               kInterpreterBytecodeArrayRegister,
               BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
  __ Cbz(x10, &no_incoming_new_target_or_generator_register);
  __ Str(x3, MemOperand(fp, x10, LSL, kPointerSizeLog2));
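  // The value loaded above is a frame-pointer-relative interpreter register
  // operand; interpreter registers live below fp, so it is negative, which is
  // why the sign-extending Ldrsw is used before scaling by the pointer size.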
  __ Bind(&no_incoming_new_target_or_generator_register);

  // Load accumulator with undefined.
  __ LoadRoot(kInterpreterAccumulatorRegister, Heap::kUndefinedValueRootIndex);

  // Load the dispatch table into a register and dispatch to the bytecode
  // handler at the current bytecode offset.
  Label do_dispatch;
  __ bind(&do_dispatch);
  __ Mov(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
  __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
  __ Ldr(kJavaScriptCallCodeStartRegister,
         MemOperand(kInterpreterDispatchTableRegister, x1));
  __ Call(kJavaScriptCallCodeStartRegister);
  masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());

  // Any returns to the entry trampoline are either due to the return bytecode
  // or the interpreter tail calling a builtin and then a dispatch.

  // Get bytecode array and bytecode offset from the stack frame.
  __ Ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ Ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Either return, or advance to the next bytecode and dispatch.
  Label do_return;
  __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
                         kInterpreterBytecodeOffsetRegister));
  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
                                kInterpreterBytecodeOffsetRegister, x1, x2,
                                &do_return);
  __ B(&do_dispatch);

  __ bind(&do_return);
  // The return value is in x0.
  LeaveInterpreterFrame(masm, x2);
  __ Ret();
}

static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                         Register num_args,
                                         Register first_arg_index,
                                         Register spread_arg_out,
                                         ConvertReceiverMode receiver_mode,
                                         InterpreterPushArgsMode mode) {
  Register last_arg_addr = x10;
  Register stack_addr = x11;
  Register slots_to_claim = x12;
  Register slots_to_copy = x13;  // May include receiver, unlike num_args.

  DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
                     slots_to_claim, slots_to_copy));
  // spread_arg_out may alias with the first_arg_index input.
  DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
                     slots_to_copy));

  // Add one slot for the receiver.
  __ Add(slots_to_claim, num_args, 1);

  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Exclude final spread from slots to claim and the number of arguments.
    __ Sub(slots_to_claim, slots_to_claim, 1);
    __ Sub(num_args, num_args, 1);
  }

  // Add a stack check before pushing arguments.
  Label stack_overflow, done;
  Generate_StackOverflowCheck(masm, slots_to_claim, &stack_overflow);
  __ B(&done);
  __ Bind(&stack_overflow);
  __ TailCallRuntime(Runtime::kThrowStackOverflow);
  __ Unreachable();
  __ Bind(&done);

  // Round up to an even number of slots and claim them.
  __ Add(slots_to_claim, slots_to_claim, 1);
  __ Bic(slots_to_claim, slots_to_claim, 1);
  __ Claim(slots_to_claim);

  {
    // Store padding, which may be overwritten.
    UseScratchRegisterScope temps(masm);
    Register scratch = temps.AcquireX();
    __ Sub(scratch, slots_to_claim, 1);
    __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));
  }

  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    // Store "undefined" as the receiver arg if we need to.
    Register receiver = x14;
    __ LoadRoot(receiver, Heap::kUndefinedValueRootIndex);
    __ SlotAddress(stack_addr, num_args);
    __ Str(receiver, MemOperand(stack_addr));
    __ Mov(slots_to_copy, num_args);
  } else {
    // If we're not given an explicit receiver to store, we'll need to copy it
    // together with the rest of the arguments.
    __ Add(slots_to_copy, num_args, 1);
  }

  __ Sub(last_arg_addr, first_arg_index,
         Operand(slots_to_copy, LSL, kPointerSizeLog2));
  __ Add(last_arg_addr, last_arg_addr, kPointerSize);
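  // The interpreter passes arguments at descending addresses starting at
  // first_arg_index, so last_arg_addr now points at the lowest-addressed slot
  // to copy; CopyDoubleWords below copies upwards from there.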

  // Load the final spread argument into spread_arg_out, if necessary.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Ldr(spread_arg_out, MemOperand(last_arg_addr, -kPointerSize));
  }

  // Copy the rest of the arguments.
  __ SlotAddress(stack_addr, 0);
  __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy);
}

// static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
  // ----------- S t a t e -------------
  //  -- x0 : the number of arguments (not including the receiver)
  //  -- x2 : the address of the first argument to be pushed. Subsequent
  //          arguments should be consecutive above this, in the same order as
  //          they are to be pushed onto the stack.
  //  -- x1 : the target to call (can be any Object).
  // -----------------------------------

  // Push the arguments. num_args may be updated according to mode.
  // spread_arg_out will be updated to contain the last spread argument, when
  // mode == InterpreterPushArgsMode::kWithFinalSpread.
  Register num_args = x0;
  Register first_arg_index = x2;
  Register spread_arg_out =
      (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
  Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
                               receiver_mode, mode);

  // Call the target.
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
            RelocInfo::CODE_TARGET);
  }
}

// static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
  // ----------- S t a t e -------------
  // -- x0 : argument count (not including receiver)
  // -- x3 : new target
  // -- x1 : constructor to call
  // -- x2 : allocation site feedback if available, undefined otherwise
  // -- x4 : address of the first argument
  // -----------------------------------
  __ AssertUndefinedOrAllocationSite(x2);

  // Push the arguments. num_args may be updated according to mode.
  // spread_arg_out will be updated to contain the last spread argument, when
  // mode == InterpreterPushArgsMode::kWithFinalSpread.
  Register num_args = x0;
  Register first_arg_index = x4;
  Register spread_arg_out =
      (mode == InterpreterPushArgsMode::kWithFinalSpread) ? x2 : no_reg;
  Generate_InterpreterPushArgs(masm, num_args, first_arg_index, spread_arg_out,
                               ConvertReceiverMode::kNullOrUndefined, mode);

  if (mode == InterpreterPushArgsMode::kArrayFunction) {
    __ AssertFunction(x1);

    // Tail call to the array construct stub (still in the caller
    // context at this point).
    Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
    __ Jump(code, RelocInfo::CODE_TARGET);
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Call the constructor with x0, x1, and x3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
            RelocInfo::CODE_TARGET);
  } else {
    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
    // Call the constructor with x0, x1, and x3 unmodified.
    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
  }
}

static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
  // Set the return address to the correct point in the interpreter entry
  // trampoline.
  Label builtin_trampoline, trampoline_loaded;
  Smi* interpreter_entry_return_pc_offset(
      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::kZero);

  // If the SFI function_data is an InterpreterData, get the trampoline stored
  // in it, otherwise get the trampoline from the builtins list.
  __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
  __ Ldr(x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
  __ Ldr(x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
  __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
                       kInterpreterDispatchTableRegister,
                       INTERPRETER_DATA_TYPE);
  __ B(ne, &builtin_trampoline);

  __ Ldr(x1,
         FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
  __ B(&trampoline_loaded);

  __ Bind(&builtin_trampoline);
  __ LoadObject(x1, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));

  __ Bind(&trampoline_loaded);
  __ Add(lr, x1, Operand(interpreter_entry_return_pc_offset->value() +
                         Code::kHeaderSize - kHeapObjectTag));
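  // lr now points into the interpreter entry trampoline at the pc offset
  // recorded when the trampoline was generated, so returning from the
  // dispatched bytecode handler resumes the dispatch loop there.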

  // Initialize the dispatch table register.
  __ Mov(
      kInterpreterDispatchTableRegister,
      ExternalReference::interpreter_dispatch_table_address(masm->isolate()));

  // Get the bytecode array pointer from the frame.
  __ Ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));

  if (FLAG_debug_code) {
    // Check function data field is actually a BytecodeArray object.
    __ AssertNotSmi(
        kInterpreterBytecodeArrayRegister,
        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
    __ CompareObjectType(kInterpreterBytecodeArrayRegister, x1, x1,
                         BYTECODE_ARRAY_TYPE);
    __ Assert(
        eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }

  // Get the target bytecode offset from the frame.
  __ Ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Dispatch to the target bytecode.
  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
  __ Mov(x1, Operand(x18, LSL, kPointerSizeLog2));
  __ Ldr(kJavaScriptCallCodeStartRegister,
         MemOperand(kInterpreterDispatchTableRegister, x1));
  __ Jump(kJavaScriptCallCodeStartRegister);
}
1243 
Generate_InterpreterEnterBytecodeAdvance(MacroAssembler * masm)1244 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1245   // Get bytecode array and bytecode offset from the stack frame.
1246   __ ldr(kInterpreterBytecodeArrayRegister,
1247          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1248   __ ldr(kInterpreterBytecodeOffsetRegister,
1249          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1250   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1251 
1252   // Load the current bytecode.
1253   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
1254                          kInterpreterBytecodeOffsetRegister));
1255 
1256   // Advance to the next bytecode.
1257   Label if_return;
1258   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1259                                 kInterpreterBytecodeOffsetRegister, x1, x2,
1260                                 &if_return);
1261 
1262   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1263   __ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
1264   __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1265 
1266   Generate_InterpreterEnterBytecode(masm);
1267 
1268   // We should never take the if_return path.
1269   __ bind(&if_return);
1270   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1271 }
1272 
1273 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1274   Generate_InterpreterEnterBytecode(masm);
1275 }
1276 
1277 void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
1278   // ----------- S t a t e -------------
1279   //  -- x0 : argument count (preserved for callee)
1280   //  -- x1 : new target (preserved for callee)
1281   //  -- x3 : target function (preserved for callee)
1282   // -----------------------------------
1283   Register argc = x0;
1284   Register new_target = x1;
1285   Register target = x3;
1286 
1287   Label failed;
1288   {
1289     FrameScope scope(masm, StackFrame::INTERNAL);
1290 
1291     // Push argument count, a copy of the target function and the new target,
1292     // together with some padding to maintain 16-byte alignment.
1293     __ SmiTag(argc);
1294     __ Push(argc, new_target, target, padreg);
1295 
1296     // Push another copy of new target as a parameter to the runtime call and
1297     // copy the rest of the arguments from caller (stdlib, foreign, heap).
1298     Label args_done;
1299     Register undef = x10;
1300     Register scratch1 = x12;
1301     Register scratch2 = x13;
1302     Register scratch3 = x14;
1303     __ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
1304 
1305     Label at_least_one_arg;
1306     Label three_args;
1307     DCHECK_NULL(Smi::kZero);
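    // Smi::kZero has an all-zero bit pattern, so Cbnz on the Smi-tagged argc
    // correctly tests whether the untagged argument count is zero.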
1308     __ Cbnz(argc, &at_least_one_arg);
1309 
1310     // No arguments.
1311     __ Push(new_target, undef, undef, undef);
1312     __ B(&args_done);
1313 
1314     __ Bind(&at_least_one_arg);
1315     // Load two arguments, though we may only use one (for the one arg case).
1316     __ Ldp(scratch2, scratch1,
1317            MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
1318 
1319     // Set flags for determining the value of smi-tagged argc.
1320     //  lt => 1, eq => 2, gt => 3.
1321     __ Cmp(argc, Smi::FromInt(2));
1322     __ B(gt, &three_args);
1323 
1324     // One or two arguments.
1325     // If there is one argument (flags are lt), scratch2 contains that argument,
1326     // and scratch1 must be undefined.
1327     __ CmovX(scratch1, scratch2, lt);
1328     __ CmovX(scratch2, undef, lt);
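    // After these two moves, in the one-argument case scratch1 holds the
    // single argument and scratch2 holds undefined; in the two-argument case
    // both registers are left unchanged. The Push below thus supplies
    // undefined for any missing parameters.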
1329     __ Push(new_target, scratch1, scratch2, undef);
1330     __ B(&args_done);
1331 
1332     // Three arguments.
1333     __ Bind(&three_args);
1334     __ Ldr(scratch3, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
1335                                         2 * kPointerSize));
1336     __ Push(new_target, scratch3, scratch1, scratch2);
1337 
1338     __ Bind(&args_done);
1339 
1340     // Call the runtime; on success, unwind this frame and the parent frame.
1341     __ CallRuntime(Runtime::kInstantiateAsmJs, 4);
1342 
1343     // A smi 0 is returned on failure, an object on success.
1344     __ JumpIfSmi(x0, &failed);
1345 
1346     // Peek the argument count from the stack, untagging at the same time.
1347     __ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
1348     __ Drop(4);
1349     scope.GenerateLeaveFrame();
1350 
1351     // Drop arguments and receiver.
1352     __ DropArguments(x4, TurboAssembler::kCountExcludesReceiver);
1353     __ Ret();
1354 
1355     __ Bind(&failed);
1356     // Restore target function and new target.
1357     __ Pop(padreg, target, new_target, argc);
1358     __ SmiUntag(argc);
1359   }
1360   // On failure, tail call back to regular JavaScript by re-calling the
1361   // function, which has been reset to the CompileLazy builtin.
1362   __ Ldr(x4, FieldMemOperand(new_target, JSFunction::kCodeOffset));
1363   __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
1364   __ Jump(x4);
1365 }
1366 
1367 namespace {
1368 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1369                                       bool java_script_builtin,
1370                                       bool with_result) {
1371   const RegisterConfiguration* config(RegisterConfiguration::Default());
1372   int allocatable_register_count = config->num_allocatable_general_registers();
1373   int frame_size = BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp +
1374                    (allocatable_register_count +
1375                     BuiltinContinuationFrameConstants::PaddingSlotCount(
1376                         allocatable_register_count)) *
1377                        kPointerSize;
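  // frame_size covers the fixed frame slots plus one slot per allocatable
  // general register, with PaddingSlotCount rounding the register area up so
  // the frame stays 16-byte aligned.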
1378 
1379   // Set up frame pointer.
1380   __ Add(fp, sp, frame_size);
1381 
1382   if (with_result) {
1383     // Overwrite the hole inserted by the deoptimizer with the return value from
1384     // the LAZY deopt point.
1385     __ Str(x0,
1386            MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset));
1387   }
1388 
1389   // Restore registers in pairs.
1390   int offset = -BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1391                allocatable_register_count * kPointerSize;
1392   for (int i = allocatable_register_count - 1; i > 0; i -= 2) {
1393     int code1 = config->GetAllocatableGeneralCode(i);
1394     int code2 = config->GetAllocatableGeneralCode(i - 1);
1395     Register reg1 = Register::from_code(code1);
1396     Register reg2 = Register::from_code(code2);
1397     __ Ldp(reg1, reg2, MemOperand(fp, offset));
1398     offset += 2 * kPointerSize;
1399   }
1400 
1401   // Restore the first register separately, if the number of registers is odd.
1402   if (allocatable_register_count % 2 != 0) {
1403     int code = config->GetAllocatableGeneralCode(0);
1404     __ Ldr(Register::from_code(code), MemOperand(fp, offset));
1405   }
1406 
1407   if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister);
1408 
1409   // Load builtin object.
1410   UseScratchRegisterScope temps(masm);
1411   Register builtin = temps.AcquireX();
1412   __ Ldr(builtin,
1413          MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinOffset));
1414 
1415   // Restore fp, lr.
1416   __ Mov(sp, fp);
1417   __ Pop(fp, lr);
1418 
1419   // Call builtin.
1420   __ Add(builtin, builtin, Code::kHeaderSize - kHeapObjectTag);
1421   __ Br(builtin);
1422 }
1423 }  // namespace
1424 
1425 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1426   Generate_ContinueToBuiltinHelper(masm, false, false);
1427 }
1428 
1429 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1430     MacroAssembler* masm) {
1431   Generate_ContinueToBuiltinHelper(masm, false, true);
1432 }
1433 
1434 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1435   Generate_ContinueToBuiltinHelper(masm, true, false);
1436 }
1437 
1438 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1439     MacroAssembler* masm) {
1440   Generate_ContinueToBuiltinHelper(masm, true, true);
1441 }
1442 
1443 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1444   {
1445     FrameScope scope(masm, StackFrame::INTERNAL);
1446     __ CallRuntime(Runtime::kNotifyDeoptimized);
1447   }
1448 
1449   // Pop TOS register and padding.
1450   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), x0.code());
1451   __ Pop(x0, padreg);
1452   __ Ret();
1453 }
1454 
1455 static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
1456                                               bool has_handler_frame) {
1457   // Lookup the function in the JavaScript frame.
1458   if (has_handler_frame) {
1459     __ Ldr(x0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1460     __ Ldr(x0, MemOperand(x0, JavaScriptFrameConstants::kFunctionOffset));
1461   } else {
1462     __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
1463   }
1464 
1465   {
1466     FrameScope scope(masm, StackFrame::INTERNAL);
1467     // Pass function as argument.
1468     __ PushArgument(x0);
1469     __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1470   }
1471 
1472   // If the code object is null, just return to the caller.
1473   Label skip;
1474   __ CompareAndBranch(x0, Smi::kZero, ne, &skip);
1475   __ Ret();
1476 
1477   __ Bind(&skip);
1478 
1479   // Drop any potential handler frame that may be sitting on top of the actual
1480   // JavaScript frame. This is the case when OSR is triggered from bytecode.
1481   if (has_handler_frame) {
1482     __ LeaveFrame(StackFrame::STUB);
1483   }
1484 
1485   // Load deoptimization data from the code object.
1486   // <deopt_data> = <code>[#deoptimization_data_offset]
1487   __ Ldr(x1, MemOperand(x0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
1488 
1489   // Load the OSR entrypoint offset from the deoptimization data.
1490   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1491   __ SmiUntag(x1,
1492               FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
1493                                       DeoptimizationData::kOsrPcOffsetIndex)));
1494 
1495   // Compute the target address = code_obj + header_size + osr_offset
1496   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1497   __ Add(x0, x0, x1);
1498   __ Add(lr, x0, Code::kHeaderSize - kHeapObjectTag);
1499 
1500   // And "return" to the OSR entry point of the function.
1501   __ Ret();
1502 }
1503 
1504 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
1505   Generate_OnStackReplacementHelper(masm, false);
1506 }
1507 
1508 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1509   Generate_OnStackReplacementHelper(masm, true);
1510 }
1511 
1512 // static
1513 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1514   // ----------- S t a t e -------------
1515   //  -- x0       : argc
1516   //  -- sp[0]    : argArray (if argc == 2)
1517   //  -- sp[8]    : thisArg  (if argc >= 1)
1518   //  -- sp[16]   : receiver
1519   // -----------------------------------
1520   ASM_LOCATION("Builtins::Generate_FunctionPrototypeApply");
1521 
1522   Register argc = x0;
1523   Register arg_array = x2;
1524   Register receiver = x1;
1525   Register this_arg = x0;
1526   Register undefined_value = x3;
1527   Register null_value = x4;
1528 
1529   __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
1530   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
1531 
1532   // 1. Load receiver into x1, argArray into x2 (if present), remove all
1533   // arguments from the stack (including the receiver), and push thisArg (if
1534   // present) instead.
1535   {
1536     Register saved_argc = x10;
1537     Register scratch = x11;
1538 
1539     // Push two undefined values on the stack, to put it in a consistent state
1540     // so that we can always read three arguments from it.
1541     __ Push(undefined_value, undefined_value);
1542 
1543     // The state of the stack (with arrows pointing to the slots we will read)
1544     // is as follows:
1545     //
1546     //       argc = 0               argc = 1                argc = 2
1547     // -> sp[16]: receiver    -> sp[24]: receiver     -> sp[32]: receiver
1548     // -> sp[8]:  undefined   -> sp[16]: this_arg     -> sp[24]: this_arg
1549     // -> sp[0]:  undefined   -> sp[8]:  undefined    -> sp[16]: arg_array
1550     //                           sp[0]:  undefined       sp[8]:  undefined
1551     //                                                   sp[0]:  undefined
1552     //
1553     // There are now always three arguments to read, in the slots starting from
1554     // slot argc.
1555     __ SlotAddress(scratch, argc);
1556 
1557     __ Mov(saved_argc, argc);
1558     __ Ldp(arg_array, this_arg, MemOperand(scratch));  // Overwrites argc.
1559     __ Ldr(receiver, MemOperand(scratch, 2 * kPointerSize));
1560 
1561     __ Drop(2);  // Drop the undefined values we pushed above.
1562     __ DropArguments(saved_argc, TurboAssembler::kCountExcludesReceiver);
1563 
1564     __ PushArgument(this_arg);
1565   }
1566 
1567   // ----------- S t a t e -------------
1568   //  -- x2      : argArray
1569   //  -- x1      : receiver
1570   //  -- sp[0]   : thisArg
1571   // -----------------------------------
1572 
1573   // 2. We don't need to check explicitly for callable receiver here,
1574   // since that's the first thing the Call/CallWithArrayLike builtins
1575   // will do.
1576 
1577   // 3. Tail call with no arguments if argArray is null or undefined.
1578   Label no_arguments;
1579   __ Cmp(arg_array, null_value);
1580   __ Ccmp(arg_array, undefined_value, ZFlag, ne);
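  // The Ccmp only compares against undefined when the first comparison was
  // not equal; otherwise it sets the Z flag directly, so the branch below is
  // taken if and only if argArray is null or undefined.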
1581   __ B(eq, &no_arguments);
1582 
1583   // 4a. Apply the receiver to the given argArray.
1584   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1585           RelocInfo::CODE_TARGET);
1586 
1587   // 4b. The argArray is either null or undefined, so we tail call without any
1588   // arguments to the receiver.
1589   __ Bind(&no_arguments);
1590   {
1591     __ Mov(x0, 0);
1592     DCHECK(receiver.Is(x1));
1593     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1594   }
1595 }
1596 
1597 // static
1598 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1599   Register argc = x0;
1600   Register function = x1;
1601 
1602   ASM_LOCATION("Builtins::Generate_FunctionPrototypeCall");
1603 
1604   // 1. Get the callable to call (passed as receiver) from the stack.
1605   __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
1606 
1607   // 2. Handle case with no arguments.
1608   {
1609     Label non_zero;
1610     Register scratch = x10;
1611     __ Cbnz(argc, &non_zero);
1612     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
1613     // Overwrite receiver with undefined, which will be the new receiver.
1614     // We do not need to overwrite the padding slot above it with anything.
1615     __ Poke(scratch, 0);
1616     // Call function. The argument count is already zero.
1617     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1618     __ Bind(&non_zero);
1619   }
1620 
1621   // 3. Overwrite the receiver with padding. If argc is odd, this is all we
1622   //    need to do.
1623   Label arguments_ready;
1624   __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
1625   __ Tbnz(argc, 0, &arguments_ready);
1626 
1627   // 4. If argc is even:
1628   //    Copy arguments two slots higher in memory, overwriting the original
1629   //    receiver and padding.
1630   {
1631     Label loop;
1632     Register copy_from = x10;
1633     Register copy_to = x11;
1634     Register count = x12;
1635     Register last_arg_slot = x13;
1636     __ Mov(count, argc);
1637     __ Sub(last_arg_slot, argc, 1);
1638     __ SlotAddress(copy_from, last_arg_slot);
1639     __ Add(copy_to, copy_from, 2 * kPointerSize);
1640     __ CopyDoubleWords(copy_to, copy_from, count,
1641                        TurboAssembler::kSrcLessThanDst);
1642     // Drop two slots. These are copies of the last two arguments.
1643     __ Drop(2);
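    // E.g. for argc == 2: the two arguments move from slots [0..1] up to
    // slots [2..3], over the old receiver and padding, and dropping two slots
    // then makes the original first argument the new receiver.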
1644   }
1645 
1646   // 5. Adjust argument count to make the original first argument the new
1647   //    receiver and call the callable.
1648   __ Bind(&arguments_ready);
1649   __ Sub(argc, argc, 1);
1650   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1651 }
1652 
1653 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1654   // ----------- S t a t e -------------
1655   //  -- x0       : argc
1656   //  -- sp[0]    : argumentsList (if argc == 3)
1657   //  -- sp[8]    : thisArgument  (if argc >= 2)
1658   //  -- sp[16]   : target        (if argc >= 1)
1659   //  -- sp[24]   : receiver
1660   // -----------------------------------
1661   ASM_LOCATION("Builtins::Generate_ReflectApply");
1662 
1663   Register argc = x0;
1664   Register arguments_list = x2;
1665   Register target = x1;
1666   Register this_argument = x4;
1667   Register undefined_value = x3;
1668 
1669   __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
1670 
1671   // 1. Load target into x1 (if present), argumentsList into x2 (if present),
1672   // remove all arguments from the stack (including the receiver), and push
1673   // thisArgument (if present) instead.
1674   {
1675     // Push four undefined values on the stack, to put it in a consistent state
1676     // so that we can always read the three arguments we need from it. The
1677     // fourth value is used for stack alignment.
1678     __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
1679 
1680     // The state of the stack (with arrows pointing to the slots we will read)
1681     // is as follows:
1682     //
1683     //       argc = 0               argc = 1                argc = 2
1684     //    sp[32]: receiver       sp[40]: receiver        sp[48]: receiver
1685     // -> sp[24]: undefined   -> sp[32]: target       -> sp[40]: target
1686     // -> sp[16]: undefined   -> sp[24]: undefined    -> sp[32]: this_argument
1687     // -> sp[8]:  undefined   -> sp[16]: undefined    -> sp[24]: undefined
1688     //    sp[0]:  undefined      sp[8]:  undefined       sp[16]: undefined
1689     //                           sp[0]:  undefined       sp[8]:  undefined
1690     //                                                   sp[0]:  undefined
1691     //       argc = 3
1692     //    sp[56]: receiver
1693     // -> sp[48]: target
1694     // -> sp[40]: this_argument
1695     // -> sp[32]: arguments_list
1696     //    sp[24]: undefined
1697     //    sp[16]: undefined
1698     //    sp[8]:  undefined
1699     //    sp[0]:  undefined
1700     //
1701     // There are now always three arguments to read, in the slots starting from
1702     // slot (argc + 1).
1703     Register scratch = x10;
1704     __ SlotAddress(scratch, argc);
1705     __ Ldp(arguments_list, this_argument,
1706            MemOperand(scratch, 1 * kPointerSize));
1707     __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
1708 
1709     __ Drop(4);  // Drop the undefined values we pushed above.
1710     __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
1711 
1712     __ PushArgument(this_argument);
1713   }
1714 
1715   // ----------- S t a t e -------------
1716   //  -- x2      : argumentsList
1717   //  -- x1      : target
1718   //  -- sp[0]   : thisArgument
1719   // -----------------------------------
1720 
1721   // 2. We don't need to check explicitly for callable target here,
1722   // since that's the first thing the Call/CallWithArrayLike builtins
1723   // will do.
1724 
1725   // 3. Apply the target to the given argumentsList.
1726   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1727           RelocInfo::CODE_TARGET);
1728 }
1729 
1730 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1731   // ----------- S t a t e -------------
1732   //  -- x0       : argc
1733   //  -- sp[0]    : new.target (optional)
1734   //  -- sp[8]    : argumentsList
1735   //  -- sp[16]   : target
1736   //  -- sp[24]   : receiver
1737   // -----------------------------------
1738   ASM_LOCATION("Builtins::Generate_ReflectConstruct");
1739 
1740   Register argc = x0;
1741   Register arguments_list = x2;
1742   Register target = x1;
1743   Register new_target = x3;
1744   Register undefined_value = x4;
1745 
1746   __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
1747 
1748   // 1. Load target into x1 (if present), argumentsList into x2 (if present),
1749   // new.target into x3 (if present, otherwise use target), remove all
1750   // arguments from the stack (including the receiver), and push thisArgument
1751   // (if present) instead.
1752   {
1753     // Push four undefined values on the stack, to put it in a consistent state
1754     // so that we can always read the three arguments we need from it. The
1755     // fourth value is used for stack alignment.
1756     __ Push(undefined_value, undefined_value, undefined_value, undefined_value);
1757 
1758     // The state of the stack (with arrows pointing to the slots we will read)
1759     // is as follows:
1760     //
1761     //       argc = 0               argc = 1                argc = 2
1762     //    sp[32]: receiver       sp[40]: receiver        sp[48]: receiver
1763     // -> sp[24]: undefined   -> sp[32]: target       -> sp[40]: target
1764     // -> sp[16]: undefined   -> sp[24]: undefined    -> sp[32]: arguments_list
1765     // -> sp[8]:  undefined   -> sp[16]: undefined    -> sp[24]: undefined
1766     //    sp[0]:  undefined      sp[8]:  undefined       sp[16]: undefined
1767     //                           sp[0]:  undefined       sp[8]:  undefined
1768     //                                                   sp[0]:  undefined
1769     //       argc = 3
1770     //    sp[56]: receiver
1771     // -> sp[48]: target
1772     // -> sp[40]: arguments_list
1773     // -> sp[32]: new_target
1774     //    sp[24]: undefined
1775     //    sp[16]: undefined
1776     //    sp[8]:  undefined
1777     //    sp[0]:  undefined
1778     //
1779     // There are now always three arguments to read, in the slots starting from
1780     // slot (argc + 1).
1781     Register scratch = x10;
1782     __ SlotAddress(scratch, argc);
1783     __ Ldp(new_target, arguments_list, MemOperand(scratch, 1 * kPointerSize));
1784     __ Ldr(target, MemOperand(scratch, 3 * kPointerSize));
1785 
1786     __ Cmp(argc, 2);
1787     __ CmovX(new_target, target, ls);  // target if argc <= 2.
1788 
1789     __ Drop(4);  // Drop the undefined values we pushed above.
1790     __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
1791 
1792     // Push receiver (undefined).
1793     __ PushArgument(undefined_value);
1794   }
1795 
1796   // ----------- S t a t e -------------
1797   //  -- x2      : argumentsList
1798   //  -- x1      : target
1799   //  -- x3      : new.target
1800   //  -- sp[0]   : receiver (undefined)
1801   // -----------------------------------
1802 
1803   // 2. We don't need to check explicitly for constructor target here,
1804   // since that's the first thing the Construct/ConstructWithArrayLike
1805   // builtins will do.
1806 
1807   // 3. We don't need to check explicitly for constructor new.target here,
1808   // since that's the second thing the Construct/ConstructWithArrayLike
1809   // builtins will do.
1810 
1811   // 4. Construct the target with the given new.target and argumentsList.
1812   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1813           RelocInfo::CODE_TARGET);
1814 }
1815 
1816 namespace {
1817 
1818 void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1819   __ Push(lr, fp);
1820   __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
1821   __ Push(x11, x1);  // x1: function
1822   __ SmiTag(x11, x0);  // x0: number of arguments.
1823   __ Push(x11, padreg);
1824   __ Add(fp, sp, ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp);
1825 }
1826 
1827 void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1828   // ----------- S t a t e -------------
1829   //  -- x0 : result being passed through
1830   // -----------------------------------
1831   // Get the number of arguments passed (as a smi), tear down the frame and
1832   // then drop the parameters and the receiver.
1833   __ Ldr(x10, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1834   __ Mov(sp, fp);
1835   __ Pop(fp, lr);
1836 
1837   // Drop actual parameters and receiver.
1838   __ SmiUntag(x10);
1839   __ DropArguments(x10, TurboAssembler::kCountExcludesReceiver);
1840 }
1841 
1842 // Prepares the stack for copying the varargs. First we claim the necessary
1843 // slots, taking care of potential padding. Then we copy the existing arguments
1844 // one slot up or one slot down, as needed.
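// As an illustrative trace: with argc = 2 and len = 3, argc becomes 5, we
// claim len - 1 = 2 slots (reusing the old padding slot), and the three
// existing slots (two arguments plus the receiver) move one slot up, leaving
// slots 0..2 free for the varargs.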
1845 void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
1846                                        Register len) {
1847   Label len_odd, exit;
1848   Register slots_to_copy = x10;  // If needed.
1849   __ Add(slots_to_copy, argc, 1);
1850   __ Add(argc, argc, len);
1851   __ Tbnz(len, 0, &len_odd);
1852   __ Claim(len);
1853   __ B(&exit);
1854 
1855   __ Bind(&len_odd);
1856   // Claim the space we need. Note that argc already includes len at this
1857   // point. If argc is even, slots_to_claim = len + 1, as we need one extra
1858   // padding slot. If argc is odd, the original arguments have a padding slot
1859   // we can reuse (since len is odd), so slots_to_claim = len - 1.
1860   {
1861     Register scratch = x11;
1862     Register slots_to_claim = x12;
1863     __ Add(slots_to_claim, len, 1);
1864     __ And(scratch, argc, 1);
1865     __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
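    // I.e. slots_to_claim = len + 1 - 2 * (argc & 1): len + 1 when the
    // updated argc is even, len - 1 when it is odd.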
1866     __ Claim(slots_to_claim);
1867   }
1868 
1869   Label copy_down;
1870   __ Tbz(slots_to_copy, 0, &copy_down);
1871 
1872   // Copy existing arguments one slot up.
1873   {
1874     Register src = x11;
1875     Register dst = x12;
1876     Register scratch = x13;
1877     __ Sub(scratch, argc, 1);
1878     __ SlotAddress(src, scratch);
1879     __ SlotAddress(dst, argc);
1880     __ CopyDoubleWords(dst, src, slots_to_copy,
1881                        TurboAssembler::kSrcLessThanDst);
1882   }
1883   __ B(&exit);
1884 
1885   // Copy existing arguments one slot down and add padding.
1886   __ Bind(&copy_down);
1887   {
1888     Register src = x11;
1889     Register dst = x12;
1890     Register scratch = x13;
1891     __ Add(src, len, 1);
1892     __ Mov(dst, len);  // CopySlots will corrupt dst.
1893     __ CopySlots(dst, src, slots_to_copy);
1894     __ Add(scratch, argc, 1);
1895     __ Poke(padreg, Operand(scratch, LSL, kPointerSizeLog2));  // Store padding.
1896   }
1897 
1898   __ Bind(&exit);
1899 }
1900 
1901 }  // namespace
1902 
1903 // static
1904 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1905                                                Handle<Code> code) {
1906   // ----------- S t a t e -------------
1907   //  -- x1 : target
1908   //  -- x0 : number of parameters on the stack (not including the receiver)
1909   //  -- x2 : arguments list (a FixedArray)
1910   //  -- x4 : len (number of elements to push from args)
1911   //  -- x3 : new.target (for [[Construct]])
1912   // -----------------------------------
1913   if (masm->emit_debug_code()) {
1914     // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
1915     Label ok, fail;
1916     __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
1917     __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
1918     __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1919     __ Cmp(x13, FIXED_ARRAY_TYPE);
1920     __ B(eq, &ok);
1921     __ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
1922     __ B(ne, &fail);
1923     __ Cmp(x4, 0);
1924     __ B(eq, &ok);
1925     // Fall through.
1926     __ bind(&fail);
1927     __ Abort(AbortReason::kOperandIsNotAFixedArray);
1928 
1929     __ bind(&ok);
1930   }
1931 
1932   Register arguments_list = x2;
1933   Register argc = x0;
1934   Register len = x4;
1935 
1936   // Check for stack overflow.
1937   {
1938     // Check the stack for overflow. We are not trying to catch interruptions
1939     // (i.e. debug break and preemption) here, so check the "real stack limit".
1940     Label done;
1941     __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
1942     // Make x10 the amount of space we have left. The stack might already
1943     // have overflowed, in which case x10 will be negative.
1944     __ Sub(x10, sp, x10);
1945     // Check if the arguments will overflow the stack.
1946     __ Cmp(x10, Operand(len, LSL, kPointerSizeLog2));
1947     __ B(gt, &done);  // Signed comparison.
1948     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1949     __ Bind(&done);
1950   }
1951 
1952   // Skip argument setup if we don't need to push any varargs.
1953   Label done;
1954   __ Cbz(len, &done);
1955 
1956   Generate_PrepareForCopyingVarargs(masm, argc, len);
1957 
1958   // Push varargs.
1959   {
1960     Label loop;
1961     Register src = x10;
1962     Register the_hole_value = x11;
1963     Register undefined_value = x12;
1964     Register scratch = x13;
1965     __ Add(src, arguments_list, FixedArray::kHeaderSize - kHeapObjectTag);
1966     __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
1967     __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
1968     // We do not use the CompareRoot macro as it would do a LoadRoot behind the
1969     // scenes and we want to avoid that in a loop.
1970     // TODO(all): Consider using Ldp and Stp.
1971     __ Bind(&loop);
1972     __ Sub(len, len, 1);
1973     __ Ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
1974     __ Cmp(scratch, the_hole_value);
1975     __ Csel(scratch, scratch, undefined_value, ne);
1976     __ Poke(scratch, Operand(len, LSL, kPointerSizeLog2));
1977     __ Cbnz(len, &loop);
1978   }
1979   __ Bind(&done);
1980 
1981   // Tail-call to the actual Call or Construct builtin.
1982   __ Jump(code, RelocInfo::CODE_TARGET);
1983 }
1984 
1985 // static
1986 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
1987                                                       CallOrConstructMode mode,
1988                                                       Handle<Code> code) {
1989   // ----------- S t a t e -------------
1990   //  -- x0 : the number of arguments (not including the receiver)
1991   //  -- x3 : the new.target (for [[Construct]] calls)
1992   //  -- x1 : the target to call (can be any Object)
1993   //  -- x2 : start index (to support rest parameters)
1994   // -----------------------------------
1995 
1996   Register argc = x0;
1997   Register start_index = x2;
1998 
1999   // Check if new.target has a [[Construct]] internal method.
2000   if (mode == CallOrConstructMode::kConstruct) {
2001     Label new_target_constructor, new_target_not_constructor;
2002     __ JumpIfSmi(x3, &new_target_not_constructor);
2003     __ Ldr(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
2004     __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
2005     __ TestAndBranchIfAnySet(x5, Map::IsConstructorBit::kMask,
2006                              &new_target_constructor);
2007     __ Bind(&new_target_not_constructor);
2008     {
2009       FrameScope scope(masm, StackFrame::MANUAL);
2010       __ EnterFrame(StackFrame::INTERNAL);
2011       __ PushArgument(x3);
2012       __ CallRuntime(Runtime::kThrowNotConstructor);
2013     }
2014     __ Bind(&new_target_constructor);
2015   }
2016 
2017   // Check if we have an arguments adaptor frame below the function frame.
2018   // args_fp will point to the frame that contains the actual arguments, which
2019   // will be the current frame unless we have an arguments adaptor frame, in
2020   // which case args_fp points to the arguments adaptor frame.
2021   Register args_fp = x5;
2022   Register len = x6;
2023   {
2024     Label arguments_adaptor, arguments_done;
2025     Register scratch = x10;
2026     __ Ldr(args_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2027     __ Ldr(x4, MemOperand(args_fp,
2028                           CommonFrameConstants::kContextOrFrameTypeOffset));
2029     __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
2030     __ B(eq, &arguments_adaptor);
2031     {
2032       __ Ldr(scratch,
2033              MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2034       __ Ldr(scratch,
2035              FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
2036       __ Ldrh(len,
2037               FieldMemOperand(scratch,
2038                               SharedFunctionInfo::kFormalParameterCountOffset));
2039       __ Mov(args_fp, fp);
2040     }
2041     __ B(&arguments_done);
2042     __ Bind(&arguments_adaptor);
2043     {
2044       // Just load the length from ArgumentsAdaptorFrame.
2045       __ SmiUntag(
2046           len,
2047           MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
2048     }
2049     __ Bind(&arguments_done);
2050   }
2051 
2052   Label stack_done, stack_overflow;
2053   __ Subs(len, len, start_index);
2054   __ B(le, &stack_done);
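  // len now holds the number of arguments left to forward; if start_index
  // consumed them all (le), there is nothing to copy.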
2055   // Check for stack overflow.
2056   Generate_StackOverflowCheck(masm, x6, &stack_overflow);
2057 
2058   Generate_PrepareForCopyingVarargs(masm, argc, len);
2059 
2060   // Push varargs.
2061   {
2062     Register dst = x13;
2063     __ Add(args_fp, args_fp, 2 * kPointerSize);
2064     __ SlotAddress(dst, 0);
2065     __ CopyDoubleWords(dst, args_fp, len);
2066   }
2067   __ B(&stack_done);
2068 
2069   __ Bind(&stack_overflow);
2070   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2071   __ Bind(&stack_done);
2072 
2073   __ Jump(code, RelocInfo::CODE_TARGET);
2074 }
2075 
2076 // static
2077 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2078                                      ConvertReceiverMode mode) {
2079   ASM_LOCATION("Builtins::Generate_CallFunction");
2080   // ----------- S t a t e -------------
2081   //  -- x0 : the number of arguments (not including the receiver)
2082   //  -- x1 : the function to call (checked to be a JSFunction)
2083   // -----------------------------------
2084   __ AssertFunction(x1);
2085 
2086   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2087   // Check that function is not a "classConstructor".
2088   Label class_constructor;
2089   __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2090   __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
2091   __ TestAndBranchIfAnySet(w3, SharedFunctionInfo::IsClassConstructorBit::kMask,
2092                            &class_constructor);
2093 
2094   // Enter the context of the function; ToObject has to run in the function
2095   // context, and we also need to take the global proxy from the function
2096   // context in case of conversion.
2097   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
2098   // We need to convert the receiver for non-native sloppy mode functions.
2099   Label done_convert;
2100   __ TestAndBranchIfAnySet(w3,
2101                            SharedFunctionInfo::IsNativeBit::kMask |
2102                                SharedFunctionInfo::IsStrictBit::kMask,
2103                            &done_convert);
2104   {
2105     // ----------- S t a t e -------------
2106     //  -- x0 : the number of arguments (not including the receiver)
2107     //  -- x1 : the function to call (checked to be a JSFunction)
2108     //  -- x2 : the shared function info.
2109     //  -- cp : the function context.
2110     // -----------------------------------
2111 
2112     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2113       // Patch receiver to global proxy.
2114       __ LoadGlobalProxy(x3);
2115     } else {
2116       Label convert_to_object, convert_receiver;
2117       __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
2118       __ JumpIfSmi(x3, &convert_to_object);
2119       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2120       __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
2121       __ B(hs, &done_convert);
2122       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2123         Label convert_global_proxy;
2124         __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex,
2125                       &convert_global_proxy);
2126         __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
2127         __ Bind(&convert_global_proxy);
2128         {
2129           // Patch receiver to global proxy.
2130           __ LoadGlobalProxy(x3);
2131         }
2132         __ B(&convert_receiver);
2133       }
2134       __ Bind(&convert_to_object);
2135       {
2136         // Convert receiver using ToObject.
2137         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2138         // in the fast case? (fall back to AllocateInNewSpace?)
2139         FrameScope scope(masm, StackFrame::INTERNAL);
2140         __ SmiTag(x0);
2141         __ Push(padreg, x0, x1, cp);
2142         __ Mov(x0, x3);
2143         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2144                 RelocInfo::CODE_TARGET);
2145         __ Mov(x3, x0);
2146         __ Pop(cp, x1, x0, padreg);
2147         __ SmiUntag(x0);
2148       }
2149       __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2150       __ Bind(&convert_receiver);
2151     }
2152     __ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
2153   }
2154   __ Bind(&done_convert);
2155 
2156   // ----------- S t a t e -------------
2157   //  -- x0 : the number of arguments (not including the receiver)
2158   //  -- x1 : the function to call (checked to be a JSFunction)
2159   //  -- x2 : the shared function info.
2160   //  -- cp : the function context.
2161   // -----------------------------------
2162 
2163   __ Ldrh(x2,
2164           FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
2165   ParameterCount actual(x0);
2166   ParameterCount expected(x2);
2167   __ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION);
2168 
2169   // The function is a "classConstructor", need to raise an exception.
2170   __ Bind(&class_constructor);
2171   {
2172     FrameScope frame(masm, StackFrame::INTERNAL);
2173     __ PushArgument(x1);
2174     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2175   }
2176 }
2177 
2178 namespace {
2179 
2180 void Generate_PushBoundArguments(MacroAssembler* masm) {
2181   // ----------- S t a t e -------------
2182   //  -- x0 : the number of arguments (not including the receiver)
2183   //  -- x1 : target (checked to be a JSBoundFunction)
2184   //  -- x3 : new.target (only in case of [[Construct]])
2185   // -----------------------------------
2186 
2187   Register bound_argc = x4;
2188   Register bound_argv = x2;
2189 
2190   // Load [[BoundArguments]] into x2 and length of that into x4.
2191   Label no_bound_arguments;
2192   __ Ldr(bound_argv,
2193          FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
2194   __ SmiUntag(bound_argc,
2195               FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
2196   __ Cbz(bound_argc, &no_bound_arguments);
2197   {
2198     // ----------- S t a t e -------------
2199     //  -- x0 : the number of arguments (not including the receiver)
2200     //  -- x1 : target (checked to be a JSBoundFunction)
2201     //  -- x2 : the [[BoundArguments]] (implemented as FixedArray)
2202     //  -- x3 : new.target (only in case of [[Construct]])
2203     //  -- x4 : the number of [[BoundArguments]]
2204     // -----------------------------------
2205 
2206     Register argc = x0;
2207 
2208     // Check for stack overflow.
2209     {
2210       // Check the stack for overflow. We are not trying to catch interruptions
2211       // (i.e. debug break and preemption) here, so check the "real stack
2212       // limit".
2213       Label done;
2214       __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
2215       // Make x10 the amount of space we have left. The stack might already
2216       // have overflowed, in which case x10 will be negative.
2217       __ Sub(x10, sp, x10);
2218       // Check if the arguments will overflow the stack.
2219       __ Cmp(x10, Operand(bound_argc, LSL, kPointerSizeLog2));
2220       __ B(gt, &done);  // Signed comparison.
2221       __ TailCallRuntime(Runtime::kThrowStackOverflow);
2222       __ Bind(&done);
2223     }
2224 
2225     // Check if we need padding.
2226     Label copy_args, copy_bound_args;
2227     Register total_argc = x15;
2228     Register slots_to_claim = x12;
2229     __ Add(total_argc, argc, bound_argc);
2230     __ Mov(slots_to_claim, bound_argc);
2231     __ Tbz(bound_argc, 0, &copy_args);
2232 
2233     // Load the receiver before we start moving the arguments. We only need
2234     // it on this path, because the number of bound arguments is odd.
2235     Register receiver = x14;
2236     __ Peek(receiver, Operand(argc, LSL, kPointerSizeLog2));
2237 
2238     // Claim the space we need. If total_argc is even, slots_to_claim =
2239     // bound_argc + 1, as we need one extra padding slot. If total_argc is
2240     // odd, the original arguments have a padding slot we can reuse (since
2241     // bound_argc is odd), so slots_to_claim = bound_argc - 1.
2242     {
2243       Register scratch = x11;
2244       __ Add(slots_to_claim, bound_argc, 1);
2245       __ And(scratch, total_argc, 1);
2246       __ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
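      // I.e. slots_to_claim = bound_argc + 1 - 2 * (total_argc & 1).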
2247     }
2248 
2249     // Copy bound arguments.
2250     __ Bind(&copy_args);
2251     // Skip claim and copy of existing arguments in the special case where we
2252     // do not need to claim any slots (this will be the case when
2253     // bound_argc == 1 and the existing arguments have padding we can reuse).
2254     __ Cbz(slots_to_claim, &copy_bound_args);
2255     __ Claim(slots_to_claim);
2256     {
2257       Register count = x10;
2258       // Relocate arguments to a lower address.
2259       __ Mov(count, argc);
2260       __ CopySlots(0, slots_to_claim, count);
2261 
2262       __ Bind(&copy_bound_args);
2263       // Copy [[BoundArguments]] to the stack (below the arguments). The first
2264       // element of the array is copied to the highest address.
2265       {
2266         Label loop;
2267         Register counter = x10;
2268         Register scratch = x11;
2269         Register copy_to = x12;
2270         __ Add(bound_argv, bound_argv,
2271                FixedArray::kHeaderSize - kHeapObjectTag);
2272         __ SlotAddress(copy_to, argc);
2273         __ Add(argc, argc,
2274                bound_argc);  // Update argc to include bound arguments.
2275         __ Lsl(counter, bound_argc, kPointerSizeLog2);
2276         __ Bind(&loop);
2277         __ Sub(counter, counter, kPointerSize);
2278         __ Ldr(scratch, MemOperand(bound_argv, counter));
2279         // Poke into claimed area of stack.
2280         __ Str(scratch, MemOperand(copy_to, kPointerSize, PostIndex));
2281         __ Cbnz(counter, &loop);
2282       }
2283 
2284       {
2285         Label done;
2286         Register scratch = x10;
2287         __ Tbz(bound_argc, 0, &done);
2288         // Store receiver.
2289         __ Add(scratch, sp, Operand(total_argc, LSL, kPointerSizeLog2));
2290         __ Str(receiver, MemOperand(scratch, kPointerSize, PostIndex));
2291         __ Tbnz(total_argc, 0, &done);
2292         // Store padding.
2293         __ Str(padreg, MemOperand(scratch));
2294         __ Bind(&done);
2295       }
2296     }
2297   }
2298   __ Bind(&no_bound_arguments);
2299 }
2300 
2301 }  // namespace
2302 
2303 // static
2304 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2305   // ----------- S t a t e -------------
2306   //  -- x0 : the number of arguments (not including the receiver)
2307   //  -- x1 : the function to call (checked to be a JSBoundFunction)
2308   // -----------------------------------
2309   __ AssertBoundFunction(x1);
2310 
2311   // Patch the receiver to [[BoundThis]].
2312   __ Ldr(x10, FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
2313   __ Poke(x10, Operand(x0, LSL, kPointerSizeLog2));
2314 
2315   // Push the [[BoundArguments]] onto the stack.
2316   Generate_PushBoundArguments(masm);
2317 
2318   // Call the [[BoundTargetFunction]] via the Call builtin.
2319   __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2320   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2321           RelocInfo::CODE_TARGET);
2322 }
2323 
2324 // static
2325 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2326   // ----------- S t a t e -------------
2327   //  -- x0 : the number of arguments (not including the receiver)
2328   //  -- x1 : the target to call (can be any Object).
2329   // -----------------------------------
2330 
2331   Label non_callable, non_function, non_smi;
2332   __ JumpIfSmi(x1, &non_callable);
2333   __ Bind(&non_smi);
2334   __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
2335   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2336           RelocInfo::CODE_TARGET, eq);
2337   __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
2338   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2339           RelocInfo::CODE_TARGET, eq);
2340 
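  // CompareObjectType above left the map of the target in x4 and its
  // instance type in x5; both are reused below.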
2341   // Check if target has a [[Call]] internal method.
2342   __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
2343   __ TestAndBranchIfAllClear(x4, Map::IsCallableBit::kMask, &non_callable);
2344 
2345   // Check if the target is a proxy and, if so, call the CallProxy builtin.
2346   __ Cmp(x5, JS_PROXY_TYPE);
2347   __ B(ne, &non_function);
2348   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
2349 
2350   // Call to something else, which might have a [[Call]] internal method (if
2351   // not, we raise an exception).
2352   __ Bind(&non_function);
2353   // Overwrite the original receiver with the (original) target.
2354   __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
2355   // Let the "call_as_function_delegate" take care of the rest.
2356   __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
2357   __ Jump(masm->isolate()->builtins()->CallFunction(
2358               ConvertReceiverMode::kNotNullOrUndefined),
2359           RelocInfo::CODE_TARGET);
2360 
2361   // Call to something that is not callable.
2362   __ bind(&non_callable);
2363   {
2364     FrameScope scope(masm, StackFrame::INTERNAL);
2365     __ PushArgument(x1);
2366     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2367   }
2368 }
2369 
2370 // static
2371 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2372   // ----------- S t a t e -------------
2373   //  -- x0 : the number of arguments (not including the receiver)
2374   //  -- x1 : the constructor to call (checked to be a JSFunction)
2375   //  -- x3 : the new target (checked to be a constructor)
2376   // -----------------------------------
2377   __ AssertConstructor(x1);
2378   __ AssertFunction(x1);
2379 
2380   // The calling convention for function-specific ConstructStubs requires
2381   // x2 to contain either an AllocationSite or undefined.
2382   __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
2383 
2384   Label call_generic_stub;
2385 
2386   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2387   __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
2388   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
2389   __ TestAndBranchIfAllClear(
2390       w4, SharedFunctionInfo::ConstructAsBuiltinBit::kMask, &call_generic_stub);
2391 
2392   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2393           RelocInfo::CODE_TARGET);
2394 
2395   __ bind(&call_generic_stub);
2396   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2397           RelocInfo::CODE_TARGET);
2398 }
2399 
2400 // static
2401 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2402   // ----------- S t a t e -------------
2403   //  -- x0 : the number of arguments (not including the receiver)
2404   //  -- x1 : the function to call (checked to be a JSBoundFunction)
2405   //  -- x3 : the new target (checked to be a constructor)
2406   // -----------------------------------
2407   __ AssertConstructor(x1);
2408   __ AssertBoundFunction(x1);
2409 
2410   // Push the [[BoundArguments]] onto the stack.
2411   Generate_PushBoundArguments(masm);
2412 
2413   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2414   {
2415     Label done;
2416     __ Cmp(x1, x3);
2417     __ B(ne, &done);
2418     __ Ldr(x3,
2419            FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2420     __ Bind(&done);
2421   }
2422 
2423   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2424   __ Ldr(x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
2425   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2426 }
2427 
2428 // static
2429 void Builtins::Generate_Construct(MacroAssembler* masm) {
2430   // ----------- S t a t e -------------
2431   //  -- x0 : the number of arguments (not including the receiver)
2432   //  -- x1 : the constructor to call (can be any Object)
2433   //  -- x3 : the new target (either the same as the constructor or
2434   //          the JSFunction on which new was invoked initially)
2435   // -----------------------------------
2436 
2437   // Check if target is a Smi.
2438   Label non_constructor, non_proxy;
2439   __ JumpIfSmi(x1, &non_constructor);
2440 
2441   // Check if target has a [[Construct]] internal method.
2442   __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
2443   __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
2444   __ TestAndBranchIfAllClear(x2, Map::IsConstructorBit::kMask,
2445                              &non_constructor);
2446 
2447   // Dispatch based on instance type.
2448   __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
2449   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2450           RelocInfo::CODE_TARGET, eq);
2451 
2452   // Only dispatch to bound functions after checking whether they are
2453   // constructors.
2454   __ Cmp(x5, JS_BOUND_FUNCTION_TYPE);
2455   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2456           RelocInfo::CODE_TARGET, eq);
2457 
2458   // Only dispatch to proxies after checking whether they are constructors.
2459   __ Cmp(x5, JS_PROXY_TYPE);
2460   __ B(ne, &non_proxy);
2461   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2462           RelocInfo::CODE_TARGET);
2463 
2464   // Called Construct on an exotic Object with a [[Construct]] internal method.
2465   __ bind(&non_proxy);
2466   {
2467     // Overwrite the original receiver with the (original) target.
2468     __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
2469     // Let the "call_as_constructor_delegate" take care of the rest.
2470     __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
2471     __ Jump(masm->isolate()->builtins()->CallFunction(),
2472             RelocInfo::CODE_TARGET);
2473   }
2474 
2475   // Called Construct on an Object that doesn't have a [[Construct]] internal
2476   // method.
2477   __ bind(&non_constructor);
2478   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2479           RelocInfo::CODE_TARGET);
2480 }
2481 
2482 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2483   ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
2484   // ----------- S t a t e -------------
2485   //  -- x0 : actual number of arguments
2486   //  -- x1 : function (passed through to callee)
2487   //  -- x2 : expected number of arguments
2488   //  -- x3 : new target (passed through to callee)
2489   // -----------------------------------
2490 
2491   // The frame we are about to construct will look like:
2492   //
2493   //  slot      Adaptor frame
2494   //       +-----------------+--------------------------------
2495   //  -n-1 |    receiver     |                            ^
2496   //       |  (parameter 0)  |                            |
2497   //       |- - - - - - - - -|                            |
2498   //  -n   |                 |                          Caller
2499   //  ...  |       ...       |                       frame slots --> actual args
2500   //  -2   |  parameter n-1  |                            |
2501   //       |- - - - - - - - -|                            |
2502   //  -1   |   parameter n   |                            v
2503   //  -----+-----------------+--------------------------------
2504   //   0   |   return addr   |                            ^
2505   //       |- - - - - - - - -|                            |
2506   //   1   | saved frame ptr | <-- frame ptr              |
2507   //       |- - - - - - - - -|                            |
2508   //   2   |Frame Type Marker|                            |
2509   //       |- - - - - - - - -|                            |
2510   //   3   |    function     |                          Callee
2511   //       |- - - - - - - - -|                        frame slots
2512   //   4   |     num of      |                            |
2513   //       |   actual args   |                            |
2514   //       |- - - - - - - - -|                            |
2515   //   5   |     padding     |                            |
2516   //       |-----------------+----                        |
2517   //  [6]  |    [padding]    |   ^                        |
2518   //       |- - - - - - - - -|   |                        |
2519   // 6+pad |    receiver     |   |                        |
2520   //       |  (parameter 0)  |   |                        |
2521   //       |- - - - - - - - -|   |                        |
2522   // 7+pad |   parameter 1   |   |                        |
2523   //       |- - - - - - - - -| Frame slots ----> expected args
2524   // 8+pad |   parameter 2   |   |                        |
2525   //       |- - - - - - - - -|   |                        |
2526   //       |                 |   |                        |
2527   //  ...  |       ...       |   |                        |
2528   //       |   parameter m   |   |                        |
2529   //       |- - - - - - - - -|   |                        |
2530   //       |   [undefined]   |   |                        |
2531   //       |- - - - - - - - -|   |                        |
2532   //       |                 |   |                        |
2533   //       |       ...       |   |                        |
2534   //       |   [undefined]   |   v   <-- stack ptr        v
2535   //  -----+-----------------+---------------------------------
2536   //
2537   // There is an optional slot of padding above the receiver to ensure stack
2538   // alignment of the arguments.
2539   // If the number of expected arguments is larger than the number of actual
2540   // arguments, the remaining expected slots will be filled with undefined.
2541 
2542   Register argc_actual = x0;    // Excluding the receiver.
2543   Register argc_expected = x2;  // Excluding the receiver.
2544   Register function = x1;
2545 
2546   Label dont_adapt_arguments, stack_overflow;
2547 
2548   Label enough_arguments;
2549   __ Cmp(argc_expected, SharedFunctionInfo::kDontAdaptArgumentsSentinel);
2550   __ B(eq, &dont_adapt_arguments);
2551 
2552   EnterArgumentsAdaptorFrame(masm);
2553 
2554   Register copy_from = x10;
2555   Register copy_end = x11;
2556   Register copy_to = x12;
2557   Register argc_to_copy = x13;
2558   Register argc_unused_actual = x14;
2559   Register scratch1 = x15, scratch2 = x16;
2560 
2561   // We need slots for the expected arguments, with one extra slot for the
2562   // receiver.
2563   __ RecordComment("-- Stack check --");
2564   __ Add(scratch1, argc_expected, 1);
2565   Generate_StackOverflowCheck(masm, scratch1, &stack_overflow);
2566 
2567   // Round up number of slots to be even, to maintain stack alignment.
2568   __ RecordComment("-- Allocate callee frame slots --");
2569   __ Add(scratch1, scratch1, 1);
2570   __ Bic(scratch1, scratch1, 1);
2571   __ Claim(scratch1, kPointerSize);
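  // Editor's note (not part of the V8 source): a minimal C sketch of what
  // the Add/Bic pair above computes, assuming 8-byte slots:
  //
  //   int64_t slots = expected_argc + 1;   // expected arguments + receiver
  //   slots = (slots + 1) & ~int64_t{1};   // round up to an even count
  //
  // Claiming an even number of 8-byte slots keeps sp 16-byte aligned, as
  // AAPCS64 requires.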
2572 
2573   __ Mov(copy_to, sp);
2574 
2575   // Preparing the expected arguments is done in four steps, the order of
2576   // which is chosen so we can use LDP/STP and avoid conditional branches as
2577   // much as possible.
2578 
2579   // (1) If we don't have enough arguments, fill the remaining expected
2580   // arguments with undefined, otherwise skip this step.
2581   __ Subs(scratch1, argc_actual, argc_expected);
2582   __ Csel(argc_unused_actual, xzr, scratch1, lt);
2583   __ Csel(argc_to_copy, argc_expected, argc_actual, ge);
2584   __ B(ge, &enough_arguments);
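  // Editor's note (not part of the V8 source): the Subs/Csel sequence above
  // is a branch-free rendering of roughly this C logic:
  //
  //   int64_t diff = actual_argc - expected_argc;           // Subs
  //   int64_t unused_actual = diff < 0 ? 0 : diff;          // Csel ... lt
  //   int64_t to_copy = diff < 0 ? actual_argc              // Csel ... ge
  //                              : expected_argc;           // = min of the two
  //
  // so argc_to_copy is min(actual, expected) and argc_unused_actual counts
  // the surplus actual arguments (zero when arguments are missing).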
2585 
2586   // Fill the remaining expected arguments with undefined.
2587   __ RecordComment("-- Fill slots with undefined --");
2588   __ Sub(copy_end, copy_to, Operand(scratch1, LSL, kPointerSizeLog2));
2589   __ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
2590 
2591   Label fill;
2592   __ Bind(&fill);
2593   __ Stp(scratch1, scratch1, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
2594   // We might write one slot extra, but that is ok because we'll overwrite it
2595   // below.
2596   __ Cmp(copy_end, copy_to);
2597   __ B(hi, &fill);
2598 
2599   // Correct copy_to, for the case where we wrote one additional slot.
2600   __ Mov(copy_to, copy_end);
2601 
2602   __ Bind(&enough_arguments);
2603   // (2) Copy all of the actual arguments, or as many as we need.
2604   Label skip_copy;
2605   __ RecordComment("-- Copy actual arguments --");
2606   __ Cbz(argc_to_copy, &skip_copy);
2607   __ Add(copy_end, copy_to, Operand(argc_to_copy, LSL, kPointerSizeLog2));
2608   __ Add(copy_from, fp, 2 * kPointerSize);
2609   // Adjust for difference between actual and expected arguments.
2610   __ Add(copy_from, copy_from,
2611          Operand(argc_unused_actual, LSL, kPointerSizeLog2));
2612 
2613   // Copy arguments. We use load/store pair instructions, so we might overshoot
2614   // by one slot, but since we copy the arguments starting from the last one, if
2615   // we do overshoot, the extra slot will be overwritten later by the receiver.
2616   Label copy_2_by_2;
2617   __ Bind(&copy_2_by_2);
2618   __ Ldp(scratch1, scratch2,
2619          MemOperand(copy_from, 2 * kPointerSize, PostIndex));
2620   __ Stp(scratch1, scratch2, MemOperand(copy_to, 2 * kPointerSize, PostIndex));
2621   __ Cmp(copy_end, copy_to);
2622   __ B(hi, &copy_2_by_2);
2623   __ Bind(&skip_copy);
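  // Editor's note (not part of the V8 source): ignoring the possible
  // one-slot overshoot described above, the pair-copy loop amounts to
  //
  //   memcpy(copy_to, copy_from, argc_to_copy * kPointerSize);
  //
  // performed 16 bytes at a time with LDP/STP.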
2624 
2625   // (3) Store padding. If no padding slot is needed, this store will
2626   // simply be overwritten by the receiver in step (4).
2627   __ RecordComment("-- Store padding --");
2628   __ Str(padreg, MemOperand(fp, -5 * kPointerSize));
2629 
2630   // (4) Store receiver. Calculate target address from the sp to avoid checking
2631   // for padding. Storing the receiver will overwrite either the extra slot
2632   // we copied with the actual arguments, if we did copy one, or the padding we
2633   // stored above.
2634   __ RecordComment("-- Store receiver --");
2635   __ Add(copy_from, fp, 2 * kPointerSize);
2636   __ Ldr(scratch1, MemOperand(copy_from, argc_actual, LSL, kPointerSizeLog2));
2637   __ Str(scratch1, MemOperand(sp, argc_expected, LSL, kPointerSizeLog2));
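  // Editor's note (not part of the V8 source; slot names are hypothetical):
  // in slot terms the two instructions above implement
  //
  //   new_frame_slots[expected_argc] = caller_slots[2 + actual_argc];
  //
  // where caller_slots[2] (fp + 2 * kPointerSize) is the first argument slot
  // above the saved fp and return address, so caller_slots[2 + actual_argc]
  // is the caller's receiver.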
2638 
2639   // Arguments have been adapted. Now call the entry point.
2640   __ RecordComment("-- Call entry point --");
2641   __ Mov(argc_actual, argc_expected);
2642   // x0 : expected number of arguments
2643   // x1 : function (passed through to callee)
2644   // x3 : new target (passed through to callee)
2645   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
2646   __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
2647   __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
2648   __ Call(x2);
2649 
2650   // Store offset of return address for deoptimizer.
2651   masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2652 
2653   // Exit frame and return.
2654   LeaveArgumentsAdaptorFrame(masm);
2655   __ Ret();
2656 
2657   // Call the entry point without adapting the arguments.
2658   __ RecordComment("-- Call without adapting args --");
2659   __ Bind(&dont_adapt_arguments);
2660   static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
2661   __ Ldr(x2, FieldMemOperand(function, JSFunction::kCodeOffset));
2662   __ Add(x2, x2, Operand(Code::kHeaderSize - kHeapObjectTag));
2663   __ Jump(x2);
2664 
2665   __ Bind(&stack_overflow);
2666   __ RecordComment("-- Stack overflow --");
2667   {
2668     FrameScope frame(masm, StackFrame::MANUAL);
2669     __ CallRuntime(Runtime::kThrowStackOverflow);
2670     __ Unreachable();
2671   }
2672 }
2673 
2674 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2675   // The function index was put in w8 by the jump table trampoline.
2676   // Sign extend and convert to Smi for the runtime call.
2677   __ sxtw(x8, w8);
2678   __ SmiTag(x8, x8);
2679   {
2680     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2681     FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2682 
2683     // Save all parameter registers (see wasm-linkage.cc). They might be
2684     // overwritten in the runtime call below. We don't have any callee-saved
2685     // registers in wasm, so no need to store anything else.
2686     constexpr RegList gp_regs =
2687         Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
2688     constexpr RegList fp_regs =
2689         Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
2690     __ PushXRegList(gp_regs);
2691     __ PushDRegList(fp_regs);
2692 
2693     // Pass instance and function index as explicit arguments to the runtime
2694     // function.
2695     __ Push(kWasmInstanceRegister, x8);
2696     // Load the correct CEntry builtin from the instance object.
2697     __ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
2698                                WasmInstanceObject::kCEntryStubOffset));
2699     // Initialize the JavaScript context with 0. CEntry will use it to
2700     // set the current context on the isolate.
2701     __ Mov(cp, Smi::kZero);
2702     __ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
2703     // The entrypoint address is the return value.
2704     __ mov(x8, kReturnRegister0);
2705 
2706     // Restore registers.
2707     __ PopDRegList(fp_regs);
2708     __ PopXRegList(gp_regs);
2709   }
2710   // Finally, jump to the entrypoint.
2711   __ Jump(x8);
2712 }
2713 
2714 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2715                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2716                                bool builtin_exit_frame) {
2717   // The Abort mechanism relies on CallRuntime, which in turn relies on
2718   // CEntry, so until this stub has been generated, we have to use a
2719   // fall-back Abort mechanism.
2720   //
2721   // Note that this stub must be generated before any use of Abort.
2722   HardAbortScope hard_aborts(masm);
2723 
2724   ASM_LOCATION("CEntry::Generate entry");
2725   ProfileEntryHookStub::MaybeCallEntryHook(masm);
2726 
2727   // Register parameters:
2728   //    x0: argc (including receiver, untagged)
2729   //    x1: target
2730   // If argv_mode == kArgvInRegister:
2731   //    x11: argv (pointer to first argument)
2732   //
2733   // The stack on entry holds the arguments and the receiver, with the receiver
2734   // at the highest address:
2735   //
2736   //    sp[argc-1]: receiver
2737   //    sp[argc-2]: arg[argc-2]
2738   //    ...           ...
2739   //    sp[1]:      arg[1]
2740   //    sp[0]:      arg[0]
2741   //
2742   // The arguments are in reverse order, so that arg[argc-2] is actually the
2743   // first argument to the target function and arg[0] is the last.
2744   const Register& argc_input = x0;
2745   const Register& target_input = x1;
2746 
2747   // Calculate argv, argc and the target address, and store them in
2748   // callee-saved registers so we can retry the call without having to reload
2749   // these arguments.
2750   // TODO(jbramley): If the first call attempt succeeds in the common case (as
2751   // it should), then we might be better off putting these parameters directly
2752   // into their argument registers, rather than using callee-saved registers and
2753   // preserving them on the stack.
2754   const Register& argv = x21;
2755   const Register& argc = x22;
2756   const Register& target = x23;
2757 
2758   // Derive argv from the stack pointer so that it points to the first argument
2759   // (arg[argc-2]), or just below the receiver in case there are no arguments.
2760   //  - Adjust for the arg[] array.
2761   Register temp_argv = x11;
2762   if (argv_mode == kArgvOnStack) {
2763     __ SlotAddress(temp_argv, x0);
2764     //  - Adjust for the receiver.
2765     __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
2766   }
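  // Editor's note (not part of the V8 source): with 8-byte slots the two
  // instructions above compute
  //
  //   temp_argv = sp + (argc - 1) * kPointerSize;
  //
  // where argc (x0) includes the receiver, which sits at the highest
  // address of the argument area.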
2767 
2768   // Reserve three slots to preserve x21-x23 callee-saved registers.
2769   int extra_stack_space = 3;
2770   // Enter the exit frame.
2771   FrameScope scope(masm, StackFrame::MANUAL);
2772   __ EnterExitFrame(
2773       save_doubles == kSaveFPRegs, x10, extra_stack_space,
2774       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2775 
2776   // Poke callee-saved registers into reserved space.
2777   __ Poke(argv, 1 * kPointerSize);
2778   __ Poke(argc, 2 * kPointerSize);
2779   __ Poke(target, 3 * kPointerSize);
2780 
2781   // We normally only keep tagged values in callee-saved registers, as they
2782   // could be pushed onto the stack by called stubs and functions, and on the
2783   // stack they can confuse the GC. However, we're only calling C functions
2784   // which can push arbitrary data onto the stack anyway, and so the GC won't
2785   // examine that part of the stack.
2786   __ Mov(argc, argc_input);
2787   __ Mov(target, target_input);
2788   __ Mov(argv, temp_argv);
2789 
2790   // x21 : argv
2791   // x22 : argc
2792   // x23 : call target
2793   //
2794   // The stack (on entry) holds the arguments and the receiver, with the
2795   // receiver at the highest address:
2796   //
2797   //         argv[8]:     receiver
2798   // argv -> argv[0]:     arg[argc-2]
2799   //         ...          ...
2800   //         argv[...]:   arg[1]
2801   //         argv[...]:   arg[0]
2802   //
2803   // Immediately below (after) this is the exit frame, as constructed by
2804   // EnterExitFrame:
2805   //         fp[8]:    CallerPC (lr)
2806   //   fp -> fp[0]:    CallerFP (old fp)
2807   //         fp[-8]:   Space reserved for SPOffset.
2808   //         fp[-16]:  CodeObject()
2809   //         sp[...]:  Saved doubles, if save_doubles is kSaveFPRegs.
2810   //         sp[32]:   Alignment padding, if necessary.
2811   //         sp[24]:   Preserved x23 (used for target).
2812   //         sp[16]:   Preserved x22 (used for argc).
2813   //         sp[8]:    Preserved x21 (used for argv).
2814   //   sp -> sp[0]:    Space reserved for the return address.
2815   //
2816   // After a successful call, the exit frame, preserved registers (x21-x23) and
2817   // the arguments (including the receiver) are dropped or popped as
2818   // appropriate. The stub then returns.
2819   //
2820   // After an unsuccessful call, the exit frame and suchlike are left
2821   // untouched, and the stub throws an exception by jumping to the
2822   // exception_returned label below.
2823 
2824   // Prepare AAPCS64 arguments to pass to the builtin.
2825   __ Mov(x0, argc);
2826   __ Mov(x1, argv);
2827   __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
2828 
2829   Label return_location;
2830   __ Adr(x12, &return_location);
2831   __ Poke(x12, 0);
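  // Editor's note (not part of the V8 source): the return address is
  // computed with Adr and stored into the reserved slot before the call, so
  // that stack walkers (e.g. the profiler) can see a complete exit frame
  // while the C function is running.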
2832 
2833   if (__ emit_debug_code()) {
2834     // Verify that the slot just below the address stored at fp[kSPOffset]
2835     // holds the return location (currently in x12).
2836     UseScratchRegisterScope temps(masm);
2837     Register temp = temps.AcquireX();
2838     __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
2839     __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
2840     __ Cmp(temp, x12);
2841     __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
2842   }
2843 
2844   // Call the builtin.
2845   __ Blr(target);
2846   __ Bind(&return_location);
2847 
2848   // Result returned in x0 or x1:x0 - do not destroy these registers!
2849 
2850   //  x0    result0      The return code from the call.
2851   //  x1    result1      For calls which return ObjectPair.
2852   //  x21   argv
2853   //  x22   argc
2854   //  x23   target
2855   const Register& result = x0;
2856 
2857   // Check result for exception sentinel.
2858   Label exception_returned;
2859   __ CompareRoot(result, Heap::kExceptionRootIndex);
2860   __ B(eq, &exception_returned);
2861 
2862   // The call succeeded, so unwind the stack and return.
2863 
2864   // Restore callee-saved registers x21-x23.
2865   __ Mov(x11, argc);
2866 
2867   __ Peek(argv, 1 * kPointerSize);
2868   __ Peek(argc, 2 * kPointerSize);
2869   __ Peek(target, 3 * kPointerSize);
2870 
2871   __ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
2872   if (argv_mode == kArgvOnStack) {
2873     // Drop the remaining stack slots and return from the stub.
2874     __ DropArguments(x11);
2875   }
2876   __ AssertFPCRState();
2877   __ Ret();
2878 
2879   // Handling of exception.
2880   __ Bind(&exception_returned);
2881 
2882   ExternalReference pending_handler_context_address = ExternalReference::Create(
2883       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2884   ExternalReference pending_handler_entrypoint_address =
2885       ExternalReference::Create(
2886           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2887   ExternalReference pending_handler_fp_address = ExternalReference::Create(
2888       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2889   ExternalReference pending_handler_sp_address = ExternalReference::Create(
2890       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2891 
2892   // Ask the runtime for help to determine the handler. This will set x0 to
2893   // contain the current pending exception, don't clobber it.
2894   ExternalReference find_handler =
2895       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2896   {
2897     FrameScope scope(masm, StackFrame::MANUAL);
2898     __ Mov(x0, 0);  // argc.
2899     __ Mov(x1, 0);  // argv.
2900     __ Mov(x2, ExternalReference::isolate_address(masm->isolate()));
2901     __ CallCFunction(find_handler, 3);
2902   }
2903 
2904   // Retrieve the handler context, SP and FP.
2905   __ Mov(cp, pending_handler_context_address);
2906   __ Ldr(cp, MemOperand(cp));
2907   {
2908     UseScratchRegisterScope temps(masm);
2909     Register scratch = temps.AcquireX();
2910     __ Mov(scratch, pending_handler_sp_address);
2911     __ Ldr(scratch, MemOperand(scratch));
2912     __ Mov(sp, scratch);
2913   }
2914   __ Mov(fp, pending_handler_fp_address);
2915   __ Ldr(fp, MemOperand(fp));
2916 
2917   // If the handler is a JS frame, restore the context to the frame. Note that
2918   // the context will be set to (cp == 0) for non-JS frames.
2919   Label not_js_frame;
2920   __ Cbz(cp, &not_js_frame);
2921   __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2922   __ Bind(&not_js_frame);
2923 
2924   // Reset the masking register. This is done independent of the underlying
2925   // feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
2926   // both configurations. It is safe to always do this, because the underlying
2927   // register is caller-saved and can be arbitrarily clobbered.
2928   __ ResetSpeculationPoisonRegister();
2929 
2930   // Compute the handler entry address and jump to it.
2931   __ Mov(x10, pending_handler_entrypoint_address);
2932   __ Ldr(x10, MemOperand(x10));
2933   __ Br(x10);
2934 }
2935 
2936 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2937   Label done;
2938   Register result = x7;
2939 
2940   DCHECK(result.Is64Bits());
2941 
2942   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2943   UseScratchRegisterScope temps(masm);
2944   Register scratch1 = temps.AcquireX();
2945   Register scratch2 = temps.AcquireX();
2946   DoubleRegister double_scratch = temps.AcquireD();
2947 
2948   // Account for saved regs.
2949   const int kArgumentOffset = 2 * kPointerSize;
2950 
2951   __ Push(result, scratch1);  // scratch1 is also pushed to preserve alignment.
2952   __ Peek(double_scratch, kArgumentOffset);
2953 
2954   // Try to convert with a FPU convert instruction.  This handles all
2955   // non-saturating cases.
2956   __ TryConvertDoubleToInt64(result, double_scratch, &done);
2957   __ Fmov(result, double_scratch);
2958 
2959   // If we reach here we need to manually convert the input to an int32.
2960 
2961   // Extract the exponent.
2962   Register exponent = scratch1;
2963   __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
2964           HeapNumber::kExponentBits);
2965 
2966   // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
2967   // the mantissa gets shifted completely out of the int32_t result.
2968   __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
2969   __ CzeroX(result, ge);
2970   __ B(ge, &done);
2971 
2972   // The Fcvtzs sequence handles all cases except where the conversion causes
2973   // signed overflow in the int64_t target. Since we've already handled
2974   // exponents >= 84, we can guarantee that 63 <= exponent < 84.
2975 
2976   if (masm->emit_debug_code()) {
2977     __ Cmp(exponent, HeapNumber::kExponentBias + 63);
2978     // Exponents less than this should have been handled by the Fcvt case.
2979     __ Check(ge, AbortReason::kUnexpectedValue);
2980   }
2981 
2982   // Isolate the mantissa bits, and set the implicit '1'.
2983   Register mantissa = scratch2;
2984   __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
2985   __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
2986 
2987   // Negate the mantissa if necessary.
2988   __ Tst(result, kXSignMask);
2989   __ Cneg(mantissa, mantissa, ne);
2990 
2991   // Shift the mantissa bits in the correct place. We know that we have to shift
2992   // it left here, because exponent >= 63 >= kMantissaBits.
2993   __ Sub(exponent, exponent,
2994          HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
2995   __ Lsl(result, mantissa, exponent);
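  // Editor's note (not part of the V8 source): a C sketch of the manual
  // conversion above, valid for inputs whose unbiased exponent lies in
  // [63, 84):
  //
  //   uint64_t bits = bit_cast<uint64_t>(input);
  //   int64_t exponent = (bits >> 52) & 0x7FF;               // Ubfx
  //   uint64_t mantissa = (bits & ((1ULL << 52) - 1))        // Ubfx
  //                       | (1ULL << 52);                    // Orr
  //   if (bits >> 63) mantissa = -mantissa;                  // Tst + Cneg
  //   uint64_t result = mantissa << (exponent - 1023 - 52);  // Sub + Lsl
  //
  // Only the low 32 bits of result are significant to the caller.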
2996 
2997   __ Bind(&done);
2998   __ Poke(result, kArgumentOffset);
2999   __ Pop(scratch1, result);
3000   __ Ret();
3001 }
3002 
3003 void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
3004   Register exponent_integer = x12;
3005   Register saved_lr = x19;
3006   VRegister result_double = d0;
3007   VRegister base_double = d0;
3008   VRegister exponent_double = d1;
3009   VRegister base_double_copy = d2;
3010   VRegister scratch1_double = d6;
3011   VRegister scratch0_double = d7;
3012 
3013   // A fast-path for integer exponents.
3014   Label exponent_is_integer;
3015   // Jumped to once the result has been computed in result_double.
3016   Label done;
3017 
3018   // The base and exponent arrive in base_double (d0) and
3019   // exponent_double (d1).
3020 
3021   // Detect integer exponents stored as doubles and handle those in the
3022   // integer fast-path.
3023   __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
3024                                scratch0_double, &exponent_is_integer);
3025 
3026   {
3027     AllowExternalCallThatCantCauseGC scope(masm);
3028     __ Mov(saved_lr, lr);
3029     __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
3030     __ Mov(lr, saved_lr);
3031     __ B(&done);
3032   }
3033 
3034   __ Bind(&exponent_is_integer);
3035 
3036   // Find abs(exponent). For negative exponents, we can find the inverse later.
3037   Register exponent_abs = x13;
3038   __ Cmp(exponent_integer, 0);
3039   __ Cneg(exponent_abs, exponent_integer, mi);
3040 
3041   // Repeatedly multiply to calculate the power.
3042   //  result = 1.0;
3043   //  For each bit n (exponent_integer{n}) {
3044   //    if (exponent_integer{n}) {
3045   //      result *= base;
3046   //    }
3047   //    base *= base;
3048   //    if (remaining bits in exponent_integer are all zero) {
3049   //      break;
3050   //    }
3051   //  }
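  // Editor's note (not part of the V8 source): the loop below is standard
  // binary exponentiation (square-and-multiply); in C:
  //
  //   double result = 1.0, b = base;
  //   for (uint64_t e = exponent_abs; e != 0; e >>= 1) {
  //     if (e & 1) result *= b;
  //     b *= b;
  //   }
  //
  // For exponent_abs == 10 (binary 1010) this multiplies result by b^2 and
  // b^8, computing base^10 in four iterations rather than ten.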
3052   Label power_loop, power_loop_entry, power_loop_exit;
3053   __ Fmov(scratch1_double, base_double);
3054   __ Fmov(base_double_copy, base_double);
3055   __ Fmov(result_double, 1.0);
3056   __ B(&power_loop_entry);
3057 
3058   __ Bind(&power_loop);
3059   __ Fmul(scratch1_double, scratch1_double, scratch1_double);
3060   __ Lsr(exponent_abs, exponent_abs, 1);
3061   __ Cbz(exponent_abs, &power_loop_exit);
3062 
3063   __ Bind(&power_loop_entry);
3064   __ Tbz(exponent_abs, 0, &power_loop);
3065   __ Fmul(result_double, result_double, scratch1_double);
3066   __ B(&power_loop);
3067 
3068   __ Bind(&power_loop_exit);
3069 
3070   // If the exponent was positive, result_double holds the result.
3071   __ Tbz(exponent_integer, kXSignBit, &done);
3072 
3073   // The exponent was negative, so find the inverse.
3074   __ Fmov(scratch0_double, 1.0);
3075   __ Fdiv(result_double, scratch0_double, result_double);
3076   // ECMA-262 only requires Math.pow to return an 'implementation-dependent
3077   // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
3078   // to calculate the subnormal value 2^-1074. This method of calculating
3079   // negative powers doesn't work because 2^1074 overflows to infinity. To
3080   // catch this corner-case, we bail out if the result was 0. (This can only
3081   // occur if the divisor is infinity or the base is zero.)
3082   __ Fcmp(result_double, 0.0);
3083   __ B(&done, ne);
3084 
3085   AllowExternalCallThatCantCauseGC scope(masm);
3086   __ Mov(saved_lr, lr);
3087   __ Fmov(base_double, base_double_copy);
3088   __ Scvtf(exponent_double, exponent_integer);
3089   __ CallCFunction(ExternalReference::power_double_double_function(), 0, 2);
3090   __ Mov(lr, saved_lr);
3091   __ Bind(&done);
3092   __ Ret();
3093 }
3094 
3095 namespace {
3096 
3097 void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
3098                                           ElementsKind kind) {
3099   Label zero_case, n_case;
3100   Register argc = x0;
3101 
3102   __ Cbz(argc, &zero_case);
3103   __ CompareAndBranch(argc, 1, ne, &n_case);
3104 
3105   // One argument.
3106   if (IsFastPackedElementsKind(kind)) {
3107     Label packed_case;
3108 
3109     // We might need to create a holey array; look at the first argument.
3110     __ Peek(x10, 0);
3111     __ Cbz(x10, &packed_case);
3112 
3113     __ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
3114                 masm->isolate(), GetHoleyElementsKind(kind))
3115                 .code(),
3116             RelocInfo::CODE_TARGET);
3117 
3118     __ Bind(&packed_case);
3119   }
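  // Editor's note (not part of the V8 source): the zero check above
  // distinguishes, e.g., new InternalArray(0), which can stay packed, from
  // new InternalArray(3), whose three elements start out as holes and so
  // require the holey elements kind.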
3120 
3121   __ Jump(
3122       CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
3123           .code(),
3124       RelocInfo::CODE_TARGET);
3125 
3126   __ Bind(&zero_case);
3127   // No arguments.
3128   __ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
3129               .code(),
3130           RelocInfo::CODE_TARGET);
3131 
3132   __ Bind(&n_case);
3133   // N arguments.
3134   Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
3135   __ Jump(code, RelocInfo::CODE_TARGET);
3136 }
3137 
3138 }  // namespace
3139 
3140 void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
3141   // ----------- S t a t e -------------
3142   //  -- x0 : argc
3143   //  -- x1 : constructor
3144   //  -- sp[0] : return address
3145   //  -- sp[8] : last argument
3146   // -----------------------------------
3147 
3148   Register constructor = x1;
3149 
3150   if (FLAG_debug_code) {
3151     // The array construct code is only set for the global and natives
3152     // builtin Array functions which always have maps.
3153 
3154     Label unexpected_map, map_ok;
3155     // Initial map for the builtin Array function should be a map.
3156     __ Ldr(x10, FieldMemOperand(constructor,
3157                                 JSFunction::kPrototypeOrInitialMapOffset));
3158     // A Smi check covers both a nullptr and a Smi initial map.
3159     __ JumpIfSmi(x10, &unexpected_map);
3160     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3161     __ Bind(&unexpected_map);
3162     __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
3163     __ Bind(&map_ok);
3164   }
3165 
3166   Register kind = w3;
3167   // Figure out the right elements kind.
3168   __ Ldr(x10, FieldMemOperand(constructor,
3169                               JSFunction::kPrototypeOrInitialMapOffset));
3170 
3171   // Retrieve elements_kind from map.
3172   __ LoadElementsKindFromMap(kind, x10);
3173 
3174   if (FLAG_debug_code) {
3175     Label done;
3176     __ Cmp(x3, PACKED_ELEMENTS);
3177     __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
3178     __ Assert(
3179         eq,
3180         AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
3181   }
3182 
3183   Label fast_elements_case;
3184   __ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
3185   GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
3186 
3187   __ Bind(&fast_elements_case);
3188   GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
3189 }
3190 
3191 #undef __
3192 
3193 }  // namespace internal
3194 }  // namespace v8
3195 
3196 #endif  // V8_TARGET_ARCH_ARM64
3197