// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/api/api-arguments.h"
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/baseline/liftoff-assembler-defs.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
  __ LoadAddress(kJavaScriptCallExtraArg1Register,
                 ExternalReference::Create(address));
  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
          RelocInfo::CODE_TARGET);
}

static void GenerateTailCallToReturnedCode(
    MacroAssembler* masm, Runtime::FunctionId function_id,
    JumpMode jump_mode = JumpMode::kJump) {
  // ----------- S t a t e -------------
  //  -- rax : actual argument count
  //  -- rdx : new target (preserved for callee)
  //  -- rdi : target function (preserved for callee)
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    __ Push(kJavaScriptCallTargetRegister);
    __ Push(kJavaScriptCallNewTargetRegister);
    __ SmiTag(kJavaScriptCallArgCountRegister);
    __ Push(kJavaScriptCallArgCountRegister);
    // Function is also the parameter to the runtime call.
    __ Push(kJavaScriptCallTargetRegister);

    __ CallRuntime(function_id, 1);
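    // The runtime call returns the code object to run in rax; move it to
    // rcx, the expected code-start register (see the static_assert on
    // kJavaScriptCallCodeStartRegister below).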
    __ movq(rcx, rax);

    // Restore target function, new target and actual argument count.
    __ Pop(kJavaScriptCallArgCountRegister);
    __ SmiUntag(kJavaScriptCallArgCountRegister);
    __ Pop(kJavaScriptCallNewTargetRegister);
    __ Pop(kJavaScriptCallTargetRegister);
  }
  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
  __ JumpCodeTObject(rcx, jump_mode);
}

namespace {

enum class ArgumentsElementType {
  kRaw,    // Push arguments as they are.
  kHandle  // Dereference arguments before pushing.
};

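// Pushes the arguments from |array| onto the stack, walking from the last
// argument down to the first so that the first argument ends up closest to
// the stack pointer; the receiver slot is pushed separately by the callers.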
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
                            Register scratch,
                            ArgumentsElementType element_type) {
  DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
  Register counter = scratch;
  Label loop, entry;
  __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
  __ jmp(&entry);
  __ bind(&loop);
  Operand value(array, counter, times_system_pointer_size, 0);
  if (element_type == ArgumentsElementType::kHandle) {
    __ movq(kScratchRegister, value);
    value = Operand(kScratchRegister, 0);
  }
  __ Push(value);
  __ bind(&entry);
  __ decq(counter);
  __ j(greater_equal, &loop, Label::kNear);
}

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax: number of arguments
  //  -- rdi: constructor function
  //  -- rdx: new target
  //  -- rsi: context
  // -----------------------------------

  Label stack_overflow;
  __ StackOverflowCheck(rax, &stack_overflow, Label::kFar);

  // Enter a construct frame.
  {
    FrameScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack.
    __ SmiTag(rcx, rax);
    __ Push(rsi);
    __ Push(rcx);

    // TODO(victorgomes): When the arguments adaptor is completely removed, we
    // should get the formal parameter count and copy the arguments into their
    // correct positions (including any undefined), instead of delaying this to
    // InvokeFunction.

    // Set up pointer to first argument (skip receiver).
    __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
                                  kSystemPointerSize));
    // Copy arguments to the expression stack.
    // rbx: Pointer to start of arguments.
    // rax: Number of arguments.
    Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
    // The receiver for the builtin/api call.
    __ PushRoot(RootIndex::kTheHoleValue);

    // Call the function.
    // rax: number of arguments (untagged)
    // rdi: constructor function
    // rdx: new target
    __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);

    // Restore smi-tagged arguments count from the frame.
    __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));

    // Leave construct frame.
  }

  // Remove caller arguments from the stack and return.
  __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
                   TurboAssembler::kCountIncludesReceiver);

  __ ret(0);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ int3();  // This should be unreachable.
  }
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax: number of arguments (untagged)
  //  -- rdi: constructor function
  //  -- rdx: new target
  //  -- rsi: context
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  FrameScope scope(masm, StackFrame::MANUAL);
  // Enter a construct frame.
  __ EnterFrame(StackFrame::CONSTRUCT);
  Label post_instantiation_deopt_entry, not_create_implicit_receiver;

  // Preserve the incoming parameters on the stack.
  __ SmiTag(rcx, rax);
  __ Push(rsi);
  __ Push(rcx);
  __ Push(rdi);
  __ PushRoot(RootIndex::kTheHoleValue);
  __ Push(rdx);

  // ----------- S t a t e -------------
  //  --         sp[0*kSystemPointerSize]: new target
  //  --         sp[1*kSystemPointerSize]: padding
  //  -- rdi and sp[2*kSystemPointerSize]: constructor function
  //  --         sp[3*kSystemPointerSize]: argument count
  //  --         sp[4*kSystemPointerSize]: context
  // -----------------------------------

  __ LoadTaggedPointerField(
      rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movl(rbx, FieldOperand(rbx, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
  __ JumpIfIsInRange(
      rbx, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
      static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
      &not_create_implicit_receiver, Label::kNear);

  // If not derived class constructor: Allocate the new receiver object.
  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
  __ jmp(&post_instantiation_deopt_entry, Label::kNear);

  // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
  __ LoadRoot(rax, RootIndex::kTheHoleValue);

  // ----------- S t a t e -------------
  //  -- rax                          implicit receiver
  //  -- Slot 4 / sp[0*kSystemPointerSize]  new target
  //  -- Slot 3 / sp[1*kSystemPointerSize]  padding
  //  -- Slot 2 / sp[2*kSystemPointerSize]  constructor function
  //  -- Slot 1 / sp[3*kSystemPointerSize]  number of arguments (tagged)
  //  -- Slot 0 / sp[4*kSystemPointerSize]  context
  // -----------------------------------
  // Deoptimizer enters here.
  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
      masm->pc_offset());
  __ bind(&post_instantiation_deopt_entry);

  // Restore new target.
  __ Pop(rdx);

  // Push the allocated receiver to the stack.
  __ Push(rax);

  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we save it in
  // r8 since rax needs to hold the number of arguments before invoking the
  // function.
  __ movq(r8, rax);

  // Set up pointer to first argument (skip receiver).
  __ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
                                kSystemPointerSize));

  // Restore constructor function and argument count.
  __ movq(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
  __ SmiUntag(rax, Operand(rbp, ConstructFrameConstants::kLengthOffset));

  // Check if we have enough stack space to push all arguments.
  // Argument count in rax.
  Label stack_overflow;
  __ StackOverflowCheck(rax, &stack_overflow);

  // TODO(victorgomes): When the arguments adaptor is completely removed, we
  // should get the formal parameter count and copy the arguments into their
  // correct positions (including any undefined), instead of delaying this to
  // InvokeFunction.

  // Copy arguments to the expression stack.
  // rbx: Pointer to start of arguments.
  // rax: Number of arguments.
  Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);

  // Push implicit receiver.
  __ Push(r8);

  // Call the function.
  __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);

  // ----------- S t a t e -------------
  //  -- rax                 constructor result
  //  -- sp[0*kSystemPointerSize]  implicit receiver
  //  -- sp[1*kSystemPointerSize]  padding
  //  -- sp[2*kSystemPointerSize]  constructor function
  //  -- sp[3*kSystemPointerSize]  number of arguments
  //  -- sp[4*kSystemPointerSize]  context
  // -----------------------------------

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
      masm->pc_offset());

  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
  // on page 74.
  Label use_receiver, do_throw, leave_and_return, check_result;

  // If the result is undefined, we'll use the implicit receiver. Otherwise we
  // do a smi check and fall through to check if the return value is a valid
  // receiver.
  __ JumpIfNotRoot(rax, RootIndex::kUndefinedValue, &check_result,
                   Label::kNear);

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
  __ movq(rax, Operand(rsp, 0 * kSystemPointerSize));
  __ JumpIfRoot(rax, RootIndex::kTheHoleValue, &do_throw, Label::kNear);

  __ bind(&leave_and_return);
  // Restore the arguments count.
  __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
  __ LeaveFrame(StackFrame::CONSTRUCT);
  // Remove caller arguments from the stack and return.
  __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
                   TurboAssembler::kCountIncludesReceiver);
  __ ret(0);

  // If the result is a smi, it is *not* an object in the ECMA sense.
  __ bind(&check_result);
  __ JumpIfSmi(rax, &use_receiver, Label::kNear);

  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
  __ j(above_equal, &leave_and_return, Label::kNear);
  __ jmp(&use_receiver);

  __ bind(&do_throw);
  // Restore context from the frame.
  __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
  // We don't return here.
  __ int3();

  __ bind(&stack_overflow);
  // Restore the context from the frame.
  __ movq(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowStackOverflow);
  // This should be unreachable.
  __ int3();
}

void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::INTERNAL);
  __ Push(rdi);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
}

namespace {

// Called with the native C calling convention. The corresponding function
// signature is either:
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** argv)>;
// or
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
  Label invoke, handler_entry, exit;
  Label not_outermost_js, not_outermost_js_2;

  {
    NoRootArrayScope uninitialized_root_register(masm);
    // Set up frame.
    __ pushq(rbp);
    __ movq(rbp, rsp);

    // Push the stack frame type.
    __ Push(Immediate(StackFrame::TypeToMarker(type)));
    // Reserve a slot for the context. It is filled after the root register has
    // been set up.
    __ AllocateStackSpace(kSystemPointerSize);
    // Save callee-saved registers (X64/X32/Win64 calling conventions).
    __ pushq(r12);
    __ pushq(r13);
    __ pushq(r14);
    __ pushq(r15);
#ifdef V8_TARGET_OS_WIN
    __ pushq(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
    __ pushq(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
    __ pushq(rbx);

#ifdef V8_TARGET_OS_WIN
    // On Win64 XMM6-XMM15 are callee-save.
    __ AllocateStackSpace(EntryFrameConstants::kXMMRegistersBlockSize);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
    STATIC_ASSERT(EntryFrameConstants::kCalleeSaveXMMRegisters == 10);
    STATIC_ASSERT(EntryFrameConstants::kXMMRegistersBlockSize ==
                  EntryFrameConstants::kXMMRegisterSize *
                      EntryFrameConstants::kCalleeSaveXMMRegisters);
#endif

    // Initialize the root register.
    // C calling convention. The first argument is passed in arg_reg_1.
    __ movq(kRootRegister, arg_reg_1);

#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
    // Initialize the pointer cage base register.
    __ LoadRootRelative(kPtrComprCageBaseRegister,
                        IsolateData::cage_base_offset());
#endif
  }

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp = ExternalReference::Create(
      IsolateAddressId::kCEntryFPAddress, masm->isolate());
  {
    Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
    __ Push(c_entry_fp_operand);

    // Clear c_entry_fp, now that we've pushed its previous value to the
    // stack. If the c_entry_fp is not already zero and we don't clear it, the
    // SafeStackFrameIterator will assume we are executing C++ and miss the JS
    // frames on top.
    __ Move(c_entry_fp_operand, 0);
  }

  // Store the context address in the previously-reserved slot.
  ExternalReference context_address = ExternalReference::Create(
      IsolateAddressId::kContextAddress, masm->isolate());
  __ Load(kScratchRegister, context_address);
  static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
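  // The reserved slot sits just below the frame-type marker pushed in the
  // prologue: rbp - 1 slot holds the marker, rbp - 2 slots the context.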
  __ movq(Operand(rbp, kOffsetToContextSlot), kScratchRegister);

  // If this is the outermost JS call, set js_entry_sp value.
  ExternalReference js_entry_sp = ExternalReference::Create(
      IsolateAddressId::kJSEntrySPAddress, masm->isolate());
  __ Load(rax, js_entry_sp);
  __ testq(rax, rax);
  __ j(not_zero, &not_outermost_js);
  __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ movq(rax, rbp);
  __ Store(js_entry_sp, rax);
  Label cont;
  __ jmp(&cont);
  __ bind(&not_outermost_js);
  __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);

  // Store the current pc as the handler offset. It's used later to create the
  // handler table.
  masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());

  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception = ExternalReference::Create(
      IsolateAddressId::kPendingExceptionAddress, masm->isolate());
  __ Store(pending_exception, rax);
  __ LoadRoot(rax, RootIndex::kException);
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.
  Handle<CodeT> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ Pop(rbx);
  __ cmpq(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ j(not_equal, &not_outermost_js_2);
  __ Move(kScratchRegister, js_entry_sp);
  __ movq(Operand(kScratchRegister, 0), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  {
    Operand c_entry_fp_operand = masm->ExternalReferenceAsOperand(c_entry_fp);
    __ Pop(c_entry_fp_operand);
  }

  // Restore callee-saved registers (X64 conventions).
#ifdef V8_TARGET_OS_WIN
  // On Win64 XMM6-XMM15 are callee-save
  __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
  __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
  __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
  __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
  __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
  __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
  __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
  __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
  __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
  __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
  __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif

  __ popq(rbx);
#ifdef V8_TARGET_OS_WIN
  // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
  __ popq(rsi);
  __ popq(rdi);
#endif
  __ popq(r15);
  __ popq(r14);
  __ popq(r13);
  __ popq(r12);
  __ addq(rsp, Immediate(2 * kSystemPointerSize));  // remove markers
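  // The two removed slots are the frame-type marker and the context slot
  // reserved in the prologue above.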

  // Restore frame pointer and return.
  __ popq(rbp);
  __ ret(0);
}

}  // namespace

void Builtins::Generate_JSEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}

void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
                          Builtin::kJSConstructEntryTrampoline);
}

void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
                          Builtin::kRunMicrotasksTrampoline);
}

static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Expects six C++ function parameters.
  // - Address root_register_value
  // - Address new_target (tagged Object pointer)
  // - Address function (tagged JSFunction pointer)
  // - Address receiver (tagged Object pointer)
  // - intptr_t argc
  // - Address** argv (pointer to array of tagged Object pointers)
  // (see Handle::Invoke in execution.cc).

  // Open a C++ scope for the FrameScope.
  {
    // Platform specific argument handling. After this, the stack contains
    // an internal frame and the pushed function and receiver, and
    // registers rax and rbx hold the argument count and argument array,
    // while rdi holds the function pointer, rsi the context, and rdx the
    // new.target.

    // MSVC parameters in:
    // rcx        : root_register_value
    // rdx        : new_target
    // r8         : function
    // r9         : receiver
    // [rsp+0x20] : argc
    // [rsp+0x28] : argv
    //
    // GCC parameters in:
    // rdi : root_register_value
    // rsi : new_target
    // rdx : function
    // rcx : receiver
    // r8  : argc
    // r9  : argv

    __ movq(rdi, arg_reg_3);
    __ Move(rdx, arg_reg_2);
    // rdi : function
    // rdx : new_target

    // Clear the context before we push it when entering the internal frame.
    __ Move(rsi, 0);

    // Enter an internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Setup the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ movq(rsi, masm->ExternalReferenceAsOperand(context_address));

    // Push the function onto the stack.
    __ Push(rdi);

#ifdef V8_TARGET_OS_WIN
    // Load the previous frame pointer to access C arguments on stack
    __ movq(kScratchRegister, Operand(rbp, 0));
    // Load the number of arguments and setup pointer to the arguments.
    __ movq(rax, Operand(kScratchRegister, EntryFrameConstants::kArgcOffset));
    __ movq(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
#else   // V8_TARGET_OS_WIN
    // Load the number of arguments and setup pointer to the arguments.
    __ movq(rax, r8);
    __ movq(rbx, r9);
    __ movq(r9, arg_reg_4);  // Temporarily saving the receiver.
#endif  // V8_TARGET_OS_WIN

    // Current stack contents:
    // [rsp + kSystemPointerSize]     : Internal frame
    // [rsp]                          : function
    // Current register contents:
    // rax : argc
    // rbx : argv
    // rsi : context
    // rdi : function
    // rdx : new.target
    // r9  : receiver

    // Check if we have enough stack space to push all arguments.
    // Argument count in rax.
    Label enough_stack_space, stack_overflow;
    __ StackOverflowCheck(rax, &stack_overflow, Label::kNear);
    __ jmp(&enough_stack_space, Label::kNear);

    __ bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    // This should be unreachable.
    __ int3();

    __ bind(&enough_stack_space);

    // Copy arguments to the stack.
    // Register rbx points to array of pointers to handle locations.
    // Push the values of these handles.
    // rbx: Pointer to start of arguments.
    // rax: Number of arguments.
    Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kHandle);

    // Push the receiver.
    __ Push(r9);

    // Invoke the builtin code.
    Handle<CodeT> builtin = is_construct
                                ? BUILTIN_CODE(masm->isolate(), Construct)
                                : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the internal frame. Notice that this also removes the empty
    // context and the function left on the stack by the code
    // invocation.
  }

  __ ret(0);
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // arg_reg_2: microtask_queue
  __ movq(RunMicrotasksDescriptor::MicrotaskQueueRegister(), arg_reg_2);
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
                                              Register code, Register scratch) {
  // Verify that the code kind is baseline code via the CodeKind.
  __ movl(scratch, FieldOperand(code, CodeT::kFlagsOffset));
  __ DecodeField<CodeT::KindField>(scratch);
  __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
  __ Assert(equal, AbortReason::kExpectedBaselineData);
}

static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
                                  Register scratch) {
  DCHECK(!AreAliased(code, scratch));
  return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
}

static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  ASM_CODE_COMMENT(masm);
  Label done;
  __ LoadMap(scratch1, sfi_data);

  __ CmpInstanceType(scratch1, CODET_TYPE);
  if (FLAG_debug_code) {
    Label not_baseline;
    __ j(not_equal, &not_baseline);
    AssertCodeTIsBaseline(masm, sfi_data, scratch1);
    __ j(equal, is_baseline);
    __ bind(&not_baseline);
  } else {
    __ j(equal, is_baseline);
  }

  __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
  __ j(not_equal, &done, Label::kNear);

  __ LoadTaggedPointerField(
      sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

  __ bind(&done);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : the value to pass to the generator
  //  -- rdx    : the JSGeneratorObject to resume
  //  -- rsp[0] : return address
  // -----------------------------------

  // Store input value into generator object.
  __ StoreTaggedField(
      FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
  Register object = WriteBarrierDescriptor::ObjectRegister();
  __ Move(object, rdx);
  __ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, rax,
                      WriteBarrierDescriptor::SlotAddressRegister(),
                      SaveFPRegsMode::kIgnore);
  // Check that rdx is still valid; RecordWrite might have clobbered it.
  __ AssertGeneratorObject(rdx);

  Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
  __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  Operand debug_hook_operand = masm->ExternalReferenceAsOperand(debug_hook);
  __ cmpb(debug_hook_operand, Immediate(0));
  __ j(not_equal, &prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended generator.
  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());
  Operand debug_suspended_generator_operand =
      masm->ExternalReferenceAsOperand(debug_suspended_generator);
  __ cmpq(rdx, debug_suspended_generator_operand);
  __ j(equal, &prepare_step_in_suspended_generator);
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
  __ j(below, &stack_overflow);

  // Pop return address.
  __ PopReturnAddressTo(rax);

  // ----------- S t a t e -------------
  //  -- rax    : return address
  //  -- rdx    : the JSGeneratorObject to resume
  //  -- rdi    : generator function
  //  -- rsi    : generator context
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ LoadTaggedPointerField(
      rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movzxwq(
      rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
  __ decq(rcx);  // Exclude receiver.
  __ LoadTaggedPointerField(
      rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));

  {
    Label done_loop, loop;
    __ bind(&loop);
    __ decq(rcx);
    __ j(less, &done_loop, Label::kNear);
    __ PushTaggedAnyField(
        FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
        decompr_scratch1);
    __ jmp(&loop);
    __ bind(&done_loop);

    // Push the receiver.
    __ PushTaggedPointerField(
        FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
        decompr_scratch1);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    Label is_baseline, ok;
    __ LoadTaggedPointerField(
        rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
    __ LoadTaggedPointerField(
        rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
    GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
                                            &is_baseline);
    __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
    __ Assert(equal, AbortReason::kMissingBytecodeArray);
    __ jmp(&ok);

    __ bind(&is_baseline);
    __ CmpObjectType(rcx, CODET_TYPE, rcx);
    __ Assert(equal, AbortReason::kMissingBytecodeArray);

    __ bind(&ok);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ PushReturnAddressFrom(rax);
    __ LoadTaggedPointerField(
        rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
    __ movzxwq(rax, FieldOperand(
                        rax, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object.  In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
    __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
    __ JumpCodeTObject(rcx);
  }

  __ bind(&prepare_step_in_if_stepping);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(rdx);
    __ Push(rdi);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(rdx);
    __ LoadTaggedPointerField(
        rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
  }
  __ jmp(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(rdx);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(rdx);
    __ LoadTaggedPointerField(
        rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
  }
  __ jmp(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ int3();  // This should be unreachable.
  }
}

static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register scratch1,
                                                Register slot_address) {
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
  DCHECK_EQ(closure, kJSFunctionRegister);
  // Store the optimized code in the closure.
  __ AssertCodeT(optimized_code);
  __ StoreTaggedField(FieldOperand(closure, JSFunction::kCodeOffset),
                      optimized_code);
  // Write barrier clobbers scratch1 below.
  Register value = scratch1;
  __ movq(value, optimized_code);

  __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
                      SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
                      SmiCheck::kOmit);
}

static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  ASM_CODE_COMMENT(masm);
  Register params_size = scratch1;
  // Get the size of the formal parameters (in bytes).
  __ movq(params_size,
          Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ movl(params_size,
          FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));

  Register actual_params_size = scratch2;
  // Compute the size of the actual parameters (in bytes).
  __ movq(actual_params_size,
          Operand(rbp, StandardFrameConstants::kArgCOffset));
  __ leaq(actual_params_size,
          Operand(actual_params_size, times_system_pointer_size, 0));
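  // The leaq above multiplies the argument count by kSystemPointerSize via
  // its scaled-index operand.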

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ cmpq(params_size, actual_params_size);
  __ j(greater_equal, &corrected_args_count, Label::kNear);
  __ movq(params_size, actual_params_size);
  __ bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ leave();

  // Drop receiver + arguments.
  __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
                   TurboAssembler::kCountIncludesReceiver);
}

// Tail-call |function_id| if |actual_state| == |expected_state|
static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
                                         Register actual_state,
                                         TieringState expected_state,
                                         Runtime::FunctionId function_id) {
  ASM_CODE_COMMENT(masm);
  Label no_match;
  __ Cmp(actual_state, static_cast<int>(expected_state));
  __ j(not_equal, &no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register tiering_state) {
  // ----------- S t a t e -------------
  //  -- rax : actual argument count
  //  -- rdx : new target (preserved for callee if needed, and caller)
  //  -- rdi : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- tiering_state : a Smi containing a non-zero tiering state.
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK(!AreAliased(feedback_vector, rdx, rdi, tiering_state));

  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestMaglev_Synchronous,
                               Runtime::kCompileMaglev_Synchronous);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestMaglev_Concurrent,
                               Runtime::kCompileMaglev_Concurrent);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Synchronous,
                               Runtime::kCompileTurbofan_Synchronous);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Concurrent,
                               Runtime::kCompileTurbofan_Concurrent);

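  // Unreachable: the tiering state must have matched one of the requests
  // handled above.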
  __ int3();
}

static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register closure, Register scratch1,
                                      Register scratch2, JumpMode jump_mode) {
  // ----------- S t a t e -------------
  //  rax : actual argument count
  //  rdx : new target (preserved for callee if needed, and caller)
  //  rsi : current context, used for the runtime call
  //  rdi : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  ASM_CODE_COMMENT(masm);
  DCHECK_EQ(closure, kJSFunctionRegister);
  DCHECK(!AreAliased(rax, rdx, closure, rsi, optimized_code_entry, scratch1,
                     scratch2));

  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ AssertCodeT(optimized_code_entry);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    __ testl(FieldOperand(optimized_code_entry,
                          CodeDataContainer::kKindSpecificFlagsOffset),
             Immediate(1 << Code::kMarkedForDeoptimizationBit));
  } else {
    __ LoadTaggedPointerField(
        scratch1,
        FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
    __ testl(
        FieldOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset),
        Immediate(1 << Code::kMarkedForDeoptimizationBit));
  }
  __ j(not_zero, &heal_optimized_code_slot);

  // Optimized code is good, get it into the closure and link the closure into
  // the optimized functions list, then tail call the optimized code.
  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                      scratch1, scratch2);
  static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
  __ Move(rcx, optimized_code_entry);
  __ JumpCodeTObject(rcx, jump_mode);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot,
                                 jump_mode);
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Label* if_return) {
  ASM_CODE_COMMENT(masm);
  Register bytecode_size_table = scratch1;

  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of a wide or extra wide JumpLoop bytecode, we need to
  // restore the original bytecode offset, so we keep a backup of it.
  Register original_bytecode_offset = scratch2;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
                     bytecode_size_table, original_bytecode_offset));

  __ movq(original_bytecode_offset, bytecode_offset);

  __ Move(bytecode_size_table,
          ExternalReference::bytecode_size_table_address());

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
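  // The four prefix bytecodes occupy values 0..3 (asserted above); the odd
  // ones are the extra-wide variants, which is what the testb below checks.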
1058   __ cmpb(bytecode, Immediate(0x3));
1059   __ j(above, &process_bytecode, Label::kNear);
1060   // The code to load the next bytecode is common to both wide and extra wide.
1061   // We can hoist them up here. incl has to happen before testb since it
1062   // modifies the ZF flag.
1063   __ incl(bytecode_offset);
1064   __ testb(bytecode, Immediate(0x1));
1065   __ movzxbq(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
1066   __ j(not_equal, &extra_wide, Label::kNear);
1067 
1068   // Update table to the wide scaled table.
1069   __ addq(bytecode_size_table,
1070           Immediate(kByteSize * interpreter::Bytecodes::kBytecodeCount));
1071   __ jmp(&process_bytecode, Label::kNear);
1072 
1073   __ bind(&extra_wide);
1074   // Update table to the extra wide scaled table.
1075   __ addq(bytecode_size_table,
1076           Immediate(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
1077 
1078   __ bind(&process_bytecode);
1079 
1080 // Bailout to the return label if this is a return bytecode.
1081 #define JUMP_IF_EQUAL(NAME)                                             \
1082   __ cmpb(bytecode,                                                     \
1083           Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
1084   __ j(equal, if_return, Label::kFar);
1085   RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1086 #undef JUMP_IF_EQUAL
1087 
1088   // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1089   // of the loop.
1090   Label end, not_jump_loop;
1091   __ cmpb(bytecode,
1092           Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1093   __ j(not_equal, &not_jump_loop, Label::kNear);
1094   // We need to restore the original bytecode_offset since we might have
1095   // increased it to skip the wide / extra-wide prefix bytecode.
1096   __ movq(bytecode_offset, original_bytecode_offset);
1097   __ jmp(&end, Label::kNear);
1098 
1099   __ bind(&not_jump_loop);
1100   // Otherwise, load the size of the current bytecode and advance the offset.
1101   __ movzxbl(kScratchRegister,
1102              Operand(bytecode_size_table, bytecode, times_1, 0));
1103   __ addl(bytecode_offset, kScratchRegister);
1104 
1105   __ bind(&end);
1106 }
1107 
1108 // Read off the optimization state in the feedback vector and check if there
1109 // is optimized code or a tiering state that needs to be processed.
LoadTieringStateAndJumpIfNeedsProcessing(MacroAssembler * masm,Register optimization_state,Register feedback_vector,Label * has_optimized_code_or_state)1110 static void LoadTieringStateAndJumpIfNeedsProcessing(
1111     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1112     Label* has_optimized_code_or_state) {
1113   ASM_CODE_COMMENT(masm);
1114   __ movl(optimization_state,
1115           FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1116   __ testl(
1117       optimization_state,
1118       Immediate(
1119           FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1120   __ j(not_zero, has_optimized_code_or_state);
1121 }
1122 
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(MacroAssembler * masm,Register optimization_state,Register feedback_vector,Register closure,JumpMode jump_mode=JumpMode::kJump)1123 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1124     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1125     Register closure, JumpMode jump_mode = JumpMode::kJump) {
1126   ASM_CODE_COMMENT(masm);
1127   DCHECK(!AreAliased(optimization_state, feedback_vector, closure));
1128   Label maybe_has_optimized_code;
1129   __ testl(optimization_state,
1130            Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
1131   __ j(zero, &maybe_has_optimized_code);
1132 
1133   Register tiering_state = optimization_state;
1134   __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1135   MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1136 
1137   __ bind(&maybe_has_optimized_code);
1138   Register optimized_code_entry = optimization_state;
1139   __ LoadAnyTaggedField(
1140       optimized_code_entry,
1141       FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
1142   TailCallOptimizedCodeSlot(masm, optimized_code_entry, closure, r9,
1143                             WriteBarrierDescriptor::SlotAddressRegister(),
1144                             jump_mode);
1145 }
1146 
1147 namespace {
1148 
ResetBytecodeAgeAndOsrState(MacroAssembler * masm,Register bytecode_array)1149 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1150                                  Register bytecode_array) {
1151   // Reset the bytecode age and OSR state (optimized to a single write).
1152   static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1153   STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1154   __ movl(FieldOperand(bytecode_array,
1155                        BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
1156           Immediate(0));
1157 }
1158 
1159 }  // namespace
1160 
1161 // Generate code for entering a JS function with the interpreter.
1162 // On entry to the function the receiver and arguments have been pushed on the
1163 // stack left to right.
1164 //
1165 // The live registers are:
1166 //   o rax: actual argument count
1167 //   o rdi: the JS function object being called
1168 //   o rdx: the incoming new target or generator object
1169 //   o rsi: our context
1170 //   o rbp: the caller's frame pointer
1171 //   o rsp: stack pointer (pointing to return address)
1172 //
1173 // The function builds an interpreter frame. See InterpreterFrameConstants in
1174 // frame-constants.h for its layout.
Generate_InterpreterEntryTrampoline(MacroAssembler * masm)1175 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1176   Register closure = rdi;
1177   Register feedback_vector = rbx;
1178 
1179   // Get the bytecode array from the function object and load it into
1180   // kInterpreterBytecodeArrayRegister.
1181   __ LoadTaggedPointerField(
1182       kScratchRegister,
1183       FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1184   __ LoadTaggedPointerField(
1185       kInterpreterBytecodeArrayRegister,
1186       FieldOperand(kScratchRegister, SharedFunctionInfo::kFunctionDataOffset));
1187 
1188   Label is_baseline;
1189   GetSharedFunctionInfoBytecodeOrBaseline(
1190       masm, kInterpreterBytecodeArrayRegister, kScratchRegister, &is_baseline);
1191 
1192   // The bytecode array could have been flushed from the shared function info,
1193   // if so, call into CompileLazy.
1194   Label compile_lazy;
1195   __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1196                    kScratchRegister);
1197   __ j(not_equal, &compile_lazy);
1198 
1199   // Load the feedback vector from the closure.
1200   __ LoadTaggedPointerField(
1201       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1202   __ LoadTaggedPointerField(feedback_vector,
1203                             FieldOperand(feedback_vector, Cell::kValueOffset));
1204 
1205   Label push_stack_frame;
1206   // Check if feedback vector is valid. If valid, check for optimized code
1207   // and update invocation count. Otherwise, setup the stack frame.
1208   __ LoadMap(rcx, feedback_vector);
1209   __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
1210   __ j(not_equal, &push_stack_frame);
1211 
1212   // Check the tiering state.
1213   Label has_optimized_code_or_state;
1214   Register optimization_state = rcx;
1215   LoadTieringStateAndJumpIfNeedsProcessing(
1216       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1217 
1218   Label not_optimized;
1219   __ bind(&not_optimized);
1220 
1221   // Increment invocation count for the function.
1222   __ incl(
1223       FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1224 
1225   // Open a frame scope to indicate that there is a frame on the stack.  The
1226   // MANUAL indicates that the scope shouldn't actually generate code to set up
1227   // the frame (that is done below).
1228   __ bind(&push_stack_frame);
1229   FrameScope frame_scope(masm, StackFrame::MANUAL);
1230   __ pushq(rbp);  // Caller's frame pointer.
1231   __ movq(rbp, rsp);
1232   __ Push(kContextRegister);                 // Callee's context.
1233   __ Push(kJavaScriptCallTargetRegister);    // Callee's JS function.
1234   __ Push(kJavaScriptCallArgCountRegister);  // Actual argument count.
1235 
1236   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1237 
1238   // Load initial bytecode offset.
1239   __ Move(kInterpreterBytecodeOffsetRegister,
1240           BytecodeArray::kHeaderSize - kHeapObjectTag);
1241 
1242   // Push bytecode array and Smi tagged bytecode offset.
1243   __ Push(kInterpreterBytecodeArrayRegister);
1244   __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
1245   __ Push(rcx);
1246 
1247   // Allocate the local and temporary register file on the stack.
1248   Label stack_overflow;
1249   {
1250     // Load frame size from the BytecodeArray object.
1251     __ movl(rcx, FieldOperand(kInterpreterBytecodeArrayRegister,
1252                               BytecodeArray::kFrameSizeOffset));
1253 
1254     // Do a stack check to ensure we don't go over the limit.
1255     __ movq(rax, rsp);
1256     __ subq(rax, rcx);
1257     __ cmpq(rax, __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
1258     __ j(below, &stack_overflow);
1259 
1260     // If ok, push undefined as the initial value for all register file entries.
1261     Label loop_header;
1262     Label loop_check;
1263     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1264     __ j(always, &loop_check, Label::kNear);
1265     __ bind(&loop_header);
1266     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1267     __ Push(kInterpreterAccumulatorRegister);
1268     // Continue loop if not done.
1269     __ bind(&loop_check);
1270     __ subq(rcx, Immediate(kSystemPointerSize));
1271     __ j(greater_equal, &loop_header, Label::kNear);
1272   }
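  // In effect, the loop above is (sketch):
  //   while ((rcx -= kSystemPointerSize) >= 0) Push(undefined);
  // i.e. one slot of undefined per interpreter register in the frame.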
1273 
1274   // If the bytecode array has a valid incoming new target or generator object
1275   // register, initialize it with incoming value which was passed in rdx.
1276   Label no_incoming_new_target_or_generator_register;
1277   __ movsxlq(
1278       rcx,
1279       FieldOperand(kInterpreterBytecodeArrayRegister,
1280                    BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1281   __ testl(rcx, rcx);
1282   __ j(zero, &no_incoming_new_target_or_generator_register, Label::kNear);
1283   __ movq(Operand(rbp, rcx, times_system_pointer_size, 0), rdx);
1284   __ bind(&no_incoming_new_target_or_generator_register);
1285 
1286   // Perform interrupt stack check.
1287   // TODO(solanes): Merge with the real stack limit check above.
1288   Label stack_check_interrupt, after_stack_check_interrupt;
1289   __ cmpq(rsp, __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
1290   __ j(below, &stack_check_interrupt);
1291   __ bind(&after_stack_check_interrupt);
1292 
1293   // The accumulator is already loaded with undefined.
1294 
1295   // Load the dispatch table into a register and dispatch to the bytecode
1296   // handler at the current bytecode offset.
1297   Label do_dispatch;
1298   __ bind(&do_dispatch);
1299   __ Move(
1300       kInterpreterDispatchTableRegister,
1301       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1302   __ movzxbq(kScratchRegister,
1303              Operand(kInterpreterBytecodeArrayRegister,
1304                      kInterpreterBytecodeOffsetRegister, times_1, 0));
1305   __ movq(kJavaScriptCallCodeStartRegister,
1306           Operand(kInterpreterDispatchTableRegister, kScratchRegister,
1307                   times_system_pointer_size, 0));
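  // In effect (sketch):
  //   handler = dispatch_table[bytecode_array[bytecode_offset]];
  // with one handler entry address per bytecode; the handler is called below.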
1308   __ call(kJavaScriptCallCodeStartRegister);
1309   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1310 
1311   // Any returns to the entry trampoline are either due to the return bytecode
1312   // or the interpreter tail calling a builtin and then a dispatch.
1313 
1314   // Get bytecode array and bytecode offset from the stack frame.
1315   __ movq(kInterpreterBytecodeArrayRegister,
1316           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1317   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1318               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1319 
1320   // Either return, or advance to the next bytecode and dispatch.
1321   Label do_return;
1322   __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
1323                           kInterpreterBytecodeOffsetRegister, times_1, 0));
1324   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1325                                 kInterpreterBytecodeOffsetRegister, rbx, rcx,
1326                                 r8, &do_return);
1327   __ jmp(&do_dispatch);
1328 
1329   __ bind(&do_return);
1330   // The return value is in rax.
1331   LeaveInterpreterFrame(masm, rbx, rcx);
1332   __ ret(0);
1333 
1334   __ bind(&stack_check_interrupt);
1335   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1336   // for the call to the StackGuard.
1337   __ Move(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1338           Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1339                        kFunctionEntryBytecodeOffset));
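  // kFunctionEntryBytecodeOffset lies outside the valid bytecode range, so a
  // deopt during the StackGuard call is recognizable as the implicit
  // function-entry stack check rather than a real bytecode (see
  // InterpreterEnterAtNextBytecode below).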
1340   __ CallRuntime(Runtime::kStackGuard);
1341 
1342   // After the call, restore the bytecode array, bytecode offset and accumulator
1343   // registers again. Also, restore the bytecode offset in the stack to its
1344   // previous value.
1345   __ movq(kInterpreterBytecodeArrayRegister,
1346           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1347   __ Move(kInterpreterBytecodeOffsetRegister,
1348           BytecodeArray::kHeaderSize - kHeapObjectTag);
1349   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1350 
1351   __ SmiTag(rcx, kInterpreterBytecodeOffsetRegister);
1352   __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rcx);
1353 
1354   __ jmp(&after_stack_check_interrupt);
1355 
1356   __ bind(&compile_lazy);
1357   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1358   __ int3();  // Should not return.
1359 
1360   __ bind(&has_optimized_code_or_state);
1361   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1362                                                feedback_vector, closure);
1363 
1364   __ bind(&is_baseline);
1365   {
1366     // Load the feedback vector from the closure.
1367     __ LoadTaggedPointerField(
1368         feedback_vector,
1369         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1370     __ LoadTaggedPointerField(
1371         feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1372 
1373     Label install_baseline_code;
1374     // Check if feedback vector is valid. If not, call prepare for baseline to
1375     // allocate it.
1376     __ LoadMap(rcx, feedback_vector);
1377     __ CmpInstanceType(rcx, FEEDBACK_VECTOR_TYPE);
1378     __ j(not_equal, &install_baseline_code);
1379 
1380     // Check the tiering state.
1381     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1382                                              feedback_vector,
1383                                              &has_optimized_code_or_state);
1384 
1385     // Load the baseline code into the closure.
1386     __ Move(rcx, kInterpreterBytecodeArrayRegister);
1387     static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
1388     ReplaceClosureCodeWithOptimizedCode(
1389         masm, rcx, closure, kInterpreterBytecodeArrayRegister,
1390         WriteBarrierDescriptor::SlotAddressRegister());
1391     __ JumpCodeTObject(rcx);
1392 
1393     __ bind(&install_baseline_code);
1394     GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1395   }
1396 
1397   __ bind(&stack_overflow);
1398   __ CallRuntime(Runtime::kThrowStackOverflow);
1399   __ int3();  // Should not return.
1400 }
1401 
1402 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1403                                         Register start_address,
1404                                         Register scratch) {
1405   ASM_CODE_COMMENT(masm);
1406   // Find the argument with lowest address.
1407   __ movq(scratch, num_args);
1408   __ negq(scratch);
1409   __ leaq(start_address,
1410           Operand(start_address, scratch, times_system_pointer_size,
1411                   kSystemPointerSize));
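  // The leaq computes (sketch):
  //   start_address + (-num_args) * kSystemPointerSize + kSystemPointerSize
  //     = start_address - (num_args - 1) * kSystemPointerSize,
  // i.e. the address of the lowest-addressed argument.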
1412   // Push the arguments.
1413   __ PushArray(start_address, num_args, scratch,
1414                TurboAssembler::PushArrayOrder::kReverse);
1415 }
1416 
1417 // static
1418 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1419     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1420     InterpreterPushArgsMode mode) {
1421   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1422   // ----------- S t a t e -------------
1423   //  -- rax : the number of arguments
1424   //  -- rbx : the address of the first argument to be pushed. Subsequent
1425   //           arguments should be consecutive above this, in the same order as
1426   //           they are to be pushed onto the stack.
1427   //  -- rdi : the target to call (can be any Object).
1428   // -----------------------------------
1429   Label stack_overflow;
1430 
1431   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1432     // The spread argument should not be pushed.
1433     __ decl(rax);
1434   }
1435 
1436   __ movl(rcx, rax);
1437   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1438     __ decl(rcx);  // Exclude receiver.
1439   }
1440 
1441   // Add a stack check before pushing arguments.
1442   __ StackOverflowCheck(rcx, &stack_overflow);
1443 
1444   // Pop return address to allow tail-call after pushing arguments.
1445   __ PopReturnAddressTo(kScratchRegister);
1446 
1447   // rbx and rdx will be modified.
1448   GenerateInterpreterPushArgs(masm, rcx, rbx, rdx);
1449 
1450   // Push "undefined" as the receiver arg if we need to.
1451   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1452     __ PushRoot(RootIndex::kUndefinedValue);
1453   }
1454 
1455   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1456     // Pass the spread in the register rbx.
1457     // rbx already points to the penultimate argument; the spread
1458     // is below that.
1459     __ movq(rbx, Operand(rbx, -kSystemPointerSize));
1460   }
1461 
1462   // Call the target.
1463   __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
1464 
1465   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1466     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1467             RelocInfo::CODE_TARGET);
1468   } else {
1469     __ Jump(masm->isolate()->builtins()->Call(receiver_mode),
1470             RelocInfo::CODE_TARGET);
1471   }
1472 
1473   // Throw stack overflow exception.
1474   __ bind(&stack_overflow);
1475   {
1476     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1477     // This should be unreachable.
1478     __ int3();
1479   }
1480 }
1481 
1482 // static
1483 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1484     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1485   // ----------- S t a t e -------------
1486   //  -- rax : the number of arguments
1487   //  -- rdx : the new target (either the same as the constructor or
1488   //           the JSFunction on which new was invoked initially)
1489   //  -- rdi : the constructor to call (can be any Object)
1490   //  -- rbx : the allocation site feedback if available, undefined otherwise
1491   //  -- rcx : the address of the first argument to be pushed. Subsequent
1492   //           arguments should be consecutive above this, in the same order as
1493   //           they are to be pushed onto the stack.
1494   // -----------------------------------
1495   Label stack_overflow;
1496 
1497   // Add a stack check before pushing arguments.
1498   __ StackOverflowCheck(rax, &stack_overflow);
1499 
1500   // Pop return address to allow tail-call after pushing arguments.
1501   __ PopReturnAddressTo(kScratchRegister);
1502 
1503   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1504     // The spread argument should not be pushed.
1505     __ decl(rax);
1506   }
1507 
1508   // rcx and r8 will be modified.
1509   Register argc_without_receiver = r11;
1510   __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
1511   GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
1512 
1513   // Push slot for the receiver to be constructed.
1514   __ Push(Immediate(0));
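  // The zero pushed above merely reserves the receiver slot; the actual
  // receiver object is created later by the Construct machinery (a sketch of
  // the protocol, not enforced here).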
1515 
1516   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1517     // Pass the spread in the register rbx.
1518     __ movq(rbx, Operand(rcx, -kSystemPointerSize));
1519     // Push return address in preparation for the tail-call.
1520     __ PushReturnAddressFrom(kScratchRegister);
1521   } else {
1522     __ PushReturnAddressFrom(kScratchRegister);
1523     __ AssertUndefinedOrAllocationSite(rbx);
1524   }
1525 
1526   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1527     // Tail call to the array construct stub (still in the caller
1528     // context at this point).
1529     __ AssertFunction(rdi);
1530     // Jump to the constructor function (rax, rbx, rdx passed on).
1531     __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1532             RelocInfo::CODE_TARGET);
1533   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1534     // Call the constructor (rax, rdx, rdi passed on).
1535     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1536             RelocInfo::CODE_TARGET);
1537   } else {
1538     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1539     // Call the constructor (rax, rdx, rdi passed on).
1540     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1541   }
1542 
1543   // Throw stack overflow exception.
1544   __ bind(&stack_overflow);
1545   {
1546     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1547     // This should be unreachable.
1548     __ int3();
1549   }
1550 }
1551 
1552 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1553   // Set the return address to the correct point in the interpreter entry
1554   // trampoline.
1555   Label builtin_trampoline, trampoline_loaded;
1556   Smi interpreter_entry_return_pc_offset(
1557       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1558   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1559 
1560   // If the SFI function_data is an InterpreterData, the function will have a
1561   // custom copy of the interpreter entry trampoline for profiling. If so,
1562   // get the custom trampoline, otherwise grab the entry address of the global
1563   // trampoline.
1564   __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
1565   __ LoadTaggedPointerField(
1566       rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
1567   __ LoadTaggedPointerField(
1568       rbx, FieldOperand(rbx, SharedFunctionInfo::kFunctionDataOffset));
1569   __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
1570   __ j(not_equal, &builtin_trampoline, Label::kNear);
1571 
1572   __ LoadTaggedPointerField(
1573       rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
1574   __ LoadCodeTEntry(rbx, rbx);
1575   __ jmp(&trampoline_loaded, Label::kNear);
1576 
1577   __ bind(&builtin_trampoline);
1578   // TODO(jgruber): Replace this by a lookup in the builtin entry table.
1579   __ movq(rbx,
1580           __ ExternalReferenceAsOperand(
1581               ExternalReference::
1582                   address_of_interpreter_entry_trampoline_instruction_start(
1583                       masm->isolate()),
1584               kScratchRegister));
1585 
1586   __ bind(&trampoline_loaded);
1587   __ addq(rbx, Immediate(interpreter_entry_return_pc_offset.value()));
1588   __ Push(rbx);
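  // The pushed return address is, in effect,
  //   trampoline_start + interpreter_entry_return_pc_offset,
  // i.e. the point in the entry trampoline just after its dispatch call, so
  // returning from a bytecode handler resumes dispatch there.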
1589 
1590   // Initialize dispatch table register.
1591   __ Move(
1592       kInterpreterDispatchTableRegister,
1593       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1594 
1595   // Get the bytecode array pointer from the frame.
1596   __ movq(kInterpreterBytecodeArrayRegister,
1597           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1598 
1599   if (FLAG_debug_code) {
1600     // Check that the function data field is actually a BytecodeArray object.
1601     __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
1602     __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1603                      rbx);
1604     __ Assert(
1605         equal,
1606         AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1607   }
1608 
1609   // Get the target bytecode offset from the frame.
1610   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1611               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1612 
1613   if (FLAG_debug_code) {
1614     Label okay;
1615     __ cmpq(kInterpreterBytecodeOffsetRegister,
1616             Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1617     __ j(greater_equal, &okay, Label::kNear);
1618     __ int3();
1619     __ bind(&okay);
1620   }
1621 
1622   // Dispatch to the target bytecode.
1623   __ movzxbq(kScratchRegister,
1624              Operand(kInterpreterBytecodeArrayRegister,
1625                      kInterpreterBytecodeOffsetRegister, times_1, 0));
1626   __ movq(kJavaScriptCallCodeStartRegister,
1627           Operand(kInterpreterDispatchTableRegister, kScratchRegister,
1628                   times_system_pointer_size, 0));
1629   __ jmp(kJavaScriptCallCodeStartRegister);
1630 }
1631 
1632 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1633   // Get bytecode array and bytecode offset from the stack frame.
1634   __ movq(kInterpreterBytecodeArrayRegister,
1635           Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1636   __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1637               Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1638 
1639   Label enter_bytecode, function_entry_bytecode;
1640   __ cmpq(kInterpreterBytecodeOffsetRegister,
1641           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
1642                     kFunctionEntryBytecodeOffset));
1643   __ j(equal, &function_entry_bytecode);
1644 
1645   // Load the current bytecode.
1646   __ movzxbq(rbx, Operand(kInterpreterBytecodeArrayRegister,
1647                           kInterpreterBytecodeOffsetRegister, times_1, 0));
1648 
1649   // Advance to the next bytecode.
1650   Label if_return;
1651   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1652                                 kInterpreterBytecodeOffsetRegister, rbx, rcx,
1653                                 r8, &if_return);
1654 
1655   __ bind(&enter_bytecode);
1656   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1657   __ SmiTag(kInterpreterBytecodeOffsetRegister);
1658   __ movq(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1659           kInterpreterBytecodeOffsetRegister);
1660 
1661   Generate_InterpreterEnterBytecode(masm);
1662 
1663   __ bind(&function_entry_bytecode);
1664   // If the code deoptimizes during the implicit function entry stack interrupt
1665   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1666   // not a valid bytecode offset. Detect this case and advance to the first
1667   // actual bytecode.
1668   __ Move(kInterpreterBytecodeOffsetRegister,
1669           BytecodeArray::kHeaderSize - kHeapObjectTag);
1670   __ jmp(&enter_bytecode);
1671 
1672   // We should never take the if_return path.
1673   __ bind(&if_return);
1674   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1675 }
1676 
1677 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1678   Generate_InterpreterEnterBytecode(masm);
1679 }
1680 
1681 // static
1682 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1683   Register feedback_vector = r8;
1684   Register optimization_state = rcx;
1685   Register return_address = r15;
1686 
1687 #ifdef DEBUG
1688   for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) {
1689     DCHECK(
1690         !AreAliased(feedback_vector, optimization_state, return_address, reg));
1691   }
1692 #endif
1693 
1694   auto descriptor =
1695       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1696   Register closure = descriptor.GetRegisterParameter(
1697       BaselineOutOfLinePrologueDescriptor::kClosure);
1698   // Load the feedback vector from the closure.
1699   __ LoadTaggedPointerField(
1700       feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1701   __ LoadTaggedPointerField(feedback_vector,
1702                             FieldOperand(feedback_vector, Cell::kValueOffset));
1703   if (FLAG_debug_code) {
1704     __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
1705     __ Assert(equal, AbortReason::kExpectedFeedbackVector);
1706   }
1707 
1708   // Check the tiering state.
1709   Label has_optimized_code_or_state;
1710   LoadTieringStateAndJumpIfNeedsProcessing(
1711       masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1712 
1713   // Increment invocation count for the function.
1714   __ incl(
1715       FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1716 
1717   // Save the return address, so that we can push it to the end of the newly
1718   // set-up frame once we're done setting it up.
1719   __ PopReturnAddressTo(return_address);
1720   FrameScope frame_scope(masm, StackFrame::MANUAL);
1721   {
1722     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1723     __ EnterFrame(StackFrame::BASELINE);
1724 
1725     __ Push(descriptor.GetRegisterParameter(
1726         BaselineOutOfLinePrologueDescriptor::kCalleeContext));  // Callee's
1727                                                                 // context.
1728     Register callee_js_function = descriptor.GetRegisterParameter(
1729         BaselineOutOfLinePrologueDescriptor::kClosure);
1730     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1731     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1732     __ Push(callee_js_function);  // Callee's JS function.
1733     __ Push(descriptor.GetRegisterParameter(
1734         BaselineOutOfLinePrologueDescriptor::
1735             kJavaScriptCallArgCount));  // Actual argument
1736                                         // count.
1737 
1738     // We'll use the bytecode for both code age/OSR resetting, and pushing
1739     // onto the frame, so load it into a register.
1740     Register bytecode_array = descriptor.GetRegisterParameter(
1741         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1742     ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1743     __ Push(bytecode_array);
1744 
1745     // Baseline code frames store the feedback vector where interpreter would
1746     // store the bytecode offset.
1747     __ Push(feedback_vector);
1748   }
1749 
1750   Register new_target = descriptor.GetRegisterParameter(
1751       BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
1752 
1753   Label call_stack_guard;
1754   Register frame_size = descriptor.GetRegisterParameter(
1755       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1756   {
1757     ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check");
1758     // Stack check. This folds the checks for both the interrupt stack limit
1759     // check and the real stack limit into one by just checking for the
1760     // interrupt limit. The interrupt limit is either equal to the real stack
1761     // limit or tighter. By ensuring we have space until that limit after
1762     // building the frame we can quickly precheck both at once.
1763     //
1764     // TODO(v8:11429): Backport this folded check to the
1765     // InterpreterEntryTrampoline.
1766     __ Move(kScratchRegister, rsp);
1767     DCHECK_NE(frame_size, new_target);
1768     __ subq(kScratchRegister, frame_size);
1769     __ cmpq(kScratchRegister,
1770             __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit));
1771     __ j(below, &call_stack_guard);
1772   }
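  // In other words (sketch): the fast path requires
  //   rsp - frame_size >= interrupt_limit,
  // and since interrupt_limit >= real_limit, this single compare covers both
  // checks at once.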
1773 
1774   // Push the return address back onto the stack for return.
1775   __ PushReturnAddressFrom(return_address);
1776   // Return to caller pushed pc, without any frame teardown.
1777   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1778   __ Ret();
1779 
1780   __ bind(&has_optimized_code_or_state);
1781   {
1782     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1783     // Drop the return address, rebalancing the return stack buffer by using
1784     // JumpMode::kPushAndReturn. We can't leave the slot and overwrite it on
1785     // return since we may do a runtime call along the way that requires the
1786     // stack to only contain valid frames.
1787     __ Drop(1);
1788     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1789                                                  feedback_vector, closure,
1790                                                  JumpMode::kPushAndReturn);
1791     __ Trap();
1792   }
1793 
1794   __ bind(&call_stack_guard);
1795   {
1796     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1797     {
1798       // Push the baseline code return address now, as if it had been pushed by
1799       // the call to this builtin.
1800       __ PushReturnAddressFrom(return_address);
1801       FrameScope inner_frame_scope(masm, StackFrame::INTERNAL);
1802       // Save incoming new target or generator
1803       __ Push(new_target);
1804       __ SmiTag(frame_size);
1805       __ Push(frame_size);
1806       __ CallRuntime(Runtime::kStackGuardWithGap, 1);
1807       __ Pop(new_target);
1808     }
1809 
1810     // Return to caller pushed pc, without any frame teardown.
1811     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1812     __ Ret();
1813   }
1814 }
1815 
1816 namespace {
1817 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1818                                       bool java_script_builtin,
1819                                       bool with_result) {
1820   ASM_CODE_COMMENT(masm);
1821   const RegisterConfiguration* config(RegisterConfiguration::Default());
1822   int allocatable_register_count = config->num_allocatable_general_registers();
1823   if (with_result) {
1824     if (java_script_builtin) {
1825       // kScratchRegister is not included in the allocatable registers.
1826       __ movq(kScratchRegister, rax);
1827     } else {
1828       // Overwrite the hole inserted by the deoptimizer with the return value
1829       // from the LAZY deopt point.
1830       __ movq(
1831           Operand(rsp, config->num_allocatable_general_registers() *
1832                                kSystemPointerSize +
1833                            BuiltinContinuationFrameConstants::kFixedFrameSize),
1834           rax);
1835     }
1836   }
1837   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1838     int code = config->GetAllocatableGeneralCode(i);
1839     __ popq(Register::from_code(code));
1840     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1841       __ SmiUntag(Register::from_code(code));
1842     }
1843   }
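  // The loop above restores every allocatable register from the continuation
  // frame built by the deoptimizer, in reverse allocation order; for
  // JavaScript builtins the argument count was stored Smi-tagged and is
  // untagged on the way out.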
1844   if (with_result && java_script_builtin) {
1845     // Overwrite the hole inserted by the deoptimizer with the return value from
1846     // the LAZY deopt point. rax contains the arguments count, the return value
1847     // from LAZY is always the last argument.
1848     __ movq(Operand(rsp, rax, times_system_pointer_size,
1849                     BuiltinContinuationFrameConstants::kFixedFrameSize -
1850                         kJSArgcReceiverSlots * kSystemPointerSize),
1851             kScratchRegister);
1852   }
1853   __ movq(
1854       rbp,
1855       Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1856   const int offsetToPC =
1857       BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1858       kSystemPointerSize;
1859   __ popq(Operand(rsp, offsetToPC));
1860   __ Drop(offsetToPC / kSystemPointerSize);
1861 
1862   // Replace the builtin index Smi on the stack with the instruction start
1863   // address of the builtin from the builtins table, and then Ret to this
1864   // address.
1865   __ movq(kScratchRegister, Operand(rsp, 0));
1866   __ movq(kScratchRegister,
1867           __ EntryFromBuiltinIndexAsOperand(kScratchRegister));
1868   __ movq(Operand(rsp, 0), kScratchRegister);
1869 
1870   __ Ret();
1871 }
1872 }  // namespace
1873 
1874 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1875   Generate_ContinueToBuiltinHelper(masm, false, false);
1876 }
1877 
1878 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1879     MacroAssembler* masm) {
1880   Generate_ContinueToBuiltinHelper(masm, false, true);
1881 }
1882 
1883 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1884   Generate_ContinueToBuiltinHelper(masm, true, false);
1885 }
1886 
1887 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1888     MacroAssembler* masm) {
1889   Generate_ContinueToBuiltinHelper(masm, true, true);
1890 }
1891 
1892 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1893   // Enter an internal frame.
1894   {
1895     FrameScope scope(masm, StackFrame::INTERNAL);
1896     __ CallRuntime(Runtime::kNotifyDeoptimized);
1897     // Tear down internal frame.
1898   }
1899 
1900   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), rax.code());
1901   __ movq(rax, Operand(rsp, kPCOnStackSize));
1902   __ ret(1 * kSystemPointerSize);  // Remove rax.
1903 }
1904 
1905 // static
1906 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1907   // ----------- S t a t e -------------
1908   //  -- rax     : argc
1909   //  -- rsp[0]  : return address
1910   //  -- rsp[1]  : receiver
1911   //  -- rsp[2]  : thisArg
1912   //  -- rsp[3]  : argArray
1913   // -----------------------------------
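  // In JS terms this implements (sketch):
  //   Function.prototype.apply(thisArg, argArray)
  // where a null or undefined argArray degenerates to a call with no
  // arguments (step 4b below).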
1914 
1915   // 1. Load receiver into rdi, argArray into rbx (if present), remove all
1916   // arguments from the stack (including the receiver), and push thisArg (if
1917   // present) instead.
1918   {
1919     Label no_arg_array, no_this_arg;
1920     StackArgumentsAccessor args(rax);
1921     __ LoadRoot(rdx, RootIndex::kUndefinedValue);
1922     __ movq(rbx, rdx);
1923     __ movq(rdi, args[0]);
1924     __ cmpq(rax, Immediate(JSParameterCount(0)));
1925     __ j(equal, &no_this_arg, Label::kNear);
1926     {
1927       __ movq(rdx, args[1]);
1928       __ cmpq(rax, Immediate(JSParameterCount(1)));
1929       __ j(equal, &no_arg_array, Label::kNear);
1930       __ movq(rbx, args[2]);
1931       __ bind(&no_arg_array);
1932     }
1933     __ bind(&no_this_arg);
1934     __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
1935                                        TurboAssembler::kCountIsInteger,
1936                                        TurboAssembler::kCountIncludesReceiver);
1937   }
1938 
1939   // ----------- S t a t e -------------
1940   //  -- rbx     : argArray
1941   //  -- rdi     : receiver
1942   //  -- rsp[0]  : return address
1943   //  -- rsp[8]  : thisArg
1944   // -----------------------------------
1945 
1946   // 2. We don't need to check explicitly for callable receiver here,
1947   // since that's the first thing the Call/CallWithArrayLike builtins
1948   // will do.
1949 
1950   // 3. Tail call with no arguments if argArray is null or undefined.
1951   Label no_arguments;
1952   __ JumpIfRoot(rbx, RootIndex::kNullValue, &no_arguments, Label::kNear);
1953   __ JumpIfRoot(rbx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear);
1954 
1955   // 4a. Apply the receiver to the given argArray.
1956   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1957           RelocInfo::CODE_TARGET);
1958 
1959   // 4b. The argArray is either null or undefined, so we tail call without any
1960   // arguments to the receiver. Since we did not create a frame for
1961   // Function.prototype.apply() yet, we use a normal Call builtin here.
1962   __ bind(&no_arguments);
1963   {
1964     __ Move(rax, JSParameterCount(0));
1965     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1966   }
1967 }
1968 
1969 // static
1970 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1971   // Stack Layout:
1972   // rsp[0]           : Return address
1973   // rsp[8]           : Argument 0 (receiver: callable to call)
1974   // rsp[16]          : Argument 1
1975   //  ...
1976   // rsp[8 * n]       : Argument n-1
1977   // rsp[8 * (n + 1)] : Argument n
1978   // rax contains the number of arguments, n.
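  // In JS terms (sketch): f.call(thisArg, ...args) invokes f with thisArg as
  // the receiver; the callable f itself is removed from the argument list.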
1979 
1980   // 1. Get the callable to call (passed as receiver) from the stack.
1981   {
1982     StackArgumentsAccessor args(rax);
1983     __ movq(rdi, args.GetReceiverOperand());
1984   }
1985 
1986   // 2. Save the return address and drop the callable.
1987   __ PopReturnAddressTo(rbx);
1988   __ Pop(kScratchRegister);
1989 
1990   // 3. Make sure we have at least one argument.
1991   {
1992     Label done;
1993     __ cmpq(rax, Immediate(JSParameterCount(0)));
1994     __ j(greater, &done, Label::kNear);
1995     __ PushRoot(RootIndex::kUndefinedValue);
1996     __ incq(rax);
1997     __ bind(&done);
1998   }
1999 
2000   // 4. Push back the return address one slot down on the stack (overwriting the
2001   // original callable), making the original first argument the new receiver.
2002   __ PushReturnAddressFrom(rbx);
2003   __ decq(rax);  // One fewer argument (first argument is new receiver).
2004 
2005   // 5. Call the callable.
2006   // Since we did not create a frame for Function.prototype.call() yet,
2007   // we use a normal Call builtin here.
2008   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2009 }
2010 
2011 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2012   // ----------- S t a t e -------------
2013   //  -- rax     : argc
2014   //  -- rsp[0]  : return address
2015   //  -- rsp[8]  : receiver
2016   //  -- rsp[16] : target         (if argc >= 1)
2017   //  -- rsp[24] : thisArgument   (if argc >= 2)
2018   //  -- rsp[32] : argumentsList  (if argc == 3)
2019   // -----------------------------------
2020 
2021   // 1. Load target into rdi (if present), argumentsList into rbx (if present),
2022   // remove all arguments from the stack (including the receiver), and push
2023   // thisArgument (if present) instead.
2024   {
2025     Label done;
2026     StackArgumentsAccessor args(rax);
2027     __ LoadRoot(rdi, RootIndex::kUndefinedValue);
2028     __ movq(rdx, rdi);
2029     __ movq(rbx, rdi);
2030     __ cmpq(rax, Immediate(JSParameterCount(1)));
2031     __ j(below, &done, Label::kNear);
2032     __ movq(rdi, args[1]);  // target
2033     __ j(equal, &done, Label::kNear);
2034     __ movq(rdx, args[2]);  // thisArgument
2035     __ cmpq(rax, Immediate(JSParameterCount(3)));
2036     __ j(below, &done, Label::kNear);
2037     __ movq(rbx, args[3]);  // argumentsList
2038     __ bind(&done);
2039     __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
2040                                        TurboAssembler::kCountIsInteger,
2041                                        TurboAssembler::kCountIncludesReceiver);
2042   }
2043 
2044   // ----------- S t a t e -------------
2045   //  -- rbx     : argumentsList
2046   //  -- rdi     : target
2047   //  -- rsp[0]  : return address
2048   //  -- rsp[8]  : thisArgument
2049   // -----------------------------------
2050 
2051   // 2. We don't need to check explicitly for callable target here,
2052   // since that's the first thing the Call/CallWithArrayLike builtins
2053   // will do.
2054 
2055   // 3. Apply the target to the given argumentsList.
2056   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2057           RelocInfo::CODE_TARGET);
2058 }
2059 
2060 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2061   // ----------- S t a t e -------------
2062   //  -- rax     : argc
2063   //  -- rsp[0]  : return address
2064   //  -- rsp[8]  : receiver
2065   //  -- rsp[16] : target
2066   //  -- rsp[24] : argumentsList
2067   //  -- rsp[32] : new.target (optional)
2068   // -----------------------------------
2069 
2070   // 1. Load target into rdi (if present), argumentsList into rbx (if present),
2071   // new.target into rdx (if present, otherwise use target), remove all
2072   // arguments from the stack (including the receiver), and push thisArgument
2073   // (if present) instead.
2074   {
2075     Label done;
2076     StackArgumentsAccessor args(rax);
2077     __ LoadRoot(rdi, RootIndex::kUndefinedValue);
2078     __ movq(rdx, rdi);
2079     __ movq(rbx, rdi);
2080     __ cmpq(rax, Immediate(JSParameterCount(1)));
2081     __ j(below, &done, Label::kNear);
2082     __ movq(rdi, args[1]);                     // target
2083     __ movq(rdx, rdi);                         // new.target defaults to target
2084     __ j(equal, &done, Label::kNear);
2085     __ movq(rbx, args[2]);  // argumentsList
2086     __ cmpq(rax, Immediate(JSParameterCount(3)));
2087     __ j(below, &done, Label::kNear);
2088     __ movq(rdx, args[3]);  // new.target
2089     __ bind(&done);
2090     __ DropArgumentsAndPushNewReceiver(
2091         rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
2092         TurboAssembler::kCountIsInteger,
2093         TurboAssembler::kCountIncludesReceiver);
2094   }
2095 
2096   // ----------- S t a t e -------------
2097   //  -- rbx     : argumentsList
2098   //  -- rdx     : new.target
2099   //  -- rdi     : target
2100   //  -- rsp[0]  : return address
2101   //  -- rsp[8]  : receiver (undefined)
2102   // -----------------------------------
2103 
2104   // 2. We don't need to check explicitly for constructor target here,
2105   // since that's the first thing the Construct/ConstructWithArrayLike
2106   // builtins will do.
2107 
2108   // 3. We don't need to check explicitly for constructor new.target here,
2109   // since that's the second thing the Construct/ConstructWithArrayLike
2110   // builtins will do.
2111 
2112   // 4. Construct the target with the given new.target and argumentsList.
2113   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2114           RelocInfo::CODE_TARGET);
2115 }
2116 
2117 namespace {
2118 
2119 // Allocate new stack space for |count| arguments and shift all existing
2120 // arguments already on the stack. |pointer_to_new_space_out| points to the
2121 // first free slot on the stack to copy additional arguments to and
2122 // |argc_in_out| is updated to include |count|.
2123 void Generate_AllocateSpaceAndShiftExistingArguments(
2124     MacroAssembler* masm, Register count, Register argc_in_out,
2125     Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2126   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2127                      scratch2, kScratchRegister));
2128   // Use pointer_to_new_space_out as scratch until we set it to the correct
2129   // value at the end.
2130   Register old_rsp = pointer_to_new_space_out;
2131   Register new_space = kScratchRegister;
2132   __ movq(old_rsp, rsp);
2133 
2134   __ leaq(new_space, Operand(count, times_system_pointer_size, 0));
2135   __ AllocateStackSpace(new_space);
2136 
2137   Register copy_count = argc_in_out;
2138   Register current = scratch2;
2139   Register value = kScratchRegister;
2140 
2141   Label loop, entry;
2142   __ Move(current, 0);
2143   __ jmp(&entry);
2144   __ bind(&loop);
2145   __ movq(value, Operand(old_rsp, current, times_system_pointer_size, 0));
2146   __ movq(Operand(rsp, current, times_system_pointer_size, 0), value);
2147   __ incq(current);
2148   __ bind(&entry);
2149   __ cmpq(current, copy_count);
2150   __ j(less_equal, &loop, Label::kNear);
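  // Note: the loop runs while current <= copy_count, copying copy_count + 1
  // slots in total: all argument slots (receiver included) plus the return
  // address at the old stack top.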
2151 
2152   // Point to the next free slot above the shifted arguments (copy_count + 1
2153   // slot for the return address).
2154   __ leaq(
2155       pointer_to_new_space_out,
2156       Operand(rsp, copy_count, times_system_pointer_size, kSystemPointerSize));
2157   // We use addl instead of addq here because we can omit REX.W, saving 1 byte.
2158   // We are especially constrained here because we are close to reaching the
2159   // limit for a near jump to the stackoverflow label, so every byte counts.
2160   __ addl(argc_in_out, count);  // Update total number of arguments.
2161 }
2162 
2163 }  // namespace
2164 
2165 // static
2166 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2167 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2168                                                Handle<CodeT> code) {
2169   // ----------- S t a t e -------------
2170   //  -- rdi    : target
2171   //  -- rax    : number of parameters on the stack
2172   //  -- rbx    : arguments list (a FixedArray)
2173   //  -- rcx    : len (number of elements to push from args)
2174   //  -- rdx    : new.target (for [[Construct]])
2175   //  -- rsp[0] : return address
2176   // -----------------------------------
2177 
2178   if (FLAG_debug_code) {
2179     // Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
2180     Label ok, fail;
2181     __ AssertNotSmi(rbx);
2182     Register map = r9;
2183     __ LoadMap(map, rbx);
2184     __ CmpInstanceType(map, FIXED_ARRAY_TYPE);
2185     __ j(equal, &ok);
2186     __ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
2187     __ j(not_equal, &fail);
2188     __ Cmp(rcx, 0);
2189     __ j(equal, &ok);
2190     // Fall through.
2191     __ bind(&fail);
2192     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2193 
2194     __ bind(&ok);
2195   }
2196 
2197   Label stack_overflow;
2198   __ StackOverflowCheck(rcx, &stack_overflow, Label::kNear);
2199 
2200   // Push additional arguments onto the stack.
2201   // Move the arguments already in the stack,
2202   // including the receiver and the return address.
2203   // rcx: Number of arguments to make room for.
2204   // rax: Number of arguments already on the stack.
2205   // r8: Points to first free slot on the stack after arguments were shifted.
2206   Generate_AllocateSpaceAndShiftExistingArguments(masm, rcx, rax, r8, r9, r12);
2207   // Copy the additional arguments onto the stack.
2208   {
2209     Register value = r12;
2210     Register src = rbx, dest = r8, num = rcx, current = r9;
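    // E.g. spreading an array with a hole, f(...[1, , 3]), must observe
    // undefined for the hole, so holes are rewritten while copying (see the
    // loop below).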
2211     __ Move(current, 0);
2212     Label done, push, loop;
2213     __ bind(&loop);
2214     __ cmpl(current, num);
2215     __ j(equal, &done, Label::kNear);
2216     // Turn the hole into undefined as we go.
2217     __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
2218                                               FixedArray::kHeaderSize));
2219     __ CompareRoot(value, RootIndex::kTheHoleValue);
2220     __ j(not_equal, &push, Label::kNear);
2221     __ LoadRoot(value, RootIndex::kUndefinedValue);
2222     __ bind(&push);
2223     __ movq(Operand(dest, current, times_system_pointer_size, 0), value);
2224     __ incl(current);
2225     __ jmp(&loop);
2226     __ bind(&done);
2227   }
2228 
2229   // Tail-call to the actual Call or Construct builtin.
2230   __ Jump(code, RelocInfo::CODE_TARGET);
2231 
2232   __ bind(&stack_overflow);
2233   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2234 }
2235 
2236 // static
2237 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2238                                                       CallOrConstructMode mode,
2239                                                       Handle<CodeT> code) {
2240   // ----------- S t a t e -------------
2241   //  -- rax : the number of arguments
2242   //  -- rdx : the new target (for [[Construct]] calls)
2243   //  -- rdi : the target to call (can be any Object)
2244   //  -- rcx : start index (to support rest parameters)
2245   // -----------------------------------
2246 
2247   // Check if new.target has a [[Construct]] internal method.
2248   if (mode == CallOrConstructMode::kConstruct) {
2249     Label new_target_constructor, new_target_not_constructor;
2250     __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
2251     __ LoadMap(rbx, rdx);
2252     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
2253              Immediate(Map::Bits1::IsConstructorBit::kMask));
2254     __ j(not_zero, &new_target_constructor, Label::kNear);
2255     __ bind(&new_target_not_constructor);
2256     {
2257       FrameScope scope(masm, StackFrame::MANUAL);
2258       __ EnterFrame(StackFrame::INTERNAL);
2259       __ Push(rdx);
2260       __ CallRuntime(Runtime::kThrowNotConstructor);
2261     }
2262     __ bind(&new_target_constructor);
2263   }
2264 
2265   Label stack_done, stack_overflow;
2266   __ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
2267   __ decq(r8);  // Exclude receiver.
2268   __ subl(r8, rcx);
2269   __ j(less_equal, &stack_done);
2270   {
2271     // ----------- S t a t e -------------
2272     //  -- rax : the number of arguments already in the stack
2273     //  -- rbp : points to the caller's stack frame
2274     //  -- rcx : start index (to support rest parameters)
2275     //  -- rdx : the new target (for [[Construct]] calls)
2276     //  -- rdi : the target to call (can be any Object)
2277     //  -- r8  : number of arguments to copy, i.e. arguments count - start index
2278     // -----------------------------------
2279 
2280     // Check for stack overflow.
2281     __ StackOverflowCheck(r8, &stack_overflow, Label::kNear);
2282 
2283     // Forward the arguments from the caller frame.
2284     // Move the arguments already in the stack,
2285     // including the receiver and the return address.
2286     // r8: Number of arguments to make room for.
2287     // rax: Number of arguments already on the stack.
2288     // r9: Points to first free slot on the stack after arguments were shifted.
2289     Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, rax, r9, r12,
2290                                                     r15);
2291 
2292     // Point to the first argument to copy (skipping receiver).
2293     __ leaq(rcx, Operand(rcx, times_system_pointer_size,
2294                          CommonFrameConstants::kFixedFrameSizeAboveFp +
2295                              kSystemPointerSize));
2296     __ addq(rcx, rbp);
2297 
2298     // Copy the additional caller arguments onto the stack.
2299     // TODO(victorgomes): Consider using forward order as potentially more cache
2300     // friendly.
2301     {
2302       Register src = rcx, dest = r9, num = r8;
2303       Label loop;
2304       __ bind(&loop);
2305       __ decq(num);
2306       __ movq(kScratchRegister,
2307               Operand(src, num, times_system_pointer_size, 0));
2308       __ movq(Operand(dest, num, times_system_pointer_size, 0),
2309               kScratchRegister);
2310       __ j(not_zero, &loop);
2311     }
2312   }
2313   __ jmp(&stack_done, Label::kNear);
2314   __ bind(&stack_overflow);
2315   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2316   __ bind(&stack_done);
2317 
2318   // Tail-call to the {code} handler.
2319   __ Jump(code, RelocInfo::CODE_TARGET);
2320 }
2321 
2322 // static
2323 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2324                                      ConvertReceiverMode mode) {
2325   // ----------- S t a t e -------------
2326   //  -- rax : the number of arguments
2327   //  -- rdi : the function to call (checked to be a JSFunction)
2328   // -----------------------------------
2329 
2330   StackArgumentsAccessor args(rax);
2331   __ AssertCallableFunction(rdi);
2332 
2333   __ LoadTaggedPointerField(
2334       rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2335   // ----------- S t a t e -------------
2336   //  -- rax : the number of arguments
2337   //  -- rdx : the shared function info.
2338   //  -- rdi : the function to call (checked to be a JSFunction)
2339   // -----------------------------------
2340 
2341   // Enter the context of the function; ToObject has to run in the function
2342   // context, and we also need to take the global proxy from the function
2343   // context in case of conversion.
2344   __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
2345   // We need to convert the receiver for non-native sloppy mode functions.
2346   Label done_convert;
2347   __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
2348            Immediate(SharedFunctionInfo::IsNativeBit::kMask |
2349                      SharedFunctionInfo::IsStrictBit::kMask));
2350   __ j(not_zero, &done_convert);
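  // E.g. in sloppy mode, f.call(42) must see `this` as a Number wrapper
  // object, so primitive receivers are boxed via ToObject below; strict-mode
  // and native functions skip this conversion.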
2351   {
2352     // ----------- S t a t e -------------
2353     //  -- rax : the number of arguments
2354     //  -- rdx : the shared function info.
2355     //  -- rdi : the function to call (checked to be a JSFunction)
2356     //  -- rsi : the function context.
2357     // -----------------------------------
2358 
2359     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2360       // Patch receiver to global proxy.
2361       __ LoadGlobalProxy(rcx);
2362     } else {
2363       Label convert_to_object, convert_receiver;
2364       __ movq(rcx, args.GetReceiverOperand());
2365       __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
2366       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2367       __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
2368       __ j(above_equal, &done_convert);
2369       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2370         Label convert_global_proxy;
2371         __ JumpIfRoot(rcx, RootIndex::kUndefinedValue, &convert_global_proxy,
2372                       Label::kNear);
2373         __ JumpIfNotRoot(rcx, RootIndex::kNullValue, &convert_to_object,
2374                          Label::kNear);
2375         __ bind(&convert_global_proxy);
2376         {
2377           // Patch receiver to global proxy.
2378           __ LoadGlobalProxy(rcx);
2379         }
2380         __ jmp(&convert_receiver);
2381       }
2382       __ bind(&convert_to_object);
2383       {
2384         // Convert receiver using ToObject.
2385         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2386         // in the fast case? (fall back to AllocateInNewSpace?)
2387         FrameScope scope(masm, StackFrame::INTERNAL);
2388         __ SmiTag(rax);
2389         __ Push(rax);
2390         __ Push(rdi);
2391         __ movq(rax, rcx);
2392         __ Push(rsi);
2393         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2394                 RelocInfo::CODE_TARGET);
2395         __ Pop(rsi);
2396         __ movq(rcx, rax);
2397         __ Pop(rdi);
2398         __ Pop(rax);
2399         __ SmiUntag(rax);
2400       }
2401       __ LoadTaggedPointerField(
2402           rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
2403       __ bind(&convert_receiver);
2404     }
2405     __ movq(args.GetReceiverOperand(), rcx);
2406   }
2407   __ bind(&done_convert);
2408 
2409   // ----------- S t a t e -------------
2410   //  -- rax : the number of arguments
2411   //  -- rdx : the shared function info.
2412   //  -- rdi : the function to call (checked to be a JSFunction)
2413   //  -- rsi : the function context.
2414   // -----------------------------------
2415 
2416   __ movzxwq(
2417       rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
2418   __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
2419 }
2420 
2421 namespace {
2422 
2423 void Generate_PushBoundArguments(MacroAssembler* masm) {
2424   // ----------- S t a t e -------------
2425   //  -- rax : the number of arguments
2426   //  -- rdx : new.target (only in case of [[Construct]])
2427   //  -- rdi : target (checked to be a JSBoundFunction)
2428   // -----------------------------------
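  // In JS terms (sketch): for f = g.bind(boundThis, ...boundArgs), a call
  // f(...callArgs) must run g with boundArgs placed before callArgs, so the
  // bound arguments are spliced in between the receiver and the existing
  // arguments below.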
2429 
2430   // Load [[BoundArguments]] into rcx and length of that into rbx.
2431   Label no_bound_arguments;
2432   __ LoadTaggedPointerField(
2433       rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
2434   __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
2435   __ testl(rbx, rbx);
2436   __ j(zero, &no_bound_arguments);
2437   {
2438     // ----------- S t a t e -------------
2439     //  -- rax : the number of arguments
2440     //  -- rdx : new.target (only in case of [[Construct]])
2441     //  -- rdi : target (checked to be a JSBoundFunction)
2442     //  -- rcx : the [[BoundArguments]] (implemented as FixedArray)
2443     //  -- rbx : the number of [[BoundArguments]] (checked to be non-zero)
2444     // -----------------------------------
2445 
2446     // TODO(victor): Use Generate_StackOverflowCheck here.
2447     // Check the stack for overflow.
2448     {
2449       Label done;
2450       __ shlq(rbx, Immediate(kSystemPointerSizeLog2));
2451       __ movq(kScratchRegister, rsp);
2452       __ subq(kScratchRegister, rbx);
2453 
2454       // We are not trying to catch interruptions (i.e. debug break and
2455       // preemption) here, so check the "real stack limit".
2456       __ cmpq(kScratchRegister,
2457               __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
2458       __ j(above_equal, &done, Label::kNear);
2459       {
2460         FrameScope scope(masm, StackFrame::MANUAL);
2461         __ EnterFrame(StackFrame::INTERNAL);
2462         __ CallRuntime(Runtime::kThrowStackOverflow);
2463       }
2464       __ bind(&done);
2465     }
2466 
2467     // Save Return Address and Receiver into registers.
2468     __ Pop(r8);
2469     __ Pop(r10);
2470 
2471     // Push [[BoundArguments]] to the stack.
2472     {
2473       Label loop;
2474       __ LoadTaggedPointerField(
2475           rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
2476       __ SmiUntagField(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
2477       __ addq(rax, rbx);  // Adjust effective number of arguments.
2478       __ bind(&loop);
2479       // Instead of doing decl(rbx) here, subtract kTaggedSize from the
2480       // header offset so that decl(rbx) can be moved right before the loop
2481       // condition; this avoids flags corruption by the pointer
2482       // decompression code.
2483       __ LoadAnyTaggedField(
2484           r12, FieldOperand(rcx, rbx, times_tagged_size,
2485                             FixedArray::kHeaderSize - kTaggedSize));
2486       __ Push(r12);
2487       __ decl(rbx);
2488       __ j(greater, &loop);
2489     }
2490 
2491     // Recover Receiver and Return Address.
2492     __ Push(r10);
2493     __ Push(r8);
2494   }
2495   __ bind(&no_bound_arguments);
2496 }
2497 
2498 }  // namespace
2499 
2500 // static
2501 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2502   // ----------- S t a t e -------------
2503   //  -- rax : the number of arguments
2504   //  -- rdi : the function to call (checked to be a JSBoundFunction)
2505   // -----------------------------------
2506   __ AssertBoundFunction(rdi);
2507 
2508   // Patch the receiver to [[BoundThis]].
2509   StackArgumentsAccessor args(rax);
2510   __ LoadAnyTaggedField(rbx,
2511                         FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
2512   __ movq(args.GetReceiverOperand(), rbx);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Call the [[BoundTargetFunction]] via the Call builtin.
  __ LoadTaggedPointerField(
      rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
  // ----------- S t a t e -------------
  //  -- rax : the number of arguments
  //  -- rdi : the target to call (can be any Object)
  // -----------------------------------
  Register argc = rax;
  Register target = rdi;
  Register map = rcx;
  Register instance_type = rdx;
  DCHECK(!AreAliased(argc, target, map, instance_type));

  StackArgumentsAccessor args(argc);

  Label non_callable, class_constructor;
  __ JumpIfSmi(target, &non_callable);
  __ LoadMap(map, target);
  __ CmpInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
                          LAST_CALLABLE_JS_FUNCTION_TYPE);
  __ Jump(masm->isolate()->builtins()->CallFunction(mode),
          RelocInfo::CODE_TARGET, below_equal);

  __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
          RelocInfo::CODE_TARGET, equal);

  // Check if target has a [[Call]] internal method.
  __ testb(FieldOperand(map, Map::kBitFieldOffset),
           Immediate(Map::Bits1::IsCallableBit::kMask));
  __ j(zero, &non_callable, Label::kNear);

  // Check if target is a proxy and call the CallProxy external builtin.
  __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET,
          equal);

  // Check if target is a wrapped function and call the CallWrappedFunction
  // external builtin.
  __ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
          RelocInfo::CODE_TARGET, equal);

  // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
  // Check that the function is not a "classConstructor".
  __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
  __ j(equal, &class_constructor);

  // 2. Call to something else, which might have a [[Call]] internal method (if
  // not we raise an exception).

  // Overwrite the original receiver with the (original) target.
  __ movq(args.GetReceiverOperand(), target);
  // Let the "call_as_function_delegate" take care of the rest.
  __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
  __ Jump(masm->isolate()->builtins()->CallFunction(
              ConvertReceiverMode::kNotNullOrUndefined),
          RelocInfo::CODE_TARGET);
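
  // This path is taken, for example, by callable API objects (objects with an
  // instance call handler): the delegate invokes the object's [[Call]]
  // behavior, with the object itself patched into the receiver slot above.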

  // 3. Call to something that is not callable.
  __ bind(&non_callable);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(target);
    __ CallRuntime(Runtime::kThrowCalledNonCallable);
    __ Trap();  // Unreachable.
  }

  // 4. The function is a "classConstructor", need to raise an exception.
  __ bind(&class_constructor);
  {
    FrameScope frame(masm, StackFrame::INTERNAL);
    __ Push(target);
    __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
    __ Trap();  // Unreachable.
  }
}

// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax : the number of arguments
  //  -- rdx : the new target (checked to be a constructor)
  //  -- rdi : the constructor to call (checked to be a JSFunction)
  // -----------------------------------
  __ AssertConstructor(rdi);
  __ AssertFunction(rdi);

  // The calling convention for function-specific ConstructStubs requires rbx
  // to contain either an AllocationSite or undefined.
  __ LoadRoot(rbx, RootIndex::kUndefinedValue);

  // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
  __ LoadTaggedPointerField(
      rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ testl(FieldOperand(rcx, SharedFunctionInfo::kFlagsOffset),
           Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
  __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
          RelocInfo::CODE_TARGET, not_zero);

  __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
          RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax : the number of arguments
  //  -- rdx : the new target (checked to be a constructor)
  //  -- rdi : the constructor to call (checked to be a JSBoundFunction)
  // -----------------------------------
  __ AssertConstructor(rdi);
  __ AssertBoundFunction(rdi);

  // Push the [[BoundArguments]] onto the stack.
  Generate_PushBoundArguments(masm);

  // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
  {
    Label done;
    __ cmpq(rdi, rdx);
    __ j(not_equal, &done, Label::kNear);
    __ LoadTaggedPointerField(
        rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
    __ bind(&done);
  }
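
  // Illustrative example (a sketch): for const B = f.bind(null), evaluating
  // new B() enters here with new.target === B; the patch above replaces it
  // with f, as required for [[Construct]] on bound functions.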

  // Construct the [[BoundTargetFunction]] via the Construct builtin.
  __ LoadTaggedPointerField(
      rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
  __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
}

// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax : the number of arguments
  //  -- rdx : the new target (either the same as the constructor or
  //           the JSFunction on which new was invoked initially)
  //  -- rdi : the constructor to call (can be any Object)
  // -----------------------------------
  Register argc = rax;
  Register target = rdi;
  Register map = rcx;
  Register instance_type = r8;
  DCHECK(!AreAliased(argc, target, map, instance_type));

  StackArgumentsAccessor args(argc);

  // Check if target is a Smi.
  Label non_constructor;
  __ JumpIfSmi(target, &non_constructor);

  // Check if target has a [[Construct]] internal method.
  __ LoadMap(map, target);
  __ testb(FieldOperand(map, Map::kBitFieldOffset),
           Immediate(Map::Bits1::IsConstructorBit::kMask));
  __ j(zero, &non_constructor);

  // Dispatch based on instance type.
  __ CmpInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
                          LAST_JS_FUNCTION_TYPE);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
          RelocInfo::CODE_TARGET, below_equal);

  // Only dispatch to bound functions after checking whether they are
  // constructors.
  __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
          RelocInfo::CODE_TARGET, equal);

  // Only dispatch to proxies after checking whether they are constructors.
  __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy), RelocInfo::CODE_TARGET,
          equal);

  // Called Construct on an exotic Object with a [[Construct]] internal method.
  {
    // Overwrite the original receiver with the (original) target.
    __ movq(args.GetReceiverOperand(), target);
    // Let the "call_as_constructor_delegate" take care of the rest.
    __ LoadNativeContextSlot(target,
                             Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
    __ Jump(masm->isolate()->builtins()->CallFunction(),
            RelocInfo::CODE_TARGET);
  }

  // Called Construct on an Object that doesn't have a [[Construct]] internal
  // method.
  __ bind(&non_constructor);
  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
          RelocInfo::CODE_TARGET);
}

namespace {

void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
  // Overwrite the return address on the stack.
  __ movq(StackOperandForReturnAddress(0), entry_address);

  // And "return" to the OSR entry point of the function.
  __ ret(0);
}

enum class OsrSourceTier {
  kInterpreter,
  kBaseline,
};

void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileOptimizedOSR);
  }

  Label jump_to_returned_code;
  // If the code object is null, just return to the caller.
  __ testq(rax, rax);
  __ j(not_equal, &jump_to_returned_code, Label::kNear);
  __ ret(0);

  __ bind(&jump_to_returned_code);

  if (source == OsrSourceTier::kInterpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ leave();
  }

  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    __ LoadCodeDataContainerCodeNonBuiltin(rax, rax);
  }

  // Load deoptimization data from the code object.
  __ LoadTaggedPointerField(
      rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));

  // Load the OSR entrypoint offset from the deoptimization data.
  __ SmiUntagField(
      rbx, FieldOperand(rbx, FixedArray::OffsetOfElementAt(
                                 DeoptimizationData::kOsrPcOffsetIndex)));

  // Compute the target address = code_obj + header_size + osr_offset
  __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
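  // FieldOperand compensates for the heap-object tag, so rax now holds the
  // untagged address of the first instruction at the OSR entry point.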

  Generate_OSREntry(masm, rax);
}

}  // namespace

void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
  OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}

void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  __ movq(kContextRegister,
          MemOperand(rbp, BaselineFrameConstants::kContextOffset));
  OnStackReplacement(masm, OsrSourceTier::kBaseline);
}

#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
  // The function index was pushed to the stack by the caller as int32.
  __ Pop(r15);
  // Convert to Smi for the runtime call.
  __ SmiTag(r15);

  {
    HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
    FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);

    // Save all parameter registers (see wasm-linkage.h). They might be
    // overwritten in the runtime call below. We don't have any callee-saved
    // registers in wasm, so no need to store anything else.
    static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
                      arraysize(wasm::kGpParamRegisters),
                  "frame size mismatch");
    for (Register reg : wasm::kGpParamRegisters) {
      __ Push(reg);
    }
    static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
                      arraysize(wasm::kFpParamRegisters),
                  "frame size mismatch");
    __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
    int offset = 0;
    for (DoubleRegister reg : wasm::kFpParamRegisters) {
      __ movdqu(Operand(rsp, offset), reg);
      offset += kSimd128Size;
    }

    // Push the Wasm instance for loading the jump table address after the
    // runtime call.
    __ Push(kWasmInstanceRegister);

    // Push the Wasm instance again as an explicit argument to the runtime
    // function.
    __ Push(kWasmInstanceRegister);
    // Push the function index as second argument.
    __ Push(r15);
    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Move(kContextRegister, Smi::zero());
    __ CallRuntime(Runtime::kWasmCompileLazy, 2);
    // The runtime function returns the jump table slot offset as a Smi. Use
    // that to compute the jump target in r15.
    __ Pop(kWasmInstanceRegister);
    __ movq(r15, MemOperand(kWasmInstanceRegister,
                            wasm::ObjectAccess::ToTagged(
                                WasmInstanceObject::kJumpTableStartOffset)));
    __ SmiUntag(kReturnRegister0);
    __ addq(r15, kReturnRegister0);
    // r15 now holds the jump table slot where we want to jump to in the end.

    // Restore registers.
    for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
      offset -= kSimd128Size;
      __ movdqu(reg, Operand(rsp, offset));
    }
    DCHECK_EQ(0, offset);
    __ addq(rsp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
    for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
      __ Pop(reg);
    }
  }

  // Finally, jump to the jump table slot for the function.
  __ jmp(r15);
}

void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
  HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
  {
    FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);

    // Save all parameter registers. They might hold live values; we restore
    // them after the runtime call.
    for (Register reg :
         base::Reversed(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
      __ Push(reg);
    }

    constexpr int kFpStackSize =
        kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
    __ AllocateStackSpace(kFpStackSize);
    int offset = kFpStackSize;
    for (DoubleRegister reg :
         base::Reversed(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
      offset -= kSimd128Size;
      __ movdqu(Operand(rsp, offset), reg);
    }

    // Initialize the JavaScript context with 0. CEntry will use it to
    // set the current context on the isolate.
    __ Move(kContextRegister, Smi::zero());
    __ CallRuntime(Runtime::kWasmDebugBreak, 0);

    // Restore registers.
    for (DoubleRegister reg : WasmDebugBreakFrameConstants::kPushedFpRegs) {
      __ movdqu(reg, Operand(rsp, offset));
      offset += kSimd128Size;
    }
    __ addq(rsp, Immediate(kFpStackSize));
    for (Register reg : WasmDebugBreakFrameConstants::kPushedGpRegs) {
      __ Pop(reg);
    }
  }

  __ ret(0);
}

namespace {
// Helper functions for the GenericJSToWasmWrapper.
void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
                           const int GCScanSlotCount, Register current_param,
                           Register param_limit,
                           Register current_int_param_slot,
                           Register current_float_param_slot,
                           Register valuetypes_array_ptr,
                           Register wasm_instance, Register function_data) {
  // Pushes the given values, in order, onto the stack before a builtin call
  // for the GenericJSToWasmWrapper.
  __ Move(GCScanSlotPlace, GCScanSlotCount);
  __ pushq(current_param);
  __ pushq(param_limit);
  __ pushq(current_int_param_slot);
  __ pushq(current_float_param_slot);
  __ pushq(valuetypes_array_ptr);
  __ pushq(wasm_instance);
  __ pushq(function_data);
  // Also prepare for the builtin call itself: the callee expects the context
  // in rsi, so load the instance's native context there.
  __ LoadAnyTaggedField(
      rsi,
      MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                    WasmInstanceObject::kNativeContextOffset)));
}

void RestoreAfterBuiltinCall(MacroAssembler* masm, Register function_data,
                             Register wasm_instance,
                             Register valuetypes_array_ptr,
                             Register current_float_param_slot,
                             Register current_int_param_slot,
                             Register param_limit, Register current_param) {
  // Pop and load values from the stack in order into the registers after
  // builtin calls for the GenericJSToWasmWrapper.
  __ popq(function_data);
  __ popq(wasm_instance);
  __ popq(valuetypes_array_ptr);
  __ popq(current_float_param_slot);
  __ popq(current_int_param_slot);
  __ popq(param_limit);
  __ popq(current_param);
}
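
// Note that the pops in RestoreAfterBuiltinCall mirror the pushes in
// PrepareForBuiltinCall in reverse order; the two helpers must be kept in
// sync.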

void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc) {
  __ movq(MemOperand(jmpbuf, wasm::kJmpBufSpOffset), rsp);
  __ movq(MemOperand(jmpbuf, wasm::kJmpBufFpOffset), rbp);
  __ movq(kScratchRegister,
          __ StackLimitAsOperand(StackLimitKind::kRealStackLimit));
  __ movq(MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset), kScratchRegister);
  __ leaq(kScratchRegister, MemOperand(pc, 0));
  __ movq(MemOperand(jmpbuf, wasm::kJmpBufPcOffset), kScratchRegister);
}
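
// Conceptually, a jump buffer is a small block of saved machine state (a
// sketch only; the actual layout is given by the wasm::kJmpBuf*Offset
// constants):
//   struct JumpBuffer { Address sp, fp, stack_limit, pc; };
// FillJumpBuffer captures the current state into it, and LoadJumpBuffer
// (below) restores state from it.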

void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc) {
  __ movq(rsp, MemOperand(jmpbuf, wasm::kJmpBufSpOffset));
  __ movq(rbp, MemOperand(jmpbuf, wasm::kJmpBufFpOffset));
  if (load_pc) {
    __ jmp(MemOperand(jmpbuf, wasm::kJmpBufPcOffset));
  }
  // The stack limit is set separately under the ExecutionAccess lock.
}

void SaveState(MacroAssembler* masm, Register active_continuation, Register tmp,
               Label* suspend) {
  Register foreign_jmpbuf = tmp;
  __ LoadAnyTaggedField(
      foreign_jmpbuf,
      FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
  Register jmpbuf = foreign_jmpbuf;
  __ LoadExternalPointerField(
      jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, kScratchRegister);
  FillJumpBuffer(masm, jmpbuf, suspend);
}

// Returns the new continuation in rax.
void AllocateContinuation(MacroAssembler* masm, Register function_data,
                          Register wasm_instance) {
  Register suspender = kScratchRegister;
  __ LoadAnyTaggedField(
      suspender,
      FieldOperand(function_data, WasmExportedFunctionData::kSuspenderOffset));
  MemOperand GCScanSlotPlace =
      MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
  __ Move(GCScanSlotPlace, 3);
  __ Push(wasm_instance);
  __ Push(function_data);
  __ Push(suspender);  // Argument.
  __ Move(kContextRegister, Smi::zero());
  __ CallRuntime(Runtime::kWasmAllocateContinuation);
  __ Pop(function_data);
  __ Pop(wasm_instance);
  STATIC_ASSERT(kReturnRegister0 == rax);
  suspender = no_reg;
}

void LoadTargetJumpBuffer(MacroAssembler* masm, Register target_continuation) {
  Register foreign_jmpbuf = target_continuation;
  __ LoadAnyTaggedField(
      foreign_jmpbuf,
      FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset));
  Register target_jmpbuf = foreign_jmpbuf;
  __ LoadExternalPointerField(
      target_jmpbuf,
      FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, kScratchRegister);
  MemOperand GCScanSlotPlace =
      MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
  __ Move(GCScanSlotPlace, 0);
  // Switch stack!
  LoadJumpBuffer(masm, target_jmpbuf, false);
}

void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                              Register return_reg, Register tmp1,
                              Register tmp2) {
  Register active_continuation = tmp1;
  __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);

  // Set a null pointer in the jump buffer's SP slot to indicate to the stack
  // frame iterator that this stack is empty.
  Register foreign_jmpbuf = kScratchRegister;
  __ LoadAnyTaggedField(
      foreign_jmpbuf,
      FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
  Register jmpbuf = foreign_jmpbuf;
  __ LoadExternalPointerField(
      jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, tmp2);
  __ movq(Operand(jmpbuf, wasm::kJmpBufSpOffset), Immediate(kNullAddress));

  Register parent = tmp2;
  __ LoadAnyTaggedField(
      parent,
      FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));

  // Update active continuation root.
  __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), parent);
  foreign_jmpbuf = tmp1;
  __ LoadAnyTaggedField(
      foreign_jmpbuf,
      FieldOperand(parent, WasmContinuationObject::kJmpbufOffset));
  jmpbuf = foreign_jmpbuf;
  __ LoadExternalPointerField(
      jmpbuf, FieldOperand(foreign_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, tmp2);

  // Switch stack!
  LoadJumpBuffer(masm, jmpbuf, false);
  MemOperand GCScanSlotPlace =
      MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
  __ Move(GCScanSlotPlace, 1);
  __ Push(return_reg);
  __ Push(wasm_instance);  // Spill.
  __ Move(kContextRegister, Smi::zero());
  __ CallRuntime(Runtime::kWasmSyncStackLimit);
  __ Pop(wasm_instance);
  __ Pop(return_reg);
}

void RestoreParentSuspender(MacroAssembler* masm) {
  Register suspender = kScratchRegister;
  __ LoadRoot(suspender, RootIndex::kActiveSuspender);
  __ LoadAnyTaggedField(
      suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
  __ CompareRoot(suspender, RootIndex::kUndefinedValue);
  Label undefined;
  __ j(equal, &undefined, Label::kNear);
#ifdef DEBUG
  // Check that the parent suspender is inactive.
  Label parent_inactive;
  Register state = rbx;
  __ LoadTaggedSignedField(
      state, FieldOperand(suspender, WasmSuspenderObject::kStateOffset));
  __ SmiCompare(state, Smi::FromInt(WasmSuspenderObject::Inactive));
  __ j(equal, &parent_inactive, Label::kNear);
  __ Trap();
  __ bind(&parent_inactive);
#endif
  __ StoreTaggedSignedField(
      FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
      Smi::FromInt(WasmSuspenderObject::State::Active));
  __ bind(&undefined);
  __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
}

void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
                                     Register function_data,
                                     Register wasm_instance) {
  Register closure = function_data;
  Register shared_function_info = closure;
  __ LoadAnyTaggedField(
      shared_function_info,
      MemOperand(
          closure,
          wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
  closure = no_reg;
  __ LoadAnyTaggedField(
      function_data,
      MemOperand(shared_function_info,
                 SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
  shared_function_info = no_reg;

  __ LoadAnyTaggedField(
      wasm_instance,
      MemOperand(function_data,
                 WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
}
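
// The loads above walk the chain
//   JSFunction -> SharedFunctionInfo -> WasmExportedFunctionData
//     -> WasmInstanceObject,
// reusing the same register for each intermediate value.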

void LoadValueTypesArray(MacroAssembler* masm, Register function_data,
                         Register valuetypes_array_ptr, Register return_count,
                         Register param_count) {
  Register foreign_signature = valuetypes_array_ptr;
  __ LoadAnyTaggedField(
      foreign_signature,
      MemOperand(function_data,
                 WasmExportedFunctionData::kSignatureOffset - kHeapObjectTag));
  Register signature = foreign_signature;
  __ LoadExternalPointerField(
      signature,
      FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, kScratchRegister);
  foreign_signature = no_reg;
  __ movq(return_count,
          MemOperand(signature, wasm::FunctionSig::kReturnCountOffset));
  __ movq(param_count,
          MemOperand(signature, wasm::FunctionSig::kParameterCountOffset));
  valuetypes_array_ptr = signature;
  __ movq(valuetypes_array_ptr,
          MemOperand(signature, wasm::FunctionSig::kRepsOffset));
}
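
// The raw signature read above corresponds to (a sketch of) wasm::FunctionSig:
//   struct FunctionSig { size_t return_count, parameter_count;
//                        const ValueType* reps; };
// where reps lists the return types first, followed by the parameter types.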

void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
  // Set up the stackframe.
  __ EnterFrame(stack_switch ? StackFrame::STACK_SWITCH
                             : StackFrame::JS_TO_WASM);

  // -------------------------------------------
  // Compute offsets and prepare for GC.
  // -------------------------------------------
  constexpr int kGCScanSlotCountOffset =
      BuiltinWasmWrapperConstants::kGCScanSlotCountOffset;
  // The number of parameters passed to this function.
  constexpr int kInParamCountOffset =
      BuiltinWasmWrapperConstants::kInParamCountOffset;
  // The number of parameters according to the signature.
  constexpr int kParamCountOffset =
      BuiltinWasmWrapperConstants::kParamCountOffset;
  constexpr int kReturnCountOffset = kParamCountOffset - kSystemPointerSize;
  constexpr int kValueTypesArrayStartOffset =
      kReturnCountOffset - kSystemPointerSize;
  // A boolean flag to check if one of the parameters is a reference. If so, we
  // iterate over the parameters two times, first for all value types, and then
  // for all references.
  constexpr int kHasRefTypesOffset =
      kValueTypesArrayStartOffset - kSystemPointerSize;
  // We set and use this slot only when moving parameters into the parameter
  // registers (so no GC scan is needed).
  constexpr int kFunctionDataOffset = kHasRefTypesOffset - kSystemPointerSize;
  constexpr int kLastSpillOffset = kFunctionDataOffset;
  constexpr int kNumSpillSlots = 7;
  __ subq(rsp, Immediate(kNumSpillSlots * kSystemPointerSize));
  // Put the in_parameter count on the stack; we only need it at the very end,
  // when we pop the parameters off the stack.
  Register in_param_count = rax;
  __ decq(in_param_count);  // Exclude receiver.
  __ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
  in_param_count = no_reg;

  Register function_data = rdi;
  Register wasm_instance = rsi;
  LoadFunctionDataAndWasmInstance(masm, function_data, wasm_instance);

  Label compile_wrapper, compile_wrapper_done;
  if (!stack_switch) {
    // -------------------------------------------
    // Decrement the budget of the generic wrapper in function data.
    // -------------------------------------------
    __ SmiAddConstant(
        MemOperand(
            function_data,
            WasmExportedFunctionData::kWrapperBudgetOffset - kHeapObjectTag),
        Smi::FromInt(-1));

    // -------------------------------------------
    // Check if the budget of the generic wrapper reached 0 (zero).
    // -------------------------------------------
    // Instead of a specific comparison, we can directly use the flags set
    // from the previous addition.
    __ j(less_equal, &compile_wrapper);
    __ bind(&compile_wrapper_done);
  }

  Label suspend;
  if (stack_switch) {
    Register active_continuation = rbx;
    __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
    SaveState(masm, active_continuation, rcx, &suspend);
    AllocateContinuation(masm, function_data, wasm_instance);
    Register target_continuation = rax; /* fixed */
    // Save the old stack's rbp in r9, and use it to access the parameters in
    // the parent frame.
    // We also distribute the spill slots across the two stacks as needed by
    // creating a "shadow frame":
    //
    //      old stack:                    new stack:
    //      +-----------------+
    //      | <parent frame>  |
    // r9-> +-----------------+           +-----------------+
    //      | <fixed>         |           | 0 (jmpbuf rbp)  |
    //      +-----------------+     rbp-> +-----------------+
    //      |kGCScanSlotCount |           |kGCScanSlotCount |
    //      +-----------------+           +-----------------+
    //      | kParamCount     |           |      /          |
    //      +-----------------+           +-----------------+
    //      | kInParamCount   |           |      /          |
    //      +-----------------+           +-----------------+
    //      |      /          |           | kReturnCount    |
    //      +-----------------+           +-----------------+
    //      |      /          |           |kValueTypesArray |
    //      +-----------------+           +-----------------+
    //      |      /          |           | kHasRefTypes    |
    //      +-----------------+           +-----------------+
    //      |      /          |           | kFunctionData   |
    //      +-----------------+    rsp->  +-----------------+
    //          seal stack                         |
    //                                             V
    //
    // - When we first enter the prompt, we have access to both frames, so it
    // does not matter where the values are spilled.
    // - When we suspend for the first time, we longjmp to the original frame
    // (left).  So the frame needs to contain the necessary information to
    // properly deconstruct itself (actual param count and signature param
    // count).
    // - When we suspend for the second time, we longjmp to the frame that was
    // set up by the WasmResume builtin, which has the same layout as the
    // original frame (left).
    // - When the closure finally resolves, we use the value types pointer
    // stored in the shadow frame to get the return type and convert the return
    // value accordingly.
    __ movq(r9, rbp);
    LoadTargetJumpBuffer(masm, target_continuation);
    // Push the loaded rbp. We know it is null, because there is no frame yet,
    // so we could also push 0 directly. In any case we need to push it, because
    // this marks the base of the stack segment for the stack frame iterator.
    __ pushq(rbp);
    __ movq(rbp, rsp);
    __ addq(rsp, Immediate(kLastSpillOffset));
  }
  Register original_fp = stack_switch ? r9 : rbp;

  // -------------------------------------------
  // Load values from the signature.
  // -------------------------------------------
  Register valuetypes_array_ptr = r11;
  Register return_count = r8;
  Register param_count = rcx;
  LoadValueTypesArray(masm, function_data, valuetypes_array_ptr, return_count,
                      param_count);

  // Initialize the {HasRefTypes} slot.
  __ movq(MemOperand(rbp, kHasRefTypesOffset), Immediate(0));

  // -------------------------------------------
  // Store signature-related values to the stack.
  // -------------------------------------------
  // We store values on the stack to restore them after function calls.
  // We cannot push values onto the stack right before the wasm call. The wasm
  // function expects the parameters that didn't fit into the registers on the
  // top of the stack.
  __ movq(MemOperand(original_fp, kParamCountOffset), param_count);
  __ movq(MemOperand(rbp, kReturnCountOffset), return_count);
  __ movq(MemOperand(rbp, kValueTypesArrayStartOffset), valuetypes_array_ptr);

  // -------------------------------------------
  // Parameter handling.
  // -------------------------------------------
  Label prepare_for_wasm_call;
  __ Cmp(param_count, 0);

  // If we have 0 params, jump over the parameter handling.
  __ j(equal, &prepare_for_wasm_call);

  // -------------------------------------------
  // Create 2 sections for integer and float params.
  // -------------------------------------------
  // We will create 2 sections on the stack for the evaluated parameters:
  // Integer and Float section, both with parameter count size. We will place
  // the parameters into these sections depending on their valuetype. This way
  // we can easily fill the general purpose and floating point parameter
  // registers and place the remaining parameters onto the stack in proper order
  // for the Wasm function. These remaining params are the final stack
  // parameters for the call to WebAssembly. Example of the stack layout after
  // processing 2 int and 1 float parameters when param_count is 4.
  //   +-----------------+
  //   |      rbp        |
  //   |-----------------|-------------------------------
  //   |                 |   Slots we defined
  //   |   Saved values  |    when setting up
  //   |                 |     the stack
  //   |                 |
  //   +-Integer section-+--- <--- start_int_section ----
  //   |  1st int param  |
  //   |- - - - - - - - -|
  //   |  2nd int param  |
  //   |- - - - - - - - -|  <----- current_int_param_slot
  //   |                 |       (points to the stackslot
  //   |- - - - - - - - -|  where the next int param should be placed)
  //   |                 |
  //   +--Float section--+--- <--- start_float_section --
  //   | 1st float param |
  //   |- - - - - - - - -|  <----  current_float_param_slot
  //   |                 |       (points to the stackslot
  //   |- - - - - - - - -|  where the next float param should be placed)
  //   |                 |
  //   |- - - - - - - - -|
  //   |                 |
  //   +---Final stack---+------------------------------
  //   +-parameters for--+------------------------------
  //   +-the Wasm call---+------------------------------
  //   |      . . .      |

  constexpr int kIntegerSectionStartOffset =
      kLastSpillOffset - kSystemPointerSize;
  // For Integer section.
  // Set the current_int_param_slot to point to the start of the section.
  Register current_int_param_slot = r10;
  __ leaq(current_int_param_slot, MemOperand(rsp, -kSystemPointerSize));
  Register params_size = param_count;
  param_count = no_reg;
  __ shlq(params_size, Immediate(kSystemPointerSizeLog2));
  __ subq(rsp, params_size);

  // For Float section.
  // Set the current_float_param_slot to point to the start of the section.
  Register current_float_param_slot = r15;
  __ leaq(current_float_param_slot, MemOperand(rsp, -kSystemPointerSize));
  __ subq(rsp, params_size);
  params_size = no_reg;
  param_count = rcx;
  __ movq(param_count, MemOperand(original_fp, kParamCountOffset));

  // -------------------------------------------
  // Set up for the param evaluation loop.
  // -------------------------------------------
  // We will loop through the params starting with the 1st param.
  // The order of processing the params is important. We have to evaluate them
  // in an increasing order.
  //       +-----------------+---------------
  //       |     param n     |
  //       |- - - - - - - - -|
  //       |    param n-1    |   Caller
  //       |       ...       | frame slots
  //       |     param 1     |
  //       |- - - - - - - - -|
  //       |    receiver     |
  //       +-----------------+---------------
  //       |  return addr    |
  //   FP->|- - - - - - - - -|
  //       |      rbp        |   Spill slots
  //       |- - - - - - - - -|
  //
  // [rbp + current_param] gives us the parameter we are processing.
  // We iterate through half-open interval <1st param, [rbp + param_limit]).

  Register current_param = rbx;
  Register param_limit = rdx;
  constexpr int kReceiverOnStackSize = kSystemPointerSize;
  __ Move(current_param,
          kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize);
  __ movq(param_limit, param_count);
  __ shlq(param_limit, Immediate(kSystemPointerSizeLog2));
  __ addq(param_limit,
          Immediate(kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize));
  const int increment = kSystemPointerSize;
  Register param = rax;
  // We have to check the types of the params. The ValueType array contains
  // first the return types and then the param types.
  constexpr int kValueTypeSize = sizeof(wasm::ValueType);
  STATIC_ASSERT(kValueTypeSize == 4);
  const int32_t kValueTypeSizeLog2 = log2(kValueTypeSize);
  // Set the ValueType array pointer to point to the first parameter.
  Register returns_size = return_count;
  return_count = no_reg;
  __ shlq(returns_size, Immediate(kValueTypeSizeLog2));
  __ addq(valuetypes_array_ptr, returns_size);
  returns_size = no_reg;
  Register valuetype = r12;

  // -------------------------------------------
  // Param evaluation loop.
  // -------------------------------------------
  Label loop_through_params;
  __ bind(&loop_through_params);

  __ movq(param, MemOperand(original_fp, current_param, times_1, 0));
  __ movl(valuetype,
          Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));

  // -------------------------------------------
  // Param conversion.
  // -------------------------------------------
  // If param is a Smi we can easily convert it. Otherwise we'll call a builtin
  // for conversion.
  Label convert_param;
  __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
  __ j(not_equal, &convert_param);
  __ JumpIfNotSmi(param, &convert_param);
  // Change the param from Smi to int32.
  __ SmiUntag(param);
  // Zero extend.
  __ movl(param, param);
  // Place the param into the proper slot in Integer section.
  __ movq(MemOperand(current_int_param_slot, 0), param);
  __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
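  // For example, the Smi encoding of 5 untags back to the raw integer 5, and
  // the zero-extending movl above clears the upper 32 bits, matching the wasm
  // i32 representation expected here.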

  // -------------------------------------------
  // Param conversion done.
  // -------------------------------------------
  Label param_conversion_done;
  __ bind(&param_conversion_done);

  __ addq(current_param, Immediate(increment));
  __ addq(valuetypes_array_ptr, Immediate(kValueTypeSize));

  __ cmpq(current_param, param_limit);
  __ j(not_equal, &loop_through_params);

  // -------------------------------------------
  // Second loop to handle references.
  // -------------------------------------------
  // In this loop we iterate over all parameters for a second time and copy all
  // reference parameters to the end of the integer parameter section.
  Label ref_params_done;
  // We check if we have seen a reference in the first parameter loop.
  Register ref_param_count = param_count;
  __ movq(ref_param_count, Immediate(0));
  __ cmpq(MemOperand(rbp, kHasRefTypesOffset), Immediate(0));
  __ j(equal, &ref_params_done);
  // We re-calculate the beginning of the value-types array and the beginning of
  // the parameters ({valuetypes_array_ptr} and {current_param}).
  __ movq(valuetypes_array_ptr, MemOperand(rbp, kValueTypesArrayStartOffset));
  return_count = current_param;
  current_param = no_reg;
  __ movq(return_count, MemOperand(rbp, kReturnCountOffset));
  returns_size = return_count;
  return_count = no_reg;
  __ shlq(returns_size, Immediate(kValueTypeSizeLog2));
  __ addq(valuetypes_array_ptr, returns_size);

  current_param = returns_size;
  returns_size = no_reg;
  __ Move(current_param,
          kFPOnStackSize + kPCOnStackSize + kReceiverOnStackSize);

  Label ref_loop_through_params;
  Label ref_loop_end;
  // Start of the loop.
  __ bind(&ref_loop_through_params);

  // Load the current parameter with type.
  __ movq(param, MemOperand(original_fp, current_param, times_1, 0));
  __ movl(valuetype,
          Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
  // Extract the ValueKind of the type, to check for kRef and kOptRef.
  __ andl(valuetype, Immediate(wasm::kWasmValueKindBitsMask));
  Label move_ref_to_slot;
  __ cmpq(valuetype, Immediate(wasm::ValueKind::kOptRef));
  __ j(equal, &move_ref_to_slot);
  __ cmpq(valuetype, Immediate(wasm::ValueKind::kRef));
  __ j(equal, &move_ref_to_slot);
  __ jmp(&ref_loop_end);

  // Place the param into the proper slot in Integer section.
  __ bind(&move_ref_to_slot);
  __ addq(ref_param_count, Immediate(1));
  __ movq(MemOperand(current_int_param_slot, 0), param);
  __ subq(current_int_param_slot, Immediate(kSystemPointerSize));

  // Move to the next parameter.
  __ bind(&ref_loop_end);
  __ addq(current_param, Immediate(increment));
  __ addq(valuetypes_array_ptr, Immediate(kValueTypeSize));

  // Check if we finished all parameters.
  __ cmpq(current_param, param_limit);
  __ j(not_equal, &ref_loop_through_params);

  __ bind(&ref_params_done);
  __ movq(valuetype, ref_param_count);
  ref_param_count = valuetype;
  valuetype = no_reg;
  // -------------------------------------------
  // Move the parameters into the proper param registers.
  // -------------------------------------------
  // The Wasm function expects that the params can be popped from the top of the
  // stack in an increasing order.
  // We can always move the values at the beginning of the sections into the GP
  // or FP parameter registers. If the parameter count is less than the number
  // of parameter registers, we may move values into the registers that are not
  // in the section.
  // ----------- S t a t e -------------
  //  -- r8  : start_int_section
  //  -- rdi : start_float_section
  //  -- r10 : current_int_param_slot
  //  -- r15 : current_float_param_slot
  //  -- r11 : valuetypes_array_ptr
  //  -- r12 : valuetype
  //  -- rsi : wasm_instance
  //  -- GpParamRegisters = rax, rdx, rcx, rbx, r9
  // -----------------------------------

  Register temp_params_size = rax;
  __ movq(temp_params_size, MemOperand(original_fp, kParamCountOffset));
  __ shlq(temp_params_size, Immediate(kSystemPointerSizeLog2));
  // We want to use the register of the function_data = rdi.
  __ movq(MemOperand(rbp, kFunctionDataOffset), function_data);
  Register start_float_section = function_data;
  function_data = no_reg;
  __ movq(start_float_section, rbp);
  __ addq(start_float_section, Immediate(kIntegerSectionStartOffset));
  __ subq(start_float_section, temp_params_size);
  temp_params_size = no_reg;
  // Fill the FP param registers.
  __ Movsd(xmm1, MemOperand(start_float_section, 0));
  __ Movsd(xmm2, MemOperand(start_float_section, -kSystemPointerSize));
  __ Movsd(xmm3, MemOperand(start_float_section, -2 * kSystemPointerSize));
  __ Movsd(xmm4, MemOperand(start_float_section, -3 * kSystemPointerSize));
  __ Movsd(xmm5, MemOperand(start_float_section, -4 * kSystemPointerSize));
  __ Movsd(xmm6, MemOperand(start_float_section, -5 * kSystemPointerSize));
  // We want the start to point to the last properly placed param.
  __ subq(start_float_section, Immediate(5 * kSystemPointerSize));

  Register start_int_section = r8;
  __ movq(start_int_section, rbp);
  __ addq(start_int_section, Immediate(kIntegerSectionStartOffset));
  // Fill the GP param registers.
  __ movq(rax, MemOperand(start_int_section, 0));
  __ movq(rdx, MemOperand(start_int_section, -kSystemPointerSize));
  __ movq(rcx, MemOperand(start_int_section, -2 * kSystemPointerSize));
  __ movq(rbx, MemOperand(start_int_section, -3 * kSystemPointerSize));
  __ movq(r9, MemOperand(start_int_section, -4 * kSystemPointerSize));
  // We want the start to point to the last properly placed param.
  __ subq(start_int_section, Immediate(4 * kSystemPointerSize));

  // -------------------------------------------
  // Place the final stack parameters to the proper place.
  // -------------------------------------------
  // We want the current_param_slot (insertion) pointers to point at the last
  // param of the section instead of the next free slot.
  __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
  __ addq(current_float_param_slot, Immediate(kSystemPointerSize));

  // -------------------------------------------
  // Final stack parameters loop.
  // -------------------------------------------
  // The parameters that didn't fit into the registers should be placed on the
  // top of the stack contiguously. The interval of parameters between the
  // start_section and the current_param_slot pointers define the remaining
  // parameters of the section.
  // We can iterate through the valuetypes array to decide from which section we
  // need to push the parameter onto the top of the stack. By iterating in a
  // reversed order we can easily pick the last parameter of the proper section.
  // The parameter of the section is pushed on the top of the stack only if the
  // interval of remaining params is not empty. This way we ensure that only
  // params that didn't fit into param registers are pushed again.

  Label loop_through_valuetypes;
  Label loop_place_ref_params;
  __ bind(&loop_place_ref_params);
  __ testq(ref_param_count, ref_param_count);
  __ j(zero, &loop_through_valuetypes);

  __ cmpq(start_int_section, current_int_param_slot);
  // if no int or ref param remains, directly iterate valuetypes
  __ j(less_equal, &loop_through_valuetypes);

  __ pushq(MemOperand(current_int_param_slot, 0));
  __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
  __ subq(ref_param_count, Immediate(1));
  __ jmp(&loop_place_ref_params);

  valuetype = ref_param_count;
  ref_param_count = no_reg;
  __ bind(&loop_through_valuetypes);

  // We iterate through the valuetypes array in reverse; the pointer starts one
  // field past the end, so we decrement it at the beginning of each iteration.
  __ subq(valuetypes_array_ptr, Immediate(kValueTypeSize));

  // Check if there are still remaining integer params.
  Label continue_loop;
  __ cmpq(start_int_section, current_int_param_slot);
  // If there are remaining integer params.
  __ j(greater, &continue_loop);

  // Check if there are still remaining float params.
  __ cmpq(start_float_section, current_float_param_slot);
  // If there aren't any params remaining.
  Label params_done;
  __ j(less_equal, &params_done);

  __ bind(&continue_loop);
  __ movl(valuetype,
          Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));
  Label place_integer_param;
  Label place_float_param;
  __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
  __ j(equal, &place_integer_param);

  __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
  __ j(equal, &place_integer_param);

  __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
  __ j(equal, &place_float_param);

  __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
  __ j(equal, &place_float_param);

  // ref params have already been pushed, so go through directly
  __ jmp(&loop_through_valuetypes);

  // All other types are reference types. We can just fall through to place them
  // in the integer section.

  __ bind(&place_integer_param);
  __ cmpq(start_int_section, current_int_param_slot);
  // If there aren't any integer params remaining, just floats, then go to the
  // next valuetype.
  __ j(less_equal, &loop_through_valuetypes);

  // Copy the param from the integer section to the actual parameter area.
  __ pushq(MemOperand(current_int_param_slot, 0));
  __ addq(current_int_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&loop_through_valuetypes);

  __ bind(&place_float_param);
  __ cmpq(start_float_section, current_float_param_slot);
  // If there aren't any float params remaining, just integers, then go to the
  // next valuetype.
  __ j(less_equal, &loop_through_valuetypes);

  // Copy the param from the float section to the actual parameter area.
  __ pushq(MemOperand(current_float_param_slot, 0));
  __ addq(current_float_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&loop_through_valuetypes);

  __ bind(&params_done);
  // Restore function_data after we are done with parameter placement.
  function_data = rdi;
  __ movq(function_data, MemOperand(rbp, kFunctionDataOffset));

  __ bind(&prepare_for_wasm_call);
  // -------------------------------------------
  // Prepare for the Wasm call.
  // -------------------------------------------
  // Set thread_in_wasm_flag.
  Register thread_in_wasm_flag_addr = r12;
  __ movq(
      thread_in_wasm_flag_addr,
      MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
  __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
  thread_in_wasm_flag_addr = no_reg;

  Register function_entry = function_data;
  Register scratch = r12;
  __ LoadAnyTaggedField(
      function_entry,
      FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
  __ LoadExternalPointerField(
      function_entry,
      FieldOperand(function_entry, WasmInternalFunction::kForeignAddressOffset),
      kForeignForeignAddressTag, scratch);
  function_data = no_reg;
  scratch = no_reg;

  // Set the GC scan slot count to the proper value for the Wasm call.
  constexpr int kWasmCallGCScanSlotCount = 0;
  __ Move(MemOperand(rbp, kGCScanSlotCountOffset), kWasmCallGCScanSlotCount);

  // -------------------------------------------
  // Call the Wasm function.
  // -------------------------------------------
  __ call(function_entry);
  // Note: we might be returning to a different frame if the stack was suspended
  // and resumed during the call. The new frame is set up by WasmResume and has
  // a compatible layout.
  function_entry = no_reg;

  // -------------------------------------------
  // Resetting after the Wasm call.
  // -------------------------------------------
  // Restore rsp to free the reserved stack slots for the sections.
  __ leaq(rsp, MemOperand(rbp, kLastSpillOffset));

  // Unset thread_in_wasm_flag.
  thread_in_wasm_flag_addr = r8;
  __ movq(
      thread_in_wasm_flag_addr,
      MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
  __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0));
  thread_in_wasm_flag_addr = no_reg;

  // -------------------------------------------
  // Return handling.
  // -------------------------------------------
  return_count = r8;
  __ movq(return_count, MemOperand(rbp, kReturnCountOffset));
  Register return_reg = rax;

  // If we have 1 return value, then jump to conversion.
  __ cmpl(return_count, Immediate(1));
  Label convert_return;
  __ j(equal, &convert_return);

  // Otherwise load undefined.
  __ LoadRoot(return_reg, RootIndex::kUndefinedValue);

  Label return_done;
  __ bind(&return_done);
  if (stack_switch) {
    ReloadParentContinuation(masm, wasm_instance, return_reg, rbx, rcx);
    RestoreParentSuspender(masm);
  }
  __ bind(&suspend);
  // No need to process the return value if the stack is suspended; there is a
  // single 'externref' value (the promise), which doesn't require conversion.
3721 
3722   __ movq(param_count, MemOperand(rbp, kParamCountOffset));
3723 
  // Calculate the number of parameters we have to pop off the stack. This
  // number is max(in_param_count, param_count).
  in_param_count = rdx;
  __ movq(in_param_count, MemOperand(rbp, kInParamCountOffset));
  __ cmpq(param_count, in_param_count);
  __ cmovq(less, param_count, in_param_count);
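  // In effect: param_count = std::max(param_count, in_param_count).
  // cmovq(less, dst, src) replaces dst with src only when the preceding
  // compare found dst < src, so no branch is needed.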

  // -------------------------------------------
  // Deconstruct the stack frame.
  // -------------------------------------------
  __ LeaveFrame(stack_switch ? StackFrame::STACK_SWITCH
                             : StackFrame::JS_TO_WASM);

  // We have to remove the caller frame slots:
  //  - JS arguments
  //  - the receiver
  // and transfer control to the return address (which is expected to be on
  // the top of the stack).
  // We cannot use the ret instruction alone here, because ret only accepts an
  // immediate count of slots to drop, and our count is in a register.
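  // Roughly, DropArguments with these flags emits: pop the return address into
  // the scratch register (rbx), advance rsp by
  // (param_count + 1) * kSystemPointerSize (the +1 accounts for the receiver,
  // per kCountExcludesReceiver), then push the return address back.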
  __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
                   TurboAssembler::kCountExcludesReceiver);
  __ ret(0);

  // --------------------------------------------------------------------------
  //                          Deferred code.
  // --------------------------------------------------------------------------

  // -------------------------------------------
  // Param conversion builtins.
  // -------------------------------------------
  __ bind(&convert_param);
  // Restore the function_data register (which was clobbered by the code above,
  // but was valid when jumping here earlier).
  function_data = rdi;
  // The order of pushes is important. We want the heap objects that should be
  // scanned by the GC to be on top of the stack.
  // We also have to tell the GC how many values on top of the stack it has to
  // scan before calling the builtin function.
  // The builtin expects the parameter to be in register param = rax.

  constexpr int kBuiltinCallGCScanSlotCount = 2;
  PrepareForBuiltinCall(masm, MemOperand(rbp, kGCScanSlotCountOffset),
                        kBuiltinCallGCScanSlotCount, current_param, param_limit,
                        current_int_param_slot, current_float_param_slot,
                        valuetypes_array_ptr, wasm_instance, function_data);

  Label param_kWasmI32_not_smi;
  Label param_kWasmI64;
  Label param_kWasmF32;
  Label param_kWasmF64;

  __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
  __ j(equal, &param_kWasmI32_not_smi);

  __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
  __ j(equal, &param_kWasmI64);

  __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
  __ j(equal, &param_kWasmF32);

  __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
  __ j(equal, &param_kWasmF64);

  // The parameter is a reference. We do not convert the parameter immediately.
  // Instead we will later loop over all parameters again to handle reference
  // parameters. The reason is that later value type parameters may trigger a
  // GC, and we cannot keep reference parameters alive then. Instead we leave
  // reference parameters at their initial place on the stack and only copy
  // them once no GC can happen anymore.
  // As an optimization we set a flag here that indicates that we have seen at
  // least one reference so far. If there were no reference parameters, we do
  // not iterate over the parameters a second time.
  __ movq(MemOperand(rbp, kHasRefTypesOffset), Immediate(1));
  RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
                          valuetypes_array_ptr, current_float_param_slot,
                          current_int_param_slot, param_limit, current_param);
  __ jmp(&param_conversion_done);

  __ int3();

  __ bind(&param_kWasmI32_not_smi);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedNonSmiToInt32),
          RelocInfo::CODE_TARGET);
  // Param is the result of the builtin.
  __ AssertZeroExtended(param);
  RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
                          valuetypes_array_ptr, current_float_param_slot,
                          current_int_param_slot, param_limit, current_param);
  __ movq(MemOperand(current_int_param_slot, 0), param);
  __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&param_conversion_done);

  __ bind(&param_kWasmI64);
  __ Call(BUILTIN_CODE(masm->isolate(), BigIntToI64), RelocInfo::CODE_TARGET);
  RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
                          valuetypes_array_ptr, current_float_param_slot,
                          current_int_param_slot, param_limit, current_param);
  __ movq(MemOperand(current_int_param_slot, 0), param);
  __ subq(current_int_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&param_conversion_done);

  __ bind(&param_kWasmF32);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedToFloat64),
          RelocInfo::CODE_TARGET);
  RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
                          valuetypes_array_ptr, current_float_param_slot,
                          current_int_param_slot, param_limit, current_param);
  // Clear higher bits.
  __ Xorpd(xmm1, xmm1);
  // Truncate float64 to float32.
  __ Cvtsd2ss(xmm1, xmm0);
  __ Movsd(MemOperand(current_float_param_slot, 0), xmm1);
  __ subq(current_float_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&param_conversion_done);

  __ bind(&param_kWasmF64);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedToFloat64),
          RelocInfo::CODE_TARGET);
  RestoreAfterBuiltinCall(masm, function_data, wasm_instance,
                          valuetypes_array_ptr, current_float_param_slot,
                          current_int_param_slot, param_limit, current_param);
  __ Movsd(MemOperand(current_float_param_slot, 0), xmm0);
  __ subq(current_float_param_slot, Immediate(kSystemPointerSize));
  __ jmp(&param_conversion_done);

  // -------------------------------------------
  // Return conversions.
  // -------------------------------------------
  __ bind(&convert_return);
  // We have to make sure that the kGCScanSlotCount is set correctly when we
  // call the builtins for conversion. For these builtins it's the same as for
  // the Wasm call, that is, kGCScanSlotCount = 0, so we don't have to reset it.
  // We don't need the JS context for these builtin calls.

  __ movq(valuetypes_array_ptr, MemOperand(rbp, kValueTypesArrayStartOffset));
  // The first valuetype of the array is the return's valuetype.
  __ movl(valuetype,
          Operand(valuetypes_array_ptr, wasm::ValueType::bit_field_offset()));

  Label return_kWasmI32;
  Label return_kWasmI64;
  Label return_kWasmF32;
  Label return_kWasmF64;
  Label return_kWasmFuncRef;

  __ cmpq(valuetype, Immediate(wasm::kWasmI32.raw_bit_field()));
  __ j(equal, &return_kWasmI32);

  __ cmpq(valuetype, Immediate(wasm::kWasmI64.raw_bit_field()));
  __ j(equal, &return_kWasmI64);

  __ cmpq(valuetype, Immediate(wasm::kWasmF32.raw_bit_field()));
  __ j(equal, &return_kWasmF32);

  __ cmpq(valuetype, Immediate(wasm::kWasmF64.raw_bit_field()));
  __ j(equal, &return_kWasmF64);

  __ cmpq(valuetype, Immediate(wasm::kWasmFuncRef.raw_bit_field()));
  __ j(equal, &return_kWasmFuncRef);

  // At this point, every remaining type except SIMD (kWasmS128) is a
  // reference type.
  __ cmpq(valuetype, Immediate(wasm::kWasmS128.raw_bit_field()));
  // References can be passed to JavaScript as-is.
  __ j(not_equal, &return_done);

  __ int3();

  __ bind(&return_kWasmI32);
  Label to_heapnumber;
  // If pointer compression is disabled, Smi payloads are 32 bits wide, so any
  // int32 return value can be tagged as a Smi unconditionally.
  if (SmiValuesAre32Bits()) {
    __ SmiTag(return_reg);
  } else {
    Register temp = rbx;
    __ movq(temp, return_reg);
    // Double the return value to test if it can be a Smi.
    __ addl(temp, return_reg);
    temp = no_reg;
    // If there was overflow, convert the return value to a HeapNumber.
    __ j(overflow, &to_heapnumber);
    // If there was no overflow, we can convert to Smi.
    __ SmiTag(return_reg);
  }
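  // The doubling trick works because with pointer compression Smi payloads
  // are 31 bits: an int32 v fits iff v is in [-2^30, 2^30 - 1], which is
  // exactly the range for which the 32-bit addition v + v does not overflow.
  // E.g. 0x3FFFFFFF + 0x3FFFFFFF = 0x7FFFFFFE (no overflow, Smi), while
  // 0x40000000 + 0x40000000 overflows and takes the HeapNumber path.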
  __ jmp(&return_done);

  // Handle the conversion of the I32 return value to HeapNumber when it cannot
  // be a smi.
  __ bind(&to_heapnumber);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmInt32ToHeapNumber),
          RelocInfo::CODE_TARGET);
  __ jmp(&return_done);

  __ bind(&return_kWasmI64);
  __ Call(BUILTIN_CODE(masm->isolate(), I64ToBigInt), RelocInfo::CODE_TARGET);
  __ jmp(&return_done);

  __ bind(&return_kWasmF32);
  // The builtin expects the value to be in xmm0.
  __ Movss(xmm0, xmm1);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmFloat32ToNumber),
          RelocInfo::CODE_TARGET);
  __ jmp(&return_done);

  __ bind(&return_kWasmF64);
  // The builtin expects the value to be in xmm0.
  __ Movsd(xmm0, xmm1);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmFloat64ToNumber),
          RelocInfo::CODE_TARGET);
  __ jmp(&return_done);

  __ bind(&return_kWasmFuncRef);
  __ Call(BUILTIN_CODE(masm->isolate(), WasmFuncRefToJS),
          RelocInfo::CODE_TARGET);
  __ jmp(&return_done);

  if (!stack_switch) {
    // -------------------------------------------
    // Kick off compilation.
    // -------------------------------------------
    __ bind(&compile_wrapper);
    // Enable GC.
    MemOperand GCScanSlotPlace = MemOperand(rbp, kGCScanSlotCountOffset);
    __ Move(GCScanSlotPlace, 4);
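    // The count of 4 matches the four tagged values pushed below: the two
    // saved registers plus the two runtime-call arguments.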
    // Save registers to the stack.
    __ pushq(wasm_instance);
    __ pushq(function_data);
    // Push the arguments for the runtime call.
    __ Push(wasm_instance);  // first argument
    __ Push(function_data);  // second argument
    // Set up context.
    __ Move(kContextRegister, Smi::zero());
    // Call the runtime function that kicks off compilation.
    __ CallRuntime(Runtime::kWasmCompileWrapper, 2);
    // Pop the result.
    __ movq(r9, kReturnRegister0);
    // Restore registers from the stack.
    __ popq(function_data);
    __ popq(wasm_instance);
    __ jmp(&compile_wrapper_done);
  }
}
}  // namespace

void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
  GenericJSToWasmWrapperHelper(masm, false);
}

void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
  GenericJSToWasmWrapperHelper(masm, true);
}

void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
  // Set up the stackframe.
  __ EnterFrame(StackFrame::STACK_SWITCH);

  Register promise = rax;
  Register suspender = rbx;

  __ subq(rsp, Immediate(-(BuiltinWasmWrapperConstants::kGCScanSlotCountOffset -
                           TypedFrameConstants::kFixedFrameSizeFromFp)));
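  // Both constants are frame-pointer-relative, so the negated difference is
  // the (positive) number of bytes between the end of the fixed frame and the
  // wrapper's kGCScanSlotCountOffset slot; in effect this reserves exactly the
  // wrapper spill slots on the new frame.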

  // TODO(thibaudm): Throw if any of the following holds:
  // - caller is null
  // - ActiveSuspender is undefined
  // - 'suspender' is not the active suspender

  // -------------------------------------------
  // Save current state in active jump buffer.
  // -------------------------------------------
  Label resume;
  Register continuation = rcx;
  __ LoadRoot(continuation, RootIndex::kActiveContinuation);
  Register jmpbuf = rdx;
  __ LoadAnyTaggedField(
      jmpbuf,
      FieldOperand(continuation, WasmContinuationObject::kJmpbufOffset));
  __ LoadExternalPointerField(
      jmpbuf, FieldOperand(jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, r8);
  FillJumpBuffer(masm, jmpbuf, &resume);
  __ StoreTaggedSignedField(
      FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
      Smi::FromInt(WasmSuspenderObject::Suspended));
  jmpbuf = no_reg;
  // live: [rax, rbx, rcx]
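  // FillJumpBuffer is, in essence, the setjmp half of the stack switch: it
  // records the current rsp, rbp, and the address of the 'resume' label in
  // the jump buffer so that a later LoadJumpBuffer can longjmp back here.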

#ifdef DEBUG
  // -------------------------------------------
  // Check that the suspender's continuation is the active continuation.
  // -------------------------------------------
  // TODO(thibaudm): Once we add core stack-switching instructions, this check
  // will not hold anymore: it's possible that the active continuation changed
  // (due to an internal switch), so we have to update the suspender.
  Register suspender_continuation = rdx;
  __ LoadAnyTaggedField(
      suspender_continuation,
      FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  __ cmpq(suspender_continuation, continuation);
  Label ok;
  __ j(equal, &ok);
  __ Trap();
  __ bind(&ok);
#endif

  // -------------------------------------------
  // Update roots.
  // -------------------------------------------
  Register caller = rcx;
  __ LoadAnyTaggedField(
      caller,
      FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  __ LoadAnyTaggedField(
      caller, FieldOperand(caller, WasmContinuationObject::kParentOffset));
  __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
  Register parent = rdx;
  __ LoadAnyTaggedField(
      parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
  __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
  parent = no_reg;
  // live: [rax, rcx]

  // -------------------------------------------
  // Load jump buffer.
  // -------------------------------------------
  MemOperand GCScanSlotPlace =
      MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
  __ Move(GCScanSlotPlace, 2);
  __ Push(promise);
  __ Push(caller);
  __ Move(kContextRegister, Smi::zero());
  __ CallRuntime(Runtime::kWasmSyncStackLimit);
  __ Pop(caller);
  __ Pop(promise);
  jmpbuf = caller;
  __ LoadAnyTaggedField(
      jmpbuf, FieldOperand(caller, WasmContinuationObject::kJmpbufOffset));
  caller = no_reg;
  __ LoadExternalPointerField(
      jmpbuf, FieldOperand(jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, r8);
  __ movq(kReturnRegister0, promise);
  __ Move(GCScanSlotPlace, 0);
  LoadJumpBuffer(masm, jmpbuf, true);
  __ Trap();
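  // LoadJumpBuffer performs the actual stack switch (the longjmp half), so
  // control never falls through: the Trap above only marks this point as
  // unreachable. We get back to 'resume' when this continuation's jump buffer
  // is itself reloaded later.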
  __ bind(&resume);
  __ LeaveFrame(StackFrame::STACK_SWITCH);
  __ ret(0);
}

// Resume the suspender stored in the closure.
void Builtins::Generate_WasmResume(MacroAssembler* masm) {
  __ EnterFrame(StackFrame::STACK_SWITCH);

  Register param_count = rax;
  __ decq(param_count);                    // Exclude receiver.
  Register closure = kJSFunctionRegister;  // rdi

  // These slots are not used in this builtin. But when we return from the
  // resumed continuation, we return to the GenericJSToWasmWrapper code, which
  // expects these slots to be set.
  constexpr int kInParamCountOffset =
      BuiltinWasmWrapperConstants::kInParamCountOffset;
  constexpr int kParamCountOffset =
      BuiltinWasmWrapperConstants::kParamCountOffset;
  __ subq(rsp, Immediate(3 * kSystemPointerSize));
  __ movq(MemOperand(rbp, kParamCountOffset), param_count);
  __ movq(MemOperand(rbp, kInParamCountOffset), param_count);

  param_count = no_reg;

  // -------------------------------------------
  // Load suspender from closure.
  // -------------------------------------------
  Register sfi = closure;
  __ LoadAnyTaggedField(
      sfi,
      MemOperand(
          closure,
          wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
  Register function_data = sfi;
  __ LoadAnyTaggedField(
      function_data,
      FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
  Register suspender = rax;
  __ LoadAnyTaggedField(
      suspender,
      FieldOperand(function_data, WasmOnFulfilledData::kSuspenderOffset));
  // Check the suspender state.
  Label suspender_is_suspended;
  Register state = rdx;
  __ LoadTaggedSignedField(
      state, FieldOperand(suspender, WasmSuspenderObject::kStateOffset));
  __ SmiCompare(state, Smi::FromInt(WasmSuspenderObject::Suspended));
  __ j(equal, &suspender_is_suspended);
  __ Trap();  // TODO(thibaudm): Throw a wasm trap.
  closure = no_reg;
  sfi = no_reg;

  __ bind(&suspender_is_suspended);
  // -------------------------------------------
  // Save current state.
  // -------------------------------------------
  Label suspend;
  Register active_continuation = r9;
  __ LoadRoot(active_continuation, RootIndex::kActiveContinuation);
  Register current_jmpbuf = rdi;
  __ LoadAnyTaggedField(
      current_jmpbuf,
      FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset));
  __ LoadExternalPointerField(
      current_jmpbuf,
      FieldOperand(current_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, rdx);
  FillJumpBuffer(masm, current_jmpbuf, &suspend);
  current_jmpbuf = no_reg;

  // -------------------------------------------
  // Set suspender's parent to active continuation.
  // -------------------------------------------
  __ StoreTaggedSignedField(
      FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
      Smi::FromInt(WasmSuspenderObject::Active));
  Register target_continuation = rdi;
  __ LoadAnyTaggedField(
      target_continuation,
      FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
  Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
  __ StoreTaggedField(
      FieldOperand(target_continuation, WasmContinuationObject::kParentOffset),
      active_continuation);
  __ RecordWriteField(
      target_continuation, WasmContinuationObject::kParentOffset,
      active_continuation, slot_address, SaveFPRegsMode::kIgnore);
  active_continuation = no_reg;

  // -------------------------------------------
  // Update roots.
  // -------------------------------------------
  __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation),
          target_continuation);
  __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);
  suspender = no_reg;

  MemOperand GCScanSlotPlace =
      MemOperand(rbp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset);
  __ Move(GCScanSlotPlace, 1);
  __ Push(target_continuation);
  __ Move(kContextRegister, Smi::zero());
  __ CallRuntime(Runtime::kWasmSyncStackLimit);
  __ Pop(target_continuation);

  // -------------------------------------------
  // Load state from target jmpbuf (longjmp).
  // -------------------------------------------
  Register target_jmpbuf = target_continuation;
  __ LoadAnyTaggedField(
      target_jmpbuf,
      FieldOperand(target_continuation, WasmContinuationObject::kJmpbufOffset));
  __ LoadExternalPointerField(
      target_jmpbuf,
      FieldOperand(target_jmpbuf, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, rax);
  // Move resolved value to return register.
  __ movq(kReturnRegister0, Operand(rbp, 3 * kSystemPointerSize));
  __ Move(GCScanSlotPlace, 0);
  LoadJumpBuffer(masm, target_jmpbuf, true);
  __ Trap();
  __ bind(&suspend);
  __ LeaveFrame(StackFrame::STACK_SWITCH);
  __ ret(3);
}

void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
  MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
  __ movq(kScratchRegister, OSRTargetSlot);
  __ Move(OSRTargetSlot, 0);
  __ jmp(kScratchRegister);
}

#endif  // V8_ENABLE_WEBASSEMBLY

void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
                               SaveFPRegsMode save_doubles, ArgvMode argv_mode,
                               bool builtin_exit_frame) {
  // rax: number of arguments including receiver
  // rbx: pointer to C function  (C callee-saved)
  // rbp: frame pointer of calling JS frame (restored after C call)
  // rsp: stack pointer  (restored after C call)
  // rsi: current context (restored)
  //
  // If argv_mode == ArgvMode::kRegister:
  // r15: pointer to the first argument

#ifdef V8_TARGET_OS_WIN
  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9. It requires the
  // stack to be aligned to 16 bytes. It only allows a single word to be
  // returned in register rax. Larger return sizes must be written to an
  // address passed as a hidden first argument.
  const Register kCCallArg0 = rcx;
  const Register kCCallArg1 = rdx;
  const Register kCCallArg2 = r8;
  const Register kCCallArg3 = r9;
  const int kArgExtraStackSpace = 2;
  const int kMaxRegisterResultSize = 1;
#else
  // GCC / Clang passes arguments in rdi, rsi, rdx, rcx, r8, r9. Simple results
  // are returned in rax, and a struct of two pointers is returned in rax and
  // rdx. Larger return sizes must be written to an address passed as a hidden
  // first argument.
  const Register kCCallArg0 = rdi;
  const Register kCCallArg1 = rsi;
  const Register kCCallArg2 = rdx;
  const Register kCCallArg3 = rcx;
  const int kArgExtraStackSpace = 0;
  const int kMaxRegisterResultSize = 2;
#endif  // V8_TARGET_OS_WIN

  // Enter the exit frame that transitions from JavaScript to C++.
  int arg_stack_space =
      kArgExtraStackSpace +
      (result_size <= kMaxRegisterResultSize ? 0 : result_size);
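  // Example: on Win64 with result_size == 2, kMaxRegisterResultSize is 1, so
  // arg_stack_space = 2 + 2 = 4 slots: the two extra Win64 slots plus two
  // slots for the result that is written through the hidden first argument.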
  if (argv_mode == ArgvMode::kRegister) {
    DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
    DCHECK(!builtin_exit_frame);
    __ EnterApiExitFrame(arg_stack_space);
    // Move argc into r12 (argv is already in r15).
    __ movq(r12, rax);
  } else {
    __ EnterExitFrame(
        arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
        builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
  }

  // rbx: pointer to builtin function  (C callee-saved).
  // rbp: frame pointer of exit frame  (restored after C call).
  // rsp: stack pointer (restored after C call).
  // r12: number of arguments including receiver (C callee-saved).
  // r15: argv pointer (C callee-saved).

  // Check stack alignment.
  if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }

  // Call C function. The arguments object will be created by stubs declared by
  // DECLARE_RUNTIME_FUNCTION().
  if (result_size <= kMaxRegisterResultSize) {
    // Pass a pointer to the Arguments object as the first argument.
    // Return result in single register (rax), or a register pair (rax, rdx).
    __ movq(kCCallArg0, r12);  // argc.
    __ movq(kCCallArg1, r15);  // argv.
    __ Move(kCCallArg2, ExternalReference::isolate_address(masm->isolate()));
  } else {
    DCHECK_LE(result_size, 2);
    // Pass a pointer to the result location as the first argument.
    __ leaq(kCCallArg0, StackSpaceOperand(kArgExtraStackSpace));
    // Pass a pointer to the Arguments object as the second argument.
    __ movq(kCCallArg1, r12);  // argc.
    __ movq(kCCallArg2, r15);  // argv.
    __ Move(kCCallArg3, ExternalReference::isolate_address(masm->isolate()));
  }
  __ call(rbx);

  if (result_size > kMaxRegisterResultSize) {
    // Read result values stored on stack. The result is stored above the two
    // Arguments object slots on Win64.
    DCHECK_LE(result_size, 2);
    __ movq(kReturnRegister0, StackSpaceOperand(kArgExtraStackSpace + 0));
    __ movq(kReturnRegister1, StackSpaceOperand(kArgExtraStackSpace + 1));
  }
  // Result is in rax or rdx:rax - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(rax, RootIndex::kException);
  __ j(equal, &exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    __ LoadRoot(kScratchRegister, RootIndex::kTheHoleValue);
    ExternalReference pending_exception_address = ExternalReference::Create(
        IsolateAddressId::kPendingExceptionAddress, masm->isolate());
    Operand pending_exception_operand =
        masm->ExternalReferenceAsOperand(pending_exception_address);
    __ cmp_tagged(kScratchRegister, pending_exception_operand);
    __ j(equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
  }

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
                    argv_mode == ArgvMode::kStack);
  __ ret(0);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
  ExternalReference pending_handler_entrypoint_address =
      ExternalReference::Create(
          IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
  ExternalReference pending_handler_fp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
  ExternalReference pending_handler_sp_address = ExternalReference::Create(
      IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());

  // Ask the runtime for help to determine the handler. This will set rax to
  // contain the current pending exception; don't clobber it.
  ExternalReference find_handler =
      ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ Move(arg_reg_1, 0);  // argc.
    __ Move(arg_reg_2, 0);  // argv.
    __ Move(arg_reg_3, ExternalReference::isolate_address(masm->isolate()));
    __ PrepareCallCFunction(3);
    __ CallCFunction(find_handler, 3);
  }

#ifdef V8_ENABLE_CET_SHADOW_STACK
  // Drop frames from the shadow stack.
  ExternalReference num_frames_above_pending_handler_address =
      ExternalReference::Create(
          IsolateAddressId::kNumFramesAbovePendingHandlerAddress,
          masm->isolate());
  __ movq(rcx, masm->ExternalReferenceAsOperand(
                   num_frames_above_pending_handler_address));
  __ IncsspqIfSupported(rcx, kScratchRegister);
#endif  // V8_ENABLE_CET_SHADOW_STACK

  // Retrieve the handler context, SP and FP.
  __ movq(rsi,
          masm->ExternalReferenceAsOperand(pending_handler_context_address));
  __ movq(rsp, masm->ExternalReferenceAsOperand(pending_handler_sp_address));
  __ movq(rbp, masm->ExternalReferenceAsOperand(pending_handler_fp_address));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context (rsi) will be 0 for non-JS frames.
  Label skip;
  __ testq(rsi, rsi);
  __ j(zero, &skip, Label::kNear);
  __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
  __ bind(&skip);

  // Clear c_entry_fp, like we do in `LeaveExitFrame`.
  ExternalReference c_entry_fp_address = ExternalReference::Create(
      IsolateAddressId::kCEntryFPAddress, masm->isolate());
  Operand c_entry_fp_operand =
      masm->ExternalReferenceAsOperand(c_entry_fp_address);
  __ movq(c_entry_fp_operand, Immediate(0));

  // Compute the handler entry address and jump to it.
  __ movq(rdi,
          masm->ExternalReferenceAsOperand(pending_handler_entrypoint_address));
  __ jmp(rdi);
}

void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
  Label check_negative, process_64_bits, done;

  // Account for return address and saved regs.
  const int kArgumentOffset = 4 * kSystemPointerSize;

  MemOperand mantissa_operand(MemOperand(rsp, kArgumentOffset));
  MemOperand exponent_operand(
      MemOperand(rsp, kArgumentOffset + kDoubleSize / 2));

  // The result is returned on the stack.
  MemOperand return_operand = mantissa_operand;

  Register scratch1 = rbx;

  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if rcx is the requested return register.
  Register result_reg = rax;
  // Save rcx if it isn't the return register (and therefore volatile), or, if
  // it is the return register, save the temp register we use in its stead for
  // the result.
  Register save_reg = rax;
  __ pushq(rcx);
  __ pushq(scratch1);
  __ pushq(save_reg);

  __ movl(scratch1, mantissa_operand);
  __ Movsd(kScratchDoubleReg, mantissa_operand);
  __ movl(rcx, exponent_operand);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits, Label::kNear);

  // Result is entirely in the lower 32 bits of the mantissa.
  int delta =
      HeapNumber::kExponentBias + base::Double::kPhysicalSignificandSize;
  __ subl(rcx, Immediate(delta));
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done, Label::kNear);
  __ shll_cl(scratch1);
  __ jmp(&check_negative, Label::kNear);
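  // After the subtraction rcx holds (unbiased exponent - 52), i.e. how far the
  // 52-bit significand must be shifted left for its low 32 bits to line up
  // with the low 32 bits of the integer value; shift amounts above 31 mean
  // those bits are all zero, matching JavaScript's ToInt32 truncation.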

  __ bind(&process_64_bits);
  __ Cvttsd2siq(result_reg, kScratchDoubleReg);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  __ cmpl(exponent_operand, Immediate(0));
  __ cmovl(greater, result_reg, scratch1);

  // Restore registers.
  __ bind(&done);
  __ movl(return_operand, result_reg);
  __ popq(save_reg);
  __ popq(scratch1);
  __ popq(rcx);
  __ ret(0);
}

namespace {

int Offset(ExternalReference ref0, ExternalReference ref1) {
  int64_t offset = (ref0.address() - ref1.address());
  // Check that the offset fits into an int.
  DCHECK(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}
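
// Offset() lets the HandleScope code below address several isolate-internal
// fields off a single base register: e.g. with base_reg pointing at
// next_address, the limit can be read as
// Operand(base_reg, Offset(handle_scope_limit_address, next_address)).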

// Calls an API function.  Allocates HandleScope, extracts returned value
// from handle and propagates exceptions.  Clobbers r12, r15, rbx and
// caller-save registers.  Restores context.  On return removes
// stack_space * kSystemPointerSize (GCed).
void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
                              ExternalReference thunk_ref,
                              Register thunk_last_arg, int stack_space,
                              Operand* stack_space_operand,
                              Operand return_value_operand) {
  Label prologue;
  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;

  Isolate* isolate = masm->isolate();
  Factory* factory = isolate->factory();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = Offset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = Offset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
  ExternalReference scheduled_exception_address =
      ExternalReference::scheduled_exception_address(isolate);

  DCHECK(rdx == function_address || r8 == function_address);
  // Allocate HandleScope in callee-save registers.
  Register prev_next_address_reg = r12;
  Register prev_limit_reg = rbx;
  Register base_reg = r15;
  __ Move(base_reg, next_address);
  __ movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
  __ movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  __ addl(Operand(base_reg, kLevelOffset), Immediate(1));

  Label profiler_enabled, end_profiler_check;
  __ Move(rax, ExternalReference::is_profiling_address(isolate));
  __ cmpb(Operand(rax, 0), Immediate(0));
  __ j(not_zero, &profiler_enabled);
  __ Move(rax, ExternalReference::address_of_runtime_stats_flag());
  __ cmpl(Operand(rax, 0), Immediate(0));
  __ j(not_zero, &profiler_enabled);
  {
    // Call the api function directly.
    __ Move(rax, function_address);
    __ jmp(&end_profiler_check);
  }
  __ bind(&profiler_enabled);
  {
    // Third parameter is the address of the actual getter function.
    __ Move(thunk_last_arg, function_address);
    __ Move(rax, thunk_ref);
  }
  __ bind(&end_profiler_check);
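  // When profiling (or runtime call stats) is active, the call is routed
  // through the thunk instead: the thunk receives the real callback address
  // in thunk_last_arg and can log entry and exit around invoking it.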

  // Call the api function!
  __ call(rax);

  // Load the value from ReturnValue.
  __ movq(rax, return_value_operand);
  __ bind(&prologue);

  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
  __ movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
  __ cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
  __ j(not_equal, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  if (stack_space_operand != nullptr) {
    DCHECK_EQ(stack_space, 0);
    __ movq(rbx, *stack_space_operand);
  }
  __ LeaveApiExitFrame();

  // Check if the function scheduled an exception.
  __ Move(rdi, scheduled_exception_address);
  __ Cmp(Operand(rdi, 0), factory->the_hole_value());
  __ j(not_equal, &promote_scheduled_exception);

#if DEBUG
  // Check if the function returned a valid JavaScript value.
  Label ok;
  Register return_value = rax;
  Register map = rcx;

  __ JumpIfSmi(return_value, &ok, Label::kNear);
  __ LoadMap(map, return_value);
  __ CmpInstanceType(map, LAST_NAME_TYPE);
  __ j(below_equal, &ok, Label::kNear);

  __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
  __ j(above_equal, &ok, Label::kNear);

  __ CompareRoot(map, RootIndex::kHeapNumberMap);
  __ j(equal, &ok, Label::kNear);

  __ CompareRoot(map, RootIndex::kBigIntMap);
  __ j(equal, &ok, Label::kNear);

  __ CompareRoot(return_value, RootIndex::kUndefinedValue);
  __ j(equal, &ok, Label::kNear);

  __ CompareRoot(return_value, RootIndex::kTrueValue);
  __ j(equal, &ok, Label::kNear);

  __ CompareRoot(return_value, RootIndex::kFalseValue);
  __ j(equal, &ok, Label::kNear);

  __ CompareRoot(return_value, RootIndex::kNullValue);
  __ j(equal, &ok, Label::kNear);

  __ Abort(AbortReason::kAPICallReturnedInvalidObject);

  __ bind(&ok);
#endif

  if (stack_space_operand == nullptr) {
    DCHECK_NE(stack_space, 0);
    __ ret(stack_space * kSystemPointerSize);
  } else {
    DCHECK_EQ(stack_space, 0);
    __ PopReturnAddressTo(rcx);
    // {stack_space_operand} was loaded into {rbx} above.
    __ addq(rsp, rbx);
    // Push and ret (instead of jmp) to keep the RSB and the CET shadow stack
    // balanced.
    __ PushReturnAddressFrom(rcx);
    __ ret(0);
  }

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
  __ movq(prev_limit_reg, rax);
  __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
  __ LoadAddress(rax, ExternalReference::delete_handle_scope_extensions());
  __ call(rax);
  __ movq(rax, prev_limit_reg);
  __ jmp(&leave_exit_frame);
}

}  // namespace

// TODO(jgruber): Instead of explicitly setting up implicit_args_ on the stack
// in CallApiCallback, we could use the calling convention to set up the stack
// correctly in the first place.
//
// TODO(jgruber): I suspect that most of CallApiCallback could be implemented
// as a C++ trampoline, vastly simplifying the assembly implementation.

void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rsi                 : context
  //  -- rdx                 : api function address
  //  -- rcx                 : arguments count (not including the receiver)
  //  -- rbx                 : call data
  //  -- rdi                 : holder
  //  -- rsp[0]              : return address
  //  -- rsp[8]              : argument 0 (receiver)
  //  -- rsp[16]             : argument 1
  //  -- ...
  //  -- rsp[argc * 8]       : argument (argc - 1)
  //  -- rsp[(argc + 1) * 8] : argument argc
  // -----------------------------------

  Register api_function_address = rdx;
  Register argc = rcx;
  Register call_data = rbx;
  Register holder = rdi;

  DCHECK(!AreAliased(api_function_address, argc, holder, call_data,
                     kScratchRegister));

  using FCA = FunctionCallbackArguments;

  STATIC_ASSERT(FCA::kArgsLength == 6);
  STATIC_ASSERT(FCA::kNewTargetIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);

  // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
  //
  // Current state:
  //   rsp[0]: return address
  //
  // Target state:
  //   rsp[0 * kSystemPointerSize]: return address
  //   rsp[1 * kSystemPointerSize]: kHolder
  //   rsp[2 * kSystemPointerSize]: kIsolate
  //   rsp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
  //   rsp[4 * kSystemPointerSize]: undefined (kReturnValue)
  //   rsp[5 * kSystemPointerSize]: kData
  //   rsp[6 * kSystemPointerSize]: undefined (kNewTarget)

  __ PopReturnAddressTo(rax);
  __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
  __ Push(kScratchRegister);
  __ Push(call_data);
  __ Push(kScratchRegister);
  __ Push(kScratchRegister);
  __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
  __ Push(holder);
  __ PushReturnAddressFrom(rax);
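  // Because the stack grows down, the pushes happen in reverse order of the
  // target layout above: the first Push (undefined) ends up deepest as
  // kNewTarget, then kData, the two return-value slots, kIsolate, and finally
  // kHolder right below the re-pushed return address.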

  // Keep a pointer to kHolder (= implicit_args) in a scratch register.
  // We use it below to set up the FunctionCallbackInfo object.
  Register scratch = rbx;
  __ leaq(scratch, Operand(rsp, 1 * kSystemPointerSize));

  // Allocate the v8::Arguments structure in the arguments' space since
  // it's not controlled by GC.
  static constexpr int kApiStackSpace = 4;
  __ EnterApiExitFrame(kApiStackSpace);

  // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
  __ movq(StackSpaceOperand(0), scratch);

  // FunctionCallbackInfo::values_ (points at the first varargs argument passed
  // on the stack).
  __ leaq(scratch,
          Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
  __ movq(StackSpaceOperand(1), scratch);

  // FunctionCallbackInfo::length_.
  __ movq(StackSpaceOperand(2), argc);

  // We also store the number of bytes to drop from the stack after returning
  // from the API function here.
  __ leaq(kScratchRegister,
          Operand(argc, times_system_pointer_size,
                  (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
  __ movq(StackSpaceOperand(3), kScratchRegister);
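  // In effect this computes (argc + FCA::kArgsLength + 1) * kSystemPointerSize
  // bytes: the JS arguments, the six implicit slots, and the receiver. E.g.
  // for argc == 2 that is (2 + 6 + 1) * 8 = 72 bytes to drop.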

  Register arguments_arg = arg_reg_1;
  Register callback_arg = arg_reg_2;

  // It's okay if api_function_address == callback_arg,
  // but not arguments_arg.
  DCHECK(api_function_address != arguments_arg);

  // v8::InvocationCallback's argument.
  __ leaq(arguments_arg, StackSpaceOperand(0));

  ExternalReference thunk_ref = ExternalReference::invoke_function_callback();

  // There are two stack slots above the arguments we constructed on the stack:
  // the stored rbp (pushed by EnterApiExitFrame), and the return address.
  static constexpr int kStackSlotsAboveFCA = 2;
  Operand return_value_operand(
      rbp,
      (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);

  static constexpr int kUseStackSpaceOperand = 0;
  Operand stack_space_operand = StackSpaceOperand(3);
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
                           kUseStackSpaceOperand, &stack_space_operand,
                           return_value_operand);
}

void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
  Register name_arg = arg_reg_1;
  Register accessor_info_arg = arg_reg_2;
  Register getter_arg = arg_reg_3;
  Register api_function_address = r8;
  Register receiver = ApiGetterDescriptor::ReceiverRegister();
  Register holder = ApiGetterDescriptor::HolderRegister();
  Register callback = ApiGetterDescriptor::CallbackRegister();
  Register scratch = rax;
  Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r15 : no_reg;

  DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1));

  // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
  // property name below the exit frame to make the GC aware of them.
  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);

  // Insert additional parameters into the stack frame above return address.
  __ PopReturnAddressTo(scratch);
  __ Push(receiver);
  __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
                        decompr_scratch1);
  __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
  __ Push(kScratchRegister);  // return value
  __ Push(kScratchRegister);  // return value default
  __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
  __ Push(holder);
  __ Push(Smi::zero());  // should_throw_on_error -> false
  __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
                            decompr_scratch1);
  __ PushReturnAddressFrom(scratch);

  // v8::PropertyCallbackInfo::args_ array and name handle.
  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  // Allocate v8::PropertyCallbackInfo in non-GCed stack space.
  const int kArgStackSpace = 1;

  // Load address of v8::PropertyAccessorInfo::args_ array.
  __ leaq(scratch, Operand(rsp, 2 * kSystemPointerSize));

  __ EnterApiExitFrame(kArgStackSpace);

  // Create the v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
  Operand info_object = StackSpaceOperand(0);
  __ movq(info_object, scratch);

  __ leaq(name_arg, Operand(scratch, -kSystemPointerSize));
  // The context register (rsi) has been saved in EnterApiExitFrame and
  // could be used to pass arguments.
  __ leaq(accessor_info_arg, info_object);

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback();

  // It's okay if api_function_address == getter_arg,
  // but not accessor_info_arg or name_arg.
  DCHECK(api_function_address != accessor_info_arg);
  DCHECK(api_function_address != name_arg);
  __ LoadTaggedPointerField(
      scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
  __ LoadExternalPointerField(
      api_function_address,
      FieldOperand(scratch, Foreign::kForeignAddressOffset),
      kForeignForeignAddressTag, kScratchRegister);

  // +3 is to skip prolog, return address and name handle.
  Operand return_value_operand(
      rbp,
      (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
  Operand* const kUseStackSpaceConstant = nullptr;
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
                           kStackUnwindSpace, kUseStackSpaceConstant,
                           return_value_operand);
}

void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
  __ int3();  // Unused on this architecture.
}

namespace {

void Generate_DeoptimizationEntry(MacroAssembler* masm,
                                  DeoptimizeKind deopt_kind) {
  Isolate* isolate = masm->isolate();

  // Save all double registers; they will later be copied to the deoptimizer's
  // FrameDescription.
  static constexpr int kDoubleRegsSize =
      kDoubleSize * XMMRegister::kNumRegisters;
  __ AllocateStackSpace(kDoubleRegsSize);

  const RegisterConfiguration* config = RegisterConfiguration::Default();
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ Movsd(Operand(rsp, offset), xmm_reg);
  }

  // Save all general purpose registers; they will later be copied to the
  // deoptimizer's FrameDescription.
  static constexpr int kNumberOfRegisters = Register::kNumRegisters;
  for (int i = 0; i < kNumberOfRegisters; i++) {
    __ pushq(Register::from_code(i));
  }

  static constexpr int kSavedRegistersAreaSize =
      kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
  static constexpr int kCurrentOffsetToReturnAddress = kSavedRegistersAreaSize;
  static constexpr int kCurrentOffsetToParentSP =
      kCurrentOffsetToReturnAddress + kPCOnStackSize;

  __ Store(
      ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate),
      rbp);

  // Get the address of the location in the code object
  // and compute the fp-to-sp delta in register arg5.
  __ movq(arg_reg_3, Operand(rsp, kCurrentOffsetToReturnAddress));
  // Load the fp-to-sp-delta.
  __ leaq(arg_reg_4, Operand(rsp, kCurrentOffsetToParentSP));
  __ subq(arg_reg_4, rbp);
  __ negq(arg_reg_4);
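  // arg_reg_4 now holds rbp minus the SP the deoptimized frame had at the call
  // into this entry (kCurrentOffsetToParentSP skips the saved registers and
  // the return address), i.e. the positive fp-to-sp delta the Deoptimizer
  // constructor expects.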

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(5);
  __ Move(rax, 0);
  Label context_check;
  __ movq(rdi, Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(rdi, &context_check);
  __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ movq(arg_reg_1, rax);
  __ Move(arg_reg_2, static_cast<int>(deopt_kind));
  // Args 3 and 4 are already in the right registers.

  // On Windows, put the fifth argument on the stack (PrepareCallCFunction has
  // created space for this). On Linux, pass it in r8.
#ifdef V8_TARGET_OS_WIN
  Register arg5 = r15;
  __ LoadAddress(arg5, ExternalReference::isolate_address(isolate));
  __ movq(Operand(rsp, 4 * kSystemPointerSize), arg5);
#else
  // r8 is arg_reg_5 on Linux.
  __ LoadAddress(r8, ExternalReference::isolate_address(isolate));
#endif

  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
  }
  // Preserve deoptimizer object in register rax and get the input
  // frame descriptor pointer.
  __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));

  // Fill in the input registers.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ PopQuad(Operand(rbx, offset));
  }

  // Fill in the double input registers.
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    __ popq(Operand(rbx, dst_offset));
  }

  // Mark the stack as not iterable for the CPU profiler, which won't be able
  // to walk the stack without the return address.
  __ movb(__ ExternalReferenceAsOperand(
              ExternalReference::stack_is_iterable_address(isolate)),
          Immediate(0));

  // Remove the return address from the stack.
  __ addq(rsp, Immediate(kPCOnStackSize));

  // Compute a pointer to the unwinding limit in register rcx; that is
  // the first stack slot not part of the input frame.
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ addq(rcx, rsp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ leaq(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
  Label pop_loop_header;
  __ jmp(&pop_loop_header);
  Label pop_loop;
  __ bind(&pop_loop);
  __ Pop(Operand(rdx, 0));
  __ addq(rdx, Immediate(sizeof(intptr_t)));
  __ bind(&pop_loop_header);
  __ cmpq(rcx, rsp);
  __ j(not_equal, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ pushq(rax);
  __ PrepareCallCFunction(2);
  __ movq(arg_reg_1, rax);
  __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate));
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compute_output_frames_function(), 2);
  }
  __ popq(rax);

  __ movq(rsp, Operand(rax, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
  // Outer loop state: rax = current FrameDescription**, rdx = one past the
  // last FrameDescription**.
  __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
  __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
  __ leaq(rdx, Operand(rax, rdx, times_system_pointer_size, 0));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
  __ movq(rbx, Operand(rax, 0));
  __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ subq(rcx, Immediate(sizeof(intptr_t)));
  __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
  __ bind(&inner_loop_header);
  __ testq(rcx, rcx);
  __ j(not_zero, &inner_push_loop);
  __ addq(rax, Immediate(kSystemPointerSize));
  __ bind(&outer_loop_header);
  __ cmpq(rax, rdx);
  __ j(below, &outer_push_loop);
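  // The two loops above are, in effect:
  //   for (FrameDescription** f = output; f < output + output_count; ++f)
  //     for (intptr_t off = (*f)->frame_size(); off > 0; ) {
  //       off -= sizeof(intptr_t);
  //       push(word at (*f)->frame_content() + off);
  //     }
  // i.e. each output frame's contents are materialized onto the real stack,
  // with the topmost frame pushed last.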

  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    XMMRegister xmm_reg = XMMRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ Movsd(xmm_reg, Operand(rbx, src_offset));
  }

  // Push pc and continuation from the last output frame.
  __ PushQuad(Operand(rbx, FrameDescription::pc_offset()));
  __ PushQuad(Operand(rbx, FrameDescription::continuation_offset()));

  // Push the registers from the last output frame.
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset =
        (i * kSystemPointerSize) + FrameDescription::registers_offset();
    __ PushQuad(Operand(rbx, offset));
  }

  // Restore the registers from the stack.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    Register r = Register::from_code(i);
    // Do not restore rsp, simply pop the value into the next register
    // and overwrite this afterwards.
    if (r == rsp) {
      DCHECK_GT(i, 0);
      r = Register::from_code(i - 1);
    }
    __ popq(r);
  }

  __ movb(__ ExternalReferenceAsOperand(
              ExternalReference::stack_is_iterable_address(isolate)),
          Immediate(1));

  // Return to the continuation point.
  __ ret(0);
}

}  // namespace

void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
}

void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
}

namespace {

// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
// code. Otherwise execution continues with bytecode.
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                         bool next_bytecode,
                                         bool is_osr = false) {
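  // Rough control flow of this builtin, as a sketch (illustration only;
  // names abbreviated, not the actual APIs):
  //
  //   data = closure->shared()->function_data();
  //   if (!data.IsCodeT()) jump InterpreterEnterAt[Next]Bytecode;
  //   if (!closure->feedback_cell()->value().IsFeedbackVector())
  //     { Runtime::kInstallBaselineCode(closure); retry from start; }
  //   pc = baseline_pc_for_[next_executed_]bytecode(code, offset, bytecodes);
  //   jump code + pc;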
  Label start;
  __ bind(&start);

  // Get function from the frame.
  Register closure = rdi;
  __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));

  // Get the Code object from the shared function info.
  Register code_obj = rbx;
  __ LoadTaggedPointerField(
      code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      code_obj,
      FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
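  // code_obj now holds the SharedFunctionInfo's function_data slot, which
  // contains the baseline CodeT once Sparkplug has compiled the function,
  // and the BytecodeArray (or other data) otherwise; the type check below
  // distinguishes the two cases.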

  // Check if we have baseline code. For OSR entry it is safe to assume we
  // always have baseline code.
  if (!is_osr) {
    Label start_with_baseline;
    __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
    __ j(equal, &start_with_baseline);

    // Start with bytecode as there is no baseline code.
    Builtin builtin_id = next_bytecode
                             ? Builtin::kInterpreterEnterAtNextBytecode
                             : Builtin::kInterpreterEnterAtBytecode;
    __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
            RelocInfo::CODE_TARGET);

    // Start with baseline code.
    __ bind(&start_with_baseline);
  } else if (FLAG_debug_code) {
    __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kExpectedBaselineData);
  }

  if (FLAG_debug_code) {
    AssertCodeTIsBaseline(masm, code_obj, r11);
  }
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    __ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
  }

  // Load the feedback vector.
  Register feedback_vector = r11;
  __ LoadTaggedPointerField(
      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(feedback_vector,
                            FieldOperand(feedback_vector, Cell::kValueOffset));

  Label install_baseline_code;
  // Check if the feedback vector is valid. If not, call the runtime to
  // allocate one before entering baseline code.
  __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
  __ j(not_equal, &install_baseline_code);

  // Save BytecodeOffset from the stack frame.
  __ SmiUntag(
      kInterpreterBytecodeOffsetRegister,
      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  // Replace BytecodeOffset with the feedback vector.
  __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
          feedback_vector);
  feedback_vector = no_reg;
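  // Interpreter and baseline frames appear to share this slot: the
  // interpreter frame stores the (Smi) bytecode offset there, the baseline
  // frame the feedback vector, so the swap above converts the frame in
  // place (a reading of this code, not an authoritative layout spec).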

  // Compute baseline pc for bytecode offset.
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_next_executed_bytecode();
  } else {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_bytecode_offset();
  }
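  // For OSR and "enter at next bytecode", execution must resume after the
  // current bytecode, so the helper that maps the offset to the baseline
  // pc of the next bytecode in execution order is chosen instead.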
  Register get_baseline_pc = r11;
  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  if (!is_osr) {
    __ cmpq(kInterpreterBytecodeOffsetRegister,
            Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
                      kFunctionEntryBytecodeOffset));
    __ j(equal, &function_entry_bytecode);
  }

  __ subq(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
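  // The subtraction above removes the header bias: the frame stores the
  // offset biased by the BytecodeArray header (so it can double as an
  // index off the tagged object), while the C++ helper called below
  // presumably expects a plain bytecode offset.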

  __ bind(&valid_bytecode_offset);
  // Get bytecode array from the stack frame.
  __ movq(kInterpreterBytecodeArrayRegister,
          MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ pushq(kInterpreterAccumulatorRegister);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3);
    __ movq(arg_reg_1, code_obj);
    __ movq(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3);
  }
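  // kReturnRegister0 now holds the pc offset the helper returned; the leaq
  // below turns it into an absolute address inside the baseline code
  // object's instruction area (past the Code header).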
  __ leaq(code_obj,
          FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
  __ popq(kInterpreterAccumulatorRegister);

  if (is_osr) {
    // TODO(pthier): Separate Sparkplug and Turbofan OSR states.
    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj);
  } else {
    __ jmp(code_obj);
  }
  __ Trap();  // Unreachable.

  if (!is_osr) {
    __ bind(&function_entry_bytecode);
    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
    // address of the first bytecode.
    __ Move(kInterpreterBytecodeOffsetRegister, 0);
    if (next_bytecode) {
      __ LoadAddress(get_baseline_pc,
                     ExternalReference::baseline_pc_for_bytecode_offset());
    }
    __ jmp(&valid_bytecode_offset);
  }

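  // Reached when the closure has baseline code but no feedback vector yet:
  // let the runtime allocate one (Runtime::kInstallBaselineCode), then
  // retry the whole entry sequence from the top.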
  __ bind(&install_baseline_code);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ pushq(kInterpreterAccumulatorRegister);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
    __ popq(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ jmp(&start);
}

}  // namespace

void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false);
}

void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, true);
}

void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false, true);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64