1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/codegen/register.h"
6 #if V8_TARGET_ARCH_IA32
7
8 #include "src/api/api-arguments.h"
9 #include "src/base/bits-iterator.h"
10 #include "src/base/iterator.h"
11 #include "src/codegen/code-factory.h"
12 #include "src/codegen/interface-descriptors-inl.h"
13 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
14 #include "src/codegen/macro-assembler-inl.h"
15 #include "src/codegen/register-configuration.h"
16 #include "src/debug/debug.h"
17 #include "src/deoptimizer/deoptimizer.h"
18 #include "src/execution/frame-constants.h"
19 #include "src/execution/frames.h"
20 #include "src/heap/heap-inl.h"
21 #include "src/logging/counters.h"
22 #include "src/objects/cell.h"
23 #include "src/objects/foreign.h"
24 #include "src/objects/heap-number.h"
25 #include "src/objects/js-generator.h"
26 #include "src/objects/objects-inl.h"
27 #include "src/objects/smi.h"
28
29 #if V8_ENABLE_WEBASSEMBLY
30 #include "src/wasm/wasm-linkage.h"
31 #include "src/wasm/wasm-objects.h"
32 #endif // V8_ENABLE_WEBASSEMBLY
33
34 namespace v8 {
35 namespace internal {
36
37 #define __ ACCESS_MASM(masm)
38
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
40 __ Move(kJavaScriptCallExtraArg1Register,
41 Immediate(ExternalReference::Create(address)));
42 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
43 RelocInfo::CODE_TARGET);
44 }
45
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
48 // ----------- S t a t e -------------
49 // -- eax : actual argument count
50 // -- edx : new target (preserved for callee)
51 // -- edi : target function (preserved for callee)
52 // -----------------------------------
53 ASM_CODE_COMMENT(masm);
54 {
55 FrameScope scope(masm, StackFrame::INTERNAL);
56 // Push a copy of the target function, the new target and the actual
57 // argument count.
58 __ push(kJavaScriptCallTargetRegister);
59 __ push(kJavaScriptCallNewTargetRegister);
60 __ SmiTag(kJavaScriptCallArgCountRegister);
61 __ push(kJavaScriptCallArgCountRegister);
62 // Function is also the parameter to the runtime call.
63 __ push(kJavaScriptCallTargetRegister);
64
65 __ CallRuntime(function_id, 1);
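    // The runtime call returns the code object to tail-call in eax; move it
    // to ecx, which JumpCodeObject below expects as the code start register.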
66 __ mov(ecx, eax);
67
68 // Restore target function, new target and actual argument count.
69 __ pop(kJavaScriptCallArgCountRegister);
70 __ SmiUntag(kJavaScriptCallArgCountRegister);
71 __ pop(kJavaScriptCallNewTargetRegister);
72 __ pop(kJavaScriptCallTargetRegister);
73 }
74
75 static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
76 __ JumpCodeObject(ecx);
77 }
78
79 namespace {
80
81 enum class ArgumentsElementType {
82 kRaw, // Push arguments as they are.
83 kHandle // Dereference arguments before pushing.
84 };
85
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
                            Register scratch1, Register scratch2,
                            ArgumentsElementType element_type) {
89 DCHECK(!AreAliased(array, argc, scratch1, scratch2));
90 Register counter = scratch1;
91 Label loop, entry;
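  // The counter starts at the argument count minus the receiver slot(s) and
  // counts down, so arguments are pushed from the last one to the first.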
92 __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
93 __ jmp(&entry);
94 __ bind(&loop);
95 Operand value(array, counter, times_system_pointer_size, 0);
96 if (element_type == ArgumentsElementType::kHandle) {
97 DCHECK(scratch2 != no_reg);
98 __ mov(scratch2, value);
99 value = Operand(scratch2, 0);
100 }
101 __ Push(value);
102 __ bind(&entry);
103 __ dec(counter);
104 __ j(greater_equal, &loop, Label::kNear);
105 }
106
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
108 // ----------- S t a t e -------------
109 // -- eax: number of arguments
110 // -- edi: constructor function
111 // -- edx: new target
112 // -- esi: context
113 // -----------------------------------
114
115 Label stack_overflow;
116
117 __ StackOverflowCheck(eax, ecx, &stack_overflow);
118
119 // Enter a construct frame.
120 {
121 FrameScope scope(masm, StackFrame::CONSTRUCT);
122
123 // Preserve the incoming parameters on the stack.
124 __ SmiTag(eax);
125 __ push(esi);
126 __ push(eax);
127 __ SmiUntag(eax);
128
129 // TODO(victorgomes): When the arguments adaptor is completely removed, we
130 // should get the formal parameter count and copy the arguments in its
131 // correct position (including any undefined), instead of delaying this to
132 // InvokeFunction.
133
134 // Set up pointer to first argument (skip receiver).
135 __ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
136 kSystemPointerSize));
137 // Copy arguments to the expression stack.
138 // esi: Pointer to start of arguments.
139 // eax: Number of arguments.
140 Generate_PushArguments(masm, esi, eax, ecx, no_reg,
141 ArgumentsElementType::kRaw);
142 // The receiver for the builtin/api call.
143 __ PushRoot(RootIndex::kTheHoleValue);
144
145 // Call the function.
146 // eax: number of arguments (untagged)
147 // edi: constructor function
148 // edx: new target
149 // Reload context from the frame.
150 __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
151 __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
152
153 // Restore context from the frame.
154 __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
155 // Restore smi-tagged arguments count from the frame.
156 __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
157 // Leave construct frame.
158 }
159
160 // Remove caller arguments from the stack and return.
161 __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
162 TurboAssembler::kCountIncludesReceiver);
163 __ ret(0);
164
165 __ bind(&stack_overflow);
166 {
167 FrameScope scope(masm, StackFrame::INTERNAL);
168 __ CallRuntime(Runtime::kThrowStackOverflow);
169 __ int3(); // This should be unreachable.
170 }
171 }
172
173 } // namespace
174
175 // The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
177 // ----------- S t a t e -------------
178 // -- eax: number of arguments (untagged)
179 // -- edi: constructor function
180 // -- edx: new target
181 // -- esi: context
182 // -- sp[...]: constructor arguments
183 // -----------------------------------
184
185 FrameScope scope(masm, StackFrame::MANUAL);
186 // Enter a construct frame.
187 __ EnterFrame(StackFrame::CONSTRUCT);
188
189 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
190
191 // Preserve the incoming parameters on the stack.
192 __ mov(ecx, eax);
193 __ SmiTag(ecx);
194 __ Push(esi);
195 __ Push(ecx);
196 __ Push(edi);
197 __ PushRoot(RootIndex::kTheHoleValue);
198 __ Push(edx);
199
200 // ----------- S t a t e -------------
201 // -- sp[0*kSystemPointerSize]: new target
202 // -- sp[1*kSystemPointerSize]: padding
203 // -- edi and sp[2*kSystemPointerSize]: constructor function
204 // -- sp[3*kSystemPointerSize]: argument count
205 // -- sp[4*kSystemPointerSize]: context
206 // -----------------------------------
207
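  // Check whether this is a derived class constructor; derived constructors
  // do not get an implicit receiver allocated here.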
208 __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
209 __ mov(eax, FieldOperand(eax, SharedFunctionInfo::kFlagsOffset));
210 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(eax);
211 __ JumpIfIsInRange(
212 eax, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
213 static_cast<uint32_t>(FunctionKind::kDerivedConstructor), ecx,
      &not_create_implicit_receiver, Label::kNear);
215
216 // If not derived class constructor: Allocate the new receiver object.
217 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
218 eax);
219 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
220 __ jmp(&post_instantiation_deopt_entry, Label::kNear);
221
222 // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
224 __ LoadRoot(eax, RootIndex::kTheHoleValue);
225
226 // ----------- S t a t e -------------
227 // -- eax: implicit receiver
228 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
229 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
230 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
231 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
232 // -- Slot 0 / sp[4*kSystemPointerSize]: context
233 // -----------------------------------
234 // Deoptimizer enters here.
235 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
236 masm->pc_offset());
237 __ bind(&post_instantiation_deopt_entry);
238
239 // Restore new target.
240 __ Pop(edx);
241
242 // Push the allocated receiver to the stack.
243 __ Push(eax);
244
  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments, so we save it
  // in xmm0 here, since eax needs to hold the number of arguments until
  // InvokeFunction.
250 __ movd(xmm0, eax);
251
252 // Set up pointer to first argument (skip receiver).
253 __ lea(edi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
254 kSystemPointerSize));
255
256 // Restore argument count.
257 __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
258 __ SmiUntag(eax);
259
260 // Check if we have enough stack space to push all arguments.
261 // Argument count in eax. Clobbers ecx.
262 Label stack_overflow;
263 __ StackOverflowCheck(eax, ecx, &stack_overflow);
264
265 // TODO(victorgomes): When the arguments adaptor is completely removed, we
266 // should get the formal parameter count and copy the arguments in its
267 // correct position (including any undefined), instead of delaying this to
268 // InvokeFunction.
269
270 // Copy arguments to the expression stack.
271 // edi: Pointer to start of arguments.
272 // eax: Number of arguments.
273 Generate_PushArguments(masm, edi, eax, ecx, no_reg,
274 ArgumentsElementType::kRaw);
275
276 // Push implicit receiver.
277 __ movd(ecx, xmm0);
278 __ Push(ecx);
279
  // Restore and call the constructor function.
281 __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
282 __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
283
284 // ----------- S t a t e -------------
285 // -- eax: constructor result
286 // -- sp[0*kSystemPointerSize]: implicit receiver
287 // -- sp[1*kSystemPointerSize]: padding
288 // -- sp[2*kSystemPointerSize]: constructor function
289 // -- sp[3*kSystemPointerSize]: number of arguments
290 // -- sp[4*kSystemPointerSize]: context
291 // -----------------------------------
292
293 // Store offset of return address for deoptimizer.
294 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
295 masm->pc_offset());
296
297 // If the result is an object (in the ECMA sense), we should get rid
298 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
299 // on page 74.
300
301 Label check_result, use_receiver, do_throw, leave_and_return;
302 // If the result is undefined, we jump out to using the implicit receiver.
303 __ JumpIfNotRoot(eax, RootIndex::kUndefinedValue, &check_result,
304 Label::kNear);
305
306 // Throw away the result of the constructor invocation and use the
307 // on-stack receiver as the result.
308 __ bind(&use_receiver);
309 __ mov(eax, Operand(esp, 0 * kSystemPointerSize));
310 __ JumpIfRoot(eax, RootIndex::kTheHoleValue, &do_throw);
311
312 __ bind(&leave_and_return);
313 // Restore smi-tagged arguments count from the frame.
314 __ mov(edx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
315 __ LeaveFrame(StackFrame::CONSTRUCT);
316
317 // Remove caller arguments from the stack and return.
318 __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
319 TurboAssembler::kCountIncludesReceiver);
320 __ ret(0);
321
322 // Otherwise we do a smi check and fall through to check if the return value
323 // is a valid receiver.
324 __ bind(&check_result);
325
326 // If the result is a smi, it is *not* an object in the ECMA sense.
327 __ JumpIfSmi(eax, &use_receiver, Label::kNear);
328
329 // If the type of the result (stored in its map) is less than
330 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
331 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
332 __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
333 __ j(above_equal, &leave_and_return, Label::kNear);
334 __ jmp(&use_receiver, Label::kNear);
335
336 __ bind(&do_throw);
337 // Restore context from the frame.
338 __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
339 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
340 // This should be unreachable.
341 __ int3();
342
343 __ bind(&stack_overflow);
344 // Restore context from the frame.
345 __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
346 __ CallRuntime(Runtime::kThrowStackOverflow);
347 // This should be unreachable.
348 __ int3();
349 }
350
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
352 Generate_JSBuiltinsConstructStubHelper(masm);
353 }
354
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
356 FrameScope scope(masm, StackFrame::INTERNAL);
357 __ push(edi);
358 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
359 }
360
361 namespace {
362
363 // Called with the native C calling convention. The corresponding function
364 // signature is either:
365 //
366 // using JSEntryFunction = GeneratedCode<Address(
367 // Address root_register_value, Address new_target, Address target,
368 // Address receiver, intptr_t argc, Address** argv)>;
369 // or
370 // using JSEntryFunction = GeneratedCode<Address(
371 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
374 Label invoke, handler_entry, exit;
375 Label not_outermost_js, not_outermost_js_2;
376
377 {
378 NoRootArrayScope uninitialized_root_register(masm);
379
380 // Set up frame.
381 __ push(ebp);
382 __ mov(ebp, esp);
383
    // Push the stack frame type marker.
385 __ push(Immediate(StackFrame::TypeToMarker(type)));
386 // Reserve a slot for the context. It is filled after the root register has
387 // been set up.
388 __ AllocateStackSpace(kSystemPointerSize);
389 // Save callee-saved registers (C calling conventions).
390 __ push(edi);
391 __ push(esi);
392 __ push(ebx);
393
394 // Initialize the root register based on the given Isolate* argument.
395 // C calling convention. The first argument is passed on the stack.
396 __ mov(kRootRegister,
397 Operand(ebp, EntryFrameConstants::kRootRegisterValueOffset));
398 }
399
400 // Save copies of the top frame descriptor on the stack.
401 ExternalReference c_entry_fp = ExternalReference::Create(
402 IsolateAddressId::kCEntryFPAddress, masm->isolate());
403 __ push(__ ExternalReferenceAsOperand(c_entry_fp, edi));
404
405 // Clear c_entry_fp, now we've pushed its previous value to the stack.
406 // If the c_entry_fp is not already zero and we don't clear it, the
407 // SafeStackFrameIterator will assume we are executing C++ and miss the JS
408 // frames on top.
409 __ mov(__ ExternalReferenceAsOperand(c_entry_fp, edi), Immediate(0));
410
411 // Store the context address in the previously-reserved slot.
412 ExternalReference context_address = ExternalReference::Create(
413 IsolateAddressId::kContextAddress, masm->isolate());
414 __ mov(edi, __ ExternalReferenceAsOperand(context_address, edi));
415 static constexpr int kOffsetToContextSlot = -2 * kSystemPointerSize;
416 __ mov(Operand(ebp, kOffsetToContextSlot), edi);
417
418 // If this is the outermost JS call, set js_entry_sp value.
419 ExternalReference js_entry_sp = ExternalReference::Create(
420 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
421 __ cmp(__ ExternalReferenceAsOperand(js_entry_sp, edi), Immediate(0));
  __ j(not_equal, &not_outermost_js, Label::kNear);
423 __ mov(__ ExternalReferenceAsOperand(js_entry_sp, edi), ebp);
424 __ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
425 __ jmp(&invoke, Label::kNear);
  __ bind(&not_outermost_js);
427 __ push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
428
429 // Jump to a faked try block that does the invoke, with a faked catch
430 // block that sets the pending exception.
431 __ jmp(&invoke);
432 __ bind(&handler_entry);
433
434 // Store the current pc as the handler offset. It's used later to create the
435 // handler table.
436 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
437
438 // Caught exception: Store result (exception) in the pending exception
439 // field in the JSEnv and return a failure sentinel.
440 ExternalReference pending_exception = ExternalReference::Create(
441 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
442 __ mov(__ ExternalReferenceAsOperand(pending_exception, edi), eax);
443 __ Move(eax, masm->isolate()->factory()->exception());
444 __ jmp(&exit);
445
446 // Invoke: Link this frame into the handler chain.
447 __ bind(&invoke);
448 __ PushStackHandler(edi);
449
450 // Invoke the function by calling through JS entry trampoline builtin and
451 // pop the faked function when we return.
452 Handle<Code> trampoline_code =
453 masm->isolate()->builtins()->code_handle(entry_trampoline);
454 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
455
456 // Unlink this frame from the handler chain.
457 __ PopStackHandler(edi);
458
459 __ bind(&exit);
460
461 // Check if the current stack frame is marked as the outermost JS frame.
462 __ pop(edi);
463 __ cmp(edi, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ j(not_equal, &not_outermost_js_2);
465 __ mov(__ ExternalReferenceAsOperand(js_entry_sp, edi), Immediate(0));
  __ bind(&not_outermost_js_2);
467
468 // Restore the top frame descriptor from the stack.
469 __ pop(__ ExternalReferenceAsOperand(c_entry_fp, edi));
470
471 // Restore callee-saved registers (C calling conventions).
472 __ pop(ebx);
473 __ pop(esi);
474 __ pop(edi);
475 __ add(esp, Immediate(2 * kSystemPointerSize)); // remove markers
476
477 // Restore frame pointer and return.
478 __ pop(ebp);
479 __ ret(0);
480 }
481
482 } // namespace
483
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
485 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
486 }
487
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
489 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
490 Builtin::kJSConstructEntryTrampoline);
491 }
492
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
494 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
495 Builtin::kRunMicrotasksTrampoline);
496 }
497
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
500 {
501 FrameScope scope(masm, StackFrame::INTERNAL);
502
503 const Register scratch1 = edx;
504 const Register scratch2 = edi;
505
506 // Setup the context (we need to use the caller context from the isolate).
507 ExternalReference context_address = ExternalReference::Create(
508 IsolateAddressId::kContextAddress, masm->isolate());
509 __ mov(esi, __ ExternalReferenceAsOperand(context_address, scratch1));
510
511 // Load the previous frame pointer (edx) to access C arguments
512 __ mov(scratch1, Operand(ebp, 0));
513
514 // Push the function.
515 __ push(Operand(scratch1, EntryFrameConstants::kFunctionArgOffset));
516
517 // Load the number of arguments and setup pointer to the arguments.
518 __ mov(eax, Operand(scratch1, EntryFrameConstants::kArgcOffset));
519 __ mov(scratch1, Operand(scratch1, EntryFrameConstants::kArgvOffset));
520
521 // Check if we have enough stack space to push all arguments.
522 // Argument count in eax. Clobbers ecx.
523 Label enough_stack_space, stack_overflow;
524 __ StackOverflowCheck(eax, ecx, &stack_overflow);
525 __ jmp(&enough_stack_space);
526
527 __ bind(&stack_overflow);
528 __ CallRuntime(Runtime::kThrowStackOverflow);
529 // This should be unreachable.
530 __ int3();
531
532 __ bind(&enough_stack_space);
533
534 // Copy arguments to the stack.
535 // scratch1 (edx): Pointer to start of arguments.
536 // eax: Number of arguments.
537 Generate_PushArguments(masm, scratch1, eax, ecx, scratch2,
538 ArgumentsElementType::kHandle);
539
540 // Load the previous frame pointer to access C arguments
541 __ mov(scratch2, Operand(ebp, 0));
542
543 // Push the receiver onto the stack.
544 __ push(Operand(scratch2, EntryFrameConstants::kReceiverArgOffset));
545
546 // Get the new.target and function from the frame.
547 __ mov(edx, Operand(scratch2, EntryFrameConstants::kNewTargetArgOffset));
548 __ mov(edi, Operand(scratch2, EntryFrameConstants::kFunctionArgOffset));
549
550 // Invoke the code.
551 Handle<Code> builtin = is_construct
552 ? BUILTIN_CODE(masm->isolate(), Construct)
553 : masm->isolate()->builtins()->Call();
554 __ Call(builtin, RelocInfo::CODE_TARGET);
555
    // Exit the internal frame. Notice that this also removes the empty
    // context and the function left on the stack by the code
    // invocation.
559 }
560 __ ret(0);
561 }
562
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
564 Generate_JSEntryTrampolineHelper(masm, false);
565 }
566
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
568 Generate_JSEntryTrampolineHelper(masm, true);
569 }
570
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // This expects two C++ function parameters passed by Invoke() in
  // execution.cc. The microtask queue argument is read from the entry frame
  // and moved into the register expected by the RunMicrotasks descriptor.
575 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(),
576 Operand(ebp, EntryFrameConstants::kMicrotaskQueueArgOffset));
577 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
578 }
579
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
583 Label done;
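  // If the SharedFunctionInfo's data is an InterpreterData wrapper, unwrap it
  // to get the BytecodeArray; otherwise sfi_data already holds the bytecode.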
584
585 __ CmpObjectType(sfi_data, INTERPRETER_DATA_TYPE, scratch1);
586 __ j(not_equal, &done, Label::kNear);
587 __ mov(sfi_data,
588 FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
589
590 __ bind(&done);
591 }
592
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
                                 Register scratch) {
595 DCHECK(!AreAliased(code, scratch));
596 // Verify that the code kind is baseline code via the CodeKind.
597 __ mov(scratch, FieldOperand(code, Code::kFlagsOffset));
598 __ DecodeField<Code::KindField>(scratch);
599 __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
600 __ Assert(equal, AbortReason::kExpectedBaselineData);
601 }
602
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                     Register sfi_data,
                                                     Register scratch1,
                                                     Label* is_baseline) {
607 ASM_CODE_COMMENT(masm);
608 Label done;
609 __ LoadMap(scratch1, sfi_data);
610
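  // Baseline code is stored directly (as CodeT) in the function data slot;
  // detect that case first.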
611 __ CmpInstanceType(scratch1, CODET_TYPE);
612 if (FLAG_debug_code) {
613 Label not_baseline;
    __ j(not_equal, &not_baseline);
615 AssertCodeIsBaseline(masm, sfi_data, scratch1);
616 __ j(equal, is_baseline);
    __ bind(&not_baseline);
618 } else {
619 __ j(equal, is_baseline);
620 }
621
622 __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
623 __ j(not_equal, &done, Label::kNear);
624
625 __ mov(sfi_data,
626 FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
627
628 __ bind(&done);
629 }
630
631 // static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
633 // ----------- S t a t e -------------
634 // -- eax : the value to pass to the generator
635 // -- edx : the JSGeneratorObject to resume
636 // -- esp[0] : return address
637 // -----------------------------------
638 // Store input value into generator object.
639 __ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
640 Register object = WriteBarrierDescriptor::ObjectRegister();
641 __ mov(object, edx);
642 __ RecordWriteField(object, JSGeneratorObject::kInputOrDebugPosOffset, eax,
643 WriteBarrierDescriptor::SlotAddressRegister(),
644 SaveFPRegsMode::kIgnore);
645 // Check that edx is still valid, RecordWrite might have clobbered it.
646 __ AssertGeneratorObject(edx);
647
648 // Load suspended function and context.
649 __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
650 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
651
652 // Flood function if we are stepping.
653 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
654 Label stepping_prepared;
655 ExternalReference debug_hook =
656 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
657 __ cmpb(__ ExternalReferenceAsOperand(debug_hook, ecx), Immediate(0));
658 __ j(not_equal, &prepare_step_in_if_stepping);
659
660 // Flood function if we need to continue stepping in the suspended generator.
661 ExternalReference debug_suspended_generator =
662 ExternalReference::debug_suspended_generator_address(masm->isolate());
663 __ cmp(edx, __ ExternalReferenceAsOperand(debug_suspended_generator, ecx));
664 __ j(equal, &prepare_step_in_suspended_generator);
665 __ bind(&stepping_prepared);
666
667 // Check the stack for overflow. We are not trying to catch interruptions
668 // (i.e. debug break and preemption) here, so check the "real stack limit".
669 Label stack_overflow;
670 __ CompareStackLimit(esp, StackLimitKind::kRealStackLimit);
671 __ j(below, &stack_overflow);
672
673 // Pop return address.
674 __ PopReturnAddressTo(eax);
675
676 // ----------- S t a t e -------------
677 // -- eax : return address
678 // -- edx : the JSGeneratorObject to resume
679 // -- edi : generator function
680 // -- esi : generator context
681 // -----------------------------------
682
683 {
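    // Spill ebx so it can temporarily hold the generator's parameters and
    // registers array below; it is restored from xmm0 afterwards.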
684 __ movd(xmm0, ebx);
685
686 // Copy the function arguments from the generator object's register file.
687 __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
688 __ movzx_w(ecx, FieldOperand(
689 ecx, SharedFunctionInfo::kFormalParameterCountOffset));
690 __ dec(ecx); // Exclude receiver.
691 __ mov(ebx,
692 FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
693 {
694 Label done_loop, loop;
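      // Push the parameters from the register file, from the last one down to
      // the first.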
695 __ bind(&loop);
696 __ dec(ecx);
697 __ j(less, &done_loop);
698 __ Push(
699 FieldOperand(ebx, ecx, times_tagged_size, FixedArray::kHeaderSize));
700 __ jmp(&loop);
701 __ bind(&done_loop);
702 }
703
704 // Push receiver.
705 __ Push(FieldOperand(edx, JSGeneratorObject::kReceiverOffset));
706
707 // Restore registers.
708 __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
709 __ movd(ebx, xmm0);
710 }
711
712 // Underlying function needs to have bytecode available.
713 if (FLAG_debug_code) {
714 Label is_baseline, ok;
715 __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
716 __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
717 __ Push(eax);
718 GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
719 __ Pop(eax);
720
721 __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
722 __ Assert(equal, AbortReason::kMissingBytecodeArray);
723 __ jmp(&ok);
724
725 __ bind(&is_baseline);
726 __ Pop(eax);
727 __ CmpObjectType(ecx, CODET_TYPE, ecx);
728 __ Assert(equal, AbortReason::kMissingBytecodeArray);
729
730 __ bind(&ok);
731 }
732
733 // Resume (Ignition/TurboFan) generator object.
734 {
735 __ PushReturnAddressFrom(eax);
736 __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
737 __ movzx_w(eax, FieldOperand(
738 eax, SharedFunctionInfo::kFormalParameterCountOffset));
739 // We abuse new.target both to indicate that this is a resume call and to
740 // pass in the generator object. In ordinary calls, new.target is always
741 // undefined because generator functions are non-constructable.
742 static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
743 __ mov(ecx, FieldOperand(edi, JSFunction::kCodeOffset));
744 __ JumpCodeObject(ecx);
745 }
746
747 __ bind(&prepare_step_in_if_stepping);
748 {
749 FrameScope scope(masm, StackFrame::INTERNAL);
750 __ Push(edx);
751 __ Push(edi);
752 // Push hole as receiver since we do not use it for stepping.
753 __ PushRoot(RootIndex::kTheHoleValue);
754 __ CallRuntime(Runtime::kDebugOnFunctionCall);
755 __ Pop(edx);
756 __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
757 }
758 __ jmp(&stepping_prepared);
759
760 __ bind(&prepare_step_in_suspended_generator);
761 {
762 FrameScope scope(masm, StackFrame::INTERNAL);
763 __ Push(edx);
764 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
765 __ Pop(edx);
766 __ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
767 }
768 __ jmp(&stepping_prepared);
769
770 __ bind(&stack_overflow);
771 {
772 FrameScope scope(masm, StackFrame::INTERNAL);
773 __ CallRuntime(Runtime::kThrowStackOverflow);
774 __ int3(); // This should be unreachable.
775 }
776 }
777
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register value,
                                                Register slot_address) {
783 ASM_CODE_COMMENT(masm);
784 // Store the optimized code in the closure.
785 __ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
786 __ mov(value, optimized_code); // Write barrier clobbers slot_address below.
787 __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
788 SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
789 SmiCheck::kOmit);
790 }
791
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
794 ASM_CODE_COMMENT(masm);
795 Register params_size = scratch1;
796 // Get the size of the formal parameters (in bytes).
797 __ mov(params_size,
798 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
799 __ mov(params_size,
800 FieldOperand(params_size, BytecodeArray::kParameterSizeOffset));
801
802 Register actual_params_size = scratch2;
803 // Compute the size of the actual parameters (in bytes).
804 __ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
805 __ lea(actual_params_size,
806 Operand(actual_params_size, times_system_pointer_size, 0));
807
808 // If actual is bigger than formal, then we should use it to free up the stack
809 // arguments.
810 Label corrected_args_count;
811 __ cmp(params_size, actual_params_size);
812 __ j(greater_equal, &corrected_args_count, Label::kNear);
813 __ mov(params_size, actual_params_size);
814 __ bind(&corrected_args_count);
815
816 // Leave the frame (also dropping the register file).
817 __ leave();
818
819 // Drop receiver + arguments.
820 __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
821 TurboAssembler::kCountIncludesReceiver);
822 }
823
824 // Tail-call |function_id| if |actual_state| == |expected_state|
static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
                                         Register actual_state,
                                         TieringState expected_state,
                                         Runtime::FunctionId function_id) {
829 ASM_CODE_COMMENT(masm);
830 Label no_match;
831 __ cmp(actual_state, static_cast<int>(expected_state));
832 __ j(not_equal, &no_match, Label::kNear);
833 GenerateTailCallToReturnedCode(masm, function_id);
834 __ bind(&no_match);
835 }
836
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry) {
839 // ----------- S t a t e -------------
840 // -- eax : actual argument count
841 // -- edx : new target (preserved for callee if needed, and caller)
842 // -- edi : target function (preserved for callee if needed, and caller)
843 // -----------------------------------
844 ASM_CODE_COMMENT(masm);
845 DCHECK(!AreAliased(edx, edi, optimized_code_entry));
846
847 Register closure = edi;
848 __ Push(eax);
849 __ Push(edx);
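  // eax (actual argument count) and edx (new target) are clobbered below, so
  // preserve them here and restore them before the tail call.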
850
851 Label heal_optimized_code_slot;
852
853 // If the optimized code is cleared, go to runtime to update the optimization
854 // marker field.
855 __ LoadWeakValue(optimized_code_entry, &heal_optimized_code_slot);
856
857 // Check if the optimized code is marked for deopt. If it is, bailout to a
858 // given label.
859 __ mov(eax,
860 FieldOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
861 __ test(FieldOperand(eax, CodeDataContainer::kKindSpecificFlagsOffset),
862 Immediate(1 << Code::kMarkedForDeoptimizationBit));
863 __ j(not_zero, &heal_optimized_code_slot);
864
865 // Optimized code is good, get it into the closure and link the closure
866 // into the optimized functions list, then tail call the optimized code.
867 __ Push(optimized_code_entry);
868 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, edx,
869 ecx);
870 static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
871 __ Pop(optimized_code_entry);
872 __ LoadCodeObjectEntry(ecx, optimized_code_entry);
873 __ Pop(edx);
874 __ Pop(eax);
875 __ jmp(ecx);
876
877 // Optimized code slot contains deoptimized code or code is cleared and
878 // optimized code marker isn't updated. Evict the code, update the marker
879 // and re-enter the closure's code.
880 __ bind(&heal_optimized_code_slot);
881 __ Pop(edx);
882 __ Pop(eax);
883 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
884 }
885
static void MaybeOptimizeCode(MacroAssembler* masm, Register tiering_state) {
887 // ----------- S t a t e -------------
888 // -- eax : actual argument count
889 // -- edx : new target (preserved for callee if needed, and caller)
890 // -- edi : target function (preserved for callee if needed, and caller)
891 // -- tiering_state : a Smi containing a non-zero tiering state.
892 // -----------------------------------
893 ASM_CODE_COMMENT(masm);
894 DCHECK(!AreAliased(edx, edi, tiering_state));
895
896 TailCallRuntimeIfStateEquals(masm, tiering_state,
897 TieringState::kRequestTurbofan_Synchronous,
898 Runtime::kCompileTurbofan_Synchronous);
899 TailCallRuntimeIfStateEquals(masm, tiering_state,
900 TieringState::kRequestTurbofan_Concurrent,
901 Runtime::kCompileTurbofan_Concurrent);
902
903 __ int3();
904 }
905
906 // Advance the current bytecode offset. This simulates what all bytecode
907 // handlers do upon completion of the underlying operation. Will bail out to a
908 // label if the bytecode (without prefix) is a return bytecode. Will not advance
909 // the bytecode offset if the current bytecode is a JumpLoop, instead just
910 // re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register scratch1, Register scratch2,
                                          Register scratch3, Label* if_return) {
916 ASM_CODE_COMMENT(masm);
917 Register bytecode_size_table = scratch1;
918 Register bytecode = scratch2;
919
920 // The bytecode offset value will be increased by one in wide and extra wide
921 // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
922 // will restore the original bytecode. In order to simplify the code, we have
923 // a backup of it.
924 Register original_bytecode_offset = scratch3;
925 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
926 bytecode, original_bytecode_offset));
927 __ Move(bytecode_size_table,
928 Immediate(ExternalReference::bytecode_size_table_address()));
929
930 // Load the current bytecode.
931 __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
932 __ Move(original_bytecode_offset, bytecode_offset);
933
934 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
935 Label process_bytecode, extra_wide;
936 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
937 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
938 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
939 STATIC_ASSERT(3 ==
940 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
941 __ cmp(bytecode, Immediate(0x3));
942 __ j(above, &process_bytecode, Label::kNear);
943 // The code to load the next bytecode is common to both wide and extra wide.
944 // We can hoist them up here. inc has to happen before test since it
945 // modifies the ZF flag.
946 __ inc(bytecode_offset);
947 __ test(bytecode, Immediate(0x1));
948 __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
949 __ j(not_equal, &extra_wide, Label::kNear);
950
951 // Load the next bytecode and update table to the wide scaled table.
952 __ add(bytecode_size_table,
953 Immediate(kByteSize * interpreter::Bytecodes::kBytecodeCount));
954 __ jmp(&process_bytecode, Label::kNear);
955
956 __ bind(&extra_wide);
957 // Update table to the extra wide scaled table.
958 __ add(bytecode_size_table,
959 Immediate(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
960
961 __ bind(&process_bytecode);
962
963 // Bailout to the return label if this is a return bytecode.
964 #define JUMP_IF_EQUAL(NAME) \
965 __ cmp(bytecode, \
966 Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
967 __ j(equal, if_return);
968 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
969 #undef JUMP_IF_EQUAL
970
971 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
972 // of the loop.
973 Label end, not_jump_loop;
974 __ cmp(bytecode,
975 Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  __ j(not_equal, &not_jump_loop, Label::kNear);
977 // If this is a wide or extra wide JumpLoop, we need to restore the original
978 // bytecode_offset since we might have increased it to skip the wide /
979 // extra-wide prefix bytecode.
980 __ Move(bytecode_offset, original_bytecode_offset);
981 __ jmp(&end, Label::kNear);
982
  __ bind(&not_jump_loop);
984 // Otherwise, load the size of the current bytecode and advance the offset.
985 __ movzx_b(bytecode_size_table,
986 Operand(bytecode_size_table, bytecode, times_1, 0));
987 __ add(bytecode_offset, bytecode_size_table);
988
989 __ bind(&end);
990 }
991
992 // Read off the optimization state in the feedback vector and check if there
993 // is optimized code or a tiering state that needs to be processed.
994 // Registers optimization_state and feedback_vector must be aliased.
static void LoadTieringStateAndJumpIfNeedsProcessing(
    MacroAssembler* masm, Register optimization_state,
    XMMRegister saved_feedback_vector, Label* has_optimized_code_or_state) {
998 ASM_CODE_COMMENT(masm);
999 Register feedback_vector = optimization_state;
1000
1001 // Store feedback_vector. We may need it if we need to load the optimize code
1002 // slot entry.
1003 __ movd(saved_feedback_vector, feedback_vector);
1004 __ mov(optimization_state,
1005 FieldOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1006
  // Check if there is optimized code or a tiering state that needs to be
1008 // processed.
1009 __ test(optimization_state,
1010 Immediate(
1011 FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1012 __ j(not_zero, has_optimized_code_or_state);
1013 }
1014
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    XMMRegister saved_feedback_vector) {
1018 ASM_CODE_COMMENT(masm);
1019 Label maybe_has_optimized_code;
1020 // Check if optimized code is available
1021 __ test(optimization_state,
1022 Immediate(FeedbackVector::kTieringStateIsAnyRequestMask));
1023 __ j(zero, &maybe_has_optimized_code);
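  // A tiering request is pending: decode it and tail-call the corresponding
  // runtime function. MaybeOptimizeCode does not return here.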
1024
1025 Register tiering_state = optimization_state;
1026 __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1027 MaybeOptimizeCode(masm, tiering_state);
1028
1029 __ bind(&maybe_has_optimized_code);
1030 Register optimized_code_entry = tiering_state;
1031 Register feedback_vector = tiering_state;
1032 __ movd(feedback_vector, saved_feedback_vector); // Restore feedback vector.
1033 __ mov(
1034 optimized_code_entry,
1035 FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
1036 TailCallOptimizedCodeSlot(masm, optimized_code_entry);
1037 }
1038
1039 namespace {
1040
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
                                 Register bytecode_array) {
1043 // Reset the bytecode age and OSR state (optimized to a single write).
1044 static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1045 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1046 __ mov(FieldOperand(bytecode_array,
1047 BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
1048 Immediate(0));
1049 }
1050
1051 } // namespace
1052
1053 // Generate code for entering a JS function with the interpreter.
1054 // On entry to the function the receiver and arguments have been pushed on the
1055 // stack left to right.
1056 //
1057 // The live registers are:
1058 // o eax: actual argument count
1059 // o edi: the JS function object being called
1060 // o edx: the incoming new target or generator object
1061 // o esi: our context
1062 // o ebp: the caller's frame pointer
1063 // o esp: stack pointer (pointing to return address)
1064 //
1065 // The function builds an interpreter frame. See InterpreterFrameConstants in
1066 // frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1068 Register closure = edi;
1069
1070 __ movd(xmm0, eax); // Spill actual argument count.
1071
1072 // The bytecode array could have been flushed from the shared function info,
1073 // if so, call into CompileLazy.
1074 __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
1075 __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
1076
1077 Label is_baseline;
1078 GetSharedFunctionInfoBytecodeOrBaseline(masm, ecx, eax, &is_baseline);
1079
1080 Label compile_lazy;
1081 __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, eax);
1082 __ j(not_equal, &compile_lazy);
1083
1084 Register feedback_vector = ecx;
1085 Label push_stack_frame;
1086 // Load feedback vector and check if it is valid. If valid, check for
1087 // optimized code and update invocation count. Otherwise, setup the stack
1088 // frame.
1089 __ mov(feedback_vector,
1090 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1091 __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1092 __ mov(eax, FieldOperand(feedback_vector, HeapObject::kMapOffset));
1093 __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
1094 __ j(not_equal, &push_stack_frame);
1095
1096 // Load the optimization state from the feedback vector and re-use the
1097 // register.
1098 Label has_optimized_code_or_state;
1099 Register optimization_state = ecx;
1100 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
1101 &has_optimized_code_or_state);
1102
1103 Label not_optimized;
  __ bind(&not_optimized);
1105
1106 // Load the feedback vector and increment the invocation count.
1107 __ mov(feedback_vector,
1108 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1109 __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1110 __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1111
1112 // Open a frame scope to indicate that there is a frame on the stack. The
1113 // MANUAL indicates that the scope shouldn't actually generate code to set
1114 // up the frame (that is done below).
1115 __ bind(&push_stack_frame);
1116 FrameScope frame_scope(masm, StackFrame::MANUAL);
1117 __ push(ebp); // Caller's frame pointer.
1118 __ mov(ebp, esp);
1119 __ push(kContextRegister); // Callee's context.
1120 __ push(kJavaScriptCallTargetRegister); // Callee's JS function.
1121 __ movd(kJavaScriptCallArgCountRegister, xmm0);
1122 __ push(kJavaScriptCallArgCountRegister); // Actual argument count.
1123
1124 // Get the bytecode array from the function object and load it into
1125 // kInterpreterBytecodeArrayRegister.
1126 __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
1127 __ mov(kInterpreterBytecodeArrayRegister,
1128 FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
1129 GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, eax);
1130
1131 // Check function data field is actually a BytecodeArray object.
1132 if (FLAG_debug_code) {
1133 __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
1134 __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1135 eax);
1136 __ Assert(
1137 equal,
1138 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1139 }
1140
1141 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1142
1143 // Push bytecode array.
1144 __ push(kInterpreterBytecodeArrayRegister);
1145 // Push Smi tagged initial bytecode array offset.
1146 __ push(Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag)));
1147
1148 // Allocate the local and temporary register file on the stack.
1149 Label stack_overflow;
1150 {
1151 // Load frame size from the BytecodeArray object.
1152 Register frame_size = ecx;
1153 __ mov(frame_size, FieldOperand(kInterpreterBytecodeArrayRegister,
1154 BytecodeArray::kFrameSizeOffset));
1155
1156 // Do a stack check to ensure we don't go over the limit.
1157 __ mov(eax, esp);
1158 __ sub(eax, frame_size);
1159 __ CompareStackLimit(eax, StackLimitKind::kRealStackLimit);
1160 __ j(below, &stack_overflow);
1161
1162 // If ok, push undefined as the initial value for all register file entries.
1163 Label loop_header;
1164 Label loop_check;
1165 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1166 __ jmp(&loop_check);
1167 __ bind(&loop_header);
1168 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1169 __ push(kInterpreterAccumulatorRegister);
1170 // Continue loop if not done.
1171 __ bind(&loop_check);
1172 __ sub(frame_size, Immediate(kSystemPointerSize));
1173 __ j(greater_equal, &loop_header);
1174 }
1175
1176 // If the bytecode array has a valid incoming new target or generator object
1177 // register, initialize it with incoming value which was passed in edx.
1178 Label no_incoming_new_target_or_generator_register;
1179 __ mov(ecx, FieldOperand(
1180 kInterpreterBytecodeArrayRegister,
1181 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1182 __ test(ecx, ecx);
1183 __ j(zero, &no_incoming_new_target_or_generator_register);
1184 __ mov(Operand(ebp, ecx, times_system_pointer_size, 0), edx);
1185 __ bind(&no_incoming_new_target_or_generator_register);
1186
1187 // Perform interrupt stack check.
1188 // TODO(solanes): Merge with the real stack limit check above.
1189 Label stack_check_interrupt, after_stack_check_interrupt;
1190 __ CompareStackLimit(esp, StackLimitKind::kInterruptStackLimit);
1191 __ j(below, &stack_check_interrupt);
1192 __ bind(&after_stack_check_interrupt);
1193
1194 // The accumulator is already loaded with undefined.
1195
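  // Load the initial bytecode offset, pointing at the first bytecode past the
  // BytecodeArray header (adjusted for the heap object tag).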
1196 __ mov(kInterpreterBytecodeOffsetRegister,
1197 Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1198
1199 // Load the dispatch table into a register and dispatch to the bytecode
1200 // handler at the current bytecode offset.
1201 Label do_dispatch;
1202 __ bind(&do_dispatch);
1203 __ Move(kInterpreterDispatchTableRegister,
1204 Immediate(ExternalReference::interpreter_dispatch_table_address(
1205 masm->isolate())));
1206 __ movzx_b(ecx, Operand(kInterpreterBytecodeArrayRegister,
1207 kInterpreterBytecodeOffsetRegister, times_1, 0));
1208 __ mov(kJavaScriptCallCodeStartRegister,
1209 Operand(kInterpreterDispatchTableRegister, ecx,
1210 times_system_pointer_size, 0));
1211 __ call(kJavaScriptCallCodeStartRegister);
1212 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1213
1214 // Any returns to the entry trampoline are either due to the return bytecode
1215 // or the interpreter tail calling a builtin and then a dispatch.
1216
1217 // Get bytecode array and bytecode offset from the stack frame.
1218 __ mov(kInterpreterBytecodeArrayRegister,
1219 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1220 __ mov(kInterpreterBytecodeOffsetRegister,
1221 Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1222 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1223
1224 // Either return, or advance to the next bytecode and dispatch.
1225 Label do_return;
1226 __ Push(eax);
1227 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1228 kInterpreterBytecodeOffsetRegister, ecx,
1229 kInterpreterDispatchTableRegister, eax,
1230 &do_return);
1231 __ Pop(eax);
1232 __ jmp(&do_dispatch);
1233
1234 __ bind(&do_return);
1235 __ Pop(eax);
1236 // The return value is in eax.
1237 LeaveInterpreterFrame(masm, edx, ecx);
1238 __ ret(0);
1239
1240 __ bind(&stack_check_interrupt);
1241 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1242 // for the call to the StackGuard.
1243 __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1244 Immediate(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1245 kFunctionEntryBytecodeOffset)));
1246 __ CallRuntime(Runtime::kStackGuard);
1247
1248 // After the call, restore the bytecode array, bytecode offset and accumulator
1249 // registers again. Also, restore the bytecode offset in the stack to its
1250 // previous value.
1251 __ mov(kInterpreterBytecodeArrayRegister,
1252 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1253 __ mov(kInterpreterBytecodeOffsetRegister,
1254 Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1255 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1256
1257 // It's ok to clobber kInterpreterBytecodeOffsetRegister since we are setting
1258 // it again after continuing.
1259 __ SmiTag(kInterpreterBytecodeOffsetRegister);
1260 __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
1261 kInterpreterBytecodeOffsetRegister);
1262
1263 __ jmp(&after_stack_check_interrupt);
1264
1265 __ bind(&has_optimized_code_or_state);
1266 {
1267 // Restore actual argument count.
1268 __ movd(eax, xmm0);
1269 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1270 xmm1);
1271 }
1272
1273 __ bind(&compile_lazy);
1274 // Restore actual argument count.
1275 __ movd(eax, xmm0);
1276 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1277
1278 __ bind(&is_baseline);
1279 {
1280 __ movd(xmm2, ecx); // Save baseline data.
1281 // Load the feedback vector from the closure.
1282 __ mov(feedback_vector,
1283 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1284 __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1285
1286 Label install_baseline_code;
1287 // Check if feedback vector is valid. If not, call prepare for baseline to
1288 // allocate it.
1289 __ LoadMap(eax, feedback_vector);
1290 __ CmpInstanceType(eax, FEEDBACK_VECTOR_TYPE);
1291 __ j(not_equal, &install_baseline_code);
1292
1293 // Check the tiering state.
1294 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state, xmm1,
1295 &has_optimized_code_or_state);
1296
1297 // Load the baseline code into the closure.
1298 __ movd(ecx, xmm2);
1299 static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
1300 __ push(edx); // Spill.
1301 __ push(ecx);
1302 __ Push(xmm0, eax); // Save the argument count (currently in xmm0).
1303 ReplaceClosureCodeWithOptimizedCode(masm, ecx, closure, eax, ecx);
1304 __ pop(eax); // Restore the argument count.
1305 __ pop(ecx);
1306 __ pop(edx);
1307 __ JumpCodeObject(ecx);
1308
1309 __ bind(&install_baseline_code);
1310 __ movd(eax, xmm0); // Recover argument count.
1311 GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1312 }
1313
1314 __ bind(&stack_overflow);
1315 __ CallRuntime(Runtime::kThrowStackOverflow);
1316 __ int3(); // Should not return.
1317 }
1318
static void GenerateInterpreterPushArgs(MacroAssembler* masm,
                                        Register array_limit,
                                        Register start_address) {
1322 // ----------- S t a t e -------------
1323 // -- start_address : Pointer to the last argument in the args array.
1324 // -- array_limit : Pointer to one before the first argument in the
1325 // args array.
1326 // -----------------------------------
1327 ASM_CODE_COMMENT(masm);
1328 Label loop_header, loop_check;
1329 __ jmp(&loop_check);
1330 __ bind(&loop_header);
1331 __ Push(Operand(array_limit, 0));
1332 __ bind(&loop_check);
1333 __ add(array_limit, Immediate(kSystemPointerSize));
1334 __ cmp(array_limit, start_address);
1335 __ j(below_equal, &loop_header, Label::kNear);
1336 }
1337
1338 // static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
1342 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1343 // ----------- S t a t e -------------
1344 // -- eax : the number of arguments
1345 // -- ecx : the address of the first argument to be pushed. Subsequent
1346 // arguments should be consecutive above this, in the same order as
1347 // they are to be pushed onto the stack.
1348 // -- edi : the target to call (can be any Object).
1349 // -----------------------------------
1350
1351 const Register scratch = edx;
1352 const Register argv = ecx;
1353
1354 Label stack_overflow;
1355 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1356 // The spread argument should not be pushed.
1357 __ dec(eax);
1358 }
1359
1360 // Add a stack check before pushing the arguments.
1361 __ StackOverflowCheck(eax, scratch, &stack_overflow, true);
1362 __ movd(xmm0, eax); // Spill number of arguments.
1363
1364 // Compute the expected number of arguments.
1365 __ mov(scratch, eax);
1366 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1367 __ dec(scratch); // Exclude receiver.
1368 }
1369
1370 // Pop return address to allow tail-call after pushing arguments.
1371 __ PopReturnAddressTo(eax);
1372
1373 // Find the address of the last argument.
1374 __ shl(scratch, kSystemPointerSizeLog2);
1375 __ neg(scratch);
1376 __ add(scratch, argv);
1377
1378 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1379 __ movd(xmm1, scratch);
1380 GenerateInterpreterPushArgs(masm, scratch, argv);
1381 // Pass the spread in the register ecx.
1382 __ movd(ecx, xmm1);
1383 __ mov(ecx, Operand(ecx, 0));
1384 } else {
1385 GenerateInterpreterPushArgs(masm, scratch, argv);
1386 }
1387
1388 // Push "undefined" as the receiver arg if we need to.
1389 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1390 __ PushRoot(RootIndex::kUndefinedValue);
1391 }
1392
1393 __ PushReturnAddressFrom(eax);
1394 __ movd(eax, xmm0); // Restore number of arguments.
1395
1396 // Call the target.
1397 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1398 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1399 RelocInfo::CODE_TARGET);
1400 } else {
1401 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1402 RelocInfo::CODE_TARGET);
1403 }
1404
1405 __ bind(&stack_overflow);
1406 {
1407 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1408
1409 // This should be unreachable.
1410 __ int3();
1411 }
1412 }
1413
1414 namespace {
1415
1416 // This function modifies start_addr, and only reads the contents of num_args
1417 // register. scratch1 and scratch2 are used as temporary registers.
1418 void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
1419 MacroAssembler* masm, Register num_args, Register start_addr,
1420 Register scratch1, Register scratch2, int num_slots_to_move,
1421 Label* stack_overflow) {
1422 // We have to move return address and the temporary registers above it
1423 // before we can copy arguments onto the stack. To achieve this:
1424 // Step 1: Increment the stack pointer by num_args + 1 for receiver (if it is
1425 // not included in argc already). Step 2: Move the return address and values
1426 // around it to the top of stack. Step 3: Copy the arguments into the correct
1427 // locations.
1428 // current stack =====> required stack layout
1429 // | | | return addr | (2) <-- esp (1)
1430 // | | | addtl. slot |
1431 // | | | arg N | (3)
1432 // | | | .... |
1433 // | | | arg 1 |
1434 // | return addr | <-- esp | arg 0 |
1435 // | addtl. slot | | receiver slot |
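  //
  // A hedged C-style sketch of the steps above (illustrative only; indices are
  // in pointer-sized slots and argc, the value in num_args, already includes
  // the receiver):
  //
  //   esp -= argc * kSystemPointerSize;                     // Step 1
  //   for (int i = 0; i <= num_slots_to_move; i++)          // Step 2
  //     esp[i] = esp[i + argc];                             // ret addr & co.
  //   // Step 3: zero the receiver slot, then copy the remaining argc - 1
  //   // arguments downwards from start_addr into the slots above it.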
1436
1437 // Check for stack overflow before we increment the stack pointer.
1438 __ StackOverflowCheck(num_args, scratch1, stack_overflow, true);
1439
1440 // Step 1 - Update the stack pointer.
1441
1442 __ lea(scratch1, Operand(num_args, times_system_pointer_size, 0));
1443 __ AllocateStackSpace(scratch1);
1444
1445 // Step 2: Move return_address and the slots around it to the correct locations.
1446 // Move from top to bottom, otherwise we may overwrite when num_args = 0 or 1,
1447 // basically when the source and destination overlap. We at least need one
1448 // extra slot for receiver, so no extra checks are required to avoid copy.
1449 for (int i = 0; i < num_slots_to_move + 1; i++) {
1450 __ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
1451 i * kSystemPointerSize));
1452 __ mov(Operand(esp, i * kSystemPointerSize), scratch1);
1453 }
1454
1455 // Step 3: Copy the arguments to their correct locations.
1456 // Slot meant for receiver contains return address. Reset it so that
1457 // we will not incorrectly interpret return address as an object.
1458 __ mov(Operand(esp, (num_slots_to_move + 1) * kSystemPointerSize),
1459 Immediate(0));
1460 __ mov(scratch1, Immediate(0));
1461
1462 Label loop_header, loop_check;
1463 __ jmp(&loop_check);
1464 __ bind(&loop_header);
1465 __ mov(scratch2, Operand(start_addr, 0));
1466 __ mov(Operand(esp, scratch1, times_system_pointer_size,
1467 (num_slots_to_move + 1) * kSystemPointerSize),
1468 scratch2);
1469 __ sub(start_addr, Immediate(kSystemPointerSize));
1470 __ bind(&loop_check);
1471 __ inc(scratch1);
1472 __ cmp(scratch1, eax);
1473 __ j(less, &loop_header, Label::kNear);
1474 }
1475
1476 } // anonymous namespace
1477
1478 // static
1479 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1480 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1481 // ----------- S t a t e -------------
1482 // -- eax : the number of arguments
1483 // -- ecx : the address of the first argument to be pushed. Subsequent
1484 // arguments should be consecutive above this, in the same order
1485 // as they are to be pushed onto the stack.
1486 // -- esp[0] : return address
1487 // -- esp[4] : allocation site feedback (if available, else undefined)
1488 // -- esp[8] : the new target
1489 // -- esp[12] : the constructor
1490 // -----------------------------------
1491 Label stack_overflow;
1492
1493 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1494 // The spread argument should not be pushed.
1495 __ dec(eax);
1496 }
1497
1498 // Push arguments and move return address and stack spill slots to the top of
1499 // stack. The eax register is readonly. The ecx register will be modified. edx
1500 // and edi are used as scratch registers.
1501 Generate_InterpreterPushZeroAndArgsAndReturnAddress(
1502 masm, eax, ecx, edx, edi,
1503 InterpreterPushArgsThenConstructDescriptor::GetStackParameterCount(),
1504 &stack_overflow);
1505
1506 // Call the appropriate constructor. eax and ecx already contain intended
1507 // values, remaining registers still need to be initialized from the stack.
1508
1509 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1510 // Tail call to the array construct stub (still in the caller context at
1511 // this point).
1512
1513 __ movd(xmm0, eax); // Spill number of arguments.
1514 __ PopReturnAddressTo(eax);
1515 __ Pop(kJavaScriptCallExtraArg1Register);
1516 __ Pop(kJavaScriptCallNewTargetRegister);
1517 __ Pop(kJavaScriptCallTargetRegister);
1518 __ PushReturnAddressFrom(eax);
1519
1520 __ AssertFunction(kJavaScriptCallTargetRegister, eax);
1521 __ AssertUndefinedOrAllocationSite(kJavaScriptCallExtraArg1Register, eax);
1522
1523 __ movd(eax, xmm0); // Reload number of arguments.
1524 __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1525 RelocInfo::CODE_TARGET);
1526 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1527 __ movd(xmm0, eax); // Spill number of arguments.
1528 __ PopReturnAddressTo(eax);
1529 __ Drop(1); // The allocation site is unused.
1530 __ Pop(kJavaScriptCallNewTargetRegister);
1531 __ Pop(kJavaScriptCallTargetRegister);
1532 // Pass the spread in the register ecx, overwriting ecx.
1533 __ mov(ecx, Operand(ecx, 0));
1534 __ PushReturnAddressFrom(eax);
1535 __ movd(eax, xmm0); // Reload number of arguments.
1536 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1537 RelocInfo::CODE_TARGET);
1538 } else {
1539 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1540 __ PopReturnAddressTo(ecx);
1541 __ Drop(1); // The allocation site is unused.
1542 __ Pop(kJavaScriptCallNewTargetRegister);
1543 __ Pop(kJavaScriptCallTargetRegister);
1544 __ PushReturnAddressFrom(ecx);
1545
1546 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1547 }
1548
1549 __ bind(&stack_overflow);
1550 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1551 __ int3();
1552 }
1553
1554 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1555 // Set the return address to the correct point in the interpreter entry
1556 // trampoline.
1557 Label builtin_trampoline, trampoline_loaded;
1558 Smi interpreter_entry_return_pc_offset(
1559 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1560 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1561
1562 static constexpr Register scratch = ecx;
1563
1564 // If the SFI function_data is an InterpreterData, the function will have a
1565 // custom copy of the interpreter entry trampoline for profiling. If so,
1566 // get the custom trampoline, otherwise grab the entry address of the global
1567 // trampoline.
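  // In effect (illustrative pseudocode only, using hypothetical accessors):
  //
  //   data  = function.shared_function_info.function_data;
  //   entry = IsInterpreterData(data)
  //               ? data.interpreter_trampoline.instruction_start()
  //               : isolate->interpreter_entry_trampoline_instruction_start();
  //   push(entry + interpreter_entry_return_pc_offset);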
1568 __ mov(scratch, Operand(ebp, StandardFrameConstants::kFunctionOffset));
1569 __ mov(scratch, FieldOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
1570 __ mov(scratch,
1571 FieldOperand(scratch, SharedFunctionInfo::kFunctionDataOffset));
1572 __ Push(eax);
1573 __ CmpObjectType(scratch, INTERPRETER_DATA_TYPE, eax);
1574 __ j(not_equal, &builtin_trampoline, Label::kNear);
1575
1576 __ mov(scratch,
1577 FieldOperand(scratch, InterpreterData::kInterpreterTrampolineOffset));
1578 __ add(scratch, Immediate(Code::kHeaderSize - kHeapObjectTag));
1579 __ jmp(&trampoline_loaded, Label::kNear);
1580
1581 __ bind(&builtin_trampoline);
1582 __ mov(scratch,
1583 __ ExternalReferenceAsOperand(
1584 ExternalReference::
1585 address_of_interpreter_entry_trampoline_instruction_start(
1586 masm->isolate()),
1587 scratch));
1588
1589 __ bind(&trampoline_loaded);
1590 __ Pop(eax);
1591 __ add(scratch, Immediate(interpreter_entry_return_pc_offset.value()));
1592 __ push(scratch);
1593
1594 // Initialize the dispatch table register.
1595 __ Move(kInterpreterDispatchTableRegister,
1596 Immediate(ExternalReference::interpreter_dispatch_table_address(
1597 masm->isolate())));
1598
1599 // Get the bytecode array pointer from the frame.
1600 __ mov(kInterpreterBytecodeArrayRegister,
1601 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1602
1603 if (FLAG_debug_code) {
1604 // Check function data field is actually a BytecodeArray object.
1605 __ AssertNotSmi(kInterpreterBytecodeArrayRegister);
1606 __ CmpObjectType(kInterpreterBytecodeArrayRegister, BYTECODE_ARRAY_TYPE,
1607 scratch);
1608 __ Assert(
1609 equal,
1610 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1611 }
1612
1613 // Get the target bytecode offset from the frame.
1614 __ mov(kInterpreterBytecodeOffsetRegister,
1615 Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1616 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1617
1618 if (FLAG_debug_code) {
1619 Label okay;
1620 __ cmp(kInterpreterBytecodeOffsetRegister,
1621 Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1622 __ j(greater_equal, &okay, Label::kNear);
1623 __ int3();
1624 __ bind(&okay);
1625 }
1626
1627 // Dispatch to the target bytecode.
1628 __ movzx_b(scratch, Operand(kInterpreterBytecodeArrayRegister,
1629 kInterpreterBytecodeOffsetRegister, times_1, 0));
1630 __ mov(kJavaScriptCallCodeStartRegister,
1631 Operand(kInterpreterDispatchTableRegister, scratch,
1632 times_system_pointer_size, 0));
1633 __ jmp(kJavaScriptCallCodeStartRegister);
1634 }
1635
1636 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1637 // Get bytecode array and bytecode offset from the stack frame.
1638 __ mov(kInterpreterBytecodeArrayRegister,
1639 Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1640 __ mov(kInterpreterBytecodeOffsetRegister,
1641 Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1642 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1643
1644 Label enter_bytecode, function_entry_bytecode;
1645 __ cmp(kInterpreterBytecodeOffsetRegister,
1646 Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
1647 kFunctionEntryBytecodeOffset));
1648 __ j(equal, &function_entry_bytecode);
1649
1650 // Advance to the next bytecode.
1651 Label if_return;
1652 __ Push(eax);
1653 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1654 kInterpreterBytecodeOffsetRegister, ecx, esi,
1655 eax, &if_return);
1656 __ Pop(eax);
1657
1658 __ bind(&enter_bytecode);
1659 // Convert new bytecode offset to a Smi and save in the stackframe.
1660 __ mov(ecx, kInterpreterBytecodeOffsetRegister);
1661 __ SmiTag(ecx);
1662 __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ecx);
1663
1664 Generate_InterpreterEnterBytecode(masm);
1665
1666 __ bind(&function_entry_bytecode);
1667 // If the code deoptimizes during the implicit function entry stack interrupt
1668 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1669 // not a valid bytecode offset. Detect this case and advance to the first
1670 // actual bytecode.
1671 __ mov(kInterpreterBytecodeOffsetRegister,
1672 Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
1673 __ jmp(&enter_bytecode);
1674
1675 // We should never take the if_return path.
1676 __ bind(&if_return);
1677 // No need to pop eax here since we will be aborting anyway.
1678 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1679 }
1680
1681 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1682 Generate_InterpreterEnterBytecode(masm);
1683 }
1684 // static
1685 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1686 auto descriptor =
1687 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1688 Register arg_count = descriptor.GetRegisterParameter(
1689 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1690 Register frame_size = descriptor.GetRegisterParameter(
1691 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1692
1693 // Save argument count and bytecode array.
1694 XMMRegister saved_arg_count = xmm0;
1695 XMMRegister saved_bytecode_array = xmm1;
1696 XMMRegister saved_frame_size = xmm2;
1697 XMMRegister saved_feedback_vector = xmm3;
1698 __ movd(saved_arg_count, arg_count);
1699 __ movd(saved_frame_size, frame_size);
1700
1701 // Use the arg count (eax) as the scratch register.
1702 Register scratch = arg_count;
1703
1704 // Load the feedback vector from the closure.
1705 Register feedback_vector = ecx;
1706 Register closure = descriptor.GetRegisterParameter(
1707 BaselineOutOfLinePrologueDescriptor::kClosure);
1708 __ mov(feedback_vector,
1709 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
1710 __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
1711 if (FLAG_debug_code) {
1712 __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
1713 __ Assert(equal, AbortReason::kExpectedFeedbackVector);
1714 }
1715
1716 // Load the optimization state from the feedback vector and re-use the
1717 // register.
1718 Label has_optimized_code_or_state;
1719 Register optimization_state = ecx;
1720 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1721 saved_feedback_vector,
1722 &has_optimized_code_or_state);
1723
1724 // Load the feedback vector and increment the invocation count.
1725 __ movd(feedback_vector, saved_feedback_vector);
1726 __ inc(FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
1727
1728 XMMRegister return_address = xmm4;
1729 // Save the return address, so that we can push it to the end of the newly
1730 // set-up frame once we're done setting it up.
1731 __ PopReturnAddressTo(return_address, scratch);
1732 // The bytecode array was pushed to the stack by the caller.
1733 __ Pop(saved_bytecode_array, scratch);
1734 FrameScope frame_scope(masm, StackFrame::MANUAL);
1735 {
1736 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1737 __ EnterFrame(StackFrame::BASELINE);
1738
1739 __ Push(descriptor.GetRegisterParameter(
1740 BaselineOutOfLinePrologueDescriptor::kCalleeContext)); // Callee's
1741 // context.
1742 Register callee_js_function = descriptor.GetRegisterParameter(
1743 BaselineOutOfLinePrologueDescriptor::kClosure);
1744 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1745 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1746 __ Push(callee_js_function); // Callee's JS function.
1747 __ Push(saved_arg_count, scratch); // Push actual argument count.
1748
1749 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1750 // the frame, so load it into a register.
1751 Register bytecode_array = scratch;
1752 __ movd(bytecode_array, saved_bytecode_array);
1753 ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1754 __ Push(bytecode_array);
1755
1756 // Baseline code frames store the feedback vector where interpreter would
1757 // store the bytecode offset.
1758 __ Push(saved_feedback_vector, scratch);
1759 }
1760
1761 Label call_stack_guard;
1762 {
1763 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1764 // Stack check. This folds the checks for both the interrupt stack limit
1765 // check and the real stack limit into one by just checking for the
1766 // interrupt limit. The interrupt limit is either equal to the real stack
1767 // limit or tighter. By ensuring we have space until that limit after
1768 // building the frame we can quickly precheck both at once.
1769 //
1770 // TODO(v8:11429): Backport this folded check to the
1771 // InterpreterEntryTrampoline.
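    // In effect (illustrative only), the check below performs
    //   if (esp - frame_size < interrupt_stack_limit) goto call_stack_guard;
    // which also covers the real stack limit, since that limit is never above
    // the interrupt limit.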
1772 __ movd(frame_size, saved_frame_size);
1773 __ Move(scratch, esp);
1774 DCHECK_NE(frame_size, kJavaScriptCallNewTargetRegister);
1775 __ sub(scratch, frame_size);
1776 __ CompareStackLimit(scratch, StackLimitKind::kInterruptStackLimit);
1777 __ j(below, &call_stack_guard);
1778 }
1779
1780 // Push the return address back onto the stack for return.
1781 __ PushReturnAddressFrom(return_address, scratch);
1782 // Return to caller pushed pc, without any frame teardown.
1783 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1784 __ Ret();
1785
1786 __ bind(&has_optimized_code_or_state);
1787 {
1788 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1789 // Drop the return address and bytecode array, rebalancing the return stack
1790 // buffer by using JumpMode::kPushAndReturn. We can't leave the slot and
1791 // overwrite it on return since we may do a runtime call along the way that
1792 // requires the stack to only contain valid frames.
1793 __ Drop(2);
1794 __ movd(arg_count, saved_arg_count); // Restore actual argument count.
1795 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1796 saved_feedback_vector);
1797 __ Trap();
1798 }
1799
1800 __ bind(&call_stack_guard);
1801 {
1802 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1803 {
1804 // Push the baseline code return address now, as if it had been pushed by
1805 // the call to this builtin.
1806 __ PushReturnAddressFrom(return_address, scratch);
1807 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1808 // Save incoming new target or generator
1809 __ Push(kJavaScriptCallNewTargetRegister);
1810 __ SmiTag(frame_size);
1811 __ Push(frame_size);
1812 __ CallRuntime(Runtime::kStackGuardWithGap, 1);
1813 __ Pop(kJavaScriptCallNewTargetRegister);
1814 }
1815
1816 // Return to caller pushed pc, without any frame teardown.
1817 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1818 __ Ret();
1819 }
1820 }
1821
1822 namespace {
1823 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1824 bool java_script_builtin,
1825 bool with_result) {
1826 const RegisterConfiguration* config(RegisterConfiguration::Default());
1827 int allocatable_register_count = config->num_allocatable_general_registers();
1828 if (with_result) {
1829 if (java_script_builtin) {
1830 // xmm0 is not included in the allocatable registers.
1831 __ movd(xmm0, eax);
1832 } else {
1833 // Overwrite the hole inserted by the deoptimizer with the return value
1834 // from the LAZY deopt point.
1835 __ mov(
1836 Operand(esp, config->num_allocatable_general_registers() *
1837 kSystemPointerSize +
1838 BuiltinContinuationFrameConstants::kFixedFrameSize),
1839 eax);
1840 }
1841 }
1842
1843 // Replace the builtin index Smi on the stack with the start address of the
1844 // builtin loaded from the builtins table. The ret below will return to this
1845 // address.
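  // Conceptually (illustrative only):
  //   stack[offset] = builtin_entry_table[Smi::ToInt(stack[offset])];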
1846 int offset_to_builtin_index = allocatable_register_count * kSystemPointerSize;
1847 __ mov(eax, Operand(esp, offset_to_builtin_index));
1848 __ LoadEntryFromBuiltinIndex(eax);
1849 __ mov(Operand(esp, offset_to_builtin_index), eax);
1850
1851 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1852 int code = config->GetAllocatableGeneralCode(i);
1853 __ pop(Register::from_code(code));
1854 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1855 __ SmiUntag(Register::from_code(code));
1856 }
1857 }
1858 if (with_result && java_script_builtin) {
1859 // Overwrite the hole inserted by the deoptimizer with the return value from
1860 // the LAZY deopt point. eax contains the arguments count; the return value
1861 // from LAZY is always the last argument.
1862 __ movd(Operand(esp, eax, times_system_pointer_size,
1863 BuiltinContinuationFrameConstants::kFixedFrameSize -
1864 kJSArgcReceiverSlots * kSystemPointerSize),
1865 xmm0);
1866 }
1867 __ mov(
1868 ebp,
1869 Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1870 const int offsetToPC =
1871 BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp -
1872 kSystemPointerSize;
1873 __ pop(Operand(esp, offsetToPC));
1874 __ Drop(offsetToPC / kSystemPointerSize);
1875 __ ret(0);
1876 }
1877 } // namespace
1878
1879 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1880 Generate_ContinueToBuiltinHelper(masm, false, false);
1881 }
1882
1883 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1884 MacroAssembler* masm) {
1885 Generate_ContinueToBuiltinHelper(masm, false, true);
1886 }
1887
1888 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1889 Generate_ContinueToBuiltinHelper(masm, true, false);
1890 }
1891
1892 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1893 MacroAssembler* masm) {
1894 Generate_ContinueToBuiltinHelper(masm, true, true);
1895 }
1896
1897 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1898 {
1899 FrameScope scope(masm, StackFrame::INTERNAL);
1900 __ CallRuntime(Runtime::kNotifyDeoptimized);
1901 // Tear down internal frame.
1902 }
1903
1904 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), eax.code());
1905 __ mov(eax, Operand(esp, 1 * kSystemPointerSize));
1906 __ ret(1 * kSystemPointerSize); // Remove eax.
1907 }
1908
1909 // static
1910 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1911 // ----------- S t a t e -------------
1912 // -- eax : argc
1913 // -- esp[0] : return address
1914 // -- esp[4] : receiver
1915 // -- esp[8] : thisArg
1916 // -- esp[12] : argArray
1917 // -----------------------------------
1918
1919 // 1. Load receiver into xmm0, argArray into edx (if present), remove all
1920 // arguments from the stack (including the receiver), and push thisArg (if
1921 // present) instead.
1922 {
1923 Label no_arg_array, no_this_arg;
1924 StackArgumentsAccessor args(eax);
1925 // Spill receiver to allow the usage of edi as a scratch register.
1926 __ movd(xmm0, args.GetReceiverOperand());
1927
1928 __ LoadRoot(edx, RootIndex::kUndefinedValue);
1929 __ mov(edi, edx);
1930 __ cmp(eax, Immediate(JSParameterCount(0)));
1931 __ j(equal, &no_this_arg, Label::kNear);
1932 {
1933 __ mov(edi, args[1]);
1934 __ cmp(eax, Immediate(JSParameterCount(1)));
1935 __ j(equal, &no_arg_array, Label::kNear);
1936 __ mov(edx, args[2]);
1937 __ bind(&no_arg_array);
1938 }
1939 __ bind(&no_this_arg);
1940 __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
1941 TurboAssembler::kCountIsInteger,
1942 TurboAssembler::kCountIncludesReceiver);
1943
1944 // Restore receiver to edi.
1945 __ movd(edi, xmm0);
1946 }
1947
1948 // ----------- S t a t e -------------
1949 // -- edx : argArray
1950 // -- edi : receiver
1951 // -- esp[0] : return address
1952 // -- esp[4] : thisArg
1953 // -----------------------------------
1954
1955 // 2. We don't need to check explicitly for callable receiver here,
1956 // since that's the first thing the Call/CallWithArrayLike builtins
1957 // will do.
1958
1959 // 3. Tail call with no arguments if argArray is null or undefined.
1960 Label no_arguments;
1961 __ JumpIfRoot(edx, RootIndex::kNullValue, &no_arguments, Label::kNear);
1962 __ JumpIfRoot(edx, RootIndex::kUndefinedValue, &no_arguments, Label::kNear);
1963
1964 // 4a. Apply the receiver to the given argArray.
1965 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1966 RelocInfo::CODE_TARGET);
1967
1968 // 4b. The argArray is either null or undefined, so we tail call without any
1969 // arguments to the receiver.
1970 __ bind(&no_arguments);
1971 {
1972 __ Move(eax, JSParameterCount(0));
1973 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1974 }
1975 }
1976
1977 // static
1978 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1979 // Stack Layout:
1980 // esp[0] : Return address
1981 // esp[4] : Argument 0 (receiver: callable to call)
1982 // esp[8] : Argument 1
1983 // ...
1984 // esp[4 * n] : Argument n-1
1985 // esp[4 * (n + 1)] : Argument n
1986 // eax contains the number of arguments, n.
1987
1988 // 1. Get the callable to call (passed as receiver) from the stack.
1989 {
1990 StackArgumentsAccessor args(eax);
1991 __ mov(edi, args.GetReceiverOperand());
1992 }
1993
1994 // 2. Save the return address and drop the callable.
1995 __ PopReturnAddressTo(edx);
1996 __ Pop(ecx);
1997
1998 // 3. Make sure we have at least one argument.
1999 {
2000 Label done;
2001 __ cmp(eax, Immediate(JSParameterCount(0)));
2002 __ j(greater, &done, Label::kNear);
2003 __ PushRoot(RootIndex::kUndefinedValue);
2004 __ inc(eax);
2005 __ bind(&done);
2006 }
2007
2008 // 4. Push back the return address one slot down on the stack (overwriting the
2009 // original callable), making the original first argument the new receiver.
2010 __ PushReturnAddressFrom(edx);
2011 __ dec(eax); // One fewer argument (first argument is new receiver).
2012
2013 // 5. Call the callable.
2014 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2015 }
2016
2017 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2018 // ----------- S t a t e -------------
2019 // -- eax : argc
2020 // -- esp[0] : return address
2021 // -- esp[4] : receiver
2022 // -- esp[8] : target (if argc >= 1)
2023 // -- esp[12] : thisArgument (if argc >= 2)
2024 // -- esp[16] : argumentsList (if argc == 3)
2025 // -----------------------------------
2026
2027 // 1. Load target into edi (if present), argumentsList into edx (if present),
2028 // remove all arguments from the stack (including the receiver), and push
2029 // thisArgument (if present) instead.
2030 {
2031 Label done;
2032 StackArgumentsAccessor args(eax);
2033 __ LoadRoot(edi, RootIndex::kUndefinedValue);
2034 __ mov(edx, edi);
2035 __ mov(ecx, edi);
2036 __ cmp(eax, Immediate(JSParameterCount(1)));
2037 __ j(below, &done, Label::kNear);
2038 __ mov(edi, args[1]); // target
2039 __ j(equal, &done, Label::kNear);
2040 __ mov(ecx, args[2]); // thisArgument
2041 __ cmp(eax, Immediate(JSParameterCount(3)));
2042 __ j(below, &done, Label::kNear);
2043 __ mov(edx, args[3]); // argumentsList
2044 __ bind(&done);
2045
2046 // Spill argumentsList to use edx as a scratch register.
2047 __ movd(xmm0, edx);
2048
2049 __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
2050 TurboAssembler::kCountIsInteger,
2051 TurboAssembler::kCountIncludesReceiver);
2052
2053 // Restore argumentsList.
2054 __ movd(edx, xmm0);
2055 }
2056
2057 // ----------- S t a t e -------------
2058 // -- edx : argumentsList
2059 // -- edi : target
2060 // -- esp[0] : return address
2061 // -- esp[4] : thisArgument
2062 // -----------------------------------
2063
2064 // 2. We don't need to check explicitly for callable target here,
2065 // since that's the first thing the Call/CallWithArrayLike builtins
2066 // will do.
2067
2068 // 3. Apply the target to the given argumentsList.
2069 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2070 RelocInfo::CODE_TARGET);
2071 }
2072
2073 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2074 // ----------- S t a t e -------------
2075 // -- eax : argc
2076 // -- esp[0] : return address
2077 // -- esp[4] : receiver
2078 // -- esp[8] : target
2079 // -- esp[12] : argumentsList
2080 // -- esp[16] : new.target (optional)
2081 // -----------------------------------
2082
2083 // 1. Load target into edi (if present), argumentsList into ecx (if present),
2084 // new.target into edx (if present, otherwise use target), remove all
2085 // arguments from the stack (including the receiver), and push thisArgument
2086 // (if present) instead.
2087 {
2088 Label done;
2089 StackArgumentsAccessor args(eax);
2090 __ LoadRoot(edi, RootIndex::kUndefinedValue);
2091 __ mov(edx, edi);
2092 __ mov(ecx, edi);
2093 __ cmp(eax, Immediate(JSParameterCount(1)));
2094 __ j(below, &done, Label::kNear);
2095 __ mov(edi, args[1]); // target
2096 __ mov(edx, edi);
2097 __ j(equal, &done, Label::kNear);
2098 __ mov(ecx, args[2]); // argumentsList
2099 __ cmp(eax, Immediate(JSParameterCount(3)));
2100 __ j(below, &done, Label::kNear);
2101 __ mov(edx, args[3]); // new.target
2102 __ bind(&done);
2103
2104 // Spill argumentsList to use ecx as a scratch register.
2105 __ movd(xmm0, ecx);
2106
2107 __ DropArgumentsAndPushNewReceiver(
2108 eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
2109 TurboAssembler::kCountIsInteger,
2110 TurboAssembler::kCountIncludesReceiver);
2111
2112 // Restore argumentsList.
2113 __ movd(ecx, xmm0);
2114 }
2115
2116 // ----------- S t a t e -------------
2117 // -- ecx : argumentsList
2118 // -- edx : new.target
2119 // -- edi : target
2120 // -- esp[0] : return address
2121 // -- esp[4] : receiver (undefined)
2122 // -----------------------------------
2123
2124 // 2. We don't need to check explicitly for constructor target here,
2125 // since that's the first thing the Construct/ConstructWithArrayLike
2126 // builtins will do.
2127
2128 // 3. We don't need to check explicitly for constructor new.target here,
2129 // since that's the second thing the Construct/ConstructWithArrayLike
2130 // builtins will do.
2131
2132 // 4. Construct the target with the given new.target and argumentsList.
2133 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2134 RelocInfo::CODE_TARGET);
2135 }
2136
2137 namespace {
2138
2139 // Allocate new stack space for |count| arguments and shift all existing
2140 // arguments already on the stack. |pointer_to_new_space_out| points to the
2141 // first free slot on the stack to copy additional arguments to and
2142 // |argc_in_out| is updated to include |count|.
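// A hedged C-style sketch of the behavior (illustrative only; indices are in
// pointer-sized slots, and slot 0 holds the return address):
//
//   old_esp = esp;
//   esp -= count * kSystemPointerSize;
//   for (int i = 0; i <= argc_in_out; i++) esp[i] = old_esp[i];
//   pointer_to_new_space_out = &esp[argc_in_out + 1];
//   argc_in_out += count;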
2143 void Generate_AllocateSpaceAndShiftExistingArguments(
2144 MacroAssembler* masm, Register count, Register argc_in_out,
2145 Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2146 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2147 scratch2));
2148 // Use pointer_to_new_space_out as scratch until we set it to the correct
2149 // value at the end.
2150 Register old_esp = pointer_to_new_space_out;
2151 Register new_space = scratch1;
2152 __ mov(old_esp, esp);
2153
2154 __ lea(new_space, Operand(count, times_system_pointer_size, 0));
2155 __ AllocateStackSpace(new_space);
2156
2157 Register current = scratch1;
2158 Register value = scratch2;
2159
2160 Label loop, entry;
2161 __ mov(current, 0);
2162 __ jmp(&entry);
2163 __ bind(&loop);
2164 __ mov(value, Operand(old_esp, current, times_system_pointer_size, 0));
2165 __ mov(Operand(esp, current, times_system_pointer_size, 0), value);
2166 __ inc(current);
2167 __ bind(&entry);
2168 __ cmp(current, argc_in_out);
2169 __ j(less_equal, &loop, Label::kNear);
2170
2171 // Point to the next free slot above the shifted arguments (argc + 1 slot for
2172 // the return address).
2173 __ lea(
2174 pointer_to_new_space_out,
2175 Operand(esp, argc_in_out, times_system_pointer_size, kSystemPointerSize));
2176 // Update the total number of arguments.
2177 __ add(argc_in_out, count);
2178 }
2179
2180 } // namespace
2181
2182 // static
2183 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2184 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2185 Handle<Code> code) {
2186 // ----------- S t a t e -------------
2187 // -- edi : target
2188 // -- esi : context for the Call / Construct builtin
2189 // -- eax : number of parameters on the stack
2190 // -- ecx : len (number of elements to copy from args)
2191 // -- edx : new.target (checked to be constructor or undefined)
2192 // -- esp[4] : arguments list (a FixedArray)
2193 // -- esp[0] : return address.
2194 // -----------------------------------
2195
2196 __ movd(xmm0, edx); // Spill new.target.
2197 __ movd(xmm1, edi); // Spill target.
2198 __ movd(xmm3, esi); // Spill the context.
2199
2200 const Register kArgumentsList = esi;
2201 const Register kArgumentsLength = ecx;
2202
2203 __ PopReturnAddressTo(edx);
2204 __ pop(kArgumentsList);
2205 __ PushReturnAddressFrom(edx);
2206
2207 if (FLAG_debug_code) {
2208 // Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if
2209 // kArgumentsLength == 0.
2210 Label ok, fail;
2211 __ AssertNotSmi(kArgumentsList);
2212 __ mov(edx, FieldOperand(kArgumentsList, HeapObject::kMapOffset));
2213 __ CmpInstanceType(edx, FIXED_ARRAY_TYPE);
2214 __ j(equal, &ok);
2215 __ CmpInstanceType(edx, FIXED_DOUBLE_ARRAY_TYPE);
2216 __ j(not_equal, &fail);
2217 __ cmp(kArgumentsLength, 0);
2218 __ j(equal, &ok);
2219 // Fall through.
2220 __ bind(&fail);
2221 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2222
2223 __ bind(&ok);
2224 }
2225
2226 // Check the stack for overflow. We are not trying to catch interruptions
2227 // (i.e. debug break and preemption) here, so check the "real stack limit".
2228 Label stack_overflow;
2229 __ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
2230
2231 __ movd(xmm4, kArgumentsList); // Spill the arguments list.
2232 // Move the arguments already in the stack,
2233 // including the receiver and the return address.
2234 // kArgumentsLength (ecx): Number of arguments to make room for.
2235 // eax: Number of arguments already on the stack.
2236 // edx: Points to first free slot on the stack after arguments were shifted.
2237 Generate_AllocateSpaceAndShiftExistingArguments(masm, kArgumentsLength, eax,
2238 edx, edi, esi);
2239 __ movd(kArgumentsList, xmm4); // Recover arguments list.
2240 __ movd(xmm2, eax); // Spill argument count.
2241
2242 // Push additional arguments onto the stack.
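  // In effect (illustrative only): for each of the kArgumentsLength elements,
  //   value = args_list[i];
  //   *dest++ = (value == the_hole) ? undefined : value;
  // where dest is the pointer left in edx by the shift above.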
2243 {
2244 __ Move(eax, Immediate(0));
2245 Label done, push, loop;
2246 __ bind(&loop);
2247 __ cmp(eax, kArgumentsLength);
2248 __ j(equal, &done, Label::kNear);
2249 // Turn the hole into undefined as we go.
2250 __ mov(edi, FieldOperand(kArgumentsList, eax, times_tagged_size,
2251 FixedArray::kHeaderSize));
2252 __ CompareRoot(edi, RootIndex::kTheHoleValue);
2253 __ j(not_equal, &push, Label::kNear);
2254 __ LoadRoot(edi, RootIndex::kUndefinedValue);
2255 __ bind(&push);
2256 __ mov(Operand(edx, 0), edi);
2257 __ add(edx, Immediate(kSystemPointerSize));
2258 __ inc(eax);
2259 __ jmp(&loop);
2260 __ bind(&done);
2261 }
2262
2263 // Restore eax, edi and edx.
2264 __ movd(esi, xmm3); // Restore the context.
2265 __ movd(eax, xmm2); // Restore argument count.
2266 __ movd(edi, xmm1); // Restore target.
2267 __ movd(edx, xmm0); // Restore new.target.
2268
2269 // Tail-call to the actual Call or Construct builtin.
2270 __ Jump(code, RelocInfo::CODE_TARGET);
2271
2272 __ bind(&stack_overflow);
2273 __ movd(esi, xmm3); // Restore the context.
2274 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2275 }
2276
2277 // static
2278 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2279 CallOrConstructMode mode,
2280 Handle<Code> code) {
2281 // ----------- S t a t e -------------
2282 // -- eax : the number of arguments
2283 // -- edi : the target to call (can be any Object)
2284 // -- esi : context for the Call / Construct builtin
2285 // -- edx : the new target (for [[Construct]] calls)
2286 // -- ecx : start index (to support rest parameters)
2287 // -----------------------------------
2288
2289 __ movd(xmm0, esi); // Spill the context.
2290
2291 Register scratch = esi;
2292
2293 // Check if new.target has a [[Construct]] internal method.
2294 if (mode == CallOrConstructMode::kConstruct) {
2295 Label new_target_constructor, new_target_not_constructor;
2296 __ JumpIfSmi(edx, &new_target_not_constructor, Label::kNear);
2297 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
2298 __ test_b(FieldOperand(scratch, Map::kBitFieldOffset),
2299 Immediate(Map::Bits1::IsConstructorBit::kMask));
2300 __ j(not_zero, &new_target_constructor, Label::kNear);
2301 __ bind(&new_target_not_constructor);
2302 {
2303 FrameScope scope(masm, StackFrame::MANUAL);
2304 __ EnterFrame(StackFrame::INTERNAL);
2305 __ Push(edx);
2306 __ movd(esi, xmm0); // Restore the context.
2307 __ CallRuntime(Runtime::kThrowNotConstructor);
2308 }
2309 __ bind(&new_target_constructor);
2310 }
2311
2312 __ movd(xmm1, edx); // Preserve new.target (in case of [[Construct]]).
2313
2314 Label stack_done, stack_overflow;
2315 __ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
2316 __ dec(edx); // Exclude receiver.
2317 __ sub(edx, ecx);
2318 __ j(less_equal, &stack_done);
2319 {
2320 // ----------- S t a t e -------------
2321 // -- eax : the number of arguments already in the stack
2322 // -- ecx : start index (to support rest parameters)
2323 // -- edx : number of arguments to copy, i.e. arguments count - start index
2324 // -- edi : the target to call (can be any Object)
2325 // -- ebp : points to the caller stack frame
2326 // -- xmm0 : context for the Call / Construct builtin
2327 // -- xmm1 : the new target (for [[Construct]] calls)
2328 // -----------------------------------
2329
2330 // Forward the arguments from the caller frame.
2331 __ movd(xmm2, edi); // Preserve the target to call.
2332 __ StackOverflowCheck(edx, edi, &stack_overflow);
2333 __ movd(xmm3, ebx); // Preserve root register.
2334
2335 Register scratch = ebx;
2336
2337 // Move the arguments already in the stack,
2338 // including the receiver and the return address.
2339 // edx: Number of arguments to make room for.
2340 // eax: Number of arguments already on the stack.
2341 // esi: Points to first free slot on the stack after arguments were shifted.
2342 Generate_AllocateSpaceAndShiftExistingArguments(masm, edx, eax, esi, ebx,
2343 edi);
2344
2345 // Point to the first argument to copy (skipping receiver).
2346 __ lea(ecx, Operand(ecx, times_system_pointer_size,
2347 CommonFrameConstants::kFixedFrameSizeAboveFp +
2348 kSystemPointerSize));
2349 __ add(ecx, ebp);
2350
2351 // Copy the additional caller arguments onto the stack.
2352 // TODO(victorgomes): Consider using forward order as potentially more cache
2353 // friendly.
2354 {
2355 Register src = ecx, dest = esi, num = edx;
2356 Label loop;
2357 __ bind(&loop);
2358 __ dec(num);
2359 __ mov(scratch, Operand(src, num, times_system_pointer_size, 0));
2360 __ mov(Operand(dest, num, times_system_pointer_size, 0), scratch);
2361 __ j(not_zero, &loop);
2362 }
2363
2364 __ movd(ebx, xmm3); // Restore root register.
2365 __ movd(edi, xmm2); // Restore the target to call.
2366 }
2367 __ bind(&stack_done);
2368
2369 __ movd(edx, xmm1); // Restore new.target (in case of [[Construct]]).
2370 __ movd(esi, xmm0); // Restore the context.
2371
2372 // Tail-call to the {code} handler.
2373 __ Jump(code, RelocInfo::CODE_TARGET);
2374
2375 __ bind(&stack_overflow);
2376 __ movd(edi, xmm2); // Restore the target to call.
2377 __ movd(esi, xmm0); // Restore the context.
2378 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2379 }
2380
2381 // static
2382 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2383 ConvertReceiverMode mode) {
2384 // ----------- S t a t e -------------
2385 // -- eax : the number of arguments
2386 // -- edi : the function to call (checked to be a JSFunction)
2387 // -----------------------------------
2388 StackArgumentsAccessor args(eax);
2389 __ AssertCallableFunction(edi, edx);
2390
2391 __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2392
2393 // Enter the context of the function; ToObject has to run in the function
2394 // context, and we also need to take the global proxy from the function
2395 // context in case of conversion.
2396 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2397 // We need to convert the receiver for non-native sloppy mode functions.
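  // Sketch of the conversion below (illustrative pseudocode only):
  //   if (!shared.is_native() && !shared.is_strict()) {
  //     if (receiver is null or undefined) receiver = global_proxy;
  //     else if (!IsJSReceiver(receiver))  receiver = ToObject(receiver);
  //   }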
2398 Label done_convert;
2399 __ test(FieldOperand(edx, SharedFunctionInfo::kFlagsOffset),
2400 Immediate(SharedFunctionInfo::IsNativeBit::kMask |
2401 SharedFunctionInfo::IsStrictBit::kMask));
2402 __ j(not_zero, &done_convert);
2403 {
2404 // ----------- S t a t e -------------
2405 // -- eax : the number of arguments
2406 // -- edx : the shared function info.
2407 // -- edi : the function to call (checked to be a JSFunction)
2408 // -- esi : the function context.
2409 // -----------------------------------
2410
2411 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2412 // Patch receiver to global proxy.
2413 __ LoadGlobalProxy(ecx);
2414 } else {
2415 Label convert_to_object, convert_receiver;
2416 __ mov(ecx, args.GetReceiverOperand());
2417 __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
2418 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2419 __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ecx); // Clobbers ecx.
2420 __ j(above_equal, &done_convert);
2421 // Reload the receiver (it was clobbered by CmpObjectType).
2422 __ mov(ecx, args.GetReceiverOperand());
2423 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2424 Label convert_global_proxy;
2425 __ JumpIfRoot(ecx, RootIndex::kUndefinedValue, &convert_global_proxy,
2426 Label::kNear);
2427 __ JumpIfNotRoot(ecx, RootIndex::kNullValue, &convert_to_object,
2428 Label::kNear);
2429 __ bind(&convert_global_proxy);
2430 {
2431 // Patch receiver to global proxy.
2432 __ LoadGlobalProxy(ecx);
2433 }
2434 __ jmp(&convert_receiver);
2435 }
2436 __ bind(&convert_to_object);
2437 {
2438 // Convert receiver using ToObject.
2439 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2440 // in the fast case? (fall back to AllocateInNewSpace?)
2441 FrameScope scope(masm, StackFrame::INTERNAL);
2442 __ SmiTag(eax);
2443 __ Push(eax);
2444 __ Push(edi);
2445 __ mov(eax, ecx);
2446 __ Push(esi);
2447 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2448 RelocInfo::CODE_TARGET);
2449 __ Pop(esi);
2450 __ mov(ecx, eax);
2451 __ Pop(edi);
2452 __ Pop(eax);
2453 __ SmiUntag(eax);
2454 }
2455 __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2456 __ bind(&convert_receiver);
2457 }
2458 __ mov(args.GetReceiverOperand(), ecx);
2459 }
2460 __ bind(&done_convert);
2461
2462 // ----------- S t a t e -------------
2463 // -- eax : the number of arguments
2464 // -- edx : the shared function info.
2465 // -- edi : the function to call (checked to be a JSFunction)
2466 // -- esi : the function context.
2467 // -----------------------------------
2468
2469 __ movzx_w(
2470 ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
2471 __ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
2472 }
2473
2474 namespace {
2475
2476 void Generate_PushBoundArguments(MacroAssembler* masm) {
2477 // ----------- S t a t e -------------
2478 // -- eax : the number of arguments
2479 // -- edx : new.target (only in case of [[Construct]])
2480 // -- edi : target (checked to be a JSBoundFunction)
2481 // -----------------------------------
2482 __ movd(xmm0, edx); // Spill edx.
2483
2484 // Load [[BoundArguments]] into ecx and length of that into edx.
2485 Label no_bound_arguments;
2486 __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
2487 __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
2488 __ SmiUntag(edx);
2489 __ test(edx, edx);
2490 __ j(zero, &no_bound_arguments);
2491 {
2492 // ----------- S t a t e -------------
2493 // -- eax : the number of arguments
2494 // -- xmm0 : new.target (only in case of [[Construct]])
2495 // -- edi : target (checked to be a JSBoundFunction)
2496 // -- ecx : the [[BoundArguments]] (implemented as FixedArray)
2497 // -- edx : the number of [[BoundArguments]]
2498 // -----------------------------------
2499
2500 // Check the stack for overflow.
2501 {
2502 Label done, stack_overflow;
2503 __ StackOverflowCheck(edx, ecx, &stack_overflow);
2504 __ jmp(&done);
2505 __ bind(&stack_overflow);
2506 {
2507 FrameScope frame(masm, StackFrame::MANUAL);
2508 __ EnterFrame(StackFrame::INTERNAL);
2509 __ CallRuntime(Runtime::kThrowStackOverflow);
2510 __ int3();
2511 }
2512 __ bind(&done);
2513 }
2514
2515 // Spill context.
2516 __ movd(xmm3, esi);
2517
2518 // Save Return Address and Receiver into registers.
2519 __ pop(esi);
2520 __ movd(xmm1, esi);
2521 __ pop(esi);
2522 __ movd(xmm2, esi);
2523
2524 // Push [[BoundArguments]] to the stack.
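    // In effect (illustrative only):
    //   for (int i = length - 1; i >= 0; --i) push(bound_arguments[i]);
    // eax is bumped by length so it reflects the combined argument count.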
2525 {
2526 Label loop;
2527 __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundArgumentsOffset));
2528 __ mov(edx, FieldOperand(ecx, FixedArray::kLengthOffset));
2529 __ SmiUntag(edx);
2530 // Adjust effective number of arguments (eax contains the number of
2531 // arguments from the call not including receiver plus the number of
2532 // [[BoundArguments]]).
2533 __ add(eax, edx);
2534 __ bind(&loop);
2535 __ dec(edx);
2536 __ mov(esi, FieldOperand(ecx, edx, times_tagged_size,
2537 FixedArray::kHeaderSize));
2538 __ push(esi);
2539 __ j(greater, &loop);
2540 }
2541
2542 // Restore Receiver and Return Address.
2543 __ movd(esi, xmm2);
2544 __ push(esi);
2545 __ movd(esi, xmm1);
2546 __ push(esi);
2547
2548 // Restore context.
2549 __ movd(esi, xmm3);
2550 }
2551
2552 __ bind(&no_bound_arguments);
2553 __ movd(edx, xmm0); // Reload edx.
2554 }
2555
2556 } // namespace
2557
2558 // static
2559 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2560 // ----------- S t a t e -------------
2561 // -- eax : the number of arguments
2562 // -- edi : the function to call (checked to be a JSBoundFunction)
2563 // -----------------------------------
2564 __ AssertBoundFunction(edi);
2565
2566 // Patch the receiver to [[BoundThis]].
2567 StackArgumentsAccessor args(eax);
2568 __ mov(ecx, FieldOperand(edi, JSBoundFunction::kBoundThisOffset));
2569 __ mov(args.GetReceiverOperand(), ecx);
2570
2571 // Push the [[BoundArguments]] onto the stack.
2572 Generate_PushBoundArguments(masm);
2573
2574 // Call the [[BoundTargetFunction]] via the Call builtin.
2575 __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
2576 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2577 RelocInfo::CODE_TARGET);
2578 }
2579
2580 // static
2581 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2582 // ----------- S t a t e -------------
2583 // -- eax : the number of arguments
2584 // -- edi : the target to call (can be any Object).
2585 // -----------------------------------
2586 Register argc = eax;
2587 Register target = edi;
2588 Register map = ecx;
2589 Register instance_type = edx;
2590 DCHECK(!AreAliased(argc, target, map, instance_type));
2591
2592 StackArgumentsAccessor args(argc);
2593
2594 Label non_callable, non_smi, non_callable_jsfunction, non_jsboundfunction,
2595 non_proxy, non_wrapped_function, class_constructor;
2596 __ JumpIfSmi(target, &non_callable);
2597 __ bind(&non_smi);
2598 __ LoadMap(map, target);
2599 __ CmpInstanceTypeRange(map, instance_type, map,
2600 FIRST_CALLABLE_JS_FUNCTION_TYPE,
2601 LAST_CALLABLE_JS_FUNCTION_TYPE);
2602 __ j(above, &non_callable_jsfunction);
2603 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2604 RelocInfo::CODE_TARGET);
2605
2606 __ bind(&non_callable_jsfunction);
2607 __ cmpw(instance_type, Immediate(JS_BOUND_FUNCTION_TYPE));
2608 __ j(not_equal, &non_jsboundfunction);
2609 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2610 RelocInfo::CODE_TARGET);
2611
2612 // Check if target is a proxy and call CallProxy external builtin
2613 __ bind(&non_jsboundfunction);
2614 __ LoadMap(map, target);
2615 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2616 Immediate(Map::Bits1::IsCallableBit::kMask));
2617 __ j(zero, &non_callable);
2618
2619 // Call CallProxy external builtin
2620 __ cmpw(instance_type, Immediate(JS_PROXY_TYPE));
2621 __ j(not_equal, &non_proxy);
2622 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET);
2623
2624 // Check if target is a wrapped function and call CallWrappedFunction external
2625 // builtin
2626 __ bind(&non_proxy);
2627 __ cmpw(instance_type, Immediate(JS_WRAPPED_FUNCTION_TYPE));
2628 __ j(not_equal, &non_wrapped_function);
2629 __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2630 RelocInfo::CODE_TARGET);
2631
2632 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2633 // Check that the function is not a "classConstructor".
2634 __ bind(&non_wrapped_function);
2635 __ cmpw(instance_type, Immediate(JS_CLASS_CONSTRUCTOR_TYPE));
2636 __ j(equal, &class_constructor);
2637
2638 // 2. Call to something else, which might have a [[Call]] internal method (if
2639 // not we raise an exception).
2640 // Overwrite the original receiver with the (original) target.
2641 __ mov(args.GetReceiverOperand(), target);
2642 // Let the "call_as_function_delegate" take care of the rest.
2643 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2644 __ Jump(masm->isolate()->builtins()->CallFunction(
2645 ConvertReceiverMode::kNotNullOrUndefined),
2646 RelocInfo::CODE_TARGET);
2647
2648 // 3. Call to something that is not callable.
2649 __ bind(&non_callable);
2650 {
2651 FrameScope scope(masm, StackFrame::INTERNAL);
2652 __ Push(target);
2653 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2654 __ Trap(); // Unreachable.
2655 }
2656
2657 // 4. The function is a "classConstructor", need to raise an exception.
2658 __ bind(&class_constructor);
2659 {
2660 FrameScope frame(masm, StackFrame::INTERNAL);
2661 __ Push(target);
2662 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2663 __ Trap(); // Unreachable.
2664 }
2665 }
2666
2667 // static
2668 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2669 // ----------- S t a t e -------------
2670 // -- eax : the number of arguments
2671 // -- edx : the new target (checked to be a constructor)
2672 // -- edi : the constructor to call (checked to be a JSFunction)
2673 // -----------------------------------
2674 __ AssertConstructor(edi);
2675 __ AssertFunction(edi, ecx);
2676
2677 Label call_generic_stub;
2678
2679 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2680 __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
2681 __ test(FieldOperand(ecx, SharedFunctionInfo::kFlagsOffset),
2682 Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2683 __ j(zero, &call_generic_stub, Label::kNear);
2684
2685 // Calling convention for function-specific ConstructStubs requires
2686 // ecx to contain either an AllocationSite or undefined.
2687 __ LoadRoot(ecx, RootIndex::kUndefinedValue);
2688 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2689 RelocInfo::CODE_TARGET);
2690
2691 __ bind(&call_generic_stub);
2692 // Calling convention for function-specific ConstructStubs requires
2693 // ecx to contain either an AllocationSite or undefined.
2694 __ LoadRoot(ecx, RootIndex::kUndefinedValue);
2695 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2696 RelocInfo::CODE_TARGET);
2697 }
2698
2699 // static
2700 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2701 // ----------- S t a t e -------------
2702 // -- eax : the number of arguments
2703 // -- edx : the new target (checked to be a constructor)
2704 // -- edi : the constructor to call (checked to be a JSBoundFunction)
2705 // -----------------------------------
2706 __ AssertConstructor(edi);
2707 __ AssertBoundFunction(edi);
2708
2709 // Push the [[BoundArguments]] onto the stack.
2710 Generate_PushBoundArguments(masm);
2711
2712 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2713 {
2714 Label done;
2715 __ cmp(edi, edx);
2716 __ j(not_equal, &done, Label::kNear);
2717 __ mov(edx, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
2718 __ bind(&done);
2719 }
2720
2721 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2722 __ mov(edi, FieldOperand(edi, JSBoundFunction::kBoundTargetFunctionOffset));
2723 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2724 }
2725
2726 // static
2727 void Builtins::Generate_Construct(MacroAssembler* masm) {
2728 // ----------- S t a t e -------------
2729 // -- eax : the number of arguments
2730 // -- edx : the new target (either the same as the constructor or
2731 // the JSFunction on which new was invoked initially)
2732 // -- edi : the constructor to call (can be any Object)
2733 // -----------------------------------
2734 Register argc = eax;
2735 Register target = edi;
2736 Register map = ecx;
2737 DCHECK(!AreAliased(argc, target, map));
2738
2739 StackArgumentsAccessor args(argc);
2740
2741 // Check if target is a Smi.
2742 Label non_constructor, non_proxy, non_jsfunction, non_jsboundfunction;
2743 __ JumpIfSmi(target, &non_constructor);
2744
2745 // Check if target has a [[Construct]] internal method.
2746 __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
2747 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
2748 Immediate(Map::Bits1::IsConstructorBit::kMask));
2749 __ j(zero, &non_constructor);
2750
2751 // Dispatch based on instance type.
2752 __ CmpInstanceTypeRange(map, map, map, FIRST_JS_FUNCTION_TYPE,
2753 LAST_JS_FUNCTION_TYPE);
2754 __ j(above, &non_jsfunction);
2755 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2756 RelocInfo::CODE_TARGET);
2757
2758 // Only dispatch to bound functions after checking whether they are
2759 // constructors.
2760 __ bind(&non_jsfunction);
2761 __ mov(map, FieldOperand(target, HeapObject::kMapOffset));
2762 __ CmpInstanceType(map, JS_BOUND_FUNCTION_TYPE);
2763 __ j(not_equal, &non_jsboundfunction);
2764 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2765 RelocInfo::CODE_TARGET);
2766
2767 // Only dispatch to proxies after checking whether they are constructors.
2768 __ bind(&non_jsboundfunction);
2769 __ CmpInstanceType(map, JS_PROXY_TYPE);
2770 __ j(not_equal, &non_proxy);
2771 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2772 RelocInfo::CODE_TARGET);
2773
2774 // Called Construct on an exotic Object with a [[Construct]] internal method.
2775 __ bind(&non_proxy);
2776 {
2777 // Overwrite the original receiver with the (original) target.
2778 __ mov(args.GetReceiverOperand(), target);
2779 // Let the "call_as_constructor_delegate" take care of the rest.
2780 __ LoadNativeContextSlot(target,
2781 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2782 __ Jump(masm->isolate()->builtins()->CallFunction(),
2783 RelocInfo::CODE_TARGET);
2784 }
2785
2786 // Called Construct on an Object that doesn't have a [[Construct]] internal
2787 // method.
2788 __ bind(&non_constructor);
2789 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2790 RelocInfo::CODE_TARGET);
2791 }
2792
2793 namespace {
2794
2795 void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
2796 ASM_CODE_COMMENT(masm);
2797 // Overwrite the return address on the stack.
2798 __ mov(Operand(esp, 0), entry_address);
2799
2800 // And "return" to the OSR entry point of the function.
2801 __ ret(0);
2802 }
2803
2804 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
2805 ASM_CODE_COMMENT(masm);
2806 {
2807 FrameScope scope(masm, StackFrame::INTERNAL);
2808 __ CallRuntime(Runtime::kCompileOptimizedOSR);
2809 }
2810
2811 Label skip;
2812 // If the code object is null, just return to the caller.
2813 __ cmp(eax, Immediate(0));
2814 __ j(not_equal, &skip, Label::kNear);
2815 __ ret(0);
2816
2817 __ bind(&skip);
2818
2819 if (is_interpreter) {
2820 // Drop the handler frame that is sitting on top of the actual
2821 // JavaScript frame. This is the case when OSR is triggered from bytecode.
2822 __ leave();
2823 }
2824
2825 // Load deoptimization data from the code object.
2826 __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
2827 kHeapObjectTag));
2828
2829 // Load the OSR entrypoint offset from the deoptimization data.
2830 __ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
2831 DeoptimizationData::kOsrPcOffsetIndex) -
2832 kHeapObjectTag));
2833 __ SmiUntag(ecx);
2834
2835 // Compute the target address = code_obj + header_size + osr_offset
2836 __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
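  // In other words (illustrative arithmetic only):
  //   entry = code_obj - kHeapObjectTag + Code::kHeaderSize + osr_pc_offset,
  // i.e. the untagged start of the instruction stream plus the OSR offset
  // recorded in the deoptimization data.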
2837
2838 Generate_OSREntry(masm, eax);
2839 }
2840
2841 } // namespace
2842
2843 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2844 return OnStackReplacement(masm, true);
2845 }
2846
2847 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2848 __ mov(kContextRegister,
2849 MemOperand(ebp, BaselineFrameConstants::kContextOffset));
2850 return OnStackReplacement(masm, false);
2851 }
2852
2853 #if V8_ENABLE_WEBASSEMBLY
2854 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2855 // The function index was put in edi by the jump table trampoline.
2856 // Convert to Smi for the runtime call.
2857 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2858 {
2859 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2860 FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2861
2862 // Save all parameter registers (see wasm-linkage.h). They might be
2863 // overwritten in the runtime call below. We don't have any callee-saved
2864 // registers in wasm, so no need to store anything else.
2865 static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs ==
2866 arraysize(wasm::kGpParamRegisters),
2867 "frame size mismatch");
2868 for (Register reg : wasm::kGpParamRegisters) {
2869 __ Push(reg);
2870 }
2871 static_assert(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs ==
2872 arraysize(wasm::kFpParamRegisters),
2873 "frame size mismatch");
2874 __ AllocateStackSpace(kSimd128Size * arraysize(wasm::kFpParamRegisters));
2875 int offset = 0;
2876 for (DoubleRegister reg : wasm::kFpParamRegisters) {
2877 __ movdqu(Operand(esp, offset), reg);
2878 offset += kSimd128Size;
2879 }
2880
2881 // Push the Wasm instance for loading the jump table address after the
2882 // runtime call.
2883 __ Push(kWasmInstanceRegister);
2884
2885 // Push the Wasm instance again as an explicit argument to the runtime
2886 // function.
2887 __ Push(kWasmInstanceRegister);
2888 // Push the function index as second argument.
2889 __ Push(kWasmCompileLazyFuncIndexRegister);
2890 // Initialize the JavaScript context with 0. CEntry will use it to
2891 // set the current context on the isolate.
2892 __ Move(kContextRegister, Smi::zero());
2893 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2894 // The runtime function returns the jump table slot offset as a Smi. Use
2895 // that to compute the jump target in edi.
2896 __ Pop(kWasmInstanceRegister);
2897 __ mov(edi, MemOperand(kWasmInstanceRegister,
2898 WasmInstanceObject::kJumpTableStartOffset -
2899 kHeapObjectTag));
2900 __ SmiUntag(kReturnRegister0);
2901 __ add(edi, kReturnRegister0);
2902 // edi now holds the jump table slot where we want to jump to in the end.
2903
2904 // Restore registers.
2905 for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
2906 offset -= kSimd128Size;
2907 __ movdqu(reg, Operand(esp, offset));
2908 }
2909 DCHECK_EQ(0, offset);
2910 __ add(esp, Immediate(kSimd128Size * arraysize(wasm::kFpParamRegisters)));
2911 for (Register reg : base::Reversed(wasm::kGpParamRegisters)) {
2912 __ Pop(reg);
2913 }
2914 }
2915
2916 // Finally, jump to the jump table slot for the function.
2917 __ jmp(edi);
2918 }
2919
2920 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2921 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2922 {
2923 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2924
2925 // Save all parameter registers. They might hold live values, we restore
2926 // them after the runtime call.
2927 for (Register reg :
2928 base::Reversed(WasmDebugBreakFrameConstants::kPushedGpRegs)) {
2929 __ Push(reg);
2930 }
2931
2932 constexpr int kFpStackSize =
2933 kSimd128Size * WasmDebugBreakFrameConstants::kNumPushedFpRegisters;
2934 __ AllocateStackSpace(kFpStackSize);
2935 int offset = kFpStackSize;
2936 for (DoubleRegister reg :
2937 base::Reversed(WasmDebugBreakFrameConstants::kPushedFpRegs)) {
2938 offset -= kSimd128Size;
2939 __ movdqu(Operand(esp, offset), reg);
2940 }
2941
2942 // Initialize the JavaScript context with 0. CEntry will use it to
2943 // set the current context on the isolate.
2944 __ Move(kContextRegister, Smi::zero());
2945 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2946
2947 // Restore registers.
2948 for (DoubleRegister reg : WasmDebugBreakFrameConstants::kPushedFpRegs) {
2949 __ movdqu(reg, Operand(esp, offset));
2950 offset += kSimd128Size;
2951 }
2952 __ add(esp, Immediate(kFpStackSize));
2953 for (Register reg : WasmDebugBreakFrameConstants::kPushedGpRegs) {
2954 __ Pop(reg);
2955 }
2956 }
2957
2958 __ ret(0);
2959 }
2960
2961 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2962 // TODO(v8:10701): Implement for this platform.
2963 __ Trap();
2964 }
2965
2966 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2967 // TODO(v8:12191): Implement for this platform.
2968 __ Trap();
2969 }
2970
2971 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2972 // TODO(v8:12191): Implement for this platform.
2973 __ Trap();
2974 }
2975
2976 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2977 // TODO(v8:12191): Implement for this platform.
2978 __ Trap();
2979 }
2980
2981 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2982 // Only needed on x64.
2983 __ Trap();
2984 }
2985 #endif // V8_ENABLE_WEBASSEMBLY
2986
2987 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2988 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2989 bool builtin_exit_frame) {
2990 // eax: number of arguments including receiver
2991 // edx: pointer to C function
2992 // ebp: frame pointer (restored after C call)
2993 // esp: stack pointer (restored after C call)
2994 // esi: current context (C callee-saved)
2995 // edi: JS function of the caller (C callee-saved)
2996 //
2997 // If argv_mode == ArgvMode::kRegister:
2998 // ecx: pointer to the first argument
2999
3000 STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
3001 STATIC_ASSERT(ecx == kRuntimeCallArgvRegister);
3002 STATIC_ASSERT(edx == kRuntimeCallFunctionRegister);
3003 STATIC_ASSERT(esi == kContextRegister);
3004 STATIC_ASSERT(edi == kJSFunctionRegister);
3005
3006 DCHECK(!AreAliased(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
3007 kRuntimeCallFunctionRegister, kContextRegister,
3008 kJSFunctionRegister, kRootRegister));
3009
3010 // Reserve space on the stack for the three arguments passed to the call. If
3011 // result size is greater than can be returned in registers, also reserve
3012 // space for the hidden argument for the result location, and space for the
3013 // result itself.
3014 int arg_stack_space = 3;
3015
3016 // Enter the exit frame that transitions from JavaScript to C++.
3017 if (argv_mode == ArgvMode::kRegister) {
3018 DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
3019 DCHECK(!builtin_exit_frame);
3020 __ EnterApiExitFrame(arg_stack_space, edi);
3021
3022 // Move argc and argv into the correct registers.
3023 __ mov(esi, ecx);
3024 __ mov(edi, eax);
3025 } else {
3026 __ EnterExitFrame(
3027 arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
3028 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3029 }
3030
3031 // edx: pointer to C function
3032 // ebp: frame pointer (restored after C call)
3033 // esp: stack pointer (restored after C call)
3034 // edi: number of arguments including receiver (C callee-saved)
3035 // esi: pointer to the first argument (C callee-saved)
3036
3037 // Result returned in eax, or eax+edx if result size is 2.
3038
3039 // Check stack alignment.
3040 if (FLAG_debug_code) {
3041 __ CheckStackAlignment();
3042 }
3043 // Call C function.
3044 __ mov(Operand(esp, 0 * kSystemPointerSize), edi); // argc.
3045 __ mov(Operand(esp, 1 * kSystemPointerSize), esi); // argv.
3046 __ Move(ecx, Immediate(ExternalReference::isolate_address(masm->isolate())));
3047 __ mov(Operand(esp, 2 * kSystemPointerSize), ecx);
3048 __ call(kRuntimeCallFunctionRegister);
3049
3050 // Result is in eax or edx:eax - do not destroy these registers!
3051
3052 // Check result for exception sentinel.
3053 Label exception_returned;
3054 __ CompareRoot(eax, RootIndex::kException);
3055 __ j(equal, &exception_returned);
3056
3057 // Check that there is no pending exception, otherwise we
3058 // should have returned the exception sentinel.
3059 if (FLAG_debug_code) {
3060 __ push(edx);
3061 __ LoadRoot(edx, RootIndex::kTheHoleValue);
3062 Label okay;
3063 ExternalReference pending_exception_address = ExternalReference::Create(
3064 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
3065 __ cmp(edx, __ ExternalReferenceAsOperand(pending_exception_address, ecx));
3066 // Cannot use Check here, as it attempts to generate a call into the runtime.
3067 __ j(equal, &okay, Label::kNear);
3068 __ int3();
3069 __ bind(&okay);
3070 __ pop(edx);
3071 }
3072
3073 // Exit the JavaScript to C++ exit frame.
3074 __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
3075 argv_mode == ArgvMode::kStack);
3076 __ ret(0);
3077
3078 // Handling of exception.
3079 __ bind(&exception_returned);
3080
3081 ExternalReference pending_handler_context_address = ExternalReference::Create(
3082 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3083 ExternalReference pending_handler_entrypoint_address =
3084 ExternalReference::Create(
3085 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3086 ExternalReference pending_handler_fp_address = ExternalReference::Create(
3087 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3088 ExternalReference pending_handler_sp_address = ExternalReference::Create(
3089 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3090
3091 // Ask the runtime for help to determine the handler. This will set eax to
3092 // contain the current pending exception, don't clobber it.
3093 ExternalReference find_handler =
3094 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
3095 {
3096 FrameScope scope(masm, StackFrame::MANUAL);
3097 __ PrepareCallCFunction(3, eax);
3098 __ mov(Operand(esp, 0 * kSystemPointerSize), Immediate(0)); // argc.
3099 __ mov(Operand(esp, 1 * kSystemPointerSize), Immediate(0)); // argv.
3100 __ Move(esi,
3101 Immediate(ExternalReference::isolate_address(masm->isolate())));
3102 __ mov(Operand(esp, 2 * kSystemPointerSize), esi);
3103 __ CallCFunction(find_handler, 3);
3104 }
3105
3106 // Retrieve the handler context, SP and FP.
3107 __ mov(esp, __ ExternalReferenceAsOperand(pending_handler_sp_address, esi));
3108 __ mov(ebp, __ ExternalReferenceAsOperand(pending_handler_fp_address, esi));
3109 __ mov(esi,
3110 __ ExternalReferenceAsOperand(pending_handler_context_address, esi));
3111
3112 // If the handler is a JS frame, restore the context to the frame. Note that
3113 // the context will be set to (esi == 0) for non-JS frames.
3114 Label skip;
3115 __ test(esi, esi);
3116 __ j(zero, &skip, Label::kNear);
3117 __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
3118 __ bind(&skip);
3119
3120 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3121 ExternalReference c_entry_fp_address = ExternalReference::Create(
3122 IsolateAddressId::kCEntryFPAddress, masm->isolate());
3123 __ mov(__ ExternalReferenceAsOperand(c_entry_fp_address, esi), Immediate(0));
3124
3125 // Compute the handler entry address and jump to it.
3126 __ mov(edi, __ ExternalReferenceAsOperand(pending_handler_entrypoint_address,
3127 edi));
3128 __ jmp(edi);
3129 }
3130
3131 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3132 Label check_negative, process_64_bits, done;
3133
3134 // Account for return address and saved regs.
3135 const int kArgumentOffset = 4 * kSystemPointerSize;
3136
3137 MemOperand mantissa_operand(MemOperand(esp, kArgumentOffset));
3138 MemOperand exponent_operand(
3139 MemOperand(esp, kArgumentOffset + kDoubleSize / 2));
3140
3141 // The result is returned on the stack.
3142 MemOperand return_operand = mantissa_operand;
3143
3144 Register scratch1 = ebx;
3145
3146 // Since we must use ecx for shifts below, use some other register (eax)
3147 // to calculate the result.
3148 Register result_reg = eax;
3149 // Save ecx if it isn't the return register (and therefore volatile), or,
3150 // if it is the return register, save the temporary register we use in its
3151 // stead for the result.
3152 Register save_reg = eax;
3153 __ push(ecx);
3154 __ push(scratch1);
3155 __ push(save_reg);
3156
3157 __ mov(scratch1, mantissa_operand);
3158 if (CpuFeatures::IsSupported(SSE3)) {
3159 CpuFeatureScope scope(masm, SSE3);
3160 // Load x87 register with heap number.
3161 __ fld_d(mantissa_operand);
3162 }
3163 __ mov(ecx, exponent_operand);
3164
3165 __ and_(ecx, HeapNumber::kExponentMask);
3166 __ shr(ecx, HeapNumber::kExponentShift);
3167 __ lea(result_reg, MemOperand(ecx, -HeapNumber::kExponentBias));
3168 __ cmp(result_reg, Immediate(HeapNumber::kMantissaBits));
3169 __ j(below, &process_64_bits);
3170
3171 // Result is entirely in lower 32-bits of mantissa
3172 int delta =
3173 HeapNumber::kExponentBias + base::Double::kPhysicalSignificandSize;
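  // Worked example (assuming the standard IEEE-754 double layout): for the
  // input 2^52 + 5 the biased exponent is kExponentBias + 52 = 1075, so the
  // shift amount computed below is 0, the low mantissa word already holds 5,
  // and the truncated 32-bit result is 5 == (2^52 + 5) mod 2^32.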
3174 if (CpuFeatures::IsSupported(SSE3)) {
3175 __ fstp(0);
3176 }
3177 __ sub(ecx, Immediate(delta));
3178 __ xor_(result_reg, result_reg);
3179 __ cmp(ecx, Immediate(31));
3180 __ j(above, &done);
3181 __ shl_cl(scratch1);
3182 __ jmp(&check_negative);
3183
3184 __ bind(&process_64_bits);
3185 if (CpuFeatures::IsSupported(SSE3)) {
3186 CpuFeatureScope scope(masm, SSE3);
3187 // Reserve space for 64 bit answer.
3188 __ AllocateStackSpace(kDoubleSize); // Nolint.
3189 // Do conversion, which cannot fail because we checked the exponent.
3190 __ fisttp_d(Operand(esp, 0));
3191 __ mov(result_reg, Operand(esp, 0)); // Load low word of answer as result
3192 __ add(esp, Immediate(kDoubleSize));
3193 __ jmp(&done);
3194 } else {
3195 // Result must be extracted from shifted 32-bit mantissa
3196 __ sub(ecx, Immediate(delta));
3197 __ neg(ecx);
3198 __ mov(result_reg, exponent_operand);
3199 __ and_(
3200 result_reg,
3201 Immediate(static_cast<uint32_t>(base::Double::kSignificandMask >> 32)));
3202 __ add(result_reg,
3203 Immediate(static_cast<uint32_t>(base::Double::kHiddenBit >> 32)));
3204 __ shrd_cl(scratch1, result_reg);
3205 __ shr_cl(result_reg);
3206 __ test(ecx, Immediate(32));
3207 __ cmov(not_equal, scratch1, result_reg);
3208 }
3209
3210 // If the double was negative, negate the integer result.
3211 __ bind(&check_negative);
3212 __ mov(result_reg, scratch1);
3213 __ neg(result_reg);
3214 __ cmp(exponent_operand, Immediate(0));
3215 __ cmov(greater, result_reg, scratch1);
3216
3217 // Restore registers
3218 __ bind(&done);
3219 __ mov(return_operand, result_reg);
3220 __ pop(save_reg);
3221 __ pop(scratch1);
3222 __ pop(ecx);
3223 __ ret(0);
3224 }
3225
3226 namespace {
3227
3228 // Generates an Operand for saving parameters after PrepareCallApiFunction.
3229 Operand ApiParameterOperand(int index) {
3230 return Operand(esp, index * kSystemPointerSize);
3231 }
3232
3233 // Prepares stack to put arguments (aligns and so on). Reserves
3234 // space for return value if needed (assumes the return value is a handle).
3235 // Arguments must be stored in ApiParameterOperand(0), ApiParameterOperand(1)
3236 // etc. Saves context (esi). If space was reserved for return value then
3237 // stores the pointer to the reserved slot into esi.
3238 void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
3239 ASM_CODE_COMMENT(masm);
3240 __ EnterApiExitFrame(argc, scratch);
3241 if (FLAG_debug_code) {
3242 __ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
3243 }
3244 }
3245
3246 // Calls an API function. Allocates HandleScope, extracts returned value
3247 // from handle and propagates exceptions. Clobbers esi, edi and
3248 // caller-save registers. Restores context. On return removes
3249 // stack_space * kSystemPointerSize (GCed).
3250 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3251 ExternalReference thunk_ref,
3252 Operand thunk_last_arg, int stack_space,
3253 Operand* stack_space_operand,
3254 Operand return_value_operand) {
3255 Isolate* isolate = masm->isolate();
3256
3257 ExternalReference next_address =
3258 ExternalReference::handle_scope_next_address(isolate);
3259 ExternalReference limit_address =
3260 ExternalReference::handle_scope_limit_address(isolate);
3261 ExternalReference level_address =
3262 ExternalReference::handle_scope_level_address(isolate);
3263
3264 DCHECK(edx == function_address);
3265 // Allocate HandleScope in callee-save registers.
3266 __ add(__ ExternalReferenceAsOperand(level_address, esi), Immediate(1));
3267 __ mov(esi, __ ExternalReferenceAsOperand(next_address, esi));
3268 __ mov(edi, __ ExternalReferenceAsOperand(limit_address, edi));
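  // Sketch of the handle-scope protocol as used in this function: esi/edi now
  // cache the previous "next" and "limit" values and the level has been
  // incremented; after the API call, "next" is written back, the level is
  // decremented, and a changed "limit" branches to delete_allocated_handles.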
3269
3270 Label profiler_enabled, end_profiler_check;
3271 __ Move(eax, Immediate(ExternalReference::is_profiling_address(isolate)));
3272 __ cmpb(Operand(eax, 0), Immediate(0));
3273 __ j(not_zero, &profiler_enabled);
3274 __ Move(eax, Immediate(ExternalReference::address_of_runtime_stats_flag()));
3275 __ cmp(Operand(eax, 0), Immediate(0));
3276 __ j(not_zero, &profiler_enabled);
3277 {
3278 // Call the api function directly.
3279 __ mov(eax, function_address);
3280 __ jmp(&end_profiler_check);
3281 }
3282 __ bind(&profiler_enabled);
3283 {
3284 // Additional parameter is the address of the actual getter function.
3285 __ mov(thunk_last_arg, function_address);
3286 __ Move(eax, Immediate(thunk_ref));
3287 }
3288 __ bind(&end_profiler_check);
3289
3290 // Call the api function.
3291 __ call(eax);
3292
3293 Label prologue;
3294 // Load the value from ReturnValue
3295 __ mov(eax, return_value_operand);
3296
3297 Label promote_scheduled_exception;
3298 Label delete_allocated_handles;
3299 Label leave_exit_frame;
3300
3301 __ bind(&prologue);
3302 // No more valid handles (the result handle was the last one). Restore
3303 // previous handle scope.
3304 __ mov(__ ExternalReferenceAsOperand(next_address, ecx), esi);
3305 __ sub(__ ExternalReferenceAsOperand(level_address, ecx), Immediate(1));
3306 __ Assert(above_equal, AbortReason::kInvalidHandleScopeLevel);
3307 __ cmp(edi, __ ExternalReferenceAsOperand(limit_address, ecx));
3308 __ j(not_equal, &delete_allocated_handles);
3309
3310 // Leave the API exit frame.
3311 __ bind(&leave_exit_frame);
3312 if (stack_space_operand != nullptr) {
3313 DCHECK_EQ(stack_space, 0);
3314 __ mov(edx, *stack_space_operand);
3315 }
3316 __ LeaveApiExitFrame();
3317
3318 // Check if the function scheduled an exception.
3319 ExternalReference scheduled_exception_address =
3320 ExternalReference::scheduled_exception_address(isolate);
3321 __ mov(ecx, __ ExternalReferenceAsOperand(scheduled_exception_address, ecx));
3322 __ CompareRoot(ecx, RootIndex::kTheHoleValue);
3323 __ j(not_equal, &promote_scheduled_exception);
3324
3325 #if DEBUG
3326 // Check if the function returned a valid JavaScript value.
3327 Label ok;
3328 Register return_value = eax;
3329 Register map = ecx;
3330
3331 __ JumpIfSmi(return_value, &ok, Label::kNear);
3332 __ mov(map, FieldOperand(return_value, HeapObject::kMapOffset));
3333
3334 __ CmpInstanceType(map, LAST_NAME_TYPE);
3335 __ j(below_equal, &ok, Label::kNear);
3336
3337 __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
3338 __ j(above_equal, &ok, Label::kNear);
3339
3340 __ CompareRoot(map, RootIndex::kHeapNumberMap);
3341 __ j(equal, &ok, Label::kNear);
3342
3343 __ CompareRoot(map, RootIndex::kBigIntMap);
3344 __ j(equal, &ok, Label::kNear);
3345
3346 __ CompareRoot(return_value, RootIndex::kUndefinedValue);
3347 __ j(equal, &ok, Label::kNear);
3348
3349 __ CompareRoot(return_value, RootIndex::kTrueValue);
3350 __ j(equal, &ok, Label::kNear);
3351
3352 __ CompareRoot(return_value, RootIndex::kFalseValue);
3353 __ j(equal, &ok, Label::kNear);
3354
3355 __ CompareRoot(return_value, RootIndex::kNullValue);
3356 __ j(equal, &ok, Label::kNear);
3357
3358 __ Abort(AbortReason::kAPICallReturnedInvalidObject);
3359
3360 __ bind(&ok);
3361 #endif
3362
3363 if (stack_space_operand == nullptr) {
3364 DCHECK_NE(stack_space, 0);
3365 __ ret(stack_space * kSystemPointerSize);
3366 } else {
3367 DCHECK_EQ(0, stack_space);
3368 __ pop(ecx);
3369 __ add(esp, edx);
3370 __ jmp(ecx);
3371 }
3372
3373 // Re-throw by promoting a scheduled exception.
3374 __ bind(&promote_scheduled_exception);
3375 __ TailCallRuntime(Runtime::kPromoteScheduledException);
3376
3377 // HandleScope limit has changed. Delete allocated extensions.
3378 ExternalReference delete_extensions =
3379 ExternalReference::delete_handle_scope_extensions();
3380 __ bind(&delete_allocated_handles);
3381 __ mov(__ ExternalReferenceAsOperand(limit_address, ecx), edi);
3382 __ mov(edi, eax);
3383 __ Move(eax, Immediate(ExternalReference::isolate_address(isolate)));
3384 __ mov(Operand(esp, 0), eax);
3385 __ Move(eax, Immediate(delete_extensions));
3386 __ call(eax);
3387 __ mov(eax, edi);
3388 __ jmp(&leave_exit_frame);
3389 }
3390
3391 } // namespace
3392
3393 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3394 // ----------- S t a t e -------------
3395 // -- esi : context
3396 // -- edx : api function address
3397 // -- ecx : arguments count (not including the receiver)
3398 // -- eax : call data
3399 // -- edi : holder
3400 // -- esp[0] : return address
3401 // -- esp[8] : argument 0 (receiver)
3402 // -- esp[16] : argument 1
3403 // -- ...
3404 // -- esp[argc * 8] : argument (argc - 1)
3405 // -- esp[(argc + 1) * 8] : argument argc
3406 // -----------------------------------
3407
3408 Register api_function_address = edx;
3409 Register argc = ecx;
3410 Register call_data = eax;
3411 Register holder = edi;
3412
3413 // Park argc in xmm0.
3414 __ movd(xmm0, argc);
3415
3416 DCHECK(!AreAliased(api_function_address, argc, holder));
3417
3418 using FCA = FunctionCallbackArguments;
3419
3420 STATIC_ASSERT(FCA::kArgsLength == 6);
3421 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3422 STATIC_ASSERT(FCA::kDataIndex == 4);
3423 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3424 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3425 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3426 STATIC_ASSERT(FCA::kHolderIndex == 0);
3427
3428 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3429 //
3430 // Current state:
3431 // esp[0]: return address
3432 //
3433 // Target state:
3434 // esp[0 * kSystemPointerSize]: return address
3435 // esp[1 * kSystemPointerSize]: kHolder
3436 // esp[2 * kSystemPointerSize]: kIsolate
3437 // esp[3 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3438 // esp[4 * kSystemPointerSize]: undefined (kReturnValue)
3439 // esp[5 * kSystemPointerSize]: kData
3440 // esp[6 * kSystemPointerSize]: undefined (kNewTarget)
3441
3442 __ PopReturnAddressTo(ecx);
3443 __ PushRoot(RootIndex::kUndefinedValue);
3444 __ Push(call_data);
3445 __ PushRoot(RootIndex::kUndefinedValue);
3446 __ PushRoot(RootIndex::kUndefinedValue);
3447 __ Push(Immediate(ExternalReference::isolate_address(masm->isolate())));
3448 __ Push(holder);
3449 __ PushReturnAddressFrom(ecx);
3450
3451 // Reload argc from xmm0.
3452 __ movd(argc, xmm0);
3453
3454 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3455 // We use it below to set up the FunctionCallbackInfo object.
3456 Register scratch = eax;
3457 __ lea(scratch, Operand(esp, 1 * kSystemPointerSize));
3458
3459 // The API function takes a reference to v8::Arguments. If the CPU profiler
3460 // is enabled, a wrapper function will be called and we need to pass
3461 // the address of the callback as an additional parameter. Always allocate
3462 // space for it.
3463 static constexpr int kApiArgc = 1 + 1;
3464
3465 // Allocate the v8::Arguments structure in the arguments' space since
3466 // it's not controlled by GC.
3467 static constexpr int kApiStackSpace = 4;
3468
3469 PrepareCallApiFunction(masm, kApiArgc + kApiStackSpace, edi);
3470
3471 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3472 __ mov(ApiParameterOperand(kApiArgc + 0), scratch);
3473
3474 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3475 // on the stack).
3476 __ lea(scratch,
3477 Operand(scratch, (FCA::kArgsLength + 1) * kSystemPointerSize));
3478 __ mov(ApiParameterOperand(kApiArgc + 1), scratch);
3479
3480 // FunctionCallbackInfo::length_.
3481 __ mov(ApiParameterOperand(kApiArgc + 2), argc);
3482
3483 // We also store the number of bytes to drop from the stack after returning
3484 // from the API function here.
3485 __ lea(scratch,
3486 Operand(argc, times_system_pointer_size,
3487 (FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
3488 __ mov(ApiParameterOperand(kApiArgc + 3), scratch);
3489
3490 // v8::InvocationCallback's argument.
3491 __ lea(scratch, ApiParameterOperand(kApiArgc + 0));
3492 __ mov(ApiParameterOperand(0), scratch);
3493
3494 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3495
3496 // There are two stack slots above the arguments we constructed on the stack:
3497 // the stored ebp (pushed by EnterApiExitFrame), and the return address.
3498 static constexpr int kStackSlotsAboveFCA = 2;
3499 Operand return_value_operand(
3500 ebp,
3501 (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3502
3503 static constexpr int kUseStackSpaceOperand = 0;
3504 Operand stack_space_operand = ApiParameterOperand(kApiArgc + 3);
3505 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3506 ApiParameterOperand(1), kUseStackSpaceOperand,
3507 &stack_space_operand, return_value_operand);
3508 }
3509
3510 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3511 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3512 // name below the exit frame to make GC aware of them.
3513 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3514 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3515 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3516 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3517 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3518 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3519 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3520 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3521
3522 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3523 Register holder = ApiGetterDescriptor::HolderRegister();
3524 Register callback = ApiGetterDescriptor::CallbackRegister();
3525 Register scratch = edi;
3526 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3527
3528 __ pop(scratch); // Pop return address to extend the frame.
3529 __ push(receiver);
3530 __ push(FieldOperand(callback, AccessorInfo::kDataOffset));
3531 __ PushRoot(RootIndex::kUndefinedValue); // ReturnValue
3532 // ReturnValue default value
3533 __ PushRoot(RootIndex::kUndefinedValue);
3534 __ Push(Immediate(ExternalReference::isolate_address(masm->isolate())));
3535 __ push(holder);
3536 __ push(Immediate(Smi::zero())); // should_throw_on_error -> false
3537 __ push(FieldOperand(callback, AccessorInfo::kNameOffset));
3538 __ push(scratch); // Restore return address.
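  // The pushes above lay out PropertyCallbackArguments::args_ from kThisIndex
  // (the receiver) down to kShouldThrowOnErrorIndex, followed by the name
  // handle below it, matching the STATIC_ASSERTed indices at the top of this
  // function.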
3539
3540 // v8::PropertyCallbackInfo::args_ array and name handle.
3541 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3542
3543 // Allocate v8::PropertyCallbackInfo object, arguments for callback and
3544 // space for optional callback address parameter (in case CPU profiler is
3545 // active) in non-GCed stack space.
3546 const int kApiArgc = 3 + 1;
3547
3548 PrepareCallApiFunction(masm, kApiArgc, scratch);
3549
3550 // Load address of v8::PropertyAccessorInfo::args_ array. The value in ebp
3551 // here corresponds to esp + kSystemPointerSize before PrepareCallApiFunction.
3552 __ lea(scratch, Operand(ebp, kSystemPointerSize + 2 * kSystemPointerSize));
3553 // Create v8::PropertyCallbackInfo object on the stack and initialize
3554 // its args_ field.
3555 Operand info_object = ApiParameterOperand(3);
3556 __ mov(info_object, scratch);
3557
3558 // Name as handle.
3559 __ sub(scratch, Immediate(kSystemPointerSize));
3560 __ mov(ApiParameterOperand(0), scratch);
3561 // Arguments pointer.
3562 __ lea(scratch, info_object);
3563 __ mov(ApiParameterOperand(1), scratch);
3564 // Reserve space for optional callback address parameter.
3565 Operand thunk_last_arg = ApiParameterOperand(2);
3566
3567 ExternalReference thunk_ref =
3568 ExternalReference::invoke_accessor_getter_callback();
3569
3570 __ mov(scratch, FieldOperand(callback, AccessorInfo::kJsGetterOffset));
3571 Register function_address = edx;
3572 __ mov(function_address,
3573 FieldOperand(scratch, Foreign::kForeignAddressOffset));
3574 // +3 is to skip prolog, return address and name handle.
3575 Operand return_value_operand(
3576 ebp,
3577 (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3578 Operand* const kUseStackSpaceConstant = nullptr;
3579 CallApiFunctionAndReturn(masm, function_address, thunk_ref, thunk_last_arg,
3580 kStackUnwindSpace, kUseStackSpaceConstant,
3581 return_value_operand);
3582 }
3583
3584 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3585 __ int3(); // Unused on this architecture.
3586 }
3587
3588 namespace {
3589
3590 enum Direction { FORWARD, BACKWARD };
3591 enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
3592
3593 // Expects registers:
3594 // esi - source, aligned if alignment == MOVE_ALIGNED
3595 // edi - destination, always aligned
3596 // ecx - count (copy size in bytes)
3597 // edx - loop count (number of 64 byte chunks)
3598 void MemMoveEmitMainLoop(MacroAssembler* masm, Label* move_last_15,
3599 Direction direction, Alignment alignment) {
3600 ASM_CODE_COMMENT(masm);
3601 Register src = esi;
3602 Register dst = edi;
3603 Register count = ecx;
3604 Register loop_count = edx;
3605 Label loop, move_last_31, move_last_63;
3606 __ cmp(loop_count, 0);
3607 __ j(equal, &move_last_63);
3608 __ bind(&loop);
3609 // Main loop. Copy in 64 byte chunks.
3610 if (direction == BACKWARD) __ sub(src, Immediate(0x40));
3611 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
3612 __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
3613 __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
3614 __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
3615 if (direction == FORWARD) __ add(src, Immediate(0x40));
3616 if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
3617 __ movdqa(Operand(dst, 0x00), xmm0);
3618 __ movdqa(Operand(dst, 0x10), xmm1);
3619 __ movdqa(Operand(dst, 0x20), xmm2);
3620 __ movdqa(Operand(dst, 0x30), xmm3);
3621 if (direction == FORWARD) __ add(dst, Immediate(0x40));
3622 __ dec(loop_count);
3623 __ j(not_zero, &loop);
3624 // At most 63 bytes left to copy.
3625 __ bind(&move_last_63);
3626 __ test(count, Immediate(0x20));
3627 __ j(zero, &move_last_31);
3628 if (direction == BACKWARD) __ sub(src, Immediate(0x20));
3629 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
3630 __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
3631 if (direction == FORWARD) __ add(src, Immediate(0x20));
3632 if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
3633 __ movdqa(Operand(dst, 0x00), xmm0);
3634 __ movdqa(Operand(dst, 0x10), xmm1);
3635 if (direction == FORWARD) __ add(dst, Immediate(0x20));
3636 // At most 31 bytes left to copy.
3637 __ bind(&move_last_31);
3638 __ test(count, Immediate(0x10));
3639 __ j(zero, move_last_15);
3640 if (direction == BACKWARD) __ sub(src, Immediate(0x10));
3641 __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
3642 if (direction == FORWARD) __ add(src, Immediate(0x10));
3643 if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
3644 __ movdqa(Operand(dst, 0), xmm0);
3645 if (direction == FORWARD) __ add(dst, Immediate(0x10));
3646 }
3647
3648 void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
3649 __ pop(esi);
3650 __ pop(edi);
3651 __ ret(0);
3652 }
3653
3654 } // namespace
3655
3656 void Builtins::Generate_MemMove(MacroAssembler* masm) {
3657 // Generated code is put into a fixed, unmovable buffer, and not into
3658 // the V8 heap. We can't, and don't, refer to any relocatable addresses
3659 // (e.g. the JavaScript nan-object).
3660
3661 // 32-bit C declaration function calls pass arguments on stack.
3662
3663 // Stack layout:
3664 // esp[12]: Third argument, size.
3665 // esp[8]: Second argument, source pointer.
3666 // esp[4]: First argument, destination pointer.
3667 // esp[0]: return address
3668
3669 const int kDestinationOffset = 1 * kSystemPointerSize;
3670 const int kSourceOffset = 2 * kSystemPointerSize;
3671 const int kSizeOffset = 3 * kSystemPointerSize;
3672
3673 // When copying up to this many bytes, use special "small" handlers.
3674 const size_t kSmallCopySize = 8;
3675 // When copying up to this many bytes, use special "medium" handlers.
3676 const size_t kMediumCopySize = 63;
3677 // When non-overlapping region of src and dst is less than this,
3678 // use a more careful implementation (slightly slower).
3679 const size_t kMinMoveDistance = 16;
3680 // Note that these values are dictated by the implementation below,
3681 // do not just change them and hope things will work!
3682
3683 int stack_offset = 0; // Update if we change the stack height.
3684
3685 Label backward, backward_much_overlap;
3686 Label forward_much_overlap, small_size, medium_size, pop_and_return;
3687 __ push(edi);
3688 __ push(esi);
3689 stack_offset += 2 * kSystemPointerSize;
3690 Register dst = edi;
3691 Register src = esi;
3692 Register count = ecx;
3693 Register loop_count = edx;
3694 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
3695 __ mov(src, Operand(esp, stack_offset + kSourceOffset));
3696 __ mov(count, Operand(esp, stack_offset + kSizeOffset));
3697
3698 __ cmp(dst, src);
3699 __ j(equal, &pop_and_return);
3700
3701 __ prefetch(Operand(src, 0), 1);
3702 __ cmp(count, kSmallCopySize);
3703 __ j(below_equal, &small_size);
3704 __ cmp(count, kMediumCopySize);
3705 __ j(below_equal, &medium_size);
3706 __ cmp(dst, src);
3707 __ j(above, &backward);
3708
3709 {
3710 // |dst| is a lower address than |src|. Copy front-to-back.
3711 Label unaligned_source, move_last_15, skip_last_move;
3712 __ mov(eax, src);
3713 __ sub(eax, dst);
3714 __ cmp(eax, kMinMoveDistance);
3715 __ j(below, &forward_much_overlap);
3716 // Copy first 16 bytes.
3717 __ movdqu(xmm0, Operand(src, 0));
3718 __ movdqu(Operand(dst, 0), xmm0);
3719 // Determine distance to alignment: 16 - (dst & 0xF).
3720 __ mov(edx, dst);
3721 __ and_(edx, 0xF);
3722 __ neg(edx);
3723 __ add(edx, Immediate(16));
3724 __ add(dst, edx);
3725 __ add(src, edx);
3726 __ sub(count, edx);
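    // Example (illustrative): if dst was 0x1003, then edx = 16 - 3 = 13 and
    // dst advances to the 16-byte boundary 0x1010; the bytes skipped over
    // were already covered by the unaligned 16-byte copy above.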
3727 // dst is now aligned. Main copy loop.
3728 __ mov(loop_count, count);
3729 __ shr(loop_count, 6);
3730 // Check if src is also aligned.
3731 __ test(src, Immediate(0xF));
3732 __ j(not_zero, &unaligned_source);
3733 // Copy loop for aligned source and destination.
3734 MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_ALIGNED);
3735 // At most 15 bytes to copy. Copy 16 bytes at end of string.
3736 __ bind(&move_last_15);
3737 __ and_(count, 0xF);
3738 __ j(zero, &skip_last_move, Label::kNear);
3739 __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
3740 __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
3741 __ bind(&skip_last_move);
3742 MemMoveEmitPopAndReturn(masm);
3743
3744 // Copy loop for unaligned source and aligned destination.
3745 __ bind(&unaligned_source);
3746 MemMoveEmitMainLoop(masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
3747 __ jmp(&move_last_15);
3748
3749 // Less than kMinMoveDistance offset between dst and src.
3750 Label loop_until_aligned, last_15_much_overlap;
3751 __ bind(&loop_until_aligned);
3752 __ mov_b(eax, Operand(src, 0));
3753 __ inc(src);
3754 __ mov_b(Operand(dst, 0), eax);
3755 __ inc(dst);
3756 __ dec(count);
3757 __ bind(&forward_much_overlap); // Entry point into this block.
3758 __ test(dst, Immediate(0xF));
3759 __ j(not_zero, &loop_until_aligned);
3760 // dst is now aligned, src can't be. Main copy loop.
3761 __ mov(loop_count, count);
3762 __ shr(loop_count, 6);
3763 MemMoveEmitMainLoop(masm, &last_15_much_overlap, FORWARD, MOVE_UNALIGNED);
3764 __ bind(&last_15_much_overlap);
3765 __ and_(count, 0xF);
3766 __ j(zero, &pop_and_return);
3767 __ cmp(count, kSmallCopySize);
3768 __ j(below_equal, &small_size);
3769 __ jmp(&medium_size);
3770 }
3771
3772 {
3773 // |dst| is a higher address than |src|. Copy backwards.
3774 Label unaligned_source, move_first_15, skip_last_move;
3775 __ bind(&backward);
3776 // |dst| and |src| always point to the end of what's left to copy.
3777 __ add(dst, count);
3778 __ add(src, count);
3779 __ mov(eax, dst);
3780 __ sub(eax, src);
3781 __ cmp(eax, kMinMoveDistance);
3782 __ j(below, &backward_much_overlap);
3783 // Copy last 16 bytes.
3784 __ movdqu(xmm0, Operand(src, -0x10));
3785 __ movdqu(Operand(dst, -0x10), xmm0);
3786 // Find distance to alignment: dst & 0xF
3787 __ mov(edx, dst);
3788 __ and_(edx, 0xF);
3789 __ sub(dst, edx);
3790 __ sub(src, edx);
3791 __ sub(count, edx);
3792 // dst is now aligned. Main copy loop.
3793 __ mov(loop_count, count);
3794 __ shr(loop_count, 6);
3795 // Check if src is also aligned.
3796 __ test(src, Immediate(0xF));
3797 __ j(not_zero, &unaligned_source);
3798 // Copy loop for aligned source and destination.
3799 MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
3800 // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
3801 __ bind(&move_first_15);
3802 __ and_(count, 0xF);
3803 __ j(zero, &skip_last_move, Label::kNear);
3804 __ sub(src, count);
3805 __ sub(dst, count);
3806 __ movdqu(xmm0, Operand(src, 0));
3807 __ movdqu(Operand(dst, 0), xmm0);
3808 __ bind(&skip_last_move);
3809 MemMoveEmitPopAndReturn(masm);
3810
3811 // Copy loop for unaligned source and aligned destination.
3812 __ bind(&unaligned_source);
3813 MemMoveEmitMainLoop(masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
3814 __ jmp(&move_first_15);
3815
3816 // Less than kMinMoveDistance offset between dst and src.
3817 Label loop_until_aligned, first_15_much_overlap;
3818 __ bind(&loop_until_aligned);
3819 __ dec(src);
3820 __ dec(dst);
3821 __ mov_b(eax, Operand(src, 0));
3822 __ mov_b(Operand(dst, 0), eax);
3823 __ dec(count);
3824 __ bind(&backward_much_overlap); // Entry point into this block.
3825 __ test(dst, Immediate(0xF));
3826 __ j(not_zero, &loop_until_aligned);
3827 // dst is now aligned, src can't be. Main copy loop.
3828 __ mov(loop_count, count);
3829 __ shr(loop_count, 6);
3830 MemMoveEmitMainLoop(masm, &first_15_much_overlap, BACKWARD, MOVE_UNALIGNED);
3831 __ bind(&first_15_much_overlap);
3832 __ and_(count, 0xF);
3833 __ j(zero, &pop_and_return);
3834 // Small/medium handlers expect dst/src to point to the beginning.
3835 __ sub(dst, count);
3836 __ sub(src, count);
3837 __ cmp(count, kSmallCopySize);
3838 __ j(below_equal, &small_size);
3839 __ jmp(&medium_size);
3840 }
3841 {
3842 // Special handlers for 9 <= copy_size < 64. No assumptions about
3843 // alignment or move distance, so all reads must be unaligned and
3844 // must happen before any writes.
3845 Label f9_16, f17_32, f33_48, f49_63;
3846
3847 __ bind(&f9_16);
3848 __ movsd(xmm0, Operand(src, 0));
3849 __ movsd(xmm1, Operand(src, count, times_1, -8));
3850 __ movsd(Operand(dst, 0), xmm0);
3851 __ movsd(Operand(dst, count, times_1, -8), xmm1);
3852 MemMoveEmitPopAndReturn(masm);
3853
3854 __ bind(&f17_32);
3855 __ movdqu(xmm0, Operand(src, 0));
3856 __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
3857 __ movdqu(Operand(dst, 0x00), xmm0);
3858 __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
3859 MemMoveEmitPopAndReturn(masm);
3860
3861 __ bind(&f33_48);
3862 __ movdqu(xmm0, Operand(src, 0x00));
3863 __ movdqu(xmm1, Operand(src, 0x10));
3864 __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
3865 __ movdqu(Operand(dst, 0x00), xmm0);
3866 __ movdqu(Operand(dst, 0x10), xmm1);
3867 __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
3868 MemMoveEmitPopAndReturn(masm);
3869
3870 __ bind(&f49_63);
3871 __ movdqu(xmm0, Operand(src, 0x00));
3872 __ movdqu(xmm1, Operand(src, 0x10));
3873 __ movdqu(xmm2, Operand(src, 0x20));
3874 __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
3875 __ movdqu(Operand(dst, 0x00), xmm0);
3876 __ movdqu(Operand(dst, 0x10), xmm1);
3877 __ movdqu(Operand(dst, 0x20), xmm2);
3878 __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
3879 MemMoveEmitPopAndReturn(masm);
3880
3881 __ bind(&medium_size); // Entry point into this block.
3882 __ mov(eax, count);
3883 __ dec(eax);
3884 __ shr(eax, 4);
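    // eax = (count - 1) >> 4 selects the handler: counts 9..16 map to 0
    // (f9_16), 17..32 to 1 (f17_32), 33..48 to 2 (f33_48), and 49..63 to 3
    // (f49_63).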
3885 if (FLAG_debug_code) {
3886 Label ok;
3887 __ cmp(eax, 3);
3888 __ j(below_equal, &ok);
3889 __ int3();
3890 __ bind(&ok);
3891 }
3892
3893 // Dispatch to handlers.
3894 Label eax_is_2_or_3;
3895
3896 __ cmp(eax, 1);
3897 __ j(greater, &eax_is_2_or_3);
3898 __ j(less, &f9_16); // eax == 0.
3899 __ jmp(&f17_32); // eax == 1.
3900
3901 __ bind(&eax_is_2_or_3);
3902 __ cmp(eax, 3);
3903 __ j(less, &f33_48); // eax == 2.
3904 __ jmp(&f49_63); // eax == 3.
3905 }
3906 {
3907 // Specialized copiers for copy_size <= 8 bytes.
3908 Label f0, f1, f2, f3, f4, f5_8;
3909 __ bind(&f0);
3910 MemMoveEmitPopAndReturn(masm);
3911
3912 __ bind(&f1);
3913 __ mov_b(eax, Operand(src, 0));
3914 __ mov_b(Operand(dst, 0), eax);
3915 MemMoveEmitPopAndReturn(masm);
3916
3917 __ bind(&f2);
3918 __ mov_w(eax, Operand(src, 0));
3919 __ mov_w(Operand(dst, 0), eax);
3920 MemMoveEmitPopAndReturn(masm);
3921
3922 __ bind(&f3);
3923 __ mov_w(eax, Operand(src, 0));
3924 __ mov_b(edx, Operand(src, 2));
3925 __ mov_w(Operand(dst, 0), eax);
3926 __ mov_b(Operand(dst, 2), edx);
3927 MemMoveEmitPopAndReturn(masm);
3928
3929 __ bind(&f4);
3930 __ mov(eax, Operand(src, 0));
3931 __ mov(Operand(dst, 0), eax);
3932 MemMoveEmitPopAndReturn(masm);
3933
3934 __ bind(&f5_8);
3935 __ mov(eax, Operand(src, 0));
3936 __ mov(edx, Operand(src, count, times_1, -4));
3937 __ mov(Operand(dst, 0), eax);
3938 __ mov(Operand(dst, count, times_1, -4), edx);
3939 MemMoveEmitPopAndReturn(masm);
3940
3941 __ bind(&small_size); // Entry point into this block.
3942 if (FLAG_debug_code) {
3943 Label ok;
3944 __ cmp(count, 8);
3945 __ j(below_equal, &ok);
3946 __ int3();
3947 __ bind(&ok);
3948 }
3949
3950 // Dispatch to handlers.
3951 Label count_is_above_3, count_is_2_or_3;
3952
3953 __ cmp(count, 3);
3954 __ j(greater, &count_is_above_3);
3955
3956 __ cmp(count, 1);
3957 __ j(greater, &count_is_2_or_3);
3958 __ j(less, &f0); // count == 0.
3959 __ jmp(&f1); // count == 1.
3960
3961 __ bind(&count_is_2_or_3);
3962 __ cmp(count, 3);
3963 __ j(less, &f2); // count == 2.
3964 __ jmp(&f3); // count == 3.
3965
3966 __ bind(&count_is_above_3);
3967 __ cmp(count, 5);
3968 __ j(less, &f4); // count == 4.
3969 __ jmp(&f5_8); // count in [5, 8].
3970 }
3971
3972 __ bind(&pop_and_return);
3973 MemMoveEmitPopAndReturn(masm);
3974 }
3975
3976 namespace {
3977
3978 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3979 DeoptimizeKind deopt_kind) {
3980 Isolate* isolate = masm->isolate();
3981
3982 // Save all general purpose registers before messing with them.
3983 const int kNumberOfRegisters = Register::kNumRegisters;
3984
3985 const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumRegisters;
3986 __ AllocateStackSpace(kDoubleRegsSize);
3987 const RegisterConfiguration* config = RegisterConfiguration::Default();
3988 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3989 int code = config->GetAllocatableDoubleCode(i);
3990 XMMRegister xmm_reg = XMMRegister::from_code(code);
3991 int offset = code * kDoubleSize;
3992 __ movsd(Operand(esp, offset), xmm_reg);
3993 }
3994
3995 __ pushad();
3996
3997 ExternalReference c_entry_fp_address =
3998 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate);
3999 __ mov(masm->ExternalReferenceAsOperand(c_entry_fp_address, esi), ebp);
4000
4001 const int kSavedRegistersAreaSize =
4002 kNumberOfRegisters * kSystemPointerSize + kDoubleRegsSize;
4003
4004 // Get the address of the location in the code object
4005 // and compute the fp-to-sp delta in register edx.
4006 __ mov(ecx, Operand(esp, kSavedRegistersAreaSize));
4007 __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kSystemPointerSize));
4008
4009 __ sub(edx, ebp);
4010 __ neg(edx);
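  // edx now holds the fp-to-sp delta of the deoptimization point, i.e.
  // ebp - (esp + kSavedRegistersAreaSize + kSystemPointerSize): the saved
  // registers plus the return address separate the current esp from the
  // stack pointer at the deopt site.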
4011
4012 // Allocate a new deoptimizer object.
4013 __ PrepareCallCFunction(5, eax);
4014 __ mov(eax, Immediate(0));
4015 Label context_check;
4016 __ mov(edi, Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset));
4017 __ JumpIfSmi(edi, &context_check);
4018 __ mov(eax, Operand(ebp, StandardFrameConstants::kFunctionOffset));
4019 __ bind(&context_check);
4020 __ mov(Operand(esp, 0 * kSystemPointerSize), eax); // Function.
4021 __ mov(Operand(esp, 1 * kSystemPointerSize),
4022 Immediate(static_cast<int>(deopt_kind)));
4023 __ mov(Operand(esp, 2 * kSystemPointerSize), ecx); // Code address or 0.
4024 __ mov(Operand(esp, 3 * kSystemPointerSize), edx); // Fp-to-sp delta.
4025 __ Move(Operand(esp, 4 * kSystemPointerSize),
4026 Immediate(ExternalReference::isolate_address(masm->isolate())));
4027 {
4028 AllowExternalCallThatCantCauseGC scope(masm);
4029 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
4030 }
4031
4032 // Preserve deoptimizer object in register eax and get the input
4033 // frame descriptor pointer.
4034 __ mov(esi, Operand(eax, Deoptimizer::input_offset()));
4035
4036 // Fill in the input registers.
4037 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
4038 int offset =
4039 (i * kSystemPointerSize) + FrameDescription::registers_offset();
4040 __ pop(Operand(esi, offset));
4041 }
4042
4043 int double_regs_offset = FrameDescription::double_registers_offset();
4044 // Fill in the double input registers.
4045 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4046 int code = config->GetAllocatableDoubleCode(i);
4047 int dst_offset = code * kDoubleSize + double_regs_offset;
4048 int src_offset = code * kDoubleSize;
4049 __ movsd(xmm0, Operand(esp, src_offset));
4050 __ movsd(Operand(esi, dst_offset), xmm0);
4051 }
4052
4053 // Clear all FPU exceptions.
4054 // TODO(ulan): Find out why the TOP register is not zero here in some cases,
4055 // and check that the generated code never deoptimizes with unbalanced stack.
4056 __ fnclex();
4057
4058 // Mark the stack as not iterable for the CPU profiler which won't be able to
4059 // walk the stack without the return address.
4060 __ mov_b(__ ExternalReferenceAsOperand(
4061 ExternalReference::stack_is_iterable_address(isolate), edx),
4062 Immediate(0));
4063
4064 // Remove the return address and the double registers.
4065 __ add(esp, Immediate(kDoubleRegsSize + 1 * kSystemPointerSize));
4066
4067 // Compute a pointer to the unwinding limit in register ecx; that is
4068 // the first stack slot not part of the input frame.
4069 __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
4070 __ add(ecx, esp);
4071
4072 // Unwind the stack down to - but not including - the unwinding
4073 // limit and copy the contents of the activation frame to the input
4074 // frame description.
4075 __ lea(edx, Operand(esi, FrameDescription::frame_content_offset()));
4076 Label pop_loop_header;
4077 __ jmp(&pop_loop_header);
4078 Label pop_loop;
4079 __ bind(&pop_loop);
4080 __ pop(Operand(edx, 0));
4081 __ add(edx, Immediate(sizeof(uint32_t)));
4082 __ bind(&pop_loop_header);
4083 __ cmp(ecx, esp);
4084 __ j(not_equal, &pop_loop);
4085
4086 // Compute the output frame in the deoptimizer.
4087 __ push(eax);
4088 __ PrepareCallCFunction(1, esi);
4089 __ mov(Operand(esp, 0 * kSystemPointerSize), eax);
4090 {
4091 AllowExternalCallThatCantCauseGC scope(masm);
4092 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
4093 }
4094 __ pop(eax);
4095
4096 __ mov(esp, Operand(eax, Deoptimizer::caller_frame_top_offset()));
4097
4098 // Replace the current (input) frame with the output frames.
4099 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
4100 // Outer loop state: eax = current FrameDescription**, edx = one
4101 // past the last FrameDescription**.
4102 __ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
4103 __ mov(eax, Operand(eax, Deoptimizer::output_offset()));
4104 __ lea(edx, Operand(eax, edx, times_system_pointer_size, 0));
4105 __ jmp(&outer_loop_header);
4106 __ bind(&outer_push_loop);
4107 // Inner loop state: esi = current FrameDescription*, ecx = loop
4108 // index.
4109 __ mov(esi, Operand(eax, 0));
4110 __ mov(ecx, Operand(esi, FrameDescription::frame_size_offset()));
4111 __ jmp(&inner_loop_header);
4112 __ bind(&inner_push_loop);
4113 __ sub(ecx, Immediate(sizeof(uint32_t)));
4114 __ push(Operand(esi, ecx, times_1, FrameDescription::frame_content_offset()));
4115 __ bind(&inner_loop_header);
4116 __ test(ecx, ecx);
4117 __ j(not_zero, &inner_push_loop);
4118 __ add(eax, Immediate(kSystemPointerSize));
4119 __ bind(&outer_loop_header);
4120 __ cmp(eax, edx);
4121 __ j(below, &outer_push_loop);
4122
4123 // In case of a failed STUB, we have to restore the XMM registers.
4124 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
4125 int code = config->GetAllocatableDoubleCode(i);
4126 XMMRegister xmm_reg = XMMRegister::from_code(code);
4127 int src_offset = code * kDoubleSize + double_regs_offset;
4128 __ movsd(xmm_reg, Operand(esi, src_offset));
4129 }
4130
4131 // Push pc and continuation from the last output frame.
4132 __ push(Operand(esi, FrameDescription::pc_offset()));
4133 __ push(Operand(esi, FrameDescription::continuation_offset()));
4134
4135 // Push the registers from the last output frame.
4136 for (int i = 0; i < kNumberOfRegisters; i++) {
4137 int offset =
4138 (i * kSystemPointerSize) + FrameDescription::registers_offset();
4139 __ push(Operand(esi, offset));
4140 }
4141
4142 __ mov_b(__ ExternalReferenceAsOperand(
4143 ExternalReference::stack_is_iterable_address(isolate), edx),
4144 Immediate(1));
4145
4146 // Restore the registers from the stack.
4147 __ popad();
4148
4149 __ InitializeRootRegister();
4150
4151 // Return to the continuation point.
4152 __ ret(0);
4153 }
4154
4155 } // namespace
4156
4157 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
4158 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
4159 }
4160
4161 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
4162 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
4163 }
4164
4165 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
4166 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
4167 }
4168
4169 namespace {
4170
4171 // Restarts execution either at the current or next (in execution order)
4172 // bytecode. If there is baseline code on the shared function info, converts an
4173 // interpreter frame into a baseline frame and continues execution in baseline
4174 // code. Otherwise execution continues with bytecode.
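// Illustrative pseudocode of the flow below (helper names are descriptive,
// not actual functions):
//   code = closure.shared_function_info.function_data
//   if (!is_osr && code is not a baseline CodeT)  jump to interpreter entry
//   if (feedback vector is missing)  InstallBaselineCode(closure) and retry
//   pc = baseline_pc_for_[next_]bytecode_offset(code, offset, bytecode_array)
//   jump to pc (for OSR: overwrite the return address and "return")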
4175 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
4176 bool next_bytecode,
4177 bool is_osr = false) {
4178 Label start;
4179 __ bind(&start);
4180
4181 // Spill the accumulator register; note that we're not within a frame, so we
4182 // have to make sure to pop it before doing any GC-visible calls.
4183 __ push(kInterpreterAccumulatorRegister);
4184
4185 // Get function from the frame.
4186 Register closure = eax;
4187 __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));
4188
4189 // Get the Code object from the shared function info.
4190 Register code_obj = esi;
4191 __ mov(code_obj,
4192 FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
4193 __ mov(code_obj,
4194 FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
4195
4196 // Check if we have baseline code. For OSR entry it is safe to assume we
4197 // always have baseline code.
4198 if (!is_osr) {
4199 Label start_with_baseline;
4200 __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
4201 __ j(equal, &start_with_baseline);
4202
4203 // Start with bytecode as there is no baseline code.
4204 __ pop(kInterpreterAccumulatorRegister);
4205 Builtin builtin_id = next_bytecode
4206 ? Builtin::kInterpreterEnterAtNextBytecode
4207 : Builtin::kInterpreterEnterAtBytecode;
4208 __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
4209 RelocInfo::CODE_TARGET);
4210
4211 __ bind(&start_with_baseline);
4212 } else if (FLAG_debug_code) {
4213 __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
4214 __ Assert(equal, AbortReason::kExpectedBaselineData);
4215 }
4216
4217 if (FLAG_debug_code) {
4218 AssertCodeIsBaseline(masm, code_obj, ecx);
4219 }
4220
4221 // Load the feedback vector.
4222 Register feedback_vector = ecx;
4223 __ mov(feedback_vector,
4224 FieldOperand(closure, JSFunction::kFeedbackCellOffset));
4225 __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
4226
4227 Label install_baseline_code;
4228 // Check if feedback vector is valid. If not, call prepare for baseline to
4229 // allocate it.
4230 __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE,
4231 kInterpreterBytecodeOffsetRegister);
4232 __ j(not_equal, &install_baseline_code);
4233
4234 // Save BytecodeOffset from the stack frame.
4235 __ mov(kInterpreterBytecodeOffsetRegister,
4236 MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
4237 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
4238 // Replace BytecodeOffset with the feedback vector.
  __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
         feedback_vector);
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_next_executed_bytecode();
  } else {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_bytecode_offset();
  }
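  // ecx is free to be reused here: the feedback vector it held has just been
  // stored into the frame slot above.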
  Register get_baseline_pc = ecx;
  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  if (!is_osr) {
    __ cmp(kInterpreterBytecodeOffsetRegister,
           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
                     kFunctionEntryBytecodeOffset));
    __ j(equal, &function_entry_bytecode);
  }

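  // Make the bytecode offset relative to the start of the bytecodes rather
  // than to the start of the BytecodeArray object.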
  __ sub(kInterpreterBytecodeOffsetRegister,
         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));

  __ bind(&valid_bytecode_offset);
  // Get bytecode array from the stack frame.
  __ mov(kInterpreterBytecodeArrayRegister,
         MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
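  // Call the C helper with (code object, bytecode offset, bytecode array) to
  // compute the pc offset of the corresponding baseline code.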
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, eax);
    __ mov(Operand(esp, 0 * kSystemPointerSize), code_obj);
    __ mov(Operand(esp, 1 * kSystemPointerSize),
           kInterpreterBytecodeOffsetRegister);
    __ mov(Operand(esp, 2 * kSystemPointerSize),
           kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3);
  }
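  // The helper returns a pc offset; add it to the start of the Code object's
  // instruction area to obtain the absolute address to enter at.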
  __ lea(code_obj,
         FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
  __ pop(kInterpreterAccumulatorRegister);

  if (is_osr) {
    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
    // disarm Sparkplug here.
    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj);
  } else {
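    // Resume execution directly at the computed baseline pc.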
    __ jmp(code_obj);
  }
  __ Trap();  // Unreachable.

  if (!is_osr) {
    __ bind(&function_entry_bytecode);
    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
    // address of the first bytecode.
    __ mov(kInterpreterBytecodeOffsetRegister, Immediate(0));
    if (next_bytecode) {
      __ LoadAddress(get_baseline_pc,
                     ExternalReference::baseline_pc_for_bytecode_offset());
    }
    __ jmp(&valid_bytecode_offset);
  }

  __ bind(&install_baseline_code);
  // Pop/re-push the accumulator so that it's spilled within the below frame
  // scope, to keep the stack valid. Use ecx for this -- we can't save it in
  // kInterpreterAccumulatorRegister because that aliases with closure.
  DCHECK(!AreAliased(ecx, kContextRegister, closure));
  __ pop(ecx);
  // Restore the clobbered context register.
  __ mov(kContextRegister,
         Operand(ebp, StandardFrameConstants::kContextOffset));
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(ecx);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
    // Now that we're restarting, we don't have to worry about closure and
    // accumulator aliasing, so pop the spilled accumulator directly back into
    // the right register.
    __ Pop(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ jmp(&start);
}

}  // namespace

void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false);
}

void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, true);
}

void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
    MacroAssembler* masm) {
  Generate_BaselineOrInterpreterEntry(masm, false, true);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32