1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
11 #include "src/codegen/macro-assembler-inl.h"
12 #include "src/codegen/register-configuration.h"
13 #include "src/debug/debug.h"
14 #include "src/deoptimizer/deoptimizer.h"
15 #include "src/execution/frame-constants.h"
16 #include "src/execution/frames.h"
17 #include "src/heap/heap-inl.h"
18 #include "src/logging/counters.h"
19 #include "src/objects/cell.h"
20 #include "src/objects/foreign.h"
21 #include "src/objects/heap-number.h"
22 #include "src/objects/js-generator.h"
23 #include "src/objects/objects-inl.h"
24 #include "src/objects/smi.h"
25 #include "src/runtime/runtime.h"
26
27 #if V8_ENABLE_WEBASSEMBLY
28 #include "src/wasm/wasm-linkage.h"
29 #include "src/wasm/wasm-objects.h"
30 #endif // V8_ENABLE_WEBASSEMBLY
31
32 namespace v8 {
33 namespace internal {
34
35 #define __ ACCESS_MASM(masm)
36
37 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
38 #if defined(__thumb__)
39 // Thumb mode builtin.
40 DCHECK_EQ(1, reinterpret_cast<uintptr_t>(
41 ExternalReference::Create(address).address()) &
42 1);
43 #endif
44 __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
45 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
46 RelocInfo::CODE_TARGET);
47 }
48
49 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
50 Runtime::FunctionId function_id) {
51 // ----------- S t a t e -------------
52 // -- r0 : actual argument count
53 // -- r1 : target function (preserved for callee)
54 // -- r3 : new target (preserved for callee)
55 // -----------------------------------
56 {
57 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
58 // Push a copy of the target function, the new target and the actual
59 // argument count.
60 // Push function as parameter to the runtime call.
61 __ SmiTag(kJavaScriptCallArgCountRegister);
62 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
63 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
64
65 __ CallRuntime(function_id, 1);
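    // The runtime call returns the code object to tail-call in r0. Stash it in
    // r2 (kJavaScriptCallCodeStartRegister, see the static_assert below)
    // before the Pop below restores the argument count into r0.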
66 __ mov(r2, r0);
67
68 // Restore target function, new target and actual argument count.
69 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
70 kJavaScriptCallArgCountRegister);
71 __ SmiUntag(kJavaScriptCallArgCountRegister);
72 }
73 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
74 __ JumpCodeObject(r2);
75 }
76
77 namespace {
78
79 enum class ArgumentsElementType {
80 kRaw, // Push arguments as they are.
81 kHandle // Dereference arguments before pushing.
82 };
83
84 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
85 Register scratch,
86 ArgumentsElementType element_type) {
87 DCHECK(!AreAliased(array, argc, scratch));
88 UseScratchRegisterScope temps(masm);
89 Register counter = scratch;
90 Register value = temps.Acquire();
91 Label loop, entry;
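  // Note that argc includes the receiver slot; the receiver itself is pushed
  // separately by the callers, so only argc - kJSArgcReceiverSlots values are
  // copied here.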
92 __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
93 __ b(&entry);
94 __ bind(&loop);
95 __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
96 if (element_type == ArgumentsElementType::kHandle) {
97 __ ldr(value, MemOperand(value));
98 }
99 __ push(value);
100 __ bind(&entry);
101 __ sub(counter, counter, Operand(1), SetCC);
102 __ b(ge, &loop);
103 }
104
105 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
106 // ----------- S t a t e -------------
107 // -- r0 : number of arguments
108 // -- r1 : constructor function
109 // -- r3 : new target
110 // -- cp : context
111 // -- lr : return address
112 // -- sp[...]: constructor arguments
113 // -----------------------------------
114
115 Register scratch = r2;
116
117 Label stack_overflow;
118
119 __ StackOverflowCheck(r0, scratch, &stack_overflow);
120
121 // Enter a construct frame.
122 {
123 FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
124
125 // Preserve the incoming parameters on the stack.
126 __ SmiTag(r0);
127 __ Push(cp, r0);
128 __ SmiUntag(r0);
129
130 // TODO(victorgomes): When the arguments adaptor is completely removed, we
131 // should get the formal parameter count and copy the arguments in its
132 // correct position (including any undefined), instead of delaying this to
133 // InvokeFunction.
134
135 // Set up pointer to first argument (skip receiver).
136 __ add(
137 r4, fp,
138 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
139 // Copy arguments and receiver to the expression stack.
140 // r4: Pointer to start of arguments.
141 // r0: Number of arguments.
142 Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
143 // The receiver for the builtin/api call.
144 __ PushRoot(RootIndex::kTheHoleValue);
145
146 // Call the function.
147 // r0: number of arguments (untagged)
148 // r1: constructor function
149 // r3: new target
150 __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
151
152 // Restore context from the frame.
153 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
154 // Restore smi-tagged arguments count from the frame.
155 __ ldr(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
156 // Leave construct frame.
157 }
158
159 // Remove caller arguments from the stack and return.
160 __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
161 TurboAssembler::kCountIncludesReceiver);
162 __ Jump(lr);
163
164 __ bind(&stack_overflow);
165 {
166 FrameScope scope(masm, StackFrame::INTERNAL);
167 __ CallRuntime(Runtime::kThrowStackOverflow);
168 __ bkpt(0); // Unreachable code.
169 }
170 }
171
172 } // namespace
173
174 // The construct stub for ES5 constructor functions and ES6 class constructors.
175 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
176 // ----------- S t a t e -------------
177 // -- r0: number of arguments (untagged)
178 // -- r1: constructor function
179 // -- r3: new target
180 // -- cp: context
181 // -- lr: return address
182 // -- sp[...]: constructor arguments
183 // -----------------------------------
184
185 FrameScope scope(masm, StackFrame::MANUAL);
186 // Enter a construct frame.
187 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
188 __ EnterFrame(StackFrame::CONSTRUCT);
189
190 // Preserve the incoming parameters on the stack.
191 __ LoadRoot(r4, RootIndex::kTheHoleValue);
192 __ SmiTag(r0);
193 __ Push(cp, r0, r1, r4, r3);
194
195 // ----------- S t a t e -------------
196 // -- sp[0*kPointerSize]: new target
197 // -- sp[1*kPointerSize]: padding
198 // -- r1 and sp[2*kPointerSize]: constructor function
199 // -- sp[3*kPointerSize]: number of arguments (tagged)
200 // -- sp[4*kPointerSize]: context
201 // -----------------------------------
202
203 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
204 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
205 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r4);
206 __ JumpIfIsInRange(
207 r4, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
208 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
209       &not_create_implicit_receiver);
210
211 // If not derived class constructor: Allocate the new receiver object.
212 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r4,
213 r5);
214 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
215 __ b(&post_instantiation_deopt_entry);
216
217 // Else: use TheHoleValue as receiver for constructor call
218   __ bind(&not_create_implicit_receiver);
219 __ LoadRoot(r0, RootIndex::kTheHoleValue);
220
221 // ----------- S t a t e -------------
222 // -- r0: receiver
223 // -- Slot 3 / sp[0*kPointerSize]: new target
224 // -- Slot 2 / sp[1*kPointerSize]: constructor function
225 // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
226 // -- Slot 0 / sp[3*kPointerSize]: context
227 // -----------------------------------
228 // Deoptimizer enters here.
229 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
230 masm->pc_offset());
231 __ bind(&post_instantiation_deopt_entry);
232
233 // Restore new target.
234 __ Pop(r3);
235
236 // Push the allocated receiver to the stack.
237 __ Push(r0);
238   // We need two copies because we may have to return the original one
239   // and the calling conventions dictate that the called function pops the
240   // receiver. The second copy is pushed after the arguments; we keep it in
241   // r6 since r0 needs to hold the number of arguments until the function
242   // is invoked below.
243 __ mov(r6, r0);
244
245 // Set up pointer to first argument (skip receiver).
246 __ add(r4, fp,
247 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
248
249 // Restore constructor function and argument count.
250 __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
251 __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
252 __ SmiUntag(r0);
253
254 Label stack_overflow;
255 __ StackOverflowCheck(r0, r5, &stack_overflow);
256
257 // TODO(victorgomes): When the arguments adaptor is completely removed, we
258 // should get the formal parameter count and copy the arguments in its
259 // correct position (including any undefined), instead of delaying this to
260 // InvokeFunction.
261
262 // Copy arguments to the expression stack.
263 // r4: Pointer to start of argument.
264 // r0: Number of arguments.
265 Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
266
267 // Push implicit receiver.
268 __ Push(r6);
269
270 // Call the function.
271 __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
272
273 // ----------- S t a t e -------------
274 // -- r0: constructor result
275 // -- sp[0*kPointerSize]: implicit receiver
276 // -- sp[1*kPointerSize]: padding
277 // -- sp[2*kPointerSize]: constructor function
278 // -- sp[3*kPointerSize]: number of arguments
279 // -- sp[4*kPointerSize]: context
280 // -----------------------------------
281
282 // Store offset of return address for deoptimizer.
283 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
284 masm->pc_offset());
285
286 // If the result is an object (in the ECMA sense), we should get rid
287 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
288 // on page 74.
289 Label use_receiver, do_throw, leave_and_return, check_receiver;
290
291 // If the result is undefined, we jump out to using the implicit receiver.
292 __ JumpIfNotRoot(r0, RootIndex::kUndefinedValue, &check_receiver);
293
294 // Otherwise we do a smi check and fall through to check if the return value
295 // is a valid receiver.
296
297 // Throw away the result of the constructor invocation and use the
298 // on-stack receiver as the result.
299 __ bind(&use_receiver);
300 __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
301 __ JumpIfRoot(r0, RootIndex::kTheHoleValue, &do_throw);
302
303 __ bind(&leave_and_return);
304 // Restore smi-tagged arguments count from the frame.
305 __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
306 // Leave construct frame.
307 __ LeaveFrame(StackFrame::CONSTRUCT);
308
309 // Remove caller arguments from the stack and return.
310 __ DropArguments(r1, TurboAssembler::kCountIsSmi,
311 TurboAssembler::kCountIncludesReceiver);
312 __ Jump(lr);
313
314 __ bind(&check_receiver);
315 // If the result is a smi, it is *not* an object in the ECMA sense.
316 __ JumpIfSmi(r0, &use_receiver);
317
318 // If the type of the result (stored in its map) is less than
319 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
320 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
321 __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
322 __ b(ge, &leave_and_return);
323 __ b(&use_receiver);
324
325 __ bind(&do_throw);
326 // Restore the context from the frame.
327 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
328 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
329 __ bkpt(0);
330
331 __ bind(&stack_overflow);
332 // Restore the context from the frame.
333 __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
334 __ CallRuntime(Runtime::kThrowStackOverflow);
335 // Unreachable code.
336 __ bkpt(0);
337 }
338
339 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
340 Generate_JSBuiltinsConstructStubHelper(masm);
341 }
342
343 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
344 Register scratch) {
345 DCHECK(!AreAliased(code, scratch));
346 // Verify that the code kind is baseline code via the CodeKind.
347 __ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
348 __ DecodeField<Code::KindField>(scratch);
349 __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
350 __ Assert(eq, AbortReason::kExpectedBaselineData);
351 }
352
353 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
354 Register sfi_data,
355 Register scratch1,
356 Label* is_baseline) {
357 ASM_CODE_COMMENT(masm);
358 Label done;
359 __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
360 if (FLAG_debug_code) {
361 Label not_baseline;
362     __ b(ne, &not_baseline);
363 AssertCodeIsBaseline(masm, sfi_data, scratch1);
364 __ b(eq, is_baseline);
365     __ bind(&not_baseline);
366 } else {
367 __ b(eq, is_baseline);
368 }
369 __ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
370 __ b(ne, &done);
371 __ ldr(sfi_data,
372 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
373
374 __ bind(&done);
375 }
376
377 // static
378 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
379 // ----------- S t a t e -------------
380 // -- r0 : the value to pass to the generator
381 // -- r1 : the JSGeneratorObject to resume
382 // -- lr : return address
383 // -----------------------------------
384 // Store input value into generator object.
385 __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
386 __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
387 kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
388 // Check that r1 is still valid, RecordWrite might have clobbered it.
389 __ AssertGeneratorObject(r1);
390
391 // Load suspended function and context.
392 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
393 __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
394
395 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
396 Label stepping_prepared;
397 Register scratch = r5;
398
399 // Flood function if we are stepping.
400 ExternalReference debug_hook =
401 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
402 __ Move(scratch, debug_hook);
403 __ ldrsb(scratch, MemOperand(scratch));
404 __ cmp(scratch, Operand(0));
405 __ b(ne, &prepare_step_in_if_stepping);
406
407 // Flood function if we need to continue stepping in the suspended
408 // generator.
409 ExternalReference debug_suspended_generator =
410 ExternalReference::debug_suspended_generator_address(masm->isolate());
411 __ Move(scratch, debug_suspended_generator);
412 __ ldr(scratch, MemOperand(scratch));
413 __ cmp(scratch, Operand(r1));
414 __ b(eq, &prepare_step_in_suspended_generator);
415 __ bind(&stepping_prepared);
416
417 // Check the stack for overflow. We are not trying to catch interruptions
418 // (i.e. debug break and preemption) here, so check the "real stack limit".
419 Label stack_overflow;
420 __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
421 __ cmp(sp, scratch);
422 __ b(lo, &stack_overflow);
423
424 // ----------- S t a t e -------------
425 // -- r1 : the JSGeneratorObject to resume
426 // -- r4 : generator function
427 // -- cp : generator context
428 // -- lr : return address
429 // -- sp[0] : generator receiver
430 // -----------------------------------
431
432 // Copy the function arguments from the generator object's register file.
433 __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
434 __ ldrh(r3,
435 FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
436 __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
437 __ ldr(r2,
438 FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
439 {
440 Label done_loop, loop;
441 __ bind(&loop);
442 __ sub(r3, r3, Operand(1), SetCC);
443 __ b(lt, &done_loop);
444 __ add(scratch, r2, Operand(r3, LSL, kTaggedSizeLog2));
445 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
446 __ Push(scratch);
447 __ b(&loop);
448 __ bind(&done_loop);
449
450 // Push receiver.
451 __ ldr(scratch, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
452 __ Push(scratch);
453 }
454
455 // Underlying function needs to have bytecode available.
456 if (FLAG_debug_code) {
457 Label is_baseline;
458 __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
459 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
460 GetSharedFunctionInfoBytecodeOrBaseline(masm, r3, r0, &is_baseline);
461 __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
462 __ Assert(eq, AbortReason::kMissingBytecodeArray);
463 __ bind(&is_baseline);
464 }
465
466 // Resume (Ignition/TurboFan) generator object.
467 {
468 __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
469 __ ldrh(r0, FieldMemOperand(
470 r0, SharedFunctionInfo::kFormalParameterCountOffset));
471 // We abuse new.target both to indicate that this is a resume call and to
472 // pass in the generator object. In ordinary calls, new.target is always
473 // undefined because generator functions are non-constructable.
474 __ Move(r3, r1);
475 __ Move(r1, r4);
476 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
477 __ ldr(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
478 __ JumpCodeObject(r2);
479 }
480
481 __ bind(&prepare_step_in_if_stepping);
482 {
483 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
484 __ Push(r1, r4);
485 // Push hole as receiver since we do not use it for stepping.
486 __ PushRoot(RootIndex::kTheHoleValue);
487 __ CallRuntime(Runtime::kDebugOnFunctionCall);
488 __ Pop(r1);
489 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
490 }
491 __ b(&stepping_prepared);
492
493 __ bind(&prepare_step_in_suspended_generator);
494 {
495 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
496 __ Push(r1);
497 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
498 __ Pop(r1);
499 __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
500 }
501 __ b(&stepping_prepared);
502
503 __ bind(&stack_overflow);
504 {
505 FrameScope scope(masm, StackFrame::INTERNAL);
506 __ CallRuntime(Runtime::kThrowStackOverflow);
507 __ bkpt(0); // This should be unreachable.
508 }
509 }
510
511 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
512 FrameScope scope(masm, StackFrame::INTERNAL);
513 __ push(r1);
514 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
515 }
516
517 namespace {
518
519 // Total size of the stack space pushed by JSEntryVariant.
520 // JSEntryTrampoline uses this to access the on-stack arguments passed to
521 // JSEntryVariant.
522 constexpr int kPushedStackSpace = kNumCalleeSaved * kPointerSize -
523 kPointerSize /* FP */ +
524 kNumDoubleCalleeSaved * kDoubleSize +
525 5 * kPointerSize /* r5, r6, r7, fp, lr */ +
526 EntryFrameConstants::kCallerFPOffset;
527
528 // Assert that the EntryFrameConstants are in sync with the builtin.
529 static_assert(kPushedStackSpace == EntryFrameConstants::kDirectCallerSPOffset +
530 3 * kPointerSize /* r5, r6, r7*/ +
531 EntryFrameConstants::kCallerFPOffset,
532 "Pushed stack space and frame constants do not match. See "
533 "frame-constants-arm.h");
534
535 // Called with the native C calling convention. The corresponding function
536 // signature is either:
537 //
538 // using JSEntryFunction = GeneratedCode<Address(
539 // Address root_register_value, Address new_target, Address target,
540 // Address receiver, intptr_t argc, Address** argv)>;
541 // or
542 // using JSEntryFunction = GeneratedCode<Address(
543 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
544 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
545 Builtin entry_trampoline) {
546 // The register state is either:
547 // r0: root_register_value
548 // r1: code entry
549 // r2: function
550 // r3: receiver
551 // [sp + 0 * kSystemPointerSize]: argc
552 // [sp + 1 * kSystemPointerSize]: argv
553 // or
554 // r0: root_register_value
555 // r1: microtask_queue
556 // Preserve all but r0 and pass them to entry_trampoline.
557 Label invoke, handler_entry, exit;
558 const RegList kCalleeSavedWithoutFp = kCalleeSaved - fp;
559
560 // Update |pushed_stack_space| when we manipulate the stack.
561 int pushed_stack_space = EntryFrameConstants::kCallerFPOffset;
562 {
563 NoRootArrayScope no_root_array(masm);
564
565 // Called from C, so do not pop argc and args on exit (preserve sp)
566 // No need to save register-passed args
567 // Save callee-saved registers (incl. cp), but without fp
568 __ stm(db_w, sp, kCalleeSavedWithoutFp);
569 pushed_stack_space +=
570 kNumCalleeSaved * kPointerSize - kPointerSize /* FP */;
571
572 // Save callee-saved vfp registers.
573 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
574 pushed_stack_space += kNumDoubleCalleeSaved * kDoubleSize;
575
576 // Set up the reserved register for 0.0.
577 __ vmov(kDoubleRegZero, base::Double(0.0));
578
579 // Initialize the root register.
580 // C calling convention. The first argument is passed in r0.
581 __ mov(kRootRegister, r0);
582 }
583
584 // Push a frame with special values setup to mark it as an entry frame.
585 // r0: root_register_value
586 __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
587 __ mov(r6, Operand(StackFrame::TypeToMarker(type)));
588 __ Move(r4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
589 masm->isolate()));
590 __ ldr(r5, MemOperand(r4));
591
592 __ stm(db_w, sp, {r5, r6, r7, fp, lr});
593 pushed_stack_space += 5 * kPointerSize /* r5, r6, r7, fp, lr */;
594
595 // Clear c_entry_fp, now we've pushed its previous value to the stack.
596 // If the c_entry_fp is not already zero and we don't clear it, the
597 // SafeStackFrameIterator will assume we are executing C++ and miss the JS
598 // frames on top.
599 __ mov(r5, Operand::Zero());
600 __ str(r5, MemOperand(r4));
601
602 Register scratch = r6;
603
604 // Set up frame pointer for the frame to be pushed.
605 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
606
607 // If this is the outermost JS call, set js_entry_sp value.
608 Label non_outermost_js;
609 ExternalReference js_entry_sp = ExternalReference::Create(
610 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
611 __ Move(r5, js_entry_sp);
612 __ ldr(scratch, MemOperand(r5));
613 __ cmp(scratch, Operand::Zero());
614 __ b(ne, &non_outermost_js);
615 __ str(fp, MemOperand(r5));
616 __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
617 Label cont;
618 __ b(&cont);
619 __ bind(&non_outermost_js);
620 __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
621 __ bind(&cont);
622 __ push(scratch);
623
624 // Jump to a faked try block that does the invoke, with a faked catch
625 // block that sets the pending exception.
626 __ jmp(&invoke);
627
628 // Block literal pool emission whilst taking the position of the handler
629 // entry. This avoids making the assumption that literal pools are always
630 // emitted after an instruction is emitted, rather than before.
631 {
632 Assembler::BlockConstPoolScope block_const_pool(masm);
633 __ bind(&handler_entry);
634
635 // Store the current pc as the handler offset. It's used later to create the
636 // handler table.
637 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
638
639 // Caught exception: Store result (exception) in the pending exception
640 // field in the JSEnv and return a failure sentinel. Coming in here the
641 // fp will be invalid because the PushStackHandler below sets it to 0 to
642 // signal the existence of the JSEntry frame.
643 __ Move(scratch,
644 ExternalReference::Create(
645 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
646 }
647 __ str(r0, MemOperand(scratch));
648 __ LoadRoot(r0, RootIndex::kException);
649 __ b(&exit);
650
651 // Invoke: Link this frame into the handler chain.
652 __ bind(&invoke);
653 // Must preserve r0-r4, r5-r6 are available.
654 __ PushStackHandler();
655 // If an exception not caught by another handler occurs, this handler
656 // returns control to the code after the bl(&invoke) above, which
657 // restores all kCalleeSaved registers (including cp and fp) to their
658 // saved values before returning a failure to C.
659 //
660 // Invoke the function by calling through JS entry trampoline builtin and
661 // pop the faked function when we return.
662 Handle<Code> trampoline_code =
663 masm->isolate()->builtins()->code_handle(entry_trampoline);
664 DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
665 USE(pushed_stack_space);
666 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
667
668 // Unlink this frame from the handler chain.
669 __ PopStackHandler();
670
671 __ bind(&exit); // r0 holds result
672 // Check if the current stack frame is marked as the outermost JS frame.
673 Label non_outermost_js_2;
674 __ pop(r5);
675 __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
676 __ b(ne, &non_outermost_js_2);
677 __ mov(r6, Operand::Zero());
678 __ Move(r5, js_entry_sp);
679 __ str(r6, MemOperand(r5));
680 __ bind(&non_outermost_js_2);
681
682 // Restore the top frame descriptors from the stack.
683 __ pop(r3);
684 __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
685 masm->isolate()));
686 __ str(r3, MemOperand(scratch));
687
688 // Reset the stack to the callee saved registers.
689 __ add(sp, sp,
690 Operand(-EntryFrameConstants::kCallerFPOffset -
691 kSystemPointerSize /* already popped one */));
692
693 __ ldm(ia_w, sp, {fp, lr});
694
695 // Restore callee-saved vfp registers.
696 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
697
698 __ ldm(ia_w, sp, kCalleeSavedWithoutFp);
699
700 __ mov(pc, lr);
701
702 // Emit constant pool.
703 __ CheckConstPool(true, false);
704 }
705
706 } // namespace
707
708 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
709 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
710 }
711
712 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
713 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
714 Builtin::kJSConstructEntryTrampoline);
715 }
716
717 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
718 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
719 Builtin::kRunMicrotasksTrampoline);
720 }
721
722 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
723 bool is_construct) {
724 // Called from Generate_JS_Entry
725 // r0: root_register_value
726 // r1: new.target
727 // r2: function
728 // r3: receiver
729 // [fp + kPushedStackSpace + 0 * kSystemPointerSize]: argc
730 // [fp + kPushedStackSpace + 1 * kSystemPointerSize]: argv
731 // r5-r6, r8 and cp may be clobbered
732
733 __ ldr(r0,
734 MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
735 __ ldr(r4,
736 MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
737
738 // r1: new.target
739 // r2: function
740 // r3: receiver
741 // r0: argc
742 // r4: argv
743
744 // Enter an internal frame.
745 {
746 FrameScope scope(masm, StackFrame::INTERNAL);
747
748 // Setup the context (we need to use the caller context from the isolate).
749 ExternalReference context_address = ExternalReference::Create(
750 IsolateAddressId::kContextAddress, masm->isolate());
751 __ Move(cp, context_address);
752 __ ldr(cp, MemOperand(cp));
753
754 // Push the function.
755 __ Push(r2);
756
757 // Check if we have enough stack space to push all arguments + receiver.
758 // Clobbers r5.
759 Label enough_stack_space, stack_overflow;
760 __ mov(r6, r0);
761 __ StackOverflowCheck(r6, r5, &stack_overflow);
762 __ b(&enough_stack_space);
763 __ bind(&stack_overflow);
764 __ CallRuntime(Runtime::kThrowStackOverflow);
765 // Unreachable code.
766 __ bkpt(0);
767
768 __ bind(&enough_stack_space);
769
770 // Copy arguments to the stack.
771 // r1: new.target
772 // r2: function
773 // r3: receiver
774 // r0: argc
775 // r4: argv, i.e. points to first arg
776 Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kHandle);
777
778 // Push the receiver.
779 __ Push(r3);
780
781 // Setup new.target and function.
782 __ mov(r3, r1);
783 __ mov(r1, r2);
784 // r0: argc
785 // r1: function
786 // r3: new.target
787
788 // Initialize all JavaScript callee-saved registers, since they will be seen
789 // by the garbage collector as part of handlers.
790 __ LoadRoot(r4, RootIndex::kUndefinedValue);
791 __ mov(r2, r4);
792 __ mov(r5, r4);
793 __ mov(r6, r4);
794 __ mov(r8, r4);
795 if (kR9Available == 1) {
796 __ mov(r9, r4);
797 }
798
799 // Invoke the code.
800 Handle<Code> builtin = is_construct
801 ? BUILTIN_CODE(masm->isolate(), Construct)
802 : masm->isolate()->builtins()->Call();
803 __ Call(builtin, RelocInfo::CODE_TARGET);
804
805 // Exit the JS frame and remove the parameters (except function), and
806 // return.
807 // Respect ABI stack constraint.
808 }
809 __ Jump(lr);
810
811 // r0: result
812 }
813
814 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
815 Generate_JSEntryTrampolineHelper(masm, false);
816 }
817
818 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
819 Generate_JSEntryTrampolineHelper(masm, true);
820 }
821
822 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
823 // This expects two C++ function parameters passed by Invoke() in
824 // execution.cc.
825 // r0: root_register_value
826 // r1: microtask_queue
827
828 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r1);
829 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
830 }
831
832 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
833 Register optimized_code,
834 Register closure) {
835 ASM_CODE_COMMENT(masm);
836 DCHECK(!AreAliased(optimized_code, closure));
837 // Store code entry in the closure.
838 __ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
839 __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
840 kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
841 RememberedSetAction::kOmit, SmiCheck::kOmit);
842 }
843
844 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
845 Register scratch2) {
846 ASM_CODE_COMMENT(masm);
847 Register params_size = scratch1;
848 // Get the size of the formal parameters + receiver (in bytes).
849 __ ldr(params_size,
850 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
851 __ ldr(params_size,
852 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
853
854 Register actual_params_size = scratch2;
855 // Compute the size of the actual parameters + receiver (in bytes).
856 __ ldr(actual_params_size,
857 MemOperand(fp, StandardFrameConstants::kArgCOffset));
858 __ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
859
860 // If actual is bigger than formal, then we should use it to free up the stack
861 // arguments.
862 __ cmp(params_size, actual_params_size);
863 __ mov(params_size, actual_params_size, LeaveCC, lt);
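  // params_size now holds the larger of the formal and actual parameter sizes
  // (in bytes); this is what DropArguments below removes.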
864
865 // Leave the frame (also dropping the register file).
866 __ LeaveFrame(StackFrame::INTERPRETED);
867
868 // Drop receiver + arguments.
869 __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
870 TurboAssembler::kCountIncludesReceiver);
871 }
872
873 // Tail-call |function_id| if |actual_state| == |expected_state|
874 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
875 Register actual_state,
876 TieringState expected_state,
877 Runtime::FunctionId function_id) {
878 ASM_CODE_COMMENT(masm);
879 Label no_match;
880 __ cmp_raw_immediate(actual_state, static_cast<int>(expected_state));
881 __ b(ne, &no_match);
882 GenerateTailCallToReturnedCode(masm, function_id);
883 __ bind(&no_match);
884 }
885
886 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
887 Register optimized_code_entry,
888 Register scratch) {
889 // ----------- S t a t e -------------
890 // -- r0 : actual argument count
891 // -- r3 : new target (preserved for callee if needed, and caller)
892 // -- r1 : target function (preserved for callee if needed, and caller)
893 // -----------------------------------
894 DCHECK(!AreAliased(r1, r3, optimized_code_entry, scratch));
895
896 Register closure = r1;
897 Label heal_optimized_code_slot;
898
899 // If the optimized code is cleared, go to runtime to update the optimization
900 // marker field.
901 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
902 &heal_optimized_code_slot);
903
904 // Check if the optimized code is marked for deopt. If it is, call the
905 // runtime to clear it.
906 __ ldr(scratch,
907 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
908 __ ldr(scratch,
909 FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
910 __ tst(scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
911 __ b(ne, &heal_optimized_code_slot);
912
913 // Optimized code is good, get it into the closure and link the closure
914 // into the optimized functions list, then tail call the optimized code.
915 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
916 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
917 __ LoadCodeObjectEntry(r2, optimized_code_entry);
918 __ Jump(r2);
919
920 // Optimized code slot contains deoptimized code or code is cleared and
921 // optimized code marker isn't updated. Evict the code, update the marker
922 // and re-enter the closure's code.
923 __ bind(&heal_optimized_code_slot);
924 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
925 }
926
927 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
928 Register tiering_state) {
929 // ----------- S t a t e -------------
930 // -- r0 : actual argument count
931 // -- r3 : new target (preserved for callee if needed, and caller)
932 // -- r1 : target function (preserved for callee if needed, and caller)
933 // -- feedback vector (preserved for caller if needed)
934   //  -- tiering_state : an int32 containing a non-zero optimization
935   //                     marker.
936 // -----------------------------------
937 DCHECK(!AreAliased(feedback_vector, r1, r3, tiering_state));
938
939 TailCallRuntimeIfStateEquals(masm, tiering_state,
940 TieringState::kRequestTurbofan_Synchronous,
941 Runtime::kCompileTurbofan_Synchronous);
942 TailCallRuntimeIfStateEquals(masm, tiering_state,
943 TieringState::kRequestTurbofan_Concurrent,
944 Runtime::kCompileTurbofan_Concurrent);
945
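  // Any tiering state other than the TurboFan requests handled above is
  // unexpected here and falls through to stop().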
946 __ stop();
947 }
948
949 // Advance the current bytecode offset. This simulates what all bytecode
950 // handlers do upon completion of the underlying operation. Will bail out to a
951 // label if the bytecode (without prefix) is a return bytecode. Will not advance
952 // the bytecode offset if the current bytecode is a JumpLoop, instead just
953 // re-executing the JumpLoop to jump to the correct bytecode.
954 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
955 Register bytecode_array,
956 Register bytecode_offset,
957 Register bytecode, Register scratch1,
958 Register scratch2, Label* if_return) {
959 ASM_CODE_COMMENT(masm);
960 Register bytecode_size_table = scratch1;
961
962   // The bytecode offset value will be increased by one in wide and extra wide
963   // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
964   // will restore the original bytecode offset. In order to simplify the code,
965   // we keep a backup of it in |original_bytecode_offset|.
966 Register original_bytecode_offset = scratch2;
967 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
968 bytecode, original_bytecode_offset));
969
970 __ Move(bytecode_size_table,
971 ExternalReference::bytecode_size_table_address());
972 __ Move(original_bytecode_offset, bytecode_offset);
973
974 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
975 Label process_bytecode;
976 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
977 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
978 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
979 STATIC_ASSERT(3 ==
980 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
981 __ cmp(bytecode, Operand(0x3));
982 __ b(hi, &process_bytecode);
983 __ tst(bytecode, Operand(0x1));
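  // Bit 0 distinguishes the ExtraWide prefixes (set) from the Wide ones
  // (clear); these flags are only consumed by the conditional table advance
  // below.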
984 // Load the next bytecode.
985 __ add(bytecode_offset, bytecode_offset, Operand(1));
986 __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
987
988 // Update table to the wide scaled table.
989 __ add(bytecode_size_table, bytecode_size_table,
990 Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
991 // Conditionally update table to the extra wide scaled table. We are taking
992 // advantage of the fact that the extra wide follows the wide one.
993 __ add(bytecode_size_table, bytecode_size_table,
994 Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount), LeaveCC,
995 ne);
996
997 __ bind(&process_bytecode);
998
999 // Bailout to the return label if this is a return bytecode.
1000
1001 // Create cmp, cmpne, ..., cmpne to check for a return bytecode.
1002 Condition flag = al;
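  // The first comparison is unconditional (al); all later ones are cmpne, so
  // once a return bytecode matches, eq is preserved through the rest of the
  // chain.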
1003 #define JUMP_IF_EQUAL(NAME) \
1004 __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)), \
1005 flag); \
1006 flag = ne;
1007 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1008 #undef JUMP_IF_EQUAL
1009
1010 __ b(if_return, eq);
1011
1012 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1013 // of the loop.
1014 Label end, not_jump_loop;
1015 __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1016   __ b(ne, &not_jump_loop);
1017 // We need to restore the original bytecode_offset since we might have
1018 // increased it to skip the wide / extra-wide prefix bytecode.
1019 __ Move(bytecode_offset, original_bytecode_offset);
1020 __ b(&end);
1021
1022   __ bind(&not_jump_loop);
1023 // Otherwise, load the size of the current bytecode and advance the offset.
1024 __ ldrb(scratch1, MemOperand(bytecode_size_table, bytecode));
1025 __ add(bytecode_offset, bytecode_offset, scratch1);
1026
1027 __ bind(&end);
1028 }
1029
1030 // Read off the optimization state in the feedback vector and check if there
1031 // is optimized code or a tiering state that needs to be processed.
1032 static void LoadTieringStateAndJumpIfNeedsProcessing(
1033 MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1034 Label* has_optimized_code_or_state) {
1035 ASM_CODE_COMMENT(masm);
1036 DCHECK(!AreAliased(optimization_state, feedback_vector));
1037 __ ldr(optimization_state,
1038 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1039 __ tst(
1040 optimization_state,
1041 Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1042 __ b(ne, has_optimized_code_or_state);
1043 }
1044
1045 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1046 MacroAssembler* masm, Register optimization_state,
1047 Register feedback_vector) {
1048 ASM_CODE_COMMENT(masm);
1049 DCHECK(!AreAliased(optimization_state, feedback_vector));
1050 Label maybe_has_optimized_code;
1051 // Check if optimized code is available
1052 __ tst(optimization_state,
1053 Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
1054 __ b(eq, &maybe_has_optimized_code);
1055
1056 Register tiering_state = optimization_state;
1057 __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1058 MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1059
1060 __ bind(&maybe_has_optimized_code);
1061 Register optimized_code_entry = optimization_state;
1062 __ ldr(tiering_state,
1063 FieldMemOperand(feedback_vector,
1064 FeedbackVector::kMaybeOptimizedCodeOffset));
1065 TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
1066 }
1067
1068 namespace {
1069
1070 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
1071 Register scratch) {
1072 // Reset the bytecode age and OSR state (optimized to a single write).
1073 static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1074 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1075 __ mov(scratch, Operand(0));
1076 __ str(scratch,
1077 FieldMemOperand(bytecode_array,
1078 BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1079 }
1080
1081 } // namespace
1082
1083 // static
1084 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1085 UseScratchRegisterScope temps(masm);
1086 // Need a few extra registers
1087 temps.Include(r8, r9);
1088
1089 auto descriptor =
1090 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1091 Register closure = descriptor.GetRegisterParameter(
1092 BaselineOutOfLinePrologueDescriptor::kClosure);
1093 // Load the feedback vector from the closure.
1094 Register feedback_vector = temps.Acquire();
1095 __ ldr(feedback_vector,
1096 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1097 __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1098 if (FLAG_debug_code) {
1099 UseScratchRegisterScope temps(masm);
1100 Register scratch = temps.Acquire();
1101 __ CompareObjectType(feedback_vector, scratch, scratch,
1102 FEEDBACK_VECTOR_TYPE);
1103 __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1104 }
1105
1106 // Check the tiering state.
1107 Label has_optimized_code_or_state;
1108 Register optimization_state = no_reg;
1109 {
1110 UseScratchRegisterScope temps(masm);
1111     // optimization_state is only used on the |has_optimized_code_or_state|
1112     // path; outside of it the register can be reused.
1113 optimization_state = temps.Acquire();
1114 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1115 feedback_vector,
1116 &has_optimized_code_or_state);
1117 }
1118
1119 // Increment invocation count for the function.
1120 {
1121 UseScratchRegisterScope temps(masm);
1122 Register invocation_count = temps.Acquire();
1123 __ ldr(invocation_count,
1124 FieldMemOperand(feedback_vector,
1125 FeedbackVector::kInvocationCountOffset));
1126 __ add(invocation_count, invocation_count, Operand(1));
1127 __ str(invocation_count,
1128 FieldMemOperand(feedback_vector,
1129 FeedbackVector::kInvocationCountOffset));
1130 }
1131
1132 FrameScope frame_scope(masm, StackFrame::MANUAL);
1133 {
1134 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1135 // Normally the first thing we'd do here is Push(lr, fp), but we already
1136 // entered the frame in BaselineCompiler::Prologue, as we had to use the
1137 // value lr before the call to this BaselineOutOfLinePrologue builtin.
1138
1139 Register callee_context = descriptor.GetRegisterParameter(
1140 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1141 Register callee_js_function = descriptor.GetRegisterParameter(
1142 BaselineOutOfLinePrologueDescriptor::kClosure);
1143 __ Push(callee_context, callee_js_function);
1144 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1145 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1146
1147 Register argc = descriptor.GetRegisterParameter(
1148 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1149 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1150 // the frame, so load it into a register.
1151 Register bytecodeArray = descriptor.GetRegisterParameter(
1152 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1153 {
1154 UseScratchRegisterScope temps(masm);
1155 ResetBytecodeAgeAndOsrState(masm, bytecodeArray, temps.Acquire());
1156 }
1157 __ Push(argc, bytecodeArray);
1158
1159 // Baseline code frames store the feedback vector where interpreter would
1160 // store the bytecode offset.
1161 if (FLAG_debug_code) {
1162 UseScratchRegisterScope temps(masm);
1163 Register scratch = temps.Acquire();
1164 __ CompareObjectType(feedback_vector, scratch, scratch,
1165 FEEDBACK_VECTOR_TYPE);
1166 __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1167 }
1168 __ Push(feedback_vector);
1169 }
1170
1171 Label call_stack_guard;
1172 Register frame_size = descriptor.GetRegisterParameter(
1173 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1174 {
1175 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1176 // Stack check. This folds the checks for both the interrupt stack limit
1177 // check and the real stack limit into one by just checking for the
1178 // interrupt limit. The interrupt limit is either equal to the real stack
1179 // limit or tighter. By ensuring we have space until that limit after
1180 // building the frame we can quickly precheck both at once.
1181 UseScratchRegisterScope temps(masm);
1182
1183 Register sp_minus_frame_size = temps.Acquire();
1184 __ sub(sp_minus_frame_size, sp, frame_size);
1185 Register interrupt_limit = temps.Acquire();
1186 __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1187 __ cmp(sp_minus_frame_size, interrupt_limit);
1188 __ b(&call_stack_guard, lo);
1189 }
1190
1191 // Do "fast" return to the caller pc in lr.
1192 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1193 __ Ret();
1194
1195 __ bind(&has_optimized_code_or_state);
1196 {
1197 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1198 UseScratchRegisterScope temps(masm);
1199 // Ensure the optimization_state is not allocated again.
1200 temps.Exclude(optimization_state);
1201
1202 // Drop the frame created by the baseline call.
1203 __ ldm(ia_w, sp, {fp, lr});
1204 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1205 feedback_vector);
1206 __ Trap();
1207 }
1208
1209 __ bind(&call_stack_guard);
1210 {
1211 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1212 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1213 // Save incoming new target or generator
1214 __ Push(kJavaScriptCallNewTargetRegister);
1215 __ SmiTag(frame_size);
1216 __ Push(frame_size);
1217 __ CallRuntime(Runtime::kStackGuardWithGap);
1218 __ Pop(kJavaScriptCallNewTargetRegister);
1219 }
1220
1221 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1222 __ Ret();
1223 }
1224
1225 // Generate code for entering a JS function with the interpreter.
1226 // On entry to the function the receiver and arguments have been pushed on the
1227 // stack left to right.
1228 //
1229 // The live registers are:
1230 // o r0: actual argument count
1231 // o r1: the JS function object being called.
1232 // o r3: the incoming new target or generator object
1233 // o cp: our context
1234 // o fp: the caller's frame pointer
1235 // o sp: stack pointer
1236 // o lr: return address
1237 //
1238 // The function builds an interpreter frame. See InterpreterFrameConstants in
1239 // frame-constants.h for its layout.
1240 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1241 Register closure = r1;
1242 Register feedback_vector = r2;
1243
1244 // Get the bytecode array from the function object and load it into
1245 // kInterpreterBytecodeArrayRegister.
1246 __ ldr(r4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1247 __ ldr(kInterpreterBytecodeArrayRegister,
1248 FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
1249
1250 Label is_baseline;
1251 GetSharedFunctionInfoBytecodeOrBaseline(
1252 masm, kInterpreterBytecodeArrayRegister, r8, &is_baseline);
1253
1254 // The bytecode array could have been flushed from the shared function info,
1255 // if so, call into CompileLazy.
1256 Label compile_lazy;
1257 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
1258 BYTECODE_ARRAY_TYPE);
1259 __ b(ne, &compile_lazy);
1260
1261 // Load the feedback vector from the closure.
1262 __ ldr(feedback_vector,
1263 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1264 __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1265
1266 Label push_stack_frame;
1267 // Check if feedback vector is valid. If valid, check for optimized code
1268 // and update invocation count. Otherwise, setup the stack frame.
1269 __ ldr(r4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1270 __ ldrh(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
1271 __ cmp(r4, Operand(FEEDBACK_VECTOR_TYPE));
1272 __ b(ne, &push_stack_frame);
1273
1274 Register optimization_state = r4;
1275 Label has_optimized_code_or_state;
1276 LoadTieringStateAndJumpIfNeedsProcessing(
1277 masm, optimization_state, feedback_vector, &has_optimized_code_or_state);
1278
1279 Label not_optimized;
1280   __ bind(&not_optimized);
1281
1282 // Increment invocation count for the function.
1283 __ ldr(r9, FieldMemOperand(feedback_vector,
1284 FeedbackVector::kInvocationCountOffset));
1285 __ add(r9, r9, Operand(1));
1286 __ str(r9, FieldMemOperand(feedback_vector,
1287 FeedbackVector::kInvocationCountOffset));
1288
1289 // Open a frame scope to indicate that there is a frame on the stack. The
1290 // MANUAL indicates that the scope shouldn't actually generate code to set up
1291 // the frame (that is done below).
1292 __ bind(&push_stack_frame);
1293 FrameScope frame_scope(masm, StackFrame::MANUAL);
1294 __ PushStandardFrame(closure);
1295
1296 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r9);
1297
1298 // Load the initial bytecode offset.
1299 __ mov(kInterpreterBytecodeOffsetRegister,
1300 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
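  // The offset is an untagged byte offset from the (tagged) BytecodeArray
  // pointer, hence the -kHeapObjectTag adjustment.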
1301
1302 // Push bytecode array and Smi tagged bytecode array offset.
1303 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1304 __ Push(kInterpreterBytecodeArrayRegister, r4);
1305
1306 // Allocate the local and temporary register file on the stack.
1307 Label stack_overflow;
1308 {
1309 // Load frame size from the BytecodeArray object.
1310 __ ldr(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1311 BytecodeArray::kFrameSizeOffset));
1312
1313 // Do a stack check to ensure we don't go over the limit.
1314 __ sub(r9, sp, Operand(r4));
1315 __ LoadStackLimit(r2, StackLimitKind::kRealStackLimit);
1316 __ cmp(r9, Operand(r2));
1317 __ b(lo, &stack_overflow);
1318
1319 // If ok, push undefined as the initial value for all register file entries.
1320 Label loop_header;
1321 Label loop_check;
1322 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1323 __ b(&loop_check, al);
1324 __ bind(&loop_header);
1325 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1326 __ push(kInterpreterAccumulatorRegister);
1327 // Continue loop if not done.
1328 __ bind(&loop_check);
1329 __ sub(r4, r4, Operand(kPointerSize), SetCC);
1330 __ b(&loop_header, ge);
1331 }
1332
1333 // If the bytecode array has a valid incoming new target or generator object
1334 // register, initialize it with incoming value which was passed in r3.
1335 __ ldr(r9, FieldMemOperand(
1336 kInterpreterBytecodeArrayRegister,
1337 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1338 __ cmp(r9, Operand::Zero());
1339 __ str(r3, MemOperand(fp, r9, LSL, kPointerSizeLog2), ne);
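  // The str above is predicated on ne: r3 is only stored when the register
  // index loaded above is non-zero, i.e. when it is valid.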
1340
1341 // Perform interrupt stack check.
1342 // TODO(solanes): Merge with the real stack limit check above.
1343 Label stack_check_interrupt, after_stack_check_interrupt;
1344 __ LoadStackLimit(r4, StackLimitKind::kInterruptStackLimit);
1345 __ cmp(sp, r4);
1346 __ b(lo, &stack_check_interrupt);
1347 __ bind(&after_stack_check_interrupt);
1348
1349 // The accumulator is already loaded with undefined.
1350
1351 // Load the dispatch table into a register and dispatch to the bytecode
1352 // handler at the current bytecode offset.
1353 Label do_dispatch;
1354 __ bind(&do_dispatch);
1355 __ Move(
1356 kInterpreterDispatchTableRegister,
1357 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1358 __ ldrb(r4, MemOperand(kInterpreterBytecodeArrayRegister,
1359 kInterpreterBytecodeOffsetRegister));
1360 __ ldr(
1361 kJavaScriptCallCodeStartRegister,
1362 MemOperand(kInterpreterDispatchTableRegister, r4, LSL, kPointerSizeLog2));
1363 __ Call(kJavaScriptCallCodeStartRegister);
1364 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1365
1366 // Any returns to the entry trampoline are either due to the return bytecode
1367 // or the interpreter tail calling a builtin and then a dispatch.
1368
1369 // Get bytecode array and bytecode offset from the stack frame.
1370 __ ldr(kInterpreterBytecodeArrayRegister,
1371 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1372 __ ldr(kInterpreterBytecodeOffsetRegister,
1373 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1374 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1375
1376 // Either return, or advance to the next bytecode and dispatch.
1377 Label do_return;
1378 __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
1379 kInterpreterBytecodeOffsetRegister));
1380 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1381 kInterpreterBytecodeOffsetRegister, r1, r2, r3,
1382 &do_return);
1383 __ jmp(&do_dispatch);
1384
1385 __ bind(&do_return);
1386 // The return value is in r0.
1387 LeaveInterpreterFrame(masm, r2, r4);
1388 __ Jump(lr);
1389
1390 __ bind(&stack_check_interrupt);
1391 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1392 // for the call to the StackGuard.
1393 __ mov(kInterpreterBytecodeOffsetRegister,
1394 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1395 kFunctionEntryBytecodeOffset)));
1396 __ str(kInterpreterBytecodeOffsetRegister,
1397 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1398 __ CallRuntime(Runtime::kStackGuard);
1399
1400 // After the call, restore the bytecode array, bytecode offset and accumulator
1401 // registers again. Also, restore the bytecode offset in the stack to its
1402 // previous value.
1403 __ ldr(kInterpreterBytecodeArrayRegister,
1404 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1405 __ mov(kInterpreterBytecodeOffsetRegister,
1406 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1407 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1408
1409 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1410 __ str(r4, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1411
1412 __ jmp(&after_stack_check_interrupt);
1413
1414 __ bind(&has_optimized_code_or_state);
1415 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1416 feedback_vector);
1417
1418 __ bind(&is_baseline);
1419 {
1420 // Load the feedback vector from the closure.
1421 __ ldr(feedback_vector,
1422 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1423 __ ldr(feedback_vector,
1424 FieldMemOperand(feedback_vector, Cell::kValueOffset));
1425
1426 Label install_baseline_code;
1427 // Check if feedback vector is valid. If not, call prepare for baseline to
1428 // allocate it.
1429 __ ldr(r8, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1430 __ ldrh(r8, FieldMemOperand(r8, Map::kInstanceTypeOffset));
1431 __ cmp(r8, Operand(FEEDBACK_VECTOR_TYPE));
1432 __ b(ne, &install_baseline_code);
1433
1434 // Check the tiering state.
1435 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1436 feedback_vector,
1437 &has_optimized_code_or_state);
1438
1439 // Load the baseline code into the closure.
1440 __ mov(r2, kInterpreterBytecodeArrayRegister);
1441 static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
1442 ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
1443 __ JumpCodeObject(r2);
1444
1445 __ bind(&install_baseline_code);
1446 GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1447 }
1448
1449 __ bind(&compile_lazy);
1450 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1451
1452 __ bind(&stack_overflow);
1453 __ CallRuntime(Runtime::kThrowStackOverflow);
1454 __ bkpt(0); // Should not return.
1455 }
1456
1457 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1458 Register start_address,
1459 Register scratch) {
1460 ASM_CODE_COMMENT(masm);
1461 // Find the argument with the lowest address.
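// That is: start_address -= (num_args - 1) * kSystemPointerSize.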
1462 __ sub(scratch, num_args, Operand(1));
1463 __ mov(scratch, Operand(scratch, LSL, kSystemPointerSizeLog2));
1464 __ sub(start_address, start_address, scratch);
1465 // Push the arguments.
1466 __ PushArray(start_address, num_args, scratch,
1467 TurboAssembler::PushArrayOrder::kReverse);
1468 }
1469
1470 // static
1471 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1472 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1473 InterpreterPushArgsMode mode) {
1474 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1475 // ----------- S t a t e -------------
1476 // -- r0 : the number of arguments
1477 // -- r2 : the address of the first argument to be pushed. Subsequent
1478 // arguments should be consecutive above this, in the same order as
1479 // they are to be pushed onto the stack.
1480 // -- r1 : the target to call (can be any Object).
1481 // -----------------------------------
1482 Label stack_overflow;
1483
1484 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1485 // The spread argument should not be pushed.
1486 __ sub(r0, r0, Operand(1));
1487 }
1488
1489 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1490 __ sub(r3, r0, Operand(kJSArgcReceiverSlots));
1491 } else {
1492 __ mov(r3, r0);
1493 }
1494
1495 __ StackOverflowCheck(r3, r4, &stack_overflow);
1496
1497 // Push the arguments. r2 and r4 will be modified.
1498 GenerateInterpreterPushArgs(masm, r3, r2, r4);
1499
1500 // Push "undefined" as the receiver arg if we need to.
1501 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1502 __ PushRoot(RootIndex::kUndefinedValue);
1503 }
1504
1505 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1506 // Pass the spread in the register r2.
1507 // r2 already points to the penultimate argument; the spread
1508 // lies in the next interpreter register.
1509 __ sub(r2, r2, Operand(kSystemPointerSize));
1510 __ ldr(r2, MemOperand(r2));
1511 }
1512
1513 // Call the target.
1514 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1515 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1516 RelocInfo::CODE_TARGET);
1517 } else {
1518 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1519 RelocInfo::CODE_TARGET);
1520 }
1521
1522 __ bind(&stack_overflow);
1523 {
1524 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1525 // Unreachable code.
1526 __ bkpt(0);
1527 }
1528 }
1529
1530 // static
1531 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1532 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1533 // ----------- S t a t e -------------
1534 // -- r0 : argument count
1535 // -- r3 : new target
1536 // -- r1 : constructor to call
1537 // -- r2 : allocation site feedback if available, undefined otherwise.
1538 // -- r4 : address of the first argument
1539 // -----------------------------------
1540 Label stack_overflow;
1541
1542 __ StackOverflowCheck(r0, r6, &stack_overflow);
1543
1544 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1545 // The spread argument should not be pushed.
1546 __ sub(r0, r0, Operand(1));
1547 }
1548
1549 Register argc_without_receiver = r6;
1550 __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
1551 // Push the arguments. r4 and r5 will be modified.
1552 GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);
1553
1554 // Push a slot for the receiver to be constructed.
1555 __ mov(r5, Operand::Zero());
1556 __ push(r5);
1557
1558 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1559 // Pass the spread in the register r2.
1560 // r4 already points to the penultimate argument; the spread
1561 // lies in the next interpreter register.
1562 __ sub(r4, r4, Operand(kSystemPointerSize));
1563 __ ldr(r2, MemOperand(r4));
1564 } else {
1565 __ AssertUndefinedOrAllocationSite(r2, r5);
1566 }
1567
1568 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1569 __ AssertFunction(r1);
1570
1571 // Tail call to the array construct stub (still in the caller
1572 // context at this point).
1573 Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1574 __ Jump(code, RelocInfo::CODE_TARGET);
1575 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1576 // Call the constructor with r0, r1, and r3 unmodified.
1577 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1578 RelocInfo::CODE_TARGET);
1579 } else {
1580 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1581 // Call the constructor with r0, r1, and r3 unmodified.
1582 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1583 }
1584
1585 __ bind(&stack_overflow);
1586 {
1587 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1588 // Unreachable code.
1589 __ bkpt(0);
1590 }
1591 }
1592
1593 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1594 // Set the return address to the correct point in the interpreter entry
1595 // trampoline.
1596 Label builtin_trampoline, trampoline_loaded;
1597 Smi interpreter_entry_return_pc_offset(
1598 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1599 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1600
1601 // If the SFI function_data is an InterpreterData, the function will have a
1602 // custom copy of the interpreter entry trampoline for profiling. If so,
1603 // get the custom trampoline, otherwise grab the entry address of the global
1604 // trampoline.
1605 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1606 __ ldr(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
1607 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
1608 __ CompareObjectType(r2, kInterpreterDispatchTableRegister,
1609 kInterpreterDispatchTableRegister,
1610 INTERPRETER_DATA_TYPE);
1611 __ b(ne, &builtin_trampoline);
1612
1613 __ ldr(r2,
1614 FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
1615 __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
1616 __ b(&trampoline_loaded);
1617
1618 __ bind(&builtin_trampoline);
1619 __ Move(r2, ExternalReference::
1620 address_of_interpreter_entry_trampoline_instruction_start(
1621 masm->isolate()));
1622 __ ldr(r2, MemOperand(r2));
1623
1624 __ bind(&trampoline_loaded);
1625 __ add(lr, r2, Operand(interpreter_entry_return_pc_offset.value()));
1626
1627 // Initialize the dispatch table register.
1628 __ Move(
1629 kInterpreterDispatchTableRegister,
1630 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1631
1632 // Get the bytecode array pointer from the frame.
1633 __ ldr(kInterpreterBytecodeArrayRegister,
1634 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1635
1636 if (FLAG_debug_code) {
1637 // Check function data field is actually a BytecodeArray object.
1638 __ SmiTst(kInterpreterBytecodeArrayRegister);
1639 __ Assert(
1640 ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1641 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r1, no_reg,
1642 BYTECODE_ARRAY_TYPE);
1643 __ Assert(
1644 eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1645 }
1646
1647 // Get the target bytecode offset from the frame.
1648 __ ldr(kInterpreterBytecodeOffsetRegister,
1649 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1650 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1651
1652 if (FLAG_debug_code) {
1653 Label okay;
1654 __ cmp(kInterpreterBytecodeOffsetRegister,
1655 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1656 __ b(ge, &okay);
1657 __ bkpt(0);
1658 __ bind(&okay);
1659 }
1660
1661 // Dispatch to the target bytecode.
1662 UseScratchRegisterScope temps(masm);
1663 Register scratch = temps.Acquire();
1664 __ ldrb(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
1665 kInterpreterBytecodeOffsetRegister));
1666 __ ldr(kJavaScriptCallCodeStartRegister,
1667 MemOperand(kInterpreterDispatchTableRegister, scratch, LSL,
1668 kPointerSizeLog2));
1669 __ Jump(kJavaScriptCallCodeStartRegister);
1670 }
1671
1672 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1673 // Get bytecode array and bytecode offset from the stack frame.
1674 __ ldr(kInterpreterBytecodeArrayRegister,
1675 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1676 __ ldr(kInterpreterBytecodeOffsetRegister,
1677 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1678 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1679
1680 Label enter_bytecode, function_entry_bytecode;
1681 __ cmp(kInterpreterBytecodeOffsetRegister,
1682 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1683 kFunctionEntryBytecodeOffset));
1684 __ b(eq, &function_entry_bytecode);
1685
1686 // Load the current bytecode.
1687 __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
1688 kInterpreterBytecodeOffsetRegister));
1689
1690 // Advance to the next bytecode.
1691 Label if_return;
1692 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1693 kInterpreterBytecodeOffsetRegister, r1, r2, r3,
1694 &if_return);
1695
1696 __ bind(&enter_bytecode);
1697 // Convert new bytecode offset to a Smi and save in the stackframe.
1698 __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
1699 __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1700
1701 Generate_InterpreterEnterBytecode(masm);
1702
1703 __ bind(&function_entry_bytecode);
1704 // If the code deoptimizes during the implicit function entry stack interrupt
1705 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1706 // not a valid bytecode offset. Detect this case and advance to the first
1707 // actual bytecode.
1708 __ mov(kInterpreterBytecodeOffsetRegister,
1709 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1710 __ b(&enter_bytecode);
1711
1712 // We should never take the if_return path.
1713 __ bind(&if_return);
1714 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1715 }
1716
1717 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1718 Generate_InterpreterEnterBytecode(masm);
1719 }
1720
1721 namespace {
1722 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1723 bool java_script_builtin,
1724 bool with_result) {
1725 const RegisterConfiguration* config(RegisterConfiguration::Default());
1726 int allocatable_register_count = config->num_allocatable_general_registers();
1727 UseScratchRegisterScope temps(masm);
1728 Register scratch = temps.Acquire(); // Temp register is not allocatable.
1729 if (with_result) {
1730 if (java_script_builtin) {
1731 __ mov(scratch, r0);
1732 } else {
1733 // Overwrite the hole inserted by the deoptimizer with the return value
1734 // from the LAZY deopt point.
1735 __ str(
1736 r0,
1737 MemOperand(
1738 sp, config->num_allocatable_general_registers() * kPointerSize +
1739 BuiltinContinuationFrameConstants::kFixedFrameSize));
1740 }
1741 }
1742 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1743 int code = config->GetAllocatableGeneralCode(i);
1744 __ Pop(Register::from_code(code));
1745 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1746 __ SmiUntag(Register::from_code(code));
1747 }
1748 }
1749 if (java_script_builtin && with_result) {
1750 // Overwrite the hole inserted by the deoptimizer with the return value from
1751 // the LAZY deopt point. r0 contains the arguments count; the return value
1752 // from LAZY is always the last argument.
1753 constexpr int return_value_offset =
1754 BuiltinContinuationFrameConstants::kFixedSlotCount -
1755 kJSArgcReceiverSlots;
1756 __ add(r0, r0, Operand(return_value_offset));
1757 __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
1758 // Recover arguments count.
1759 __ sub(r0, r0, Operand(return_value_offset));
1760 }
1761 __ ldr(fp, MemOperand(
1762 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1763 // Load builtin index (stored as a Smi) and use it to get the builtin start
1764 // address from the builtins table.
1765 Register builtin = scratch;
1766 __ Pop(builtin);
1767 __ add(sp, sp,
1768 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1769 __ Pop(lr);
1770 __ LoadEntryFromBuiltinIndex(builtin);
1771 __ bx(builtin);
1772 }
1773 } // namespace
1774
1775 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1776 Generate_ContinueToBuiltinHelper(masm, false, false);
1777 }
1778
1779 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1780 MacroAssembler* masm) {
1781 Generate_ContinueToBuiltinHelper(masm, false, true);
1782 }
1783
1784 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1785 Generate_ContinueToBuiltinHelper(masm, true, false);
1786 }
1787
1788 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1789 MacroAssembler* masm) {
1790 Generate_ContinueToBuiltinHelper(masm, true, true);
1791 }
1792
1793 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1794 {
1795 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1796 __ CallRuntime(Runtime::kNotifyDeoptimized);
1797 }
1798
1799 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r0.code());
1800 __ pop(r0);
1801 __ Ret();
1802 }
1803
1804 namespace {
1805
1806 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1807 Operand offset = Operand::Zero()) {
1808 // Compute the target address = entry_address + offset
1809 if (offset.IsImmediate() && offset.immediate() == 0) {
1810 __ mov(lr, entry_address);
1811 } else {
1812 __ add(lr, entry_address, offset);
1813 }
1814
1815 // "return" to the OSR entry point of the function.
1816 __ Ret();
1817 }
1818
1819 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1820 ASM_CODE_COMMENT(masm);
1821 {
1822 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1823 __ CallRuntime(Runtime::kCompileOptimizedOSR);
1824 }
1825
1826 // If the code object is null, just return to the caller.
1827 Label skip;
1828 __ cmp(r0, Operand(Smi::zero()));
1829 __ b(ne, &skip);
1830 __ Ret();
1831
1832 __ bind(&skip);
1833
1834 if (is_interpreter) {
1835 // Drop the handler frame that is sitting on top of the actual
1836 // JavaScript frame. This is the case when OSR is triggered from bytecode.
1837 __ LeaveFrame(StackFrame::STUB);
1838 }
1839
1840 // Load deoptimization data from the code object.
1841 // <deopt_data> = <code>[#deoptimization_data_offset]
1842 __ ldr(r1,
1843 FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
1844
1845 {
1846 ConstantPoolUnavailableScope constant_pool_unavailable(masm);
1847 __ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
1848
1849 // Load the OSR entrypoint offset from the deoptimization data.
1850 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1851 __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
1852 DeoptimizationData::kOsrPcOffsetIndex)));
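// The jump target is the code's instruction start (already in r0) plus the
// untagged OSR PC offset, i.e. roughly: entry = r0 + SmiUntag(r1).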
1853
1854 Generate_OSREntry(masm, r0, Operand::SmiUntag(r1));
1855 }
1856 }
1857 } // namespace
1858
1859 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1860 return OnStackReplacement(masm, true);
1861 }
1862
1863 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1864 __ ldr(kContextRegister,
1865 MemOperand(fp, BaselineFrameConstants::kContextOffset));
1866 return OnStackReplacement(masm, false);
1867 }
1868
1869 // static
1870 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1871 // ----------- S t a t e -------------
1872 // -- r0 : argc
1873 // -- sp[0] : receiver
1874 // -- sp[4] : thisArg
1875 // -- sp[8] : argArray
1876 // -----------------------------------
1877
1878 // 1. Load receiver into r1, argArray into r2 (if present), remove all
1879 // arguments from the stack (including the receiver), and push thisArg (if
1880 // present) instead.
1881 {
1882 __ LoadRoot(r5, RootIndex::kUndefinedValue);
1883 __ mov(r2, r5);
1884 __ ldr(r1, MemOperand(sp, 0)); // receiver
1885 __ cmp(r0, Operand(JSParameterCount(1)));
1886 __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
1887 __ cmp(r0, Operand(JSParameterCount(2)), ge);
1888 __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
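// The 'ge' conditions make the loads (and the second cmp) no-ops when fewer
// arguments were passed, so r5 and r2 keep their undefined defaults.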
1889 __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
1890 TurboAssembler::kCountIncludesReceiver);
1891 }
1892
1893 // ----------- S t a t e -------------
1894 // -- r2 : argArray
1895 // -- r1 : receiver
1896 // -- sp[0] : thisArg
1897 // -----------------------------------
1898
1899 // 2. We don't need to check explicitly for callable receiver here,
1900 // since that's the first thing the Call/CallWithArrayLike builtins
1901 // will do.
1902
1903 // 3. Tail call with no arguments if argArray is null or undefined.
1904 Label no_arguments;
1905 __ JumpIfRoot(r2, RootIndex::kNullValue, &no_arguments);
1906 __ JumpIfRoot(r2, RootIndex::kUndefinedValue, &no_arguments);
1907
1908 // 4a. Apply the receiver to the given argArray.
1909 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1910 RelocInfo::CODE_TARGET);
1911
1912 // 4b. The argArray is either null or undefined, so we tail call without any
1913 // arguments to the receiver.
1914 __ bind(&no_arguments);
1915 {
1916 __ mov(r0, Operand(JSParameterCount(0)));
1917 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1918 }
1919 }
1920
1921 // static
1922 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1923 // 1. Get the callable to call (passed as receiver) from the stack.
1924 __ Pop(r1);
1925
1926 // 2. Make sure we have at least one argument.
1927 // r0: actual number of arguments
1928 {
1929 Label done;
1930 __ cmp(r0, Operand(JSParameterCount(0)));
1931 __ b(ne, &done);
1932 __ PushRoot(RootIndex::kUndefinedValue);
1933 __ add(r0, r0, Operand(1));
1934 __ bind(&done);
1935 }
1936
1937 // 3. Adjust the actual number of arguments.
1938 __ sub(r0, r0, Operand(1));
1939
1940 // 4. Call the callable.
1941 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1942 }
1943
1944 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1945 // ----------- S t a t e -------------
1946 // -- r0 : argc
1947 // -- sp[0] : receiver
1948 // -- sp[4] : target (if argc >= 1)
1949 // -- sp[8] : thisArgument (if argc >= 2)
1950 // -- sp[12] : argumentsList (if argc == 3)
1951 // -----------------------------------
1952
1953 // 1. Load target into r1 (if present), argumentsList into r2 (if present),
1954 // remove all arguments from the stack (including the receiver), and push
1955 // thisArgument (if present) instead.
1956 {
1957 __ LoadRoot(r1, RootIndex::kUndefinedValue);
1958 __ mov(r5, r1);
1959 __ mov(r2, r1);
1960 __ cmp(r0, Operand(JSParameterCount(1)));
1961 __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
1962 __ cmp(r0, Operand(JSParameterCount(2)), ge);
1963 __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
1964 __ cmp(r0, Operand(JSParameterCount(3)), ge);
1965 __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
1966 __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
1967 TurboAssembler::kCountIncludesReceiver);
1968 }
1969
1970 // ----------- S t a t e -------------
1971 // -- r2 : argumentsList
1972 // -- r1 : target
1973 // -- sp[0] : thisArgument
1974 // -----------------------------------
1975
1976 // 2. We don't need to check explicitly for callable target here,
1977 // since that's the first thing the Call/CallWithArrayLike builtins
1978 // will do.
1979
1980 // 3. Apply the target to the given argumentsList.
1981 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1982 RelocInfo::CODE_TARGET);
1983 }
1984
1985 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1986 // ----------- S t a t e -------------
1987 // -- r0 : argc
1988 // -- sp[0] : receiver
1989 // -- sp[4] : target
1990 // -- sp[8] : argumentsList
1991 // -- sp[12] : new.target (optional)
1992 // -----------------------------------
1993
1994 // 1. Load target into r1 (if present), argumentsList into r2 (if present),
1995 // new.target into r3 (if present, otherwise use target), remove all
1996 // arguments from the stack (including the receiver), and push thisArgument
1997 // (if present) instead.
1998 {
1999 __ LoadRoot(r1, RootIndex::kUndefinedValue);
2000 __ mov(r2, r1);
2001 __ mov(r4, r1);
2002 __ cmp(r0, Operand(JSParameterCount(1)));
2003 __ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
2004 __ mov(r3, r1); // new.target defaults to target
2005 __ cmp(r0, Operand(JSParameterCount(2)), ge);
2006 __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
2007 __ cmp(r0, Operand(JSParameterCount(3)), ge);
2008 __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
2009 __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
2010 TurboAssembler::kCountIncludesReceiver);
2011 }
2012
2013 // ----------- S t a t e -------------
2014 // -- r2 : argumentsList
2015 // -- r3 : new.target
2016 // -- r1 : target
2017 // -- sp[0] : receiver (undefined)
2018 // -----------------------------------
2019
2020 // 2. We don't need to check explicitly for constructor target here,
2021 // since that's the first thing the Construct/ConstructWithArrayLike
2022 // builtins will do.
2023
2024 // 3. We don't need to check explicitly for constructor new.target here,
2025 // since that's the second thing the Construct/ConstructWithArrayLike
2026 // builtins will do.
2027
2028 // 4. Construct the target with the given new.target and argumentsList.
2029 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2030 RelocInfo::CODE_TARGET);
2031 }
2032
2033 namespace {
2034
2035 // Allocate new stack space for |count| arguments and shift all existing
2036 // arguments already on the stack. |pointer_to_new_space_out| points to the
2037 // first free slot on the stack to copy additional arguments to and
2038 // |argc_in_out| is updated to include |count|.
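// Illustrative effect for count == 2 (stack grows down, receiver at sp):
//   before: sp -> [receiver][a0][a1]...
//   after:  sp -> [receiver][a0][a1][free][free]...
// with |pointer_to_new_space_out| pointing at the first free slot.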
2039 void Generate_AllocateSpaceAndShiftExistingArguments(
2040 MacroAssembler* masm, Register count, Register argc_in_out,
2041 Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2042 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2043 scratch2));
2044 UseScratchRegisterScope temps(masm);
2045 Register old_sp = scratch1;
2046 Register new_space = scratch2;
2047 __ mov(old_sp, sp);
2048 __ lsl(new_space, count, Operand(kSystemPointerSizeLog2));
2049 __ AllocateStackSpace(new_space);
2050
2051 Register end = scratch2;
2052 Register value = temps.Acquire();
2053 Register dest = pointer_to_new_space_out;
2054 __ mov(dest, sp);
2055 __ add(end, old_sp, Operand(argc_in_out, LSL, kSystemPointerSizeLog2));
2056 Label loop, done;
2057 __ bind(&loop);
2058 __ cmp(old_sp, end);
2059 __ b(ge, &done);
2060 __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
2061 __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
2062 __ b(&loop);
2063 __ bind(&done);
2064
2065 // Update total number of arguments.
2066 __ add(argc_in_out, argc_in_out, count);
2067 }
2068
2069 } // namespace
2070
2071 // static
2072 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2073 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2074 Handle<Code> code) {
2075 // ----------- S t a t e -------------
2076 // -- r1 : target
2077 // -- r0 : number of parameters on the stack
2078 // -- r2 : arguments list (a FixedArray)
2079 // -- r4 : len (number of elements to push from args)
2080 // -- r3 : new.target (for [[Construct]])
2081 // -----------------------------------
2082 Register scratch = r8;
2083
2084 if (FLAG_debug_code) {
2085 // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
2086 Label ok, fail;
2087 __ AssertNotSmi(r2);
2088 __ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
2089 __ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2090 __ cmp(r6, Operand(FIXED_ARRAY_TYPE));
2091 __ b(eq, &ok);
2092 __ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2093 __ b(ne, &fail);
2094 __ cmp(r4, Operand(0));
2095 __ b(eq, &ok);
2096 // Fall through.
2097 __ bind(&fail);
2098 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2099
2100 __ bind(&ok);
2101 }
2102
2103 Label stack_overflow;
2104 __ StackOverflowCheck(r4, scratch, &stack_overflow);
2105
2106 // Move the arguments already in the stack,
2107 // including the receiver and the return address.
2108 // r4: Number of arguments to make room for.
2109 // r0: Number of arguments already on the stack.
2110 // r9: Points to first free slot on the stack after arguments were shifted.
2111 Generate_AllocateSpaceAndShiftExistingArguments(masm, r4, r0, r9, r5, r6);
2112
2113 // Copy arguments onto the stack (thisArgument is already on the stack).
2114 {
2115 __ mov(r6, Operand(0));
2116 __ LoadRoot(r5, RootIndex::kTheHoleValue);
2117 Label done, loop;
2118 __ bind(&loop);
2119 __ cmp(r6, r4);
2120 __ b(eq, &done);
2121 __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
2122 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2123 __ cmp(scratch, r5);
2124 // Turn the hole into undefined as we go.
2125 __ LoadRoot(scratch, RootIndex::kUndefinedValue, eq);
2126 __ str(scratch, MemOperand(r9, kSystemPointerSize, PostIndex));
2127 __ add(r6, r6, Operand(1));
2128 __ b(&loop);
2129 __ bind(&done);
2130 }
2131
2132 // Tail-call to the actual Call or Construct builtin.
2133 __ Jump(code, RelocInfo::CODE_TARGET);
2134
2135 __ bind(&stack_overflow);
2136 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2137 }
2138
2139 // static
2140 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2141 CallOrConstructMode mode,
2142 Handle<Code> code) {
2143 // ----------- S t a t e -------------
2144 // -- r0 : the number of arguments
2145 // -- r3 : the new.target (for [[Construct]] calls)
2146 // -- r1 : the target to call (can be any Object)
2147 // -- r2 : start index (to support rest parameters)
2148 // -----------------------------------
2149
2150 Register scratch = r6;
2151
2152 // Check if new.target has a [[Construct]] internal method.
2153 if (mode == CallOrConstructMode::kConstruct) {
2154 Label new_target_constructor, new_target_not_constructor;
2155 __ JumpIfSmi(r3, &new_target_not_constructor);
2156 __ ldr(scratch, FieldMemOperand(r3, HeapObject::kMapOffset));
2157 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2158 __ tst(scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
2159 __ b(ne, &new_target_constructor);
2160 __ bind(&new_target_not_constructor);
2161 {
2162 FrameScope scope(masm, StackFrame::MANUAL);
2163 __ EnterFrame(StackFrame::INTERNAL);
2164 __ Push(r3);
2165 __ CallRuntime(Runtime::kThrowNotConstructor);
2166 }
2167 __ bind(&new_target_constructor);
2168 }
2169
2170 Label stack_done, stack_overflow;
2171 __ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2172 __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
2173 __ sub(r5, r5, r2, SetCC);
2174 __ b(le, &stack_done);
2175 {
2176 // ----------- S t a t e -------------
2177 // -- r0 : the number of arguments already in the stack
2178 // -- r1 : the target to call (can be any Object)
2179 // -- r2 : start index (to support rest parameters)
2180 // -- r3 : the new.target (for [[Construct]] calls)
2181 // -- fp : points to the caller stack frame
2182 // -- r5 : number of arguments to copy, i.e. arguments count - start index
2183 // -----------------------------------
2184
2185 // Check for stack overflow.
2186 __ StackOverflowCheck(r5, scratch, &stack_overflow);
2187
2188 // Forward the arguments from the caller frame.
2189 // Point to the first argument to copy (skipping the receiver).
2190 __ add(r4, fp,
2191 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2192 kSystemPointerSize));
2193 __ add(r4, r4, Operand(r2, LSL, kSystemPointerSizeLog2));
2194
2195 // Move the arguments already in the stack,
2196 // including the receiver and the return address.
2197 // r5: Number of arguments to make room for.
2198 // r0: Number of arguments already on the stack.
2199 // r2: Points to first free slot on the stack after arguments were shifted.
2200 Generate_AllocateSpaceAndShiftExistingArguments(masm, r5, r0, r2, scratch,
2201 r8);
2202
2203 // Copy arguments from the caller frame.
2204 // TODO(victorgomes): Consider using forward order as potentially more cache
2205 // friendly.
2206 {
2207 Label loop;
2208 __ bind(&loop);
2209 {
2210 __ sub(r5, r5, Operand(1), SetCC);
2211 __ ldr(scratch, MemOperand(r4, r5, LSL, kSystemPointerSizeLog2));
2212 __ str(scratch, MemOperand(r2, r5, LSL, kSystemPointerSizeLog2));
2213 __ b(ne, &loop);
2214 }
2215 }
2216 }
2217 __ b(&stack_done);
2218 __ bind(&stack_overflow);
2219 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2220 __ bind(&stack_done);
2221
2222 // Tail-call to the {code} handler.
2223 __ Jump(code, RelocInfo::CODE_TARGET);
2224 }
2225
2226 // static
2227 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2228 ConvertReceiverMode mode) {
2229 // ----------- S t a t e -------------
2230 // -- r0 : the number of arguments
2231 // -- r1 : the function to call (checked to be a JSFunction)
2232 // -----------------------------------
2233 __ AssertCallableFunction(r1);
2234
2235 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2236
2237 // Enter the context of the function; ToObject has to run in the function
2238 // context, and we also need to take the global proxy from the function
2239 // context in case of conversion.
2240 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
2241 // We need to convert the receiver for non-native sloppy mode functions.
2242 Label done_convert;
2243 __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kFlagsOffset));
2244 __ tst(r3, Operand(SharedFunctionInfo::IsNativeBit::kMask |
2245 SharedFunctionInfo::IsStrictBit::kMask));
2246 __ b(ne, &done_convert);
2247 {
2248 // ----------- S t a t e -------------
2249 // -- r0 : the number of arguments
2250 // -- r1 : the function to call (checked to be a JSFunction)
2251 // -- r2 : the shared function info.
2252 // -- cp : the function context.
2253 // -----------------------------------
2254
2255 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2256 // Patch receiver to global proxy.
2257 __ LoadGlobalProxy(r3);
2258 } else {
2259 Label convert_to_object, convert_receiver;
2260 __ ldr(r3, __ ReceiverOperand(r0));
2261 __ JumpIfSmi(r3, &convert_to_object);
2262 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2263 __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
2264 __ b(hs, &done_convert);
2265 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2266 Label convert_global_proxy;
2267 __ JumpIfRoot(r3, RootIndex::kUndefinedValue, &convert_global_proxy);
2268 __ JumpIfNotRoot(r3, RootIndex::kNullValue, &convert_to_object);
2269 __ bind(&convert_global_proxy);
2270 {
2271 // Patch receiver to global proxy.
2272 __ LoadGlobalProxy(r3);
2273 }
2274 __ b(&convert_receiver);
2275 }
2276 __ bind(&convert_to_object);
2277 {
2278 // Convert receiver using ToObject.
2279 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2280 // in the fast case? (fall back to AllocateInNewSpace?)
2281 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2282 __ SmiTag(r0);
2283 __ Push(r0, r1);
2284 __ mov(r0, r3);
2285 __ Push(cp);
2286 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2287 RelocInfo::CODE_TARGET);
2288 __ Pop(cp);
2289 __ mov(r3, r0);
2290 __ Pop(r0, r1);
2291 __ SmiUntag(r0);
2292 }
2293 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2294 __ bind(&convert_receiver);
2295 }
2296 __ str(r3, __ ReceiverOperand(r0));
2297 }
2298 __ bind(&done_convert);
2299
2300 // ----------- S t a t e -------------
2301 // -- r0 : the number of arguments
2302 // -- r1 : the function to call (checked to be a JSFunction)
2303 // -- r2 : the shared function info.
2304 // -- cp : the function context.
2305 // -----------------------------------
2306
2307 __ ldrh(r2,
2308 FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
2309 __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
2310 }
2311
2312 namespace {
2313
2314 void Generate_PushBoundArguments(MacroAssembler* masm) {
2315 ASM_CODE_COMMENT(masm);
2316 // ----------- S t a t e -------------
2317 // -- r0 : the number of arguments
2318 // -- r1 : target (checked to be a JSBoundFunction)
2319 // -- r3 : new.target (only in case of [[Construct]])
2320 // -----------------------------------
2321
2322 // Load [[BoundArguments]] into r2 and length of that into r4.
2323 Label no_bound_arguments;
2324 __ ldr(r2, FieldMemOperand(r1, JSBoundFunction::kBoundArgumentsOffset));
2325 __ ldr(r4, FieldMemOperand(r2, FixedArray::kLengthOffset));
2326 __ SmiUntag(r4);
2327 __ cmp(r4, Operand(0));
2328 __ b(eq, &no_bound_arguments);
2329 {
2330 // ----------- S t a t e -------------
2331 // -- r0 : the number of arguments
2332 // -- r1 : target (checked to be a JSBoundFunction)
2333 // -- r2 : the [[BoundArguments]] (implemented as FixedArray)
2334 // -- r3 : new.target (only in case of [[Construct]])
2335 // -- r4 : the number of [[BoundArguments]]
2336 // -----------------------------------
2337
2338 Register scratch = r6;
2339
2340 {
2341 // Check the stack for overflow. We are not trying to catch interruptions
2342 // (i.e. debug break and preemption) here, so check the "real stack
2343 // limit".
2344 Label done;
2345 __ mov(scratch, Operand(r4, LSL, kSystemPointerSizeLog2));
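// scratch now holds the number of bytes the bound arguments will occupy.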
2346 {
2347 UseScratchRegisterScope temps(masm);
2348 Register remaining_stack_size = temps.Acquire();
2349 DCHECK(!AreAliased(r0, r1, r2, r3, r4, scratch, remaining_stack_size));
2350
2351 // Compute the space we have left. The stack might already be overflowed
2352 // here, which will cause remaining_stack_size to become negative.
2353 __ LoadStackLimit(remaining_stack_size,
2354 StackLimitKind::kRealStackLimit);
2355 __ sub(remaining_stack_size, sp, remaining_stack_size);
2356
2357 // Check if the arguments will overflow the stack.
2358 __ cmp(remaining_stack_size, scratch);
2359 }
2360 __ b(gt, &done);
2361 {
2362 FrameScope scope(masm, StackFrame::MANUAL);
2363 __ EnterFrame(StackFrame::INTERNAL);
2364 __ CallRuntime(Runtime::kThrowStackOverflow);
2365 }
2366 __ bind(&done);
2367 }
2368
2369 // Pop receiver.
2370 __ Pop(r5);
2371
2372 // Push [[BoundArguments]].
2373 {
2374 Label loop;
2375 __ add(r0, r0, r4); // Adjust effective number of arguments.
2376 __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2377 __ bind(&loop);
2378 __ sub(r4, r4, Operand(1), SetCC);
2379 __ ldr(scratch, MemOperand(r2, r4, LSL, kTaggedSizeLog2));
2380 __ Push(scratch);
2381 __ b(gt, &loop);
2382 }
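// The bound arguments are pushed starting from the last one, so once the
// receiver is re-pushed below the stack reads, from the top:
//   receiver, bound[0], ..., bound[n-1], original arguments...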
2383
2384 // Push receiver.
2385 __ Push(r5);
2386 }
2387 __ bind(&no_bound_arguments);
2388 }
2389
2390 } // namespace
2391
2392 // static
2393 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2394 // ----------- S t a t e -------------
2395 // -- r0 : the number of arguments
2396 // -- r1 : the function to call (checked to be a JSBoundFunction)
2397 // -----------------------------------
2398 __ AssertBoundFunction(r1);
2399
2400 // Patch the receiver to [[BoundThis]].
2401 __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundThisOffset));
2402 __ str(r3, __ ReceiverOperand(r0));
2403
2404 // Push the [[BoundArguments]] onto the stack.
2405 Generate_PushBoundArguments(masm);
2406
2407 // Call the [[BoundTargetFunction]] via the Call builtin.
2408 __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
2409 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2410 RelocInfo::CODE_TARGET);
2411 }
2412
2413 // static
2414 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2415 // ----------- S t a t e -------------
2416 // -- r0 : the number of arguments
2417 // -- r1 : the target to call (can be any Object).
2418 // -----------------------------------
2419 Register argc = r0;
2420 Register target = r1;
2421 Register map = r4;
2422 Register instance_type = r5;
2423 DCHECK(!AreAliased(argc, target, map, instance_type));
2424
2425 Label non_callable, class_constructor;
2426 __ JumpIfSmi(target, &non_callable);
2427 __ LoadMap(map, target);
2428 __ CompareInstanceTypeRange(map, instance_type,
2429 FIRST_CALLABLE_JS_FUNCTION_TYPE,
2430 LAST_CALLABLE_JS_FUNCTION_TYPE);
2431 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2432 RelocInfo::CODE_TARGET, ls);
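// The 'ls' condition is taken when the instance type lies within
// [FIRST_CALLABLE_JS_FUNCTION_TYPE, LAST_CALLABLE_JS_FUNCTION_TYPE].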
2433 __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2434 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2435 RelocInfo::CODE_TARGET, eq);
2436
2437 // Check if target has a [[Call]] internal method.
2438 {
2439 Register flags = r4;
2440 __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2441 map = no_reg;
2442 __ tst(flags, Operand(Map::Bits1::IsCallableBit::kMask));
2443 __ b(eq, &non_callable);
2444 }
2445
2446 // Check if target is a proxy and call CallProxy external builtin.
2447 __ cmp(instance_type, Operand(JS_PROXY_TYPE));
2448 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
2449
2450 // Check if target is a wrapped function and call CallWrappedFunction external
2451 // builtin.
2452 __ cmp(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
2453 __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2454 RelocInfo::CODE_TARGET, eq);
2455
2456 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2457 // Check that the function is not a "classConstructor".
2458 __ cmp(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2459 __ b(eq, &class_constructor);
2460
2461 // 2. Call to something else, which might have a [[Call]] internal method (if
2462 // not we raise an exception).
2463 // Overwrite the original receiver with the (original) target.
2464 __ str(target, __ ReceiverOperand(argc));
2465 // Let the "call_as_function_delegate" take care of the rest.
2466 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2467 __ Jump(masm->isolate()->builtins()->CallFunction(
2468 ConvertReceiverMode::kNotNullOrUndefined),
2469 RelocInfo::CODE_TARGET);
2470
2471 // 3. Call to something that is not callable.
2472 __ bind(&non_callable);
2473 {
2474 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2475 __ Push(target);
2476 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2477 __ Trap(); // Unreachable.
2478 }
2479
2480 // 4. The function is a "classConstructor", need to raise an exception.
2481 __ bind(&class_constructor);
2482 {
2483 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2484 __ Push(target);
2485 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2486 __ Trap(); // Unreachable.
2487 }
2488 }
2489
2490 // static
2491 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2492 // ----------- S t a t e -------------
2493 // -- r0 : the number of arguments
2494 // -- r1 : the constructor to call (checked to be a JSFunction)
2495 // -- r3 : the new target (checked to be a constructor)
2496 // -----------------------------------
2497 __ AssertConstructor(r1);
2498 __ AssertFunction(r1);
2499
2500 // The calling convention for function-specific ConstructStubs requires
2501 // r2 to contain either an AllocationSite or undefined.
2502 __ LoadRoot(r2, RootIndex::kUndefinedValue);
2503
2504 Label call_generic_stub;
2505
2506 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2507 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
2508 __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
2509 __ tst(r4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2510 __ b(eq, &call_generic_stub);
2511
2512 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2513 RelocInfo::CODE_TARGET);
2514
2515 __ bind(&call_generic_stub);
2516 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2517 RelocInfo::CODE_TARGET);
2518 }
2519
2520 // static
2521 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2522 // ----------- S t a t e -------------
2523 // -- r0 : the number of arguments
2524 // -- r1 : the function to call (checked to be a JSBoundFunction)
2525 // -- r3 : the new target (checked to be a constructor)
2526 // -----------------------------------
2527 __ AssertConstructor(r1);
2528 __ AssertBoundFunction(r1);
2529
2530 // Push the [[BoundArguments]] onto the stack.
2531 Generate_PushBoundArguments(masm);
2532
2533 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2534 __ cmp(r1, r3);
2535 __ ldr(r3, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset),
2536 eq);
2537
2538 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2539 __ ldr(r1, FieldMemOperand(r1, JSBoundFunction::kBoundTargetFunctionOffset));
2540 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2541 }
2542
2543 // static
2544 void Builtins::Generate_Construct(MacroAssembler* masm) {
2545 // ----------- S t a t e -------------
2546 // -- r0 : the number of arguments
2547 // -- r1 : the constructor to call (can be any Object)
2548 // -- r3 : the new target (either the same as the constructor or
2549 // the JSFunction on which new was invoked initially)
2550 // -----------------------------------
2551 Register argc = r0;
2552 Register target = r1;
2553 Register map = r4;
2554 Register instance_type = r5;
2555 DCHECK(!AreAliased(argc, target, map, instance_type));
2556
2557 // Check if target is a Smi.
2558 Label non_constructor, non_proxy;
2559 __ JumpIfSmi(target, &non_constructor);
2560
2561 // Check if target has a [[Construct]] internal method.
2562 __ ldr(map, FieldMemOperand(target, HeapObject::kMapOffset));
2563 {
2564 Register flags = r2;
2565 DCHECK(!AreAliased(argc, target, map, instance_type, flags));
2566 __ ldrb(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2567 __ tst(flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2568 __ b(eq, &non_constructor);
2569 }
2570
2571 // Dispatch based on instance type.
2572 __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
2573 LAST_JS_FUNCTION_TYPE);
2574 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2575 RelocInfo::CODE_TARGET, ls);
2576
2577 // Only dispatch to bound functions after checking whether they are
2578 // constructors.
2579 __ cmp(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2580 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2581 RelocInfo::CODE_TARGET, eq);
2582
2583 // Only dispatch to proxies after checking whether they are constructors.
2584 __ cmp(instance_type, Operand(JS_PROXY_TYPE));
2585 __ b(ne, &non_proxy);
2586 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2587 RelocInfo::CODE_TARGET);
2588
2589 // Called Construct on an exotic Object with a [[Construct]] internal method.
2590 __ bind(&non_proxy);
2591 {
2592 // Overwrite the original receiver with the (original) target.
2593 __ str(target, __ ReceiverOperand(argc));
2594 // Let the "call_as_constructor_delegate" take care of the rest.
2595 __ LoadNativeContextSlot(target,
2596 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2597 __ Jump(masm->isolate()->builtins()->CallFunction(),
2598 RelocInfo::CODE_TARGET);
2599 }
2600
2601 // Called Construct on an Object that doesn't have a [[Construct]] internal
2602 // method.
2603 __ bind(&non_constructor);
2604 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2605 RelocInfo::CODE_TARGET);
2606 }
2607
2608 #if V8_ENABLE_WEBASSEMBLY
2609 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2610 // The function index was put in a register by the jump table trampoline.
2611 // Convert to Smi for the runtime call.
2612 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2613 {
2614 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2615 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2616
2617 // Save all parameter registers (see wasm-linkage.h). They might be
2618 // overwritten in the runtime call below. We don't have any callee-saved
2619 // registers in wasm, so no need to store anything else.
2620 RegList gp_regs;
2621 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2622 gp_regs.set(gp_param_reg);
2623 }
2624 DwVfpRegister lowest_fp_reg = std::begin(wasm::kFpParamRegisters)[0];
2625 DwVfpRegister highest_fp_reg = std::end(wasm::kFpParamRegisters)[-1];
2626 for (DwVfpRegister fp_param_reg : wasm::kFpParamRegisters) {
2627 CHECK(fp_param_reg.code() >= lowest_fp_reg.code() &&
2628 fp_param_reg.code() <= highest_fp_reg.code());
2629 }
2630
2631 CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2632 CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
2633 arraysize(wasm::kFpParamRegisters));
2634 CHECK_EQ(gp_regs.Count(),
2635 WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs);
2636 CHECK_EQ(highest_fp_reg.code() - lowest_fp_reg.code() + 1,
2637 WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs);
2638
2639 __ stm(db_w, sp, gp_regs);
2640 __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
2641
2642 // Push the Wasm instance for loading the jump table address after the
2643 // runtime call.
2644 __ push(kWasmInstanceRegister);
2645
2646 // Push the Wasm instance again as an explicit argument to the runtime
2647 // function.
2648 __ push(kWasmInstanceRegister);
2649 // Push the function index as second argument.
2650 __ push(kWasmCompileLazyFuncIndexRegister);
2651 // Initialize the JavaScript context with 0. CEntry will use it to
2652 // set the current context on the isolate.
2653 __ Move(cp, Smi::zero());
2654 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2655 // The runtime function returns the jump table slot offset as a Smi. Use
2656 // that to compute the jump target in r8.
2657 __ pop(kWasmInstanceRegister);
2658 __ ldr(r8, MemOperand(
2659 kWasmInstanceRegister,
2660 WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
2661 __ add(r8, r8, Operand::SmiUntag(kReturnRegister0));
2662 // r8 now holds the jump table slot where we want to jump to in the end.
2663
2664 // Restore registers.
2665 __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
2666 __ ldm(ia_w, sp, gp_regs);
2667 }
2668
2669 // Finally, jump to the jump table slot for the function.
2670 __ Jump(r8);
2671 }
2672
2673 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2674 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2675 {
2676 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2677
2678 STATIC_ASSERT(DwVfpRegister::kNumRegisters == 32);
2679 constexpr DwVfpRegister last =
2680 WasmDebugBreakFrameConstants::kPushedFpRegs.last();
2681 constexpr DwVfpRegister first =
2682 WasmDebugBreakFrameConstants::kPushedFpRegs.first();
2683 static_assert(
2684 WasmDebugBreakFrameConstants::kPushedFpRegs.Count() ==
2685 last.code() - first.code() + 1,
2686 "All registers in the range from first to last have to be set");
2687
2688 // Save all parameter registers. They might hold live values, we restore
2689 // them after the runtime call.
2690 constexpr DwVfpRegister lowest_fp_reg = first;
2691 constexpr DwVfpRegister highest_fp_reg = last;
2692
2693 // Store gp parameter registers.
2694 __ stm(db_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
2695 // Store fp parameter registers.
2696 __ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
2697
2698 // Initialize the JavaScript context with 0. CEntry will use it to
2699 // set the current context on the isolate.
2700 __ Move(cp, Smi::zero());
2701 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2702
2703 // Restore registers.
2704 __ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
2705 __ ldm(ia_w, sp, WasmDebugBreakFrameConstants::kPushedGpRegs);
2706 }
2707 __ Ret();
2708 }
2709
2710 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2711 // TODO(v8:10701): Implement for this platform.
2712 __ Trap();
2713 }
2714
2715 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2716 // TODO(v8:12191): Implement for this platform.
2717 __ Trap();
2718 }
2719
2720 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2721 // TODO(v8:12191): Implement for this platform.
2722 __ Trap();
2723 }
2724
2725 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2726 // TODO(v8:12191): Implement for this platform.
2727 __ Trap();
2728 }
2729
2730 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2731 // Only needed on x64.
2732 __ Trap();
2733 }
2734 #endif // V8_ENABLE_WEBASSEMBLY
2735
2736 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2737 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2738 bool builtin_exit_frame) {
2739 // Called from JavaScript; parameters are on stack as if calling JS function.
2740 // r0: number of arguments including receiver
2741 // r1: pointer to builtin function
2742 // fp: frame pointer (restored after C call)
2743 // sp: stack pointer (restored as callee's sp after C call)
2744 // cp: current context (C callee-saved)
2745 //
2746 // If argv_mode == ArgvMode::kRegister:
2747 // r2: pointer to the first argument
2748
2749 __ mov(r5, Operand(r1));
2750
2751 if (argv_mode == ArgvMode::kRegister) {
2752 // Move argv into the correct register.
2753 __ mov(r1, Operand(r2));
2754 } else {
2755 // Compute the argv pointer in a callee-saved register.
2756 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
2757 __ sub(r1, r1, Operand(kPointerSize));
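// Equivalent to: r1 = sp + (r0 - 1) * kPointerSize.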
2758 }
2759
2760 // Enter the exit frame that transitions from JavaScript to C++.
2761 FrameScope scope(masm, StackFrame::MANUAL);
2762 __ EnterExitFrame(
2763 save_doubles == SaveFPRegsMode::kSave, 0,
2764 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2765
2766 // Store a copy of argc in a callee-saved register (r4) for later.
2767 __ mov(r4, Operand(r0));
2768
2769 // r0, r4: number of arguments including receiver (C callee-saved)
2770 // r1: pointer to the first argument (C callee-saved)
2771 // r5: pointer to builtin function (C callee-saved)
2772
2773 #if V8_HOST_ARCH_ARM
2774 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
2775 int frame_alignment_mask = frame_alignment - 1;
2776 if (FLAG_debug_code) {
2777 if (frame_alignment > kPointerSize) {
2778 Label alignment_as_expected;
2779 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
2780 __ tst(sp, Operand(frame_alignment_mask));
2781 __ b(eq, &alignment_as_expected);
2782 // Don't use Check here, as it would call Runtime_Abort and re-enter this code.
2783 __ stop();
2784 __ bind(&alignment_as_expected);
2785 }
2786 }
2787 #endif
2788
2789 // Call C built-in.
2790 // r0 = argc, r1 = argv, r2 = isolate
2791 __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
2792 __ StoreReturnAddressAndCall(r5);
2793
2794 // Result returned in r0 or r1:r0 - do not destroy these registers!
2795
2796 // Check result for exception sentinel.
2797 Label exception_returned;
2798 __ CompareRoot(r0, RootIndex::kException);
2799 __ b(eq, &exception_returned);
2800
2801 // Check that there is no pending exception, otherwise we
2802 // should have returned the exception sentinel.
2803 if (FLAG_debug_code) {
2804 Label okay;
2805 ExternalReference pending_exception_address = ExternalReference::Create(
2806 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2807 __ Move(r3, pending_exception_address);
2808 __ ldr(r3, MemOperand(r3));
2809 __ CompareRoot(r3, RootIndex::kTheHoleValue);
2810 // Cannot use Check here, as it attempts to generate a call into the runtime.
2811 __ b(eq, &okay);
2812 __ stop();
2813 __ bind(&okay);
2814 }
2815
2816 // Exit C frame and return.
2817 // r0:r1: result
2818 // sp: stack pointer
2819 // fp: frame pointer
2820 Register argc = argv_mode == ArgvMode::kRegister
2821 // We don't want to pop arguments so set argc to no_reg.
2822 ? no_reg
2823 // Callee-saved register r4 still holds argc.
2824 : r4;
2825 __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
2826 __ mov(pc, lr);
2827
2828 // Handling of exception.
2829 __ bind(&exception_returned);
2830
2831 ExternalReference pending_handler_context_address = ExternalReference::Create(
2832 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2833 ExternalReference pending_handler_entrypoint_address =
2834 ExternalReference::Create(
2835 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2836 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2837 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2838 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2839 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2840
2841 // Ask the runtime for help to determine the handler. This will set r0 to
2842 // contain the current pending exception; do not clobber it.
2843 ExternalReference find_handler =
2844 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2845 {
2846 FrameScope scope(masm, StackFrame::MANUAL);
2847 __ PrepareCallCFunction(3, 0);
2848 __ mov(r0, Operand(0));
2849 __ mov(r1, Operand(0));
2850 __ Move(r2, ExternalReference::isolate_address(masm->isolate()));
2851 __ CallCFunction(find_handler, 3);
2852 }
2853
2854 // Retrieve the handler context, SP and FP.
2855 __ Move(cp, pending_handler_context_address);
2856 __ ldr(cp, MemOperand(cp));
2857 __ Move(sp, pending_handler_sp_address);
2858 __ ldr(sp, MemOperand(sp));
2859 __ Move(fp, pending_handler_fp_address);
2860 __ ldr(fp, MemOperand(fp));
2861
2862 // If the handler is a JS frame, restore the context to the frame. Note that
2863 // cp will be 0 for non-JS frames, in which case the store below is skipped.
2864 __ cmp(cp, Operand(0));
2865 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
2866
2867 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
2868 {
2869 UseScratchRegisterScope temps(masm);
2870 Register scratch = temps.Acquire();
2871 __ Move(scratch, ExternalReference::Create(
2872 IsolateAddressId::kCEntryFPAddress, masm->isolate()));
2873 __ mov(r1, Operand::Zero());
2874 __ str(r1, MemOperand(scratch));
2875 }
2876
2877 // Compute the handler entry address and jump to it.
2878 ConstantPoolUnavailableScope constant_pool_unavailable(masm);
2879 __ Move(r1, pending_handler_entrypoint_address);
2880 __ ldr(r1, MemOperand(r1));
2881 __ Jump(r1);
2882 }
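// Illustrative note (added for clarity): on the exception path above, cp, sp
// and fp are reloaded from the per-isolate "pending handler" slots that
// Runtime::kUnwindAndFindExceptionHandler fills in, so execution resumes
// directly in the handler rather than returning to CEntry's caller.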
2883
2884 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2885 Label negate, done;
2886
2887 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2888 UseScratchRegisterScope temps(masm);
2889 Register result_reg = r7;
2890 Register double_low = GetRegisterThatIsNotOneOf(result_reg);
2891 Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
2892 LowDwVfpRegister double_scratch = temps.AcquireLowD();
2893
2894 // Save the old values from these temporary registers on the stack.
2895 __ Push(result_reg, double_high, double_low);
2896
2897 // Account for saved regs.
2898 const int kArgumentOffset = 3 * kPointerSize;
2899
2900 MemOperand input_operand(sp, kArgumentOffset);
2901 MemOperand result_operand = input_operand;
2902
2903 // Load double input.
2904 __ vldr(double_scratch, input_operand);
2905 __ vmov(double_low, double_high, double_scratch);
2906 // Try to convert with a FPU convert instruction. This handles all
2907 // non-saturating cases.
2908 __ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
2909
2910 Register scratch = temps.Acquire();
2911 __ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
2912 HeapNumber::kExponentBits);
2913 // Load scratch with exponent - 1. This is faster than loading
2914 // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
2915 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
2916 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
2917 // If the exponent is greater than or equal to 84, the 32 least significant
2918 // bits of the integer value are zeros (84 = 52 mantissa bits + 32 low-order
2919 // zero bits), so the result is 0.
2920 // Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
2921 // greater than this, the conversion is out of range, so return zero.
2922 __ cmp(scratch, Operand(83));
2923 __ mov(result_reg, Operand::Zero(), LeaveCC, ge);
2924 __ b(ge, &done);
2925
2926 // If we reach this code, 30 <= exponent <= 83.
2927 // `TryInlineTruncateDoubleToI` above will have truncated any double with an
2928 // exponent lower than 30.
2929 if (FLAG_debug_code) {
2930 // Scratch is exponent - 1.
2931 __ cmp(scratch, Operand(30 - 1));
2932 __ Check(ge, AbortReason::kUnexpectedValue);
2933 }
2934
2935 // We don't have to handle cases where 0 <= exponent <= 20 for which we would
2936 // need to shift right the high part of the mantissa.
2937 // Scratch contains exponent - 1.
2938 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
2939 __ rsb(scratch, scratch, Operand(51), SetCC);
2940
2941 // 52 <= exponent <= 83, shift only double_low.
2942 // On entry, scratch contains: 52 - exponent.
2943 __ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
2944 __ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
2945 __ b(ls, &negate);
2946
2947 // 21 <= exponent <= 51, shift double_low and double_high
2948 // to generate the result.
2949 __ mov(double_low, Operand(double_low, LSR, scratch));
2950 // Scratch contains: 52 - exponent.
2951 // We need: exponent - 20.
2952 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
2953 __ rsb(scratch, scratch, Operand(32));
2954 __ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
2955 // Set the implicit 1 before the mantissa part in double_high.
2956 __ orr(result_reg, result_reg,
2957 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2958 __ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));
2959
2960 __ bind(&negate);
2961 // If the input was positive, double_high ASR 31 equals 0 and
2962 // double_high LSR 31 equals 0.
2963 // New result = (result eor 0) + 0 = result.
2964 // If the input was negative, we have to negate the result:
2965 // double_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
2966 // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
2967 __ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
2968 __ add(result_reg, result_reg, Operand(double_high, LSR, 31));
2969
2970 __ bind(&done);
2971 __ str(result_reg, result_operand);
2972
2973 // Restore registers corrupted in this routine and return.
2974 __ Pop(result_reg, double_high, double_low);
2975 __ Ret();
2976 }
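// For reference, a rough C sketch of the conversion performed above. This is
// an illustrative approximation only (the helper below is not part of V8, and
// the real code works on the raw 32-bit halves of the double); the builtin
// only reaches the manual path for exponents >= 30, smaller exponents are
// handled by the vcvt fast path.
//
//   uint32_t DoubleToInt32Sketch(uint64_t bits) {
//     int exponent = (int)((bits >> 52) & 0x7FF) - 1023;  // unbiased exponent
//     if (exponent < 0) return 0;    // |value| < 1 truncates to 0
//     if (exponent >= 84) return 0;  // low 32 bits of the integer are all 0
//     uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
//     uint32_t magnitude =
//         exponent >= 52 ? (uint32_t)(mantissa << (exponent - 52))
//                        : (uint32_t)(mantissa >> (52 - exponent));
//     // Negating the low 32 bits of the magnitude is what the eor/add pair
//     // at the end of the builtin does.
//     return (bits >> 63) ? -magnitude : magnitude;
//   }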
2977
2978 namespace {
2979
2980 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2981 return ref0.address() - ref1.address();
2982 }
2983
2984 // Calls an API function. Allocates a HandleScope, extracts the returned value
2985 // from the handle, and propagates exceptions. Restores the context.
2986 // stack_space - space to be unwound on exit (includes the call's JS arguments
2987 // and the additional space allocated for the fast call).
2988 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2989 ExternalReference thunk_ref, int stack_space,
2990 MemOperand* stack_space_operand,
2991 MemOperand return_value_operand) {
2992 ASM_CODE_COMMENT(masm);
2993 Isolate* isolate = masm->isolate();
2994 ExternalReference next_address =
2995 ExternalReference::handle_scope_next_address(isolate);
2996 const int kNextOffset = 0;
2997 const int kLimitOffset = AddressOffset(
2998 ExternalReference::handle_scope_limit_address(isolate), next_address);
2999 const int kLevelOffset = AddressOffset(
3000 ExternalReference::handle_scope_level_address(isolate), next_address);
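// Note (added for clarity): the limit and level fields are at small fixed
// offsets from the next field in the isolate's handle scope data, so a single
// base register (r9 below) with immediate offsets can address all three.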
3001
3002 DCHECK(function_address == r1 || function_address == r2);
3003
3004 Label profiler_enabled, end_profiler_check;
3005 __ Move(r9, ExternalReference::is_profiling_address(isolate));
3006 __ ldrb(r9, MemOperand(r9, 0));
3007 __ cmp(r9, Operand(0));
3008 __ b(ne, &profiler_enabled);
3009 __ Move(r9, ExternalReference::address_of_runtime_stats_flag());
3010 __ ldr(r9, MemOperand(r9, 0));
3011 __ cmp(r9, Operand(0));
3012 __ b(ne, &profiler_enabled);
3013 {
3014 // Call the api function directly.
3015 __ Move(r3, function_address);
3016 __ b(&end_profiler_check);
3017 }
3018 __ bind(&profiler_enabled);
3019 {
3020 // Additional parameter is the address of the actual callback.
3021 __ Move(r3, thunk_ref);
3022 }
3023 __ bind(&end_profiler_check);
3024
3025 // Allocate HandleScope in callee-save registers.
3026 __ Move(r9, next_address);
3027 __ ldr(r4, MemOperand(r9, kNextOffset));
3028 __ ldr(r5, MemOperand(r9, kLimitOffset));
3029 __ ldr(r6, MemOperand(r9, kLevelOffset));
3030 __ add(r6, r6, Operand(1));
3031 __ str(r6, MemOperand(r9, kLevelOffset));
3032
3033 __ StoreReturnAddressAndCall(r3);
3034
3035 Label promote_scheduled_exception;
3036 Label delete_allocated_handles;
3037 Label leave_exit_frame;
3038 Label return_value_loaded;
3039
3040 // Load the value from ReturnValue.
3041 __ ldr(r0, return_value_operand);
3042 __ bind(&return_value_loaded);
3043 // No more valid handles (the result handle was the last one). Restore
3044 // previous handle scope.
3045 __ str(r4, MemOperand(r9, kNextOffset));
3046 if (FLAG_debug_code) {
3047 __ ldr(r1, MemOperand(r9, kLevelOffset));
3048 __ cmp(r1, r6);
3049 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
3050 }
3051 __ sub(r6, r6, Operand(1));
3052 __ str(r6, MemOperand(r9, kLevelOffset));
3053 __ ldr(r6, MemOperand(r9, kLimitOffset));
3054 __ cmp(r5, r6);
3055 __ b(ne, &delete_allocated_handles);
3056
3057 // Leave the API exit frame.
3058 __ bind(&leave_exit_frame);
3059 // LeaveExitFrame expects unwind space to be in a register.
3060 if (stack_space_operand == nullptr) {
3061 DCHECK_NE(stack_space, 0);
3062 __ mov(r4, Operand(stack_space));
3063 } else {
3064 DCHECK_EQ(stack_space, 0);
3065 __ ldr(r4, *stack_space_operand);
3066 }
3067 __ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
3068
3069 // Check if the function scheduled an exception.
3070 __ LoadRoot(r4, RootIndex::kTheHoleValue);
3071 __ Move(r6, ExternalReference::scheduled_exception_address(isolate));
3072 __ ldr(r5, MemOperand(r6));
3073 __ cmp(r4, r5);
3074 __ b(ne, &promote_scheduled_exception);
3075
3076 __ mov(pc, lr);
3077
3078 // Re-throw by promoting a scheduled exception.
3079 __ bind(&promote_scheduled_exception);
3080 __ TailCallRuntime(Runtime::kPromoteScheduledException);
3081
3082 // HandleScope limit has changed. Delete allocated extensions.
3083 __ bind(&delete_allocated_handles);
3084 __ str(r5, MemOperand(r9, kLimitOffset));
3085 __ mov(r4, r0);
3086 __ PrepareCallCFunction(1);
3087 __ Move(r0, ExternalReference::isolate_address(isolate));
3088 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3089 __ mov(r0, r4);
3090 __ jmp(&leave_exit_frame);
3091 }
3092
3093 } // namespace
3094
3095 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3096 // ----------- S t a t e -------------
3097 // -- cp : context
3098 // -- r1 : api function address
3099 // -- r2 : arguments count (not including the receiver)
3100 // -- r3 : call data
3101 // -- r0 : holder
3102 // -- sp[0] : receiver
3103 // -- sp[1 * kPointerSize] : first argument
3104 // -- ...
3105 // -- sp[(argc) * kPointerSize] : last argument
3106 // -----------------------------------
3107
3108 Register api_function_address = r1;
3109 Register argc = r2;
3110 Register call_data = r3;
3111 Register holder = r0;
3112 Register scratch = r4;
3113
3114 DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
3115
3116 using FCA = FunctionCallbackArguments;
3117
3118 STATIC_ASSERT(FCA::kArgsLength == 6);
3119 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3120 STATIC_ASSERT(FCA::kDataIndex == 4);
3121 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3122 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3123 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3124 STATIC_ASSERT(FCA::kHolderIndex == 0);
3125
3126 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3127 //
3128 // Target state:
3129 // sp[0 * kPointerSize]: kHolder
3130 // sp[1 * kPointerSize]: kIsolate
3131 // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3132 // sp[3 * kPointerSize]: undefined (kReturnValue)
3133 // sp[4 * kPointerSize]: kData
3134 // sp[5 * kPointerSize]: undefined (kNewTarget)
3135
3136 // Reserve space on the stack.
3137 __ AllocateStackSpace(FCA::kArgsLength * kPointerSize);
3138
3139 // kHolder.
3140 __ str(holder, MemOperand(sp, 0 * kPointerSize));
3141
3142 // kIsolate.
3143 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3144 __ str(scratch, MemOperand(sp, 1 * kPointerSize));
3145
3146 // kReturnValueDefaultValue and kReturnValue.
3147 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3148 __ str(scratch, MemOperand(sp, 2 * kPointerSize));
3149 __ str(scratch, MemOperand(sp, 3 * kPointerSize));
3150
3151 // kData.
3152 __ str(call_data, MemOperand(sp, 4 * kPointerSize));
3153
3154 // kNewTarget.
3155 __ str(scratch, MemOperand(sp, 5 * kPointerSize));
3156
3157 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3158 // We use it below to set up the FunctionCallbackInfo object.
3159 __ mov(scratch, sp);
3160
3161 // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
3162 // since it's not controlled by the GC.
3163 static constexpr int kApiStackSpace = 4;
3164 static constexpr bool kDontSaveDoubles = false;
3165 FrameScope frame_scope(masm, StackFrame::MANUAL);
3166 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3167
3168 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3169 // Arguments are after the return address (pushed by EnterExitFrame()).
3170 __ str(scratch, MemOperand(sp, 1 * kPointerSize));
3171
3172 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3173 // on the stack).
3174 __ add(scratch, scratch, Operand((FCA::kArgsLength + 1) * kPointerSize));
3175 __ str(scratch, MemOperand(sp, 2 * kPointerSize));
3176
3177 // FunctionCallbackInfo::length_.
3178 __ str(argc, MemOperand(sp, 3 * kPointerSize));
3179
3180 // We also store the number of bytes to drop from the stack after returning
3181 // from the API function here.
3182 __ mov(scratch,
3183 Operand((FCA::kArgsLength + 1 /* receiver */) * kPointerSize));
3184 __ add(scratch, scratch, Operand(argc, LSL, kPointerSizeLog2));
3185 __ str(scratch, MemOperand(sp, 4 * kPointerSize));
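// Illustrative example (not generated code): with argc == 2 the value stored
// above is (6 + 1 + 2) * kPointerSize == 36, i.e. the implicit args, the
// receiver and both JS arguments are dropped from the stack after the call.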
3186
3187 // v8::FunctionCallback's argument.
3188 __ add(r0, sp, Operand(1 * kPointerSize));
3189
3190 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3191
3192 // There are two stack slots above the arguments we constructed on the stack.
3193 // TODO(jgruber): Document what these arguments are.
3194 static constexpr int kStackSlotsAboveFCA = 2;
3195 MemOperand return_value_operand(
3196 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3197
3198 static constexpr int kUseStackSpaceOperand = 0;
3199 MemOperand stack_space_operand(sp, 4 * kPointerSize);
3200
3201 AllowExternalCallThatCantCauseGC scope(masm);
3202 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3203 kUseStackSpaceOperand, &stack_space_operand,
3204 return_value_operand);
3205 }
3206
3207 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3208 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3209 // name below the exit frame to make GC aware of them.
3210 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3211 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3212 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3213 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3214 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3215 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3216 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3217 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3218
3219 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3220 Register holder = ApiGetterDescriptor::HolderRegister();
3221 Register callback = ApiGetterDescriptor::CallbackRegister();
3222 Register scratch = r4;
3223 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3224
3225 Register api_function_address = r2;
3226
3227 __ push(receiver);
3228 // Push data from AccessorInfo.
3229 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3230 __ push(scratch);
3231 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3232 __ Push(scratch, scratch);
3233 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3234 __ Push(scratch, holder);
3235 __ Push(Smi::zero()); // should_throw_on_error -> false
3236 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3237 __ push(scratch);
3238 // v8::PropertyCallbackInfo::args_ array and name handle.
3239 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
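// Illustrative stack layout after the pushes above (derived from the
// STATIC_ASSERTs; lowest address first):
//   sp[0]  : name handle
//   sp[4]  : should_throw_on_error (args_[0])
//   sp[8]  : holder
//   sp[12] : isolate
//   sp[16] : undefined (return value default)
//   sp[20] : undefined (return value)
//   sp[24] : data
//   sp[28] : receiver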
3240
3241 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3242 __ mov(r0, sp); // r0 = Handle<Name>
3243 __ add(r1, r0, Operand(1 * kPointerSize)); // r1 = v8::PCI::args_
3244
3245 const int kApiStackSpace = 1;
3246 FrameScope frame_scope(masm, StackFrame::MANUAL);
3247 __ EnterExitFrame(false, kApiStackSpace);
3248
3249 // Create the v8::PropertyCallbackInfo object on the stack and initialize
3250 // its args_ field.
3251 __ str(r1, MemOperand(sp, 1 * kPointerSize));
3252 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = v8::PropertyCallbackInfo&
3253
3254 ExternalReference thunk_ref =
3255 ExternalReference::invoke_accessor_getter_callback();
3256
3257 __ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3258 __ ldr(api_function_address,
3259 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3260
3261 // +3 is to skip prolog, return address and name handle.
3262 MemOperand return_value_operand(
3263 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3264 MemOperand* const kUseStackSpaceConstant = nullptr;
3265 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3266 kStackUnwindSpace, kUseStackSpaceConstant,
3267 return_value_operand);
3268 }
3269
3270 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3271 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3272 // purpose Code object) to be able to call into C functions that may trigger
3273 // GC and thus move the caller.
3274 //
3275 // DirectCEntry places the return address on the stack (updated by the GC),
3276 // making the call GC safe. The irregexp backend relies on this.
3277
3278 __ str(lr, MemOperand(sp, 0)); // Store the return address.
3279 __ blx(ip); // Call the C++ function.
3280 __ ldr(pc, MemOperand(sp, 0)); // Return to calling code.
3281 }
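// Illustrative note (not part of the original comments): callers are expected
// to place the target C function address in ip and to have reserved one stack
// slot at sp for the saved return address; the GC can then update that slot if
// the calling Code object moves while the C function runs.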
3282
3283 void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
3284 Register dest = r0;
3285 Register src = r1;
3286 Register chars = r2;
3287 Register temp1 = r3;
3288 Label less_4;
3289
3290 {
3291 UseScratchRegisterScope temps(masm);
3292 Register temp2 = temps.Acquire();
3293 Label loop;
3294
3295 __ bic(temp2, chars, Operand(0x3), SetCC);
3296 __ b(&less_4, eq);
3297 __ add(temp2, dest, temp2);
3298
3299 __ bind(&loop);
3300 __ ldr(temp1, MemOperand(src, 4, PostIndex));
3301 __ str(temp1, MemOperand(dest, 4, PostIndex));
3302 __ cmp(dest, temp2);
3303 __ b(&loop, ne);
3304 }
3305
3306 __ bind(&less_4);
3307 __ mov(chars, Operand(chars, LSL, 31), SetCC);
3308 // bit0 => Z (ne), bit1 => C (cs)
3309 __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
3310 __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
3311 __ ldrb(temp1, MemOperand(src), ne);
3312 __ strb(temp1, MemOperand(dest), ne);
3313 __ Ret();
3314 }
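// For reference, a rough C equivalent of the routine above (illustrative only;
// the helper name is made up, and the real code copies whole words with
// ldr/str and uses conditional halfword/byte copies for the tail):
//
//   void MemCopyUint8Sketch(uint8_t* dest, const uint8_t* src, size_t chars) {
//     while (chars >= 4) {  // word-sized chunks
//       memcpy(dest, src, 4); dest += 4; src += 4; chars -= 4;
//     }
//     if (chars & 2) { memcpy(dest, src, 2); dest += 2; src += 2; }
//     if (chars & 1) { *dest = *src; }
//   }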
3315
3316 namespace {
3317
3318 // This code tries to be close to ia32 code so that any changes can be
3319 // easily ported.
3320 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3321 DeoptimizeKind deopt_kind) {
3322 Isolate* isolate = masm->isolate();
3323
3324 // Note: This is an overapproximation; we always reserve space for 32 double
3325 // registers, even though the actual CPU may only support 16. In the latter
3326 // case, SaveFPRegs and RestoreFPRegs still use 32 stack slots, but only fill
3327 // 16.
3328 static constexpr int kDoubleRegsSize =
3329 kDoubleSize * DwVfpRegister::kNumRegisters;
3330
3331 // Save all allocatable VFP registers before messing with them.
3332 {
3333 UseScratchRegisterScope temps(masm);
3334 Register scratch = temps.Acquire();
3335 __ SaveFPRegs(sp, scratch);
3336 }
3337
3338 // Save all general purpose registers before messing with them.
3339 static constexpr int kNumberOfRegisters = Register::kNumRegisters;
3340 STATIC_ASSERT(kNumberOfRegisters == 16);
3341
3342 // Everything but pc, lr and ip which will be saved but not restored.
3343 RegList restored_regs = kJSCallerSaved | kCalleeSaved | RegList{ip};
3344
3345 // Push all 16 registers (needed to populate FrameDescription::registers_).
3346 // TODO(v8:1588): Note that using pc with stm is deprecated, so we should
3347 // perhaps handle this a bit differently.
3348 __ stm(db_w, sp, restored_regs | RegList{sp, lr, pc});
3349
3350 {
3351 UseScratchRegisterScope temps(masm);
3352 Register scratch = temps.Acquire();
3353 __ Move(scratch, ExternalReference::Create(
3354 IsolateAddressId::kCEntryFPAddress, isolate));
3355 __ str(fp, MemOperand(scratch));
3356 }
3357
3358 static constexpr int kSavedRegistersAreaSize =
3359 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
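// Illustrative arithmetic (not generated code): on this 32-bit target the
// area is 16 * 4 bytes of core registers plus 32 * 8 bytes of double
// registers, i.e. 320 bytes in total.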
3360
3361 // Get the address of the location in the code object (r3) (return
3362 // address for lazy deoptimization) and compute the fp-to-sp delta in
3363 // register r4.
3364 __ mov(r2, lr);
3365 __ add(r3, sp, Operand(kSavedRegistersAreaSize));
3366 __ sub(r3, fp, r3);
3367
3368 // Allocate a new deoptimizer object.
3369 // Pass four arguments in r0 to r3 and fifth argument on stack.
3370 __ PrepareCallCFunction(5);
3371 __ mov(r0, Operand(0));
3372 Label context_check;
3373 __ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3374 __ JumpIfSmi(r1, &context_check);
3375 __ ldr(r0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3376 __ bind(&context_check);
3377 __ mov(r1, Operand(static_cast<int>(deopt_kind)));
3378 // r2: code address or 0 already loaded.
3379 // r3: Fp-to-sp delta already loaded.
3380 __ Move(r4, ExternalReference::isolate_address(isolate));
3381 __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Isolate.
3382 // Call Deoptimizer::New().
3383 {
3384 AllowExternalCallThatCantCauseGC scope(masm);
3385 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3386 }
3387
3388 // Preserve the "deoptimizer" object in register r0 and get the input
3389 // frame descriptor pointer into r1 (deoptimizer->input_).
3390 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
3391
3392 // Copy core registers into FrameDescription::registers_.
3393 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3394 for (int i = 0; i < kNumberOfRegisters; i++) {
3395 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3396 __ ldr(r2, MemOperand(sp, i * kPointerSize));
3397 __ str(r2, MemOperand(r1, offset));
3398 }
3399
3400 // Copy double registers to double_registers_.
3401 static constexpr int kDoubleRegsOffset =
3402 FrameDescription::double_registers_offset();
3403 {
3404 UseScratchRegisterScope temps(masm);
3405 Register scratch = temps.Acquire();
3406 Register src_location = r4;
3407 __ add(src_location, sp, Operand(kNumberOfRegisters * kPointerSize));
3408 __ RestoreFPRegs(src_location, scratch);
3409
3410 Register dst_location = r4;
3411 __ add(dst_location, r1, Operand(kDoubleRegsOffset));
3412 __ SaveFPRegsToHeap(dst_location, scratch);
3413 }
3414
3415 // Mark the stack as not iterable for the CPU profiler which won't be able to
3416 // walk the stack without the return address.
3417 {
3418 UseScratchRegisterScope temps(masm);
3419 Register is_iterable = temps.Acquire();
3420 Register zero = r4;
3421 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3422 __ mov(zero, Operand(0));
3423 __ strb(zero, MemOperand(is_iterable));
3424 }
3425
3426 // Remove the saved registers from the stack.
3427 __ add(sp, sp, Operand(kSavedRegistersAreaSize));
3428
3429 // Compute a pointer to the unwinding limit in register r2; that is
3430 // the first stack slot not part of the input frame.
3431 __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
3432 __ add(r2, r2, sp);
3433
3434 // Unwind the stack down to - but not including - the unwinding
3435 // limit and copy the contents of the activation frame to the input
3436 // frame description.
3437 __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
3438 Label pop_loop;
3439 Label pop_loop_header;
3440 __ b(&pop_loop_header);
3441 __ bind(&pop_loop);
3442 __ pop(r4);
3443 __ str(r4, MemOperand(r3, 0));
3444 __ add(r3, r3, Operand(sizeof(uint32_t)));
3445 __ bind(&pop_loop_header);
3446 __ cmp(r2, sp);
3447 __ b(ne, &pop_loop);
3448
3449 // Compute the output frame in the deoptimizer.
3450 __ push(r0); // Preserve deoptimizer object across call.
3451 // r0: deoptimizer object; r1: scratch.
3452 __ PrepareCallCFunction(1);
3453 // Call Deoptimizer::ComputeOutputFrames().
3454 {
3455 AllowExternalCallThatCantCauseGC scope(masm);
3456 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3457 }
3458 __ pop(r0); // Restore deoptimizer object (class Deoptimizer).
3459
3460 __ ldr(sp, MemOperand(r0, Deoptimizer::caller_frame_top_offset()));
3461
3462 // Replace the current (input) frame with the output frames.
3463 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3464 // Outer loop state: r4 = current "FrameDescription** output_",
3465 // r1 = one past the last FrameDescription**.
3466 __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
3467 __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
3468 __ add(r1, r4, Operand(r1, LSL, 2));
3469 __ jmp(&outer_loop_header);
3470 __ bind(&outer_push_loop);
3471 // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
3472 __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
3473 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
3474 __ jmp(&inner_loop_header);
3475 __ bind(&inner_push_loop);
3476 __ sub(r3, r3, Operand(sizeof(uint32_t)));
3477 __ add(r6, r2, Operand(r3));
3478 __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
3479 __ push(r6);
3480 __ bind(&inner_loop_header);
3481 __ cmp(r3, Operand::Zero());
3482 __ b(ne, &inner_push_loop); // test for gt?
3483 __ add(r4, r4, Operand(kPointerSize));
3484 __ bind(&outer_loop_header);
3485 __ cmp(r4, r1);
3486 __ b(lt, &outer_push_loop);
3487
3488 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
3489
3490 // State:
3491 // r1: Deoptimizer::input_ (FrameDescription*).
3492 // r2: The last output FrameDescription pointer (FrameDescription*).
3493
3494 // Restore double registers from the input frame description.
3495 {
3496 UseScratchRegisterScope temps(masm);
3497 Register scratch = temps.Acquire();
3498 Register src_location = r6;
3499 __ add(src_location, r1, Operand(kDoubleRegsOffset));
3500 __ RestoreFPRegsFromHeap(src_location, scratch);
3501 }
3502
3503 // Push pc and continuation from the last output frame.
3504 __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
3505 __ push(r6);
3506 __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
3507 __ push(r6);
3508
3509 // Push the registers from the last output frame.
3510 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3511 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3512 __ ldr(r6, MemOperand(r2, offset));
3513 __ push(r6);
3514 }
3515
3516 // Restore the registers from the stack.
3517 __ ldm(ia_w, sp, restored_regs); // all but pc registers.
3518
3519 {
3520 UseScratchRegisterScope temps(masm);
3521 Register is_iterable = temps.Acquire();
3522 Register one = r4;
3523 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3524 __ mov(one, Operand(1));
3525 __ strb(one, MemOperand(is_iterable));
3526 }
3527
3528 // Remove sp, lr and pc.
3529 __ Drop(3);
3530 {
3531 UseScratchRegisterScope temps(masm);
3532 Register scratch = temps.Acquire();
3533 __ pop(scratch); // get continuation, leave pc on stack
3534 __ pop(lr);
3535 __ Jump(scratch);
3536 }
3537
3538 __ stop();
3539 }
3540
3541 } // namespace
3542
3543 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3544 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3545 }
3546
3547 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3548 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3549 }
3550
3551 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3552 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3553 }
3554
3555 namespace {
3556
3557 // Restarts execution either at the current or next (in execution order)
3558 // bytecode. If there is baseline code on the shared function info, converts an
3559 // interpreter frame into a baseline frame and continues execution in baseline
3560 // code. Otherwise execution continues with bytecode.
3561 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3562 bool next_bytecode,
3563 bool is_osr = false) {
3564 Label start;
3565 __ bind(&start);
3566
3567 // Get function from the frame.
3568 Register closure = r1;
3569 __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3570
3571 // Get the Code object from the shared function info.
3572 Register code_obj = r4;
3573 __ ldr(code_obj,
3574 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3575 __ ldr(code_obj,
3576 FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3577
3578 // Check if we have baseline code. For OSR entry it is safe to assume we
3579 // always have baseline code.
3580 if (!is_osr) {
3581 Label start_with_baseline;
3582 __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
3583 __ b(eq, &start_with_baseline);
3584
3585 // Start with bytecode as there is no baseline code.
3586 Builtin builtin_id = next_bytecode
3587 ? Builtin::kInterpreterEnterAtNextBytecode
3588 : Builtin::kInterpreterEnterAtBytecode;
3589 __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3590 RelocInfo::CODE_TARGET);
3591
3592 // Start with baseline code.
3593 __ bind(&start_with_baseline);
3594 } else if (FLAG_debug_code) {
3595 __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
3596 __ Assert(eq, AbortReason::kExpectedBaselineData);
3597 }
3598
3599 if (FLAG_debug_code) {
3600 AssertCodeIsBaseline(masm, code_obj, r3);
3601 }
3602
3603 // Load the feedback vector.
3604 Register feedback_vector = r2;
3605 __ ldr(feedback_vector,
3606 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3607 __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
3608
3609 Label install_baseline_code;
3610 // Check if the feedback vector is valid. If not, call into the runtime to
3611 // allocate it (see install_baseline_code below).
3612 __ CompareObjectType(feedback_vector, r3, r3, FEEDBACK_VECTOR_TYPE);
3613 __ b(ne, &install_baseline_code);
3614
3615 // Save BytecodeOffset from the stack frame.
3616 __ ldr(kInterpreterBytecodeOffsetRegister,
3617 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3618 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
3619 // Replace BytecodeOffset with the feedback vector.
3620 __ str(feedback_vector,
3621 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3622 feedback_vector = no_reg;
3623
3624 // Compute baseline pc for bytecode offset.
3625 ExternalReference get_baseline_pc_extref;
3626 if (next_bytecode || is_osr) {
3627 get_baseline_pc_extref =
3628 ExternalReference::baseline_pc_for_next_executed_bytecode();
3629 } else {
3630 get_baseline_pc_extref =
3631 ExternalReference::baseline_pc_for_bytecode_offset();
3632 }
3633 Register get_baseline_pc = r3;
3634 __ Move(get_baseline_pc, get_baseline_pc_extref);
3635
3636 // If the code deoptimizes during the implicit function entry stack interrupt
3637 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3638 // not a valid bytecode offset.
3639 // TODO(pthier): Investigate if it is feasible to handle this special case
3640 // in TurboFan instead of here.
3641 Label valid_bytecode_offset, function_entry_bytecode;
3642 if (!is_osr) {
3643 __ cmp(kInterpreterBytecodeOffsetRegister,
3644 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3645 kFunctionEntryBytecodeOffset));
3646 __ b(eq, &function_entry_bytecode);
3647 }
3648
3649 __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
3650 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
3651
3652 __ bind(&valid_bytecode_offset);
3653 // Get bytecode array from the stack frame.
3654 __ ldr(kInterpreterBytecodeArrayRegister,
3655 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3656 // Save the accumulator register, since it's clobbered by the below call.
3657 __ Push(kInterpreterAccumulatorRegister);
3658 {
3659 Register arg_reg_1 = r0;
3660 Register arg_reg_2 = r1;
3661 Register arg_reg_3 = r2;
3662 __ mov(arg_reg_1, code_obj);
3663 __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3664 __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
3665 FrameScope scope(masm, StackFrame::INTERNAL);
3666 __ PrepareCallCFunction(3, 0);
3667 __ CallCFunction(get_baseline_pc, 3, 0);
3668 }
3669 __ add(code_obj, code_obj, kReturnRegister0);
3670 __ Pop(kInterpreterAccumulatorRegister);
3671
3672 if (is_osr) {
3673 // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
3674 // disarm Sparkplug here.
3675 UseScratchRegisterScope temps(masm);
3676 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
3677 temps.Acquire());
3678 Generate_OSREntry(masm, code_obj,
3679 Operand(Code::kHeaderSize - kHeapObjectTag));
3680 } else {
3681 __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
3682 __ Jump(code_obj);
3683 }
3684 __ Trap(); // Unreachable.
3685
3686 if (!is_osr) {
3687 __ bind(&function_entry_bytecode);
3688 // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3689 // address of the first bytecode.
3690 __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
3691 if (next_bytecode) {
3692 __ Move(get_baseline_pc,
3693 ExternalReference::baseline_pc_for_bytecode_offset());
3694 }
3695 __ b(&valid_bytecode_offset);
3696 }
3697
3698 __ bind(&install_baseline_code);
3699 {
3700 FrameScope scope(masm, StackFrame::INTERNAL);
3701 __ Push(kInterpreterAccumulatorRegister);
3702 __ Push(closure);
3703 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3704 __ Pop(kInterpreterAccumulatorRegister);
3705 }
3706 // Retry from the start after installing baseline code.
3707 __ b(&start);
3708 }
3709
3710 } // namespace
3711
3712 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3713 MacroAssembler* masm) {
3714 Generate_BaselineOrInterpreterEntry(masm, false);
3715 }
3716
3717 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3718 MacroAssembler* masm) {
3719 Generate_BaselineOrInterpreterEntry(masm, true);
3720 }
3721
3722 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3723 MacroAssembler* masm) {
3724 Generate_BaselineOrInterpreterEntry(masm, false, true);
3725 }
3726
3727 #undef __
3728
3729 } // namespace internal
3730 } // namespace v8
3731
3732 #endif // V8_TARGET_ARCH_ARM
3733