1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_MIPS64
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 #include "src/debug/debug.h"
11 #include "src/deoptimizer/deoptimizer.h"
12 #include "src/execution/frame-constants.h"
13 #include "src/execution/frames.h"
14 #include "src/logging/counters.h"
15 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
16 #include "src/codegen/macro-assembler-inl.h"
17 #include "src/codegen/mips64/constants-mips64.h"
18 #include "src/codegen/register-configuration.h"
19 #include "src/heap/heap-inl.h"
20 #include "src/objects/cell.h"
21 #include "src/objects/foreign.h"
22 #include "src/objects/heap-number.h"
23 #include "src/objects/js-generator.h"
24 #include "src/objects/objects-inl.h"
25 #include "src/objects/smi.h"
26 #include "src/runtime/runtime.h"
27
28 #if V8_ENABLE_WEBASSEMBLY
29 #include "src/wasm/wasm-linkage.h"
30 #include "src/wasm/wasm-objects.h"
31 #endif // V8_ENABLE_WEBASSEMBLY
32
33 namespace v8 {
34 namespace internal {
35
36 #define __ ACCESS_MASM(masm)
37
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
39 __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
40 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
41 RelocInfo::CODE_TARGET);
42 }
43
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
45 Runtime::FunctionId function_id) {
46 // ----------- S t a t e -------------
47 // -- a0 : actual argument count
48 // -- a1 : target function (preserved for callee)
49 // -- a3 : new target (preserved for callee)
50 // -----------------------------------
51 {
52 FrameScope scope(masm, StackFrame::INTERNAL);
53 // Push a copy of the target function, the new target and the actual
54 // argument count.
55 // Push function as parameter to the runtime call.
56 __ SmiTag(kJavaScriptCallArgCountRegister);
57 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
58 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
59
60 __ CallRuntime(function_id, 1);
61 // Restore target function, new target and actual argument count.
62 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
63 kJavaScriptCallArgCountRegister);
64 __ SmiUntag(kJavaScriptCallArgCountRegister);
65 }
66
67 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
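  // The runtime call above returns the Code object to tail-call in v0. Adding
  // Code::kHeaderSize - kHeapObjectTag turns the tagged object pointer into
  // the address of its first instruction before jumping there.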
68 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
69 __ Jump(a2);
70 }
71
72 namespace {
73
74 enum class ArgumentsElementType {
75 kRaw, // Push arguments as they are.
76 kHandle // Dereference arguments before pushing.
77 };
78
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
80 Register scratch, Register scratch2,
81 ArgumentsElementType element_type) {
82 DCHECK(!AreAliased(array, argc, scratch));
83 Label loop, entry;
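  // Note: argc includes the receiver slot (kJSArgcReceiverSlots), so it is
  // subtracted first. The loop below then walks the argument array from the
  // highest index down to 0, pushing each element (and dereferencing handles
  // when requested), so argument 0 ends up on top of the pushed arguments.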
84 __ Dsubu(scratch, argc, Operand(kJSArgcReceiverSlots));
85 __ Branch(&entry);
86 __ bind(&loop);
87 __ Dlsa(scratch2, array, scratch, kSystemPointerSizeLog2);
88 __ Ld(scratch2, MemOperand(scratch2));
89 if (element_type == ArgumentsElementType::kHandle) {
90 __ Ld(scratch2, MemOperand(scratch2));
91 }
92 __ push(scratch2);
93 __ bind(&entry);
94 __ Daddu(scratch, scratch, Operand(-1));
95 __ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
96 }
97
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
99 // ----------- S t a t e -------------
100 // -- a0 : number of arguments
101 // -- a1 : constructor function
102 // -- a3 : new target
103 // -- cp : context
104 // -- ra : return address
105 // -- sp[...]: constructor arguments
106 // -----------------------------------
107
108 // Enter a construct frame.
109 {
110 FrameScope scope(masm, StackFrame::CONSTRUCT);
111
112 // Preserve the incoming parameters on the stack.
113 __ SmiTag(a0);
114 __ Push(cp, a0);
115 __ SmiUntag(a0);
116
117 // Set up pointer to first argument (skip receiver).
118 __ Daddu(
119 t2, fp,
120 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
121 // Copy arguments and receiver to the expression stack.
122 // t2: Pointer to start of arguments.
123 // a0: Number of arguments.
124 Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw);
125 // The receiver for the builtin/api call.
126 __ PushRoot(RootIndex::kTheHoleValue);
127
128 // Call the function.
129 // a0: number of arguments (untagged)
130 // a1: constructor function
131 // a3: new target
132 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
133
134 // Restore context from the frame.
135 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
136 // Restore smi-tagged arguments count from the frame.
137 __ Ld(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
138 // Leave construct frame.
139 }
140
141 // Remove caller arguments from the stack and return.
142 __ DropArguments(t3, TurboAssembler::kCountIsSmi,
143 TurboAssembler::kCountIncludesReceiver, t3);
144 __ Ret();
145 }
146
147 } // namespace
148
149 // The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
151 // ----------- S t a t e -------------
152 // -- a0: number of arguments (untagged)
153 // -- a1: constructor function
154 // -- a3: new target
155 // -- cp: context
156 // -- ra: return address
157 // -- sp[...]: constructor arguments
158 // -----------------------------------
159
160 // Enter a construct frame.
161 FrameScope scope(masm, StackFrame::MANUAL);
162 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
163 __ EnterFrame(StackFrame::CONSTRUCT);
164
165 // Preserve the incoming parameters on the stack.
166 __ SmiTag(a0);
167 __ Push(cp, a0, a1);
168 __ PushRoot(RootIndex::kUndefinedValue);
169 __ Push(a3);
170
171 // ----------- S t a t e -------------
172 // -- sp[0*kPointerSize]: new target
173 // -- sp[1*kPointerSize]: padding
174 // -- a1 and sp[2*kPointerSize]: constructor function
175 // -- sp[3*kPointerSize]: number of arguments (tagged)
176 // -- sp[4*kPointerSize]: context
177 // -----------------------------------
178
179 __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
180 __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
181 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
182 __ JumpIfIsInRange(
183 t2, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
184 static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
      &not_create_implicit_receiver);
186
187 // If not derived class constructor: Allocate the new receiver object.
188 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
189 t2, t3);
190 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
191 RelocInfo::CODE_TARGET);
192 __ Branch(&post_instantiation_deopt_entry);
193
194 // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
196 __ LoadRoot(v0, RootIndex::kTheHoleValue);
197
198 // ----------- S t a t e -------------
199 // -- v0: receiver
200 // -- Slot 4 / sp[0*kPointerSize]: new target
201 // -- Slot 3 / sp[1*kPointerSize]: padding
202 // -- Slot 2 / sp[2*kPointerSize]: constructor function
203 // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
204 // -- Slot 0 / sp[4*kPointerSize]: context
205 // -----------------------------------
206 // Deoptimizer enters here.
207 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
208 masm->pc_offset());
209 __ bind(&post_instantiation_deopt_entry);
210
211 // Restore new target.
212 __ Pop(a3);
213
214 // Push the allocated receiver to the stack.
215 __ Push(v0);
216
217 // We need two copies because we may have to return the original one
218 // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we save it in
  // a6 since v0 will hold the return value of the call.
221 __ mov(a6, v0);
222
223 // Set up pointer to last argument.
224 __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
225 kSystemPointerSize));
226
227 // ----------- S t a t e -------------
228 // -- a3: new target
229 // -- sp[0*kPointerSize]: implicit receiver
230 // -- sp[1*kPointerSize]: implicit receiver
231 // -- sp[2*kPointerSize]: padding
232 // -- sp[3*kPointerSize]: constructor function
233 // -- sp[4*kPointerSize]: number of arguments (tagged)
234 // -- sp[5*kPointerSize]: context
235 // -----------------------------------
236
237 // Restore constructor function and argument count.
238 __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
239 __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
240 __ SmiUntag(a0);
241
242 Label stack_overflow;
243 __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
244
245 // TODO(victorgomes): When the arguments adaptor is completely removed, we
246 // should get the formal parameter count and copy the arguments in its
247 // correct position (including any undefined), instead of delaying this to
248 // InvokeFunction.
249
250 // Copy arguments and receiver to the expression stack.
251 // t2: Pointer to start of argument.
252 // a0: Number of arguments.
253 Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw);
254 // We need two copies because we may have to return the original one
255 // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments.
257 __ Push(a6);
258
259 // Call the function.
260 __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
261
262 // ----------- S t a t e -------------
263 // -- v0: constructor result
264 // -- sp[0*kPointerSize]: implicit receiver
265 // -- sp[1*kPointerSize]: padding
266 // -- sp[2*kPointerSize]: constructor function
267 // -- sp[3*kPointerSize]: number of arguments
268 // -- sp[4*kPointerSize]: context
269 // -----------------------------------
270
271 // Store offset of return address for deoptimizer.
272 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
273 masm->pc_offset());
274
275 // If the result is an object (in the ECMA sense), we should get rid
276 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
277 // on page 74.
278 Label use_receiver, do_throw, leave_and_return, check_receiver;
279
280 // If the result is undefined, we jump out to using the implicit receiver.
281 __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver);
282
283 // Otherwise we do a smi check and fall through to check if the return value
284 // is a valid receiver.
285
286 // Throw away the result of the constructor invocation and use the
287 // on-stack receiver as the result.
288 __ bind(&use_receiver);
289 __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
290 __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
291
292 __ bind(&leave_and_return);
293 // Restore smi-tagged arguments count from the frame.
294 __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
295 // Leave construct frame.
296 __ LeaveFrame(StackFrame::CONSTRUCT);
297
298 // Remove caller arguments from the stack and return.
299 __ DropArguments(a1, TurboAssembler::kCountIsSmi,
300 TurboAssembler::kCountIncludesReceiver, a4);
301 __ Ret();
302
303 __ bind(&check_receiver);
304 __ JumpIfSmi(v0, &use_receiver);
305
306 // If the type of the result (stored in its map) is less than
307 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
308 __ GetObjectType(v0, t2, t2);
309 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
310 __ Branch(&leave_and_return, greater_equal, t2,
311 Operand(FIRST_JS_RECEIVER_TYPE));
312 __ Branch(&use_receiver);
313
314 __ bind(&do_throw);
315 // Restore the context from the frame.
316 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
317 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
318 __ break_(0xCC);
319
320 __ bind(&stack_overflow);
321 // Restore the context from the frame.
322 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
323 __ CallRuntime(Runtime::kThrowStackOverflow);
324 __ break_(0xCC);
325 }
326
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
328 Generate_JSBuiltinsConstructStubHelper(masm);
329 }
330
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
332 Register scratch) {
333 DCHECK(!AreAliased(code, scratch));
334 // Verify that the code kind is baseline code via the CodeKind.
335 __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
336 __ DecodeField<Code::KindField>(scratch);
337 __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
338 Operand(static_cast<int>(CodeKind::BASELINE)));
339 }
340
341 // TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
342 // the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
344 Register sfi_data,
345 Register scratch1,
346 Label* is_baseline) {
347 Label done;
348
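  // sfi_data holds the SharedFunctionInfo's function_data: either a
  // BytecodeArray (fall through to the interpreter path), a CodeT containing
  // baseline code (branch to |is_baseline|), or an InterpreterData wrapper
  // from which the BytecodeArray is unwrapped below.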
349 __ GetObjectType(sfi_data, scratch1, scratch1);
350 if (FLAG_debug_code) {
351 Label not_baseline;
    __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
353 AssertCodeIsBaseline(masm, sfi_data, scratch1);
354 __ Branch(is_baseline);
    __ bind(&not_baseline);
356 } else {
357 __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
358 }
359 __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
360 __ Ld(sfi_data,
361 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
362 __ bind(&done);
363 }
364
365 // static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
367 // ----------- S t a t e -------------
368 // -- v0 : the value to pass to the generator
369 // -- a1 : the JSGeneratorObject to resume
370 // -- ra : return address
371 // -----------------------------------
372 // Store input value into generator object.
373 __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
374 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
375 kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
  // Check that a1 is still valid; RecordWrite might have clobbered it.
377 __ AssertGeneratorObject(a1);
378
379 // Load suspended function and context.
380 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
381 __ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
382
383 // Flood function if we are stepping.
384 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
385 Label stepping_prepared;
386 ExternalReference debug_hook =
387 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
388 __ li(a5, debug_hook);
389 __ Lb(a5, MemOperand(a5));
390 __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
391
392 // Flood function if we need to continue stepping in the suspended generator.
393 ExternalReference debug_suspended_generator =
394 ExternalReference::debug_suspended_generator_address(masm->isolate());
395 __ li(a5, debug_suspended_generator);
396 __ Ld(a5, MemOperand(a5));
397 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
398 __ bind(&stepping_prepared);
399
400 // Check the stack for overflow. We are not trying to catch interruptions
401 // (i.e. debug break and preemption) here, so check the "real stack limit".
402 Label stack_overflow;
403 __ LoadStackLimit(kScratchReg,
404 MacroAssembler::StackLimitKind::kRealStackLimit);
405 __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
406
407 // ----------- S t a t e -------------
408 // -- a1 : the JSGeneratorObject to resume
409 // -- a4 : generator function
410 // -- cp : generator context
411 // -- ra : return address
412 // -----------------------------------
413
414 // Push holes for arguments to generator function. Since the parser forced
415 // context allocation for any variables in generators, the actual argument
416 // values have already been copied into the context and these dummy values
417 // will never be used.
418 __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
419 __ Lhu(a3,
420 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
421 __ Dsubu(a3, a3, Operand(kJSArgcReceiverSlots));
422 __ Ld(t1,
423 FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
424 {
425 Label done_loop, loop;
426 __ bind(&loop);
427 __ Dsubu(a3, a3, Operand(1));
428 __ Branch(&done_loop, lt, a3, Operand(zero_reg));
429 __ Dlsa(kScratchReg, t1, a3, kPointerSizeLog2);
430 __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
431 __ Push(kScratchReg);
432 __ Branch(&loop);
433 __ bind(&done_loop);
434 // Push receiver.
435 __ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
436 __ Push(kScratchReg);
437 }
438
439 // Underlying function needs to have bytecode available.
440 if (FLAG_debug_code) {
441 Label is_baseline;
442 __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
443 __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
444 GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
445 __ GetObjectType(a3, a3, a3);
446 __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
447 Operand(BYTECODE_ARRAY_TYPE));
448 __ bind(&is_baseline);
449 }
450
451 // Resume (Ignition/TurboFan) generator object.
452 {
453 __ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
454 __ Lhu(a0, FieldMemOperand(
455 a0, SharedFunctionInfo::kFormalParameterCountOffset));
456 // We abuse new.target both to indicate that this is a resume call and to
457 // pass in the generator object. In ordinary calls, new.target is always
458 // undefined because generator functions are non-constructable.
459 __ Move(a3, a1);
460 __ Move(a1, a4);
461 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
462 __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
463 __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
464 __ Jump(a2);
465 }
466
467 __ bind(&prepare_step_in_if_stepping);
468 {
469 FrameScope scope(masm, StackFrame::INTERNAL);
470 __ Push(a1, a4);
471 // Push hole as receiver since we do not use it for stepping.
472 __ PushRoot(RootIndex::kTheHoleValue);
473 __ CallRuntime(Runtime::kDebugOnFunctionCall);
474 __ Pop(a1);
475 }
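  // USE_DELAY_SLOT places the following load in the branch delay slot, so a4
  // is reloaded with the generator's function before execution continues at
  // stepping_prepared.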
476 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
477 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
478
479 __ bind(&prepare_step_in_suspended_generator);
480 {
481 FrameScope scope(masm, StackFrame::INTERNAL);
482 __ Push(a1);
483 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
484 __ Pop(a1);
485 }
486 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
487 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
488
489 __ bind(&stack_overflow);
490 {
491 FrameScope scope(masm, StackFrame::INTERNAL);
492 __ CallRuntime(Runtime::kThrowStackOverflow);
493 __ break_(0xCC); // This should be unreachable.
494 }
495 }
496
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
498 FrameScope scope(masm, StackFrame::INTERNAL);
499 __ Push(a1);
500 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
501 }
502
503 // Clobbers scratch1 and scratch2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
505 Register scratch1, Register scratch2) {
506 // Check the stack for overflow. We are not trying to catch
507 // interruptions (e.g. debug break and preemption) here, so the "real stack
508 // limit" is checked.
509 Label okay;
510 __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
  // Make scratch1 the space we have left. The stack might already be
  // overflowed here, which will cause scratch1 to become negative.
513 __ dsubu(scratch1, sp, scratch1);
514 // Check if the arguments will overflow the stack.
515 __ dsll(scratch2, argc, kPointerSizeLog2);
516 __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
517
518 // Out of stack space.
519 __ CallRuntime(Runtime::kThrowStackOverflow);
520
521 __ bind(&okay);
522 }
523
524 namespace {
525
526 // Called with the native C calling convention. The corresponding function
527 // signature is either:
528 //
529 // using JSEntryFunction = GeneratedCode<Address(
530 // Address root_register_value, Address new_target, Address target,
531 // Address receiver, intptr_t argc, Address** args)>;
532 // or
533 // using JSEntryFunction = GeneratedCode<Address(
534 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
536 Builtin entry_trampoline) {
537 Label invoke, handler_entry, exit;
538
539 {
540 NoRootArrayScope no_root_array(masm);
541
542 // TODO(plind): unify the ABI description here.
543 // Registers:
544 // either
545 // a0: root register value
546 // a1: entry address
547 // a2: function
548 // a3: receiver
549 // a4: argc
550 // a5: argv
551 // or
552 // a0: root register value
553 // a1: microtask_queue
554 //
555 // Stack:
    //  0 arg slots on mips64 (4 arg slots on mips)
557
558 // Save callee saved registers on the stack.
559 __ MultiPush(kCalleeSaved | ra);
560
561 // Save callee-saved FPU registers.
562 __ MultiPushFPU(kCalleeSavedFPU);
563 // Set up the reserved register for 0.0.
564 __ Move(kDoubleRegZero, 0.0);
565
566 // Initialize the root register.
567 // C calling convention. The first argument is passed in a0.
568 __ mov(kRootRegister, a0);
569 }
570
571 // a1: entry address
572 // a2: function
573 // a3: receiver
574 // a4: argc
575 // a5: argv
576
577 // We build an EntryFrame.
578 __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
579 __ li(s2, Operand(StackFrame::TypeToMarker(type)));
580 __ li(s3, Operand(StackFrame::TypeToMarker(type)));
581 ExternalReference c_entry_fp = ExternalReference::Create(
582 IsolateAddressId::kCEntryFPAddress, masm->isolate());
583 __ li(s5, c_entry_fp);
584 __ Ld(s4, MemOperand(s5));
585 __ Push(s1, s2, s3, s4);
586
587 // Clear c_entry_fp, now we've pushed its previous value to the stack.
588 // If the c_entry_fp is not already zero and we don't clear it, the
589 // SafeStackFrameIterator will assume we are executing C++ and miss the JS
590 // frames on top.
591 __ Sd(zero_reg, MemOperand(s5));
592
593 // Set up frame pointer for the frame to be pushed.
594 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
595
596 // Registers:
597 // either
598 // a1: entry address
599 // a2: function
600 // a3: receiver
601 // a4: argc
602 // a5: argv
603 // or
604 // a1: microtask_queue
605 //
606 // Stack:
607 // caller fp |
608 // function slot | entry frame
609 // context slot |
610 // bad fp (0xFF...F) |
611 // callee saved registers + ra
612 // [ O32: 4 args slots]
613 // args
614
615 // If this is the outermost JS call, set js_entry_sp value.
616 Label non_outermost_js;
617 ExternalReference js_entry_sp = ExternalReference::Create(
618 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
619 __ li(s1, js_entry_sp);
620 __ Ld(s2, MemOperand(s1));
621 __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
622 __ Sd(fp, MemOperand(s1));
623 __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
624 Label cont;
625 __ b(&cont);
626 __ nop(); // Branch delay slot nop.
627 __ bind(&non_outermost_js);
628 __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
629 __ bind(&cont);
630 __ push(s3);
631
632 // Jump to a faked try block that does the invoke, with a faked catch
633 // block that sets the pending exception.
634 __ jmp(&invoke);
635 __ bind(&handler_entry);
636
637 // Store the current pc as the handler offset. It's used later to create the
638 // handler table.
639 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
640
641 // Caught exception: Store result (exception) in the pending exception
642 // field in the JSEnv and return a failure sentinel. Coming in here the
643 // fp will be invalid because the PushStackHandler below sets it to 0 to
644 // signal the existence of the JSEntry frame.
645 __ li(s1, ExternalReference::Create(
646 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
647 __ Sd(v0, MemOperand(s1)); // We come back from 'invoke'. result is in v0.
648 __ LoadRoot(v0, RootIndex::kException);
649 __ b(&exit); // b exposes branch delay slot.
650 __ nop(); // Branch delay slot nop.
651
652 // Invoke: Link this frame into the handler chain.
653 __ bind(&invoke);
654 __ PushStackHandler();
655 // If an exception not caught by another handler occurs, this handler
656 // returns control to the code after the bal(&invoke) above, which
657 // restores all kCalleeSaved registers (including cp and fp) to their
658 // saved values before returning a failure to C.
659 //
660 // Registers:
661 // either
662 // a0: root register value
663 // a1: entry address
664 // a2: function
665 // a3: receiver
666 // a4: argc
667 // a5: argv
668 // or
669 // a0: root register value
670 // a1: microtask_queue
671 //
672 // Stack:
673 // handler frame
674 // entry frame
675 // callee saved registers + ra
676 // [ O32: 4 args slots]
677 // args
678 //
679 // Invoke the function by calling through JS entry trampoline builtin and
680 // pop the faked function when we return.
681
682 Handle<Code> trampoline_code =
683 masm->isolate()->builtins()->code_handle(entry_trampoline);
684 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
685
686 // Unlink this frame from the handler chain.
687 __ PopStackHandler();
688
689 __ bind(&exit); // v0 holds result
690 // Check if the current stack frame is marked as the outermost JS frame.
691 Label non_outermost_js_2;
692 __ pop(a5);
693 __ Branch(&non_outermost_js_2, ne, a5,
694 Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
695 __ li(a5, js_entry_sp);
696 __ Sd(zero_reg, MemOperand(a5));
697 __ bind(&non_outermost_js_2);
698
699 // Restore the top frame descriptors from the stack.
700 __ pop(a5);
701 __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
702 masm->isolate()));
703 __ Sd(a5, MemOperand(a4));
704
705 // Reset the stack to the callee saved registers.
706 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
707
708 // Restore callee-saved fpu registers.
709 __ MultiPopFPU(kCalleeSavedFPU);
710
711 // Restore callee saved registers from the stack.
712 __ MultiPop(kCalleeSaved | ra);
713 // Return.
714 __ Jump(ra);
715 }
716
717 } // namespace
718
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
720 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
721 }
722
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
724 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
725 Builtin::kJSConstructEntryTrampoline);
726 }
727
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
729 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
730 Builtin::kRunMicrotasksTrampoline);
731 }
732
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
734 bool is_construct) {
735 // ----------- S t a t e -------------
736 // -- a1: new.target
737 // -- a2: function
738 // -- a3: receiver_pointer
739 // -- a4: argc
740 // -- a5: argv
741 // -----------------------------------
742
743 // Enter an internal frame.
744 {
745 FrameScope scope(masm, StackFrame::INTERNAL);
746
747 // Setup the context (we need to use the caller context from the isolate).
748 ExternalReference context_address = ExternalReference::Create(
749 IsolateAddressId::kContextAddress, masm->isolate());
750 __ li(cp, context_address);
751 __ Ld(cp, MemOperand(cp));
752
753 // Push the function onto the stack.
754 __ Push(a2);
755
756 // Check if we have enough stack space to push all arguments.
757 __ mov(a6, a4);
758 Generate_CheckStackOverflow(masm, a6, a0, s2);
759
760 // Copy arguments to the stack.
761 // a4: argc
762 // a5: argv, i.e. points to first arg
763 Generate_PushArguments(masm, a5, a4, s1, s2, ArgumentsElementType::kHandle);
764
    // Push the receiver.
766 __ Push(a3);
767
768 // a0: argc
769 // a1: function
770 // a3: new.target
771 __ mov(a3, a1);
772 __ mov(a1, a2);
773 __ mov(a0, a4);
774
775 // Initialize all JavaScript callee-saved registers, since they will be seen
776 // by the garbage collector as part of handlers.
777 __ LoadRoot(a4, RootIndex::kUndefinedValue);
778 __ mov(a5, a4);
779 __ mov(s1, a4);
780 __ mov(s2, a4);
781 __ mov(s3, a4);
782 __ mov(s4, a4);
783 __ mov(s5, a4);
784 // s6 holds the root address. Do not clobber.
785 // s7 is cp. Do not init.
786
787 // Invoke the code.
788 Handle<Code> builtin = is_construct
789 ? BUILTIN_CODE(masm->isolate(), Construct)
790 : masm->isolate()->builtins()->Call();
791 __ Call(builtin, RelocInfo::CODE_TARGET);
792
793 // Leave internal frame.
794 }
795 __ Jump(ra);
796 }
797
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
799 Generate_JSEntryTrampolineHelper(masm, false);
800 }
801
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
803 Generate_JSEntryTrampolineHelper(masm, true);
804 }
805
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
807 // a1: microtask_queue
808 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
809 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
810 }
811
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
813 Register optimized_code,
814 Register closure,
815 Register scratch1,
816 Register scratch2) {
817 DCHECK(!AreAliased(optimized_code, closure, scratch1, scratch2));
818 // Store code entry in the closure.
819 __ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
820 __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
821 __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
822 kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
823 RememberedSetAction::kOmit, SmiCheck::kOmit);
824 }
825
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
827 Register scratch2) {
828 Register params_size = scratch1;
829
830 // Get the size of the formal parameters + receiver (in bytes).
831 __ Ld(params_size,
832 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
833 __ Lw(params_size,
834 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
835
836 Register actual_params_size = scratch2;
837 // Compute the size of the actual parameters + receiver (in bytes).
838 __ Ld(actual_params_size,
839 MemOperand(fp, StandardFrameConstants::kArgCOffset));
840 __ dsll(actual_params_size, actual_params_size, kPointerSizeLog2);
841
842 // If actual is bigger than formal, then we should use it to free up the stack
843 // arguments.
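  // Branchless max: t2 = (params_size < actual_params_size), and movn replaces
  // params_size with actual_params_size whenever t2 is non-zero.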
844 __ slt(t2, params_size, actual_params_size);
845 __ movn(params_size, actual_params_size, t2);
846
847 // Leave the frame (also dropping the register file).
848 __ LeaveFrame(StackFrame::INTERPRETED);
849
850 // Drop receiver + arguments.
851 __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
852 TurboAssembler::kCountIncludesReceiver);
853 }
854
855 // Tail-call |function_id| if |actual_state| == |expected_state|
static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
857 Register actual_state,
858 TieringState expected_state,
859 Runtime::FunctionId function_id) {
860 Label no_match;
861 __ Branch(&no_match, ne, actual_state,
862 Operand(static_cast<int>(expected_state)));
863 GenerateTailCallToReturnedCode(masm, function_id);
864 __ bind(&no_match);
865 }
866
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
868 Register optimized_code_entry,
869 Register scratch1, Register scratch2) {
870 // ----------- S t a t e -------------
871 // -- a0 : actual argument count
872 // -- a3 : new target (preserved for callee if needed, and caller)
873 // -- a1 : target function (preserved for callee if needed, and caller)
874 // -----------------------------------
875 DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
876
877 Register closure = a1;
878 Label heal_optimized_code_slot;
879
880 // If the optimized code is cleared, go to runtime to update the optimization
881 // marker field.
882 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
883 &heal_optimized_code_slot);
884
885 // Check if the optimized code is marked for deopt. If it is, call the
886 // runtime to clear it.
887 __ Ld(scratch1,
888 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
889 __ Lw(scratch1,
890 FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
891 __ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
892 __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
893
894 // Optimized code is good, get it into the closure and link the closure into
895 // the optimized functions list, then tail call the optimized code.
896 // The feedback vector is no longer used, so re-use it as a scratch
897 // register.
898 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
899 scratch1, scratch2);
900
901 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
902 __ Daddu(a2, optimized_code_entry,
903 Operand(Code::kHeaderSize - kHeapObjectTag));
904 __ Jump(a2);
905
906 // Optimized code slot contains deoptimized code or code is cleared and
907 // optimized code marker isn't updated. Evict the code, update the marker
908 // and re-enter the closure's code.
909 __ bind(&heal_optimized_code_slot);
910 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
911 }
912
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
914 Register tiering_state) {
915 // ----------- S t a t e -------------
916 // -- a0 : actual argument count
917 // -- a3 : new target (preserved for callee if needed, and caller)
918 // -- a1 : target function (preserved for callee if needed, and caller)
919 // -- feedback vector (preserved for caller if needed)
  //  -- tiering_state : an int32 containing a non-zero optimization
921 // marker.
922 // -----------------------------------
923 DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
924
925 TailCallRuntimeIfStateEquals(masm, tiering_state,
926 TieringState::kRequestTurbofan_Synchronous,
927 Runtime::kCompileTurbofan_Synchronous);
928 TailCallRuntimeIfStateEquals(masm, tiering_state,
929 TieringState::kRequestTurbofan_Concurrent,
930 Runtime::kCompileTurbofan_Concurrent);
931
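  // The tiering state did not match any request handled above; this is not
  // expected, so trap.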
932 __ stop();
933 }
934
935 // Advance the current bytecode offset. This simulates what all bytecode
936 // handlers do upon completion of the underlying operation. Will bail out to a
937 // label if the bytecode (without prefix) is a return bytecode. Will not advance
938 // the bytecode offset if the current bytecode is a JumpLoop, instead just
939 // re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
941 Register bytecode_array,
942 Register bytecode_offset,
943 Register bytecode, Register scratch1,
944 Register scratch2, Register scratch3,
945 Label* if_return) {
946 Register bytecode_size_table = scratch1;
947
948 // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
  // will restore the original bytecode offset. In order to simplify the code,
  // we keep a backup of it in |original_bytecode_offset|.
952 Register original_bytecode_offset = scratch3;
953 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
954 bytecode_size_table, original_bytecode_offset));
955 __ Move(original_bytecode_offset, bytecode_offset);
956 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
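  // The size table consists of consecutive sub-tables with
  // interpreter::Bytecodes::kBytecodeCount one-byte entries each, one
  // sub-table per operand scale; the prefix handling below advances the table
  // pointer by one or two sub-table lengths for Wide or ExtraWide bytecodes.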
957
958 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
959 Label process_bytecode, extra_wide;
960 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
961 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
962 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
963 STATIC_ASSERT(3 ==
964 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
965 __ Branch(&process_bytecode, hi, bytecode, Operand(3));
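  // Per the static asserts above, the wide prefixes (0 and 2) are even and the
  // extra-wide prefixes (1 and 3) are odd, so the low bit of the bytecode
  // distinguishes the two cases.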
966 __ And(scratch2, bytecode, Operand(1));
967 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
968
969 // Load the next bytecode and update table to the wide scaled table.
970 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
971 __ Daddu(scratch2, bytecode_array, bytecode_offset);
972 __ Lbu(bytecode, MemOperand(scratch2));
973 __ Daddu(bytecode_size_table, bytecode_size_table,
974 Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
975 __ jmp(&process_bytecode);
976
977 __ bind(&extra_wide);
978 // Load the next bytecode and update table to the extra wide scaled table.
979 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
980 __ Daddu(scratch2, bytecode_array, bytecode_offset);
981 __ Lbu(bytecode, MemOperand(scratch2));
982 __ Daddu(bytecode_size_table, bytecode_size_table,
983 Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
984
985 __ bind(&process_bytecode);
986
987 // Bailout to the return label if this is a return bytecode.
988 #define JUMP_IF_EQUAL(NAME) \
989 __ Branch(if_return, eq, bytecode, \
990 Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
991 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
992 #undef JUMP_IF_EQUAL
993
994 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
995 // of the loop.
996 Label end, not_jump_loop;
  __ Branch(&not_jump_loop, ne, bytecode,
998 Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
999 // We need to restore the original bytecode_offset since we might have
1000 // increased it to skip the wide / extra-wide prefix bytecode.
1001 __ Move(bytecode_offset, original_bytecode_offset);
1002 __ jmp(&end);
1003
  __ bind(&not_jump_loop);
1005 // Otherwise, load the size of the current bytecode and advance the offset.
1006 __ Daddu(scratch2, bytecode_size_table, bytecode);
1007 __ Lb(scratch2, MemOperand(scratch2));
1008 __ Daddu(bytecode_offset, bytecode_offset, scratch2);
1009
1010 __ bind(&end);
1011 }
1012
1013 // Read off the optimization state in the feedback vector and check if there
1014 // is optimized code or a tiering state that needs to be processed.
static void LoadTieringStateAndJumpIfNeedsProcessing(
1016 MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1017 Label* has_optimized_code_or_state) {
1018 ASM_CODE_COMMENT(masm);
1019 Register scratch = t2;
1020 __ Lw(optimization_state,
1021 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1022 __ And(
1023 scratch, optimization_state,
1024 Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1025 __ Branch(has_optimized_code_or_state, ne, scratch, Operand(zero_reg));
1026 }
1027
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1029 MacroAssembler* masm, Register optimization_state,
1030 Register feedback_vector) {
1031 ASM_CODE_COMMENT(masm);
1032 Label maybe_has_optimized_code;
  // Check if there is a tiering request pending.
1034 {
1035 UseScratchRegisterScope temps(masm);
1036 Register scratch = temps.Acquire();
1037 __ And(scratch, optimization_state,
1038 Operand(FeedbackVector::kTieringStateIsAnyRequestMask));
1039 __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
1040 }
1041
1042 Register tiering_state = optimization_state;
1043 __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1044 MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1045
1046 __ bind(&maybe_has_optimized_code);
1047 Register optimized_code_entry = optimization_state;
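  // tiering_state, optimization_state and optimized_code_entry all alias the
  // same register; the weak reference to the cached optimized code is loaded
  // into it before the tail call below.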
1048 __ Ld(tiering_state,
1049 FieldMemOperand(feedback_vector,
1050 FeedbackVector::kMaybeOptimizedCodeOffset));
1051 TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
1052 }
1053
1054 namespace {
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
1056 Register bytecode_array) {
1057 // Reset code age and the OSR state (optimized to a single write).
1058 static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
1059 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1060 __ Sw(zero_reg,
1061 FieldMemOperand(bytecode_array,
1062 BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
1063 }
1064
1065 } // namespace
1066
1067 // static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1069 UseScratchRegisterScope temps(masm);
1070 temps.Include({s1, s2});
1071 auto descriptor =
1072 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1073 Register closure = descriptor.GetRegisterParameter(
1074 BaselineOutOfLinePrologueDescriptor::kClosure);
1075 // Load the feedback vector from the closure.
1076 Register feedback_vector = temps.Acquire();
1077 __ Ld(feedback_vector,
1078 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1079 __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1080 if (FLAG_debug_code) {
1081 UseScratchRegisterScope temps(masm);
1082 Register scratch = temps.Acquire();
1083 __ GetObjectType(feedback_vector, scratch, scratch);
1084 __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
1085 Operand(FEEDBACK_VECTOR_TYPE));
1086 }
  // Check for a tiering state.
1088 Label has_optimized_code_or_state;
1089 Register optimization_state = no_reg;
1090 {
1091 UseScratchRegisterScope temps(masm);
1092 optimization_state = temps.Acquire();
    // optimization_state will be used only in |has_optimized_code_or_state|;
    // outside of that path the register can be reused.
1095 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1096 feedback_vector,
1097 &has_optimized_code_or_state);
1098 }
1099 // Increment invocation count for the function.
1100 {
1101 UseScratchRegisterScope temps(masm);
1102 Register invocation_count = temps.Acquire();
1103 __ Lw(invocation_count,
1104 FieldMemOperand(feedback_vector,
1105 FeedbackVector::kInvocationCountOffset));
1106 __ Addu(invocation_count, invocation_count, Operand(1));
1107 __ Sw(invocation_count,
1108 FieldMemOperand(feedback_vector,
1109 FeedbackVector::kInvocationCountOffset));
1110 }
1111
1112 FrameScope frame_scope(masm, StackFrame::MANUAL);
1113 {
1114 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1115 // Normally the first thing we'd do here is Push(ra, fp), but we already
1116 // entered the frame in BaselineCompiler::Prologue, as we had to use the
    // value of ra before the call to this BaselineOutOfLinePrologue builtin.
1118 Register callee_context = descriptor.GetRegisterParameter(
1119 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1120 Register callee_js_function = descriptor.GetRegisterParameter(
1121 BaselineOutOfLinePrologueDescriptor::kClosure);
1122 __ Push(callee_context, callee_js_function);
1123 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1124 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1125
1126 Register argc = descriptor.GetRegisterParameter(
1127 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1128 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1129 // the frame, so load it into a register.
1130 Register bytecode_array = descriptor.GetRegisterParameter(
1131 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1132 ResetBytecodeAgeAndOsrState(masm, bytecode_array);
1133 __ Push(argc, bytecode_array);
1134
1135 // Baseline code frames store the feedback vector where interpreter would
1136 // store the bytecode offset.
1137 if (FLAG_debug_code) {
1138 UseScratchRegisterScope temps(masm);
1139 Register invocation_count = temps.Acquire();
1140 __ GetObjectType(feedback_vector, invocation_count, invocation_count);
1141 __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
1142 Operand(FEEDBACK_VECTOR_TYPE));
1143 }
    // Our stack is currently aligned. We have to push something along with
1145 // the feedback vector to keep it that way -- we may as well start
1146 // initialising the register frame.
1147 // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
1148 // `undefined` in the accumulator register, to skip the load in the baseline
1149 // code.
1150 __ Push(feedback_vector);
1151 }
1152
1153 Label call_stack_guard;
1154 Register frame_size = descriptor.GetRegisterParameter(
1155 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1156 {
1157 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1158 // Stack check. This folds the checks for both the interrupt stack limit
1159 // check and the real stack limit into one by just checking for the
1160 // interrupt limit. The interrupt limit is either equal to the real stack
1161 // limit or tighter. By ensuring we have space until that limit after
1162 // building the frame we can quickly precheck both at once.
1163 UseScratchRegisterScope temps(masm);
1164 Register sp_minus_frame_size = temps.Acquire();
1165 __ Dsubu(sp_minus_frame_size, sp, frame_size);
1166 Register interrupt_limit = temps.Acquire();
1167 __ LoadStackLimit(interrupt_limit,
1168 MacroAssembler::StackLimitKind::kInterruptStackLimit);
1169 __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
1170 Operand(interrupt_limit));
1171 }
1172
1173 // Do "fast" return to the caller pc in ra.
1174 // TODO(v8:11429): Document this frame setup better.
1175 __ Ret();
1176
1177 __ bind(&has_optimized_code_or_state);
1178 {
1179 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1180 UseScratchRegisterScope temps(masm);
1181 temps.Exclude(optimization_state);
1182 // Ensure the optimization_state is not allocated again.
1183 // Drop the frame created by the baseline call.
1184 __ Pop(ra, fp);
1185 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1186 feedback_vector);
1187 __ Trap();
1188 }
1189
1190 __ bind(&call_stack_guard);
1191 {
1192 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1193 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1194 // Save incoming new target or generator
1195 __ Push(kJavaScriptCallNewTargetRegister);
1196 __ SmiTag(frame_size);
1197 __ Push(frame_size);
1198 __ CallRuntime(Runtime::kStackGuardWithGap);
1199 __ Pop(kJavaScriptCallNewTargetRegister);
1200 }
1201 __ Ret();
1202 temps.Exclude({kScratchReg, kScratchReg2});
1203 }
1204
1205 // Generate code for entering a JS function with the interpreter.
1206 // On entry to the function the receiver and arguments have been pushed on the
1207 // stack left to right.
1208 //
1209 // The live registers are:
1210 // o a0 : actual argument count
1211 // o a1: the JS function object being called.
1212 // o a3: the incoming new target or generator object
1213 // o cp: our context
1214 // o fp: the caller's frame pointer
1215 // o sp: stack pointer
1216 // o ra: return address
1217 //
1218 // The function builds an interpreter frame. See InterpreterFrameConstants in
1219 // frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1221 Register closure = a1;
1222 Register feedback_vector = a2;
1223
1224 // Get the bytecode array from the function object and load it into
1225 // kInterpreterBytecodeArrayRegister.
1226 __ Ld(kScratchReg,
1227 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1228 __ Ld(kInterpreterBytecodeArrayRegister,
1229 FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
1230 Label is_baseline;
1231 GetSharedFunctionInfoBytecodeOrBaseline(
1232 masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
1233
1234 // The bytecode array could have been flushed from the shared function info,
1235 // if so, call into CompileLazy.
1236 Label compile_lazy;
1237 __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1238 __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1239
1240 // Load the feedback vector from the closure.
1241 __ Ld(feedback_vector,
1242 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1243 __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1244
1245 Label push_stack_frame;
1246 // Check if feedback vector is valid. If valid, check for optimized code
1247 // and update invocation count. Otherwise, setup the stack frame.
1248 __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1249 __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
1250 __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
1251
1252 // Read off the optimization state in the feedback vector, and if there
  // is optimized code or a tiering state, call that instead.
1254 Register optimization_state = a4;
1255 __ Lw(optimization_state,
1256 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1257
1258 // Check if the optimized code slot is not empty or has a tiering state.
1259 Label has_optimized_code_or_state;
1260
1261 __ andi(t0, optimization_state,
1262 FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask);
1263 __ Branch(&has_optimized_code_or_state, ne, t0, Operand(zero_reg));
1264
1265 Label not_optimized;
  __ bind(&not_optimized);
1267
1268 // Increment invocation count for the function.
1269 __ Lw(a4, FieldMemOperand(feedback_vector,
1270 FeedbackVector::kInvocationCountOffset));
1271 __ Addu(a4, a4, Operand(1));
1272 __ Sw(a4, FieldMemOperand(feedback_vector,
1273 FeedbackVector::kInvocationCountOffset));
1274
1275 // Open a frame scope to indicate that there is a frame on the stack. The
1276 // MANUAL indicates that the scope shouldn't actually generate code to set up
1277 // the frame (that is done below).
1278 __ bind(&push_stack_frame);
1279 FrameScope frame_scope(masm, StackFrame::MANUAL);
1280 __ PushStandardFrame(closure);
1281
1282 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
1283
1284 // Load initial bytecode offset.
1285 __ li(kInterpreterBytecodeOffsetRegister,
1286 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
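  // The bytecode offset is kept relative to the tagged BytecodeArray pointer,
  // so the first bytecode lives at kHeaderSize - kHeapObjectTag; the dispatch
  // sequence below simply adds the two registers to address the current
  // bytecode.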
1287
1288 // Push bytecode array and Smi tagged bytecode array offset.
1289 __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
1290 __ Push(kInterpreterBytecodeArrayRegister, a4);
1291
1292 // Allocate the local and temporary register file on the stack.
1293 Label stack_overflow;
1294 {
1295 // Load frame size (word) from the BytecodeArray object.
1296 __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1297 BytecodeArray::kFrameSizeOffset));
1298
1299 // Do a stack check to ensure we don't go over the limit.
1300 __ Dsubu(a5, sp, Operand(a4));
1301 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1302 __ Branch(&stack_overflow, lo, a5, Operand(a2));
1303
1304 // If ok, push undefined as the initial value for all register file entries.
1305 Label loop_header;
1306 Label loop_check;
1307 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1308 __ Branch(&loop_check);
1309 __ bind(&loop_header);
1310 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1311 __ push(kInterpreterAccumulatorRegister);
1312 // Continue loop if not done.
1313 __ bind(&loop_check);
1314 __ Dsubu(a4, a4, Operand(kPointerSize));
1315 __ Branch(&loop_header, ge, a4, Operand(zero_reg));
1316 }
1317
1318 // If the bytecode array has a valid incoming new target or generator object
1319 // register, initialize it with incoming value which was passed in r3.
1320 Label no_incoming_new_target_or_generator_register;
1321 __ Lw(a5, FieldMemOperand(
1322 kInterpreterBytecodeArrayRegister,
1323 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1324 __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1325 Operand(zero_reg));
1326 __ Dlsa(a5, fp, a5, kPointerSizeLog2);
1327 __ Sd(a3, MemOperand(a5));
1328 __ bind(&no_incoming_new_target_or_generator_register);
1329
1330 // Perform interrupt stack check.
1331 // TODO(solanes): Merge with the real stack limit check above.
1332 Label stack_check_interrupt, after_stack_check_interrupt;
1333 __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1334 __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1335 __ bind(&after_stack_check_interrupt);
1336
1337 // The accumulator is already loaded with undefined.
1338
1339 // Load the dispatch table into a register and dispatch to the bytecode
1340 // handler at the current bytecode offset.
1341 Label do_dispatch;
1342 __ bind(&do_dispatch);
1343 __ li(kInterpreterDispatchTableRegister,
1344 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1345 __ Daddu(a0, kInterpreterBytecodeArrayRegister,
1346 kInterpreterBytecodeOffsetRegister);
1347 __ Lbu(a7, MemOperand(a0));
1348 __ Dlsa(kScratchReg, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
1349 __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
1350 __ Call(kJavaScriptCallCodeStartRegister);
1351 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1352
1353 // Any returns to the entry trampoline are either due to the return bytecode
1354 // or the interpreter tail calling a builtin and then a dispatch.
1355
1356 // Get bytecode array and bytecode offset from the stack frame.
1357 __ Ld(kInterpreterBytecodeArrayRegister,
1358 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1359 __ Ld(kInterpreterBytecodeOffsetRegister,
1360 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1361 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1362
1363 // Either return, or advance to the next bytecode and dispatch.
1364 Label do_return;
1365 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1366 kInterpreterBytecodeOffsetRegister);
1367 __ Lbu(a1, MemOperand(a1));
1368 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1369 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1370 a4, &do_return);
1371 __ jmp(&do_dispatch);
1372
1373 __ bind(&do_return);
1374 // The return value is in v0.
1375 LeaveInterpreterFrame(masm, t0, t1);
1376 __ Jump(ra);
1377
1378 __ bind(&stack_check_interrupt);
1379 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1380 // for the call to the StackGuard.
1381 __ li(kInterpreterBytecodeOffsetRegister,
1382 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1383 kFunctionEntryBytecodeOffset)));
1384 __ Sd(kInterpreterBytecodeOffsetRegister,
1385 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1386 __ CallRuntime(Runtime::kStackGuard);
1387
1388 // After the call, restore the bytecode array, bytecode offset and accumulator
1389 // registers again. Also, restore the bytecode offset in the stack to its
1390 // previous value.
1391 __ Ld(kInterpreterBytecodeArrayRegister,
1392 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1393 __ li(kInterpreterBytecodeOffsetRegister,
1394 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1395 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1396
1397 __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
1398 __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1399
1400 __ jmp(&after_stack_check_interrupt);
1401
1402 __ bind(&has_optimized_code_or_state);
1403 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1404 feedback_vector);
1405 __ bind(&is_baseline);
1406 {
1407 // Load the feedback vector from the closure.
1408 __ Ld(feedback_vector,
1409 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1410 __ Ld(feedback_vector,
1411 FieldMemOperand(feedback_vector, Cell::kValueOffset));
1412
1413 Label install_baseline_code;
1414 // Check if feedback vector is valid. If not, call prepare for baseline to
1415 // allocate it.
1416 __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1417 __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1418 __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1419
1420     // Check for a tiering state.
1421 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1422 feedback_vector,
1423 &has_optimized_code_or_state);
1424
1425 // Load the baseline code into the closure.
1426 __ Move(a2, kInterpreterBytecodeArrayRegister);
1427 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
1428 ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
1429 __ JumpCodeObject(a2);
1430
1431 __ bind(&install_baseline_code);
1432 GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1433 }
1434 __ bind(&compile_lazy);
1435 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1436 // Unreachable code.
1437 __ break_(0xCC);
1438
1439 __ bind(&stack_overflow);
1440 __ CallRuntime(Runtime::kThrowStackOverflow);
1441 // Unreachable code.
1442 __ break_(0xCC);
1443 }
1444
1445 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1446 Register start_address,
1447 Register scratch, Register scratch2) {
1448 // Find the address of the last argument.
1449 __ Dsubu(scratch, num_args, Operand(1));
1450 __ dsll(scratch, scratch, kPointerSizeLog2);
1451 __ Dsubu(start_address, start_address, scratch);
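  // start_address now points at the last argument to be pushed.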
1452
1453 // Push the arguments.
1454 __ PushArray(start_address, num_args, scratch, scratch2,
1455 TurboAssembler::PushArrayOrder::kReverse);
1456 }
1457
1458 // static
1459 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1460 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1461 InterpreterPushArgsMode mode) {
1462 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1463 // ----------- S t a t e -------------
1464 // -- a0 : the number of arguments
1465 // -- a2 : the address of the first argument to be pushed. Subsequent
1466 // arguments should be consecutive above this, in the same order as
1467 // they are to be pushed onto the stack.
1468 // -- a1 : the target to call (can be any Object).
1469 // -----------------------------------
1470 Label stack_overflow;
1471 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1472 // The spread argument should not be pushed.
1473 __ Dsubu(a0, a0, Operand(1));
1474 }
1475
1476 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1477 __ Dsubu(a3, a0, Operand(kJSArgcReceiverSlots));
1478 } else {
1479 __ mov(a3, a0);
1480 }
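  // a3 holds the number of arguments to copy. When the receiver is implicitly
  // undefined it is not among the copied arguments; undefined is pushed for it
  // separately below.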
1481
1482 __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1483
1484 // This function modifies a2, t0 and a4.
1485 GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
1486
1487 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1488 __ PushRoot(RootIndex::kUndefinedValue);
1489 }
1490
1491 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1492 // Pass the spread in the register a2.
1493     // a2 already points to the penultimate argument, the spread
1494 // is below that.
1495 __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
1496 }
1497
1498 // Call the target.
1499 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1500 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1501 RelocInfo::CODE_TARGET);
1502 } else {
1503 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1504 RelocInfo::CODE_TARGET);
1505 }
1506
1507 __ bind(&stack_overflow);
1508 {
1509 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1510 // Unreachable code.
1511 __ break_(0xCC);
1512 }
1513 }
1514
1515 // static
1516 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1517 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1518 // ----------- S t a t e -------------
1519 // -- a0 : argument count
1520 // -- a3 : new target
1521 // -- a1 : constructor to call
1522 // -- a2 : allocation site feedback if available, undefined otherwise.
1523 // -- a4 : address of the first argument
1524 // -----------------------------------
1525 Label stack_overflow;
1526 __ StackOverflowCheck(a0, a5, t0, &stack_overflow);
1527
1528 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1529 // The spread argument should not be pushed.
1530 __ Dsubu(a0, a0, Operand(1));
1531 }
1532
1533 Register argc_without_receiver = a6;
1534 __ Dsubu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
1535   // Push the arguments. This function modifies t0, a4 and a5.
1536 GenerateInterpreterPushArgs(masm, argc_without_receiver, a4, a5, t0);
1537
1538 // Push a slot for the receiver.
1539 __ push(zero_reg);
1540
1541 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1542 // Pass the spread in the register a2.
1543 // a4 already points to the penultimate argument, the spread
1544 // lies in the next interpreter register.
1545 __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
1546 } else {
1547 __ AssertUndefinedOrAllocationSite(a2, t0);
1548 }
1549
1550 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1551 __ AssertFunction(a1);
1552
1553 // Tail call to the function-specific construct stub (still in the caller
1554 // context at this point).
1555 __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1556 RelocInfo::CODE_TARGET);
1557 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1558 // Call the constructor with a0, a1, and a3 unmodified.
1559 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1560 RelocInfo::CODE_TARGET);
1561 } else {
1562 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1563 // Call the constructor with a0, a1, and a3 unmodified.
1564 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1565 }
1566
1567 __ bind(&stack_overflow);
1568 {
1569 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1570 // Unreachable code.
1571 __ break_(0xCC);
1572 }
1573 }
1574
1575 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1576 // Set the return address to the correct point in the interpreter entry
1577 // trampoline.
1578 Label builtin_trampoline, trampoline_loaded;
1579 Smi interpreter_entry_return_pc_offset(
1580 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1581 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1582
1583 // If the SFI function_data is an InterpreterData, the function will have a
1584 // custom copy of the interpreter entry trampoline for profiling. If so,
1585 // get the custom trampoline, otherwise grab the entry address of the global
1586 // trampoline.
1587 __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1588 __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1589 __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1590 __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1591 kInterpreterDispatchTableRegister);
1592 __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1593 Operand(INTERPRETER_DATA_TYPE));
1594
1595 __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1596 __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1597 __ Branch(&trampoline_loaded);
1598
1599 __ bind(&builtin_trampoline);
1600 __ li(t0, ExternalReference::
1601 address_of_interpreter_entry_trampoline_instruction_start(
1602 masm->isolate()));
1603 __ Ld(t0, MemOperand(t0));
1604
1605 __ bind(&trampoline_loaded);
1606 __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
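  // ra now points just past the Call in the interpreter entry trampoline (the
  // offset recorded via SetInterpreterEntryReturnPCOffset), so bytecode
  // handlers return into the trampoline's dispatch loop.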
1607
1608 // Initialize the dispatch table register.
1609 __ li(kInterpreterDispatchTableRegister,
1610 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1611
1612 // Get the bytecode array pointer from the frame.
1613 __ Ld(kInterpreterBytecodeArrayRegister,
1614 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1615
1616 if (FLAG_debug_code) {
1617 // Check function data field is actually a BytecodeArray object.
1618 __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1619 __ Assert(ne,
1620 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1621 kScratchReg, Operand(zero_reg));
1622 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1623 __ Assert(eq,
1624 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1625 a1, Operand(BYTECODE_ARRAY_TYPE));
1626 }
1627
1628 // Get the target bytecode offset from the frame.
1629 __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1630 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1631
1632 if (FLAG_debug_code) {
1633 Label okay;
1634 __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1635 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1636 // Unreachable code.
1637 __ break_(0xCC);
1638 __ bind(&okay);
1639 }
1640
1641 // Dispatch to the target bytecode.
1642 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1643 kInterpreterBytecodeOffsetRegister);
1644 __ Lbu(a7, MemOperand(a1));
1645 __ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
1646 __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1647 __ Jump(kJavaScriptCallCodeStartRegister);
1648 }
1649
1650 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1651 // Advance the current bytecode offset stored within the given interpreter
1652 // stack frame. This simulates what all bytecode handlers do upon completion
1653 // of the underlying operation.
1654 __ Ld(kInterpreterBytecodeArrayRegister,
1655 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1656 __ Ld(kInterpreterBytecodeOffsetRegister,
1657 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1658 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1659
1660 Label enter_bytecode, function_entry_bytecode;
1661 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1662 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1663 kFunctionEntryBytecodeOffset));
1664
1665 // Load the current bytecode.
1666 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1667 kInterpreterBytecodeOffsetRegister);
1668 __ Lbu(a1, MemOperand(a1));
1669
1670 // Advance to the next bytecode.
1671 Label if_return;
1672 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1673 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1674 a4, &if_return);
1675
1676 __ bind(&enter_bytecode);
1677 // Convert new bytecode offset to a Smi and save in the stackframe.
1678 __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1679 __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1680
1681 Generate_InterpreterEnterBytecode(masm);
1682
1683 __ bind(&function_entry_bytecode);
1684 // If the code deoptimizes during the implicit function entry stack interrupt
1685 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1686 // not a valid bytecode offset. Detect this case and advance to the first
1687 // actual bytecode.
1688 __ li(kInterpreterBytecodeOffsetRegister,
1689 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1690 __ Branch(&enter_bytecode);
1691
1692 // We should never take the if_return path.
1693 __ bind(&if_return);
1694 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1695 }
1696
1697 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1698 Generate_InterpreterEnterBytecode(masm);
1699 }
1700
1701 namespace {
1702 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1703 bool java_script_builtin,
1704 bool with_result) {
1705 const RegisterConfiguration* config(RegisterConfiguration::Default());
1706 int allocatable_register_count = config->num_allocatable_general_registers();
1707 UseScratchRegisterScope temps(masm);
1708 Register scratch = temps.Acquire();
1709
1710 if (with_result) {
1711 if (java_script_builtin) {
1712 __ mov(scratch, v0);
1713 } else {
1714 // Overwrite the hole inserted by the deoptimizer with the return value from
1715 // the LAZY deopt point.
1716 __ Sd(v0,
1717 MemOperand(
1718 sp, config->num_allocatable_general_registers() * kPointerSize +
1719 BuiltinContinuationFrameConstants::kFixedFrameSize));
1720 }
1721 }
1722 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1723 int code = config->GetAllocatableGeneralCode(i);
1724 __ Pop(Register::from_code(code));
1725 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1726 __ SmiUntag(Register::from_code(code));
1727 }
1728 }
1729
1730 if (with_result && java_script_builtin) {
1731 // Overwrite the hole inserted by the deoptimizer with the return value from
1732     // the LAZY deopt point. a0 contains the arguments count, the return value
1733 // from LAZY is always the last argument.
1734 constexpr int return_value_offset =
1735 BuiltinContinuationFrameConstants::kFixedSlotCount -
1736 kJSArgcReceiverSlots;
1737 __ Daddu(a0, a0, Operand(return_value_offset));
1738 __ Dlsa(t0, sp, a0, kSystemPointerSizeLog2);
1739 __ Sd(scratch, MemOperand(t0));
1740 // Recover arguments count.
1741 __ Dsubu(a0, a0, Operand(return_value_offset));
1742 }
1743
1744 __ Ld(fp, MemOperand(
1745 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1746 // Load builtin index (stored as a Smi) and use it to get the builtin start
1747 // address from the builtins table.
1748 __ Pop(t0);
1749 __ Daddu(sp, sp,
1750 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1751 __ Pop(ra);
1752 __ LoadEntryFromBuiltinIndex(t0);
1753 __ Jump(t0);
1754 }
1755 } // namespace
1756
1757 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1758 Generate_ContinueToBuiltinHelper(masm, false, false);
1759 }
1760
1761 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1762 MacroAssembler* masm) {
1763 Generate_ContinueToBuiltinHelper(masm, false, true);
1764 }
1765
1766 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1767 Generate_ContinueToBuiltinHelper(masm, true, false);
1768 }
1769
1770 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1771 MacroAssembler* masm) {
1772 Generate_ContinueToBuiltinHelper(masm, true, true);
1773 }
1774
1775 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1776 {
1777 FrameScope scope(masm, StackFrame::INTERNAL);
1778 __ CallRuntime(Runtime::kNotifyDeoptimized);
1779 }
1780
1781 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
1782 __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
1783 __ Ret(USE_DELAY_SLOT);
1784   // Safe to fill the delay slot: Daddu will emit one instruction.
1785 __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
1786 }
1787
1788 namespace {
1789
1790 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
1791 Operand offset = Operand(zero_reg)) {
1792 __ Daddu(ra, entry_address, offset);
1793 // And "return" to the OSR entry point of the function.
1794 __ Ret();
1795 }
1796
1797 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
1798 {
1799 FrameScope scope(masm, StackFrame::INTERNAL);
1800 __ CallRuntime(Runtime::kCompileOptimizedOSR);
1801 }
1802
1803 // If the code object is null, just return to the caller.
1804 __ Ret(eq, v0, Operand(Smi::zero()));
1805 if (is_interpreter) {
1806     // Drop the handler frame that is sitting on top of the actual
1807     // JavaScript frame. This is the case when OSR is triggered from bytecode.
1808 __ LeaveFrame(StackFrame::STUB);
1809 }
1810 // Load deoptimization data from the code object.
1811 // <deopt_data> = <code>[#deoptimization_data_offset]
1812 __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
1813 kHeapObjectTag));
1814
1815 // Load the OSR entrypoint offset from the deoptimization data.
1816 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1817 __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1818 DeoptimizationData::kOsrPcOffsetIndex) -
1819 kHeapObjectTag));
1820
1821 // Compute the target address = code_obj + header_size + osr_offset
1822 // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1823 __ Daddu(v0, v0, a1);
1824 Generate_OSREntry(masm, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
1825 }
1826 } // namespace
1827
1828 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1829 return OnStackReplacement(masm, true);
1830 }
1831
1832 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
1833 __ Ld(kContextRegister,
1834 MemOperand(fp, StandardFrameConstants::kContextOffset));
1835 return OnStackReplacement(masm, false);
1836 }
1837
1838 // static
1839 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1840 // ----------- S t a t e -------------
1841 // -- a0 : argc
1842 // -- sp[0] : receiver
1843   //  -- sp[8] : thisArg
1844   //  -- sp[16] : argArray
1845 // -----------------------------------
1846
1847 Register argc = a0;
1848 Register arg_array = a2;
1849 Register receiver = a1;
1850 Register this_arg = a5;
1851 Register undefined_value = a3;
1852 Register scratch = a4;
1853
1854 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1855
1856 // 1. Load receiver into a1, argArray into a2 (if present), remove all
1857 // arguments from the stack (including the receiver), and push thisArg (if
1858 // present) instead.
1859 {
1860 __ Dsubu(scratch, argc, JSParameterCount(0));
1861 __ Ld(this_arg, MemOperand(sp, kPointerSize));
1862 __ Ld(arg_array, MemOperand(sp, 2 * kPointerSize));
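    // Movz writes its second operand into its first only when the third is
    // zero, so each Movz below takes effect exactly when argc has the value
    // noted in its trailing comment.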
1863 __ Movz(arg_array, undefined_value, scratch); // if argc == 0
1864 __ Movz(this_arg, undefined_value, scratch); // if argc == 0
1865 __ Dsubu(scratch, scratch, Operand(1));
1866 __ Movz(arg_array, undefined_value, scratch); // if argc == 1
1867 __ Ld(receiver, MemOperand(sp));
1868 __ DropArgumentsAndPushNewReceiver(argc, this_arg,
1869 TurboAssembler::kCountIsInteger,
1870 TurboAssembler::kCountIncludesReceiver);
1871 }
1872
1873 // ----------- S t a t e -------------
1874 // -- a2 : argArray
1875 // -- a1 : receiver
1876 // -- a3 : undefined root value
1877 // -- sp[0] : thisArg
1878 // -----------------------------------
1879
1880 // 2. We don't need to check explicitly for callable receiver here,
1881 // since that's the first thing the Call/CallWithArrayLike builtins
1882 // will do.
1883
1884 // 3. Tail call with no arguments if argArray is null or undefined.
1885 Label no_arguments;
1886 __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
1887 __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
1888
1889 // 4a. Apply the receiver to the given argArray.
1890 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1891 RelocInfo::CODE_TARGET);
1892
1893 // 4b. The argArray is either null or undefined, so we tail call without any
1894 // arguments to the receiver.
1895 __ bind(&no_arguments);
1896 {
1897 __ li(a0, JSParameterCount(0));
1898 DCHECK(receiver == a1);
1899 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1900 }
1901 }
1902
1903 // static
1904 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1905 // 1. Get the callable to call (passed as receiver) from the stack.
1906 {
1907 __ Pop(a1);
1908 }
1909
1910 // 2. Make sure we have at least one argument.
1911 // a0: actual number of arguments
1912 {
1913 Label done;
1914 __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
1915 __ PushRoot(RootIndex::kUndefinedValue);
1916 __ Daddu(a0, a0, Operand(1));
1917 __ bind(&done);
1918 }
1919
1920 // 3. Adjust the actual number of arguments.
1921 __ daddiu(a0, a0, -1);
1922
1923 // 4. Call the callable.
1924 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1925 }
1926
1927 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1928 // ----------- S t a t e -------------
1929 // -- a0 : argc
1930 // -- sp[0] : receiver
1931 // -- sp[8] : target (if argc >= 1)
1932 // -- sp[16] : thisArgument (if argc >= 2)
1933 // -- sp[24] : argumentsList (if argc == 3)
1934 // -----------------------------------
1935
1936 Register argc = a0;
1937 Register arguments_list = a2;
1938 Register target = a1;
1939 Register this_argument = a5;
1940 Register undefined_value = a3;
1941 Register scratch = a4;
1942
1943 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1944
1945 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1946 // remove all arguments from the stack (including the receiver), and push
1947 // thisArgument (if present) instead.
1948 {
1949     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
1950 // consistent state for a simple pop operation.
1951
1952 __ Dsubu(scratch, argc, Operand(JSParameterCount(0)));
1953 __ Ld(target, MemOperand(sp, kPointerSize));
1954 __ Ld(this_argument, MemOperand(sp, 2 * kPointerSize));
1955 __ Ld(arguments_list, MemOperand(sp, 3 * kPointerSize));
1956 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
1957 __ Movz(this_argument, undefined_value, scratch); // if argc == 0
1958 __ Movz(target, undefined_value, scratch); // if argc == 0
1959 __ Dsubu(scratch, scratch, Operand(1));
1960 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
1961 __ Movz(this_argument, undefined_value, scratch); // if argc == 1
1962 __ Dsubu(scratch, scratch, Operand(1));
1963 __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
1964
1965 __ DropArgumentsAndPushNewReceiver(argc, this_argument,
1966 TurboAssembler::kCountIsInteger,
1967 TurboAssembler::kCountIncludesReceiver);
1968 }
1969
1970 // ----------- S t a t e -------------
1971 // -- a2 : argumentsList
1972 // -- a1 : target
1973 // -- a3 : undefined root value
1974 // -- sp[0] : thisArgument
1975 // -----------------------------------
1976
1977 // 2. We don't need to check explicitly for callable target here,
1978 // since that's the first thing the Call/CallWithArrayLike builtins
1979 // will do.
1980
1981 // 3. Apply the target to the given argumentsList.
1982 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1983 RelocInfo::CODE_TARGET);
1984 }
1985
1986 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1987 // ----------- S t a t e -------------
1988 // -- a0 : argc
1989 // -- sp[0] : receiver
1990 // -- sp[8] : target
1991 // -- sp[16] : argumentsList
1992 // -- sp[24] : new.target (optional)
1993 // -----------------------------------
1994
1995 Register argc = a0;
1996 Register arguments_list = a2;
1997 Register target = a1;
1998 Register new_target = a3;
1999 Register undefined_value = a4;
2000 Register scratch = a5;
2001
2002 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
2003
2004 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
2005 // new.target into a3 (if present, otherwise use target), remove all
2006 // arguments from the stack (including the receiver), and push thisArgument
2007 // (if present) instead.
2008 {
2009     // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
2010 // consistent state for a simple pop operation.
2011
2012 __ Dsubu(scratch, argc, Operand(JSParameterCount(0)));
2013 __ Ld(target, MemOperand(sp, kPointerSize));
2014 __ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize));
2015 __ Ld(new_target, MemOperand(sp, 3 * kPointerSize));
2016 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
2017 __ Movz(new_target, undefined_value, scratch); // if argc == 0
2018 __ Movz(target, undefined_value, scratch); // if argc == 0
2019 __ Dsubu(scratch, scratch, Operand(1));
2020 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
2021 __ Movz(new_target, target, scratch); // if argc == 1
2022 __ Dsubu(scratch, scratch, Operand(1));
2023 __ Movz(new_target, target, scratch); // if argc == 2
2024
2025 __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
2026 TurboAssembler::kCountIsInteger,
2027 TurboAssembler::kCountIncludesReceiver);
2028 }
2029
2030 // ----------- S t a t e -------------
2031 // -- a2 : argumentsList
2032 // -- a1 : target
2033 // -- a3 : new.target
2034 // -- sp[0] : receiver (undefined)
2035 // -----------------------------------
2036
2037 // 2. We don't need to check explicitly for constructor target here,
2038 // since that's the first thing the Construct/ConstructWithArrayLike
2039 // builtins will do.
2040
2041 // 3. We don't need to check explicitly for constructor new.target here,
2042 // since that's the second thing the Construct/ConstructWithArrayLike
2043 // builtins will do.
2044
2045 // 4. Construct the target with the given new.target and argumentsList.
2046 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2047 RelocInfo::CODE_TARGET);
2048 }
2049
2050 namespace {
2051
2052 // Allocate new stack space for |count| arguments and shift all existing
2053 // arguments already on the stack. |pointer_to_new_space_out| points to the
2054 // first free slot on the stack to copy additional arguments to and
2055 // |argc_in_out| is updated to include |count|.
2056 void Generate_AllocateSpaceAndShiftExistingArguments(
2057 MacroAssembler* masm, Register count, Register argc_in_out,
2058 Register pointer_to_new_space_out, Register scratch1, Register scratch2,
2059 Register scratch3) {
2060 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2061 scratch2));
2062 Register old_sp = scratch1;
2063 Register new_space = scratch2;
2064 __ mov(old_sp, sp);
2065 __ dsll(new_space, count, kPointerSizeLog2);
2066 __ Dsubu(sp, sp, Operand(new_space));
2067
2068 Register end = scratch2;
2069 Register value = scratch3;
2070 Register dest = pointer_to_new_space_out;
2071 __ mov(dest, sp);
2072 __ Dlsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
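  // Copy the existing arguments from [old_sp, end) down to the newly reserved
  // area that starts at the new sp.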
2073 Label loop, done;
2074 __ Branch(&done, ge, old_sp, Operand(end));
2075 __ bind(&loop);
2076 __ Ld(value, MemOperand(old_sp, 0));
2077 __ Sd(value, MemOperand(dest, 0));
2078 __ Daddu(old_sp, old_sp, Operand(kSystemPointerSize));
2079 __ Daddu(dest, dest, Operand(kSystemPointerSize));
2080 __ Branch(&loop, lt, old_sp, Operand(end));
2081 __ bind(&done);
2082
2083 // Update total number of arguments.
2084 __ Daddu(argc_in_out, argc_in_out, count);
2085 }
2086
2087 } // namespace
2088
2089 // static
2090 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2091 Handle<Code> code) {
2092 // ----------- S t a t e -------------
2093 // -- a1 : target
2094 // -- a0 : number of parameters on the stack
2095 // -- a2 : arguments list (a FixedArray)
2096 // -- a4 : len (number of elements to push from args)
2097 // -- a3 : new.target (for [[Construct]])
2098 // -----------------------------------
2099 if (FLAG_debug_code) {
2100 // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
2101 Label ok, fail;
2102 __ AssertNotSmi(a2);
2103 __ GetObjectType(a2, t8, t8);
2104 __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
2105 __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2106 __ Branch(&ok, eq, a4, Operand(zero_reg));
2107 // Fall through.
2108 __ bind(&fail);
2109 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2110
2111 __ bind(&ok);
2112 }
2113
2114 Register args = a2;
2115 Register len = a4;
2116
2117 // Check for stack overflow.
2118 Label stack_overflow;
2119 __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
2120
2121   // Move the arguments already on the stack (including the receiver) down
2122   // to make room for the additional arguments.
2123 // a4: Number of arguments to make room for.
2124 // a0: Number of arguments already on the stack.
2125 // a7: Points to first free slot on the stack after arguments were shifted.
2126 Generate_AllocateSpaceAndShiftExistingArguments(masm, a4, a0, a7, a6, t0, t1);
2127
2128 // Push arguments onto the stack (thisArgument is already on the stack).
2129 {
2130 Label done, push, loop;
2131 Register src = a6;
2132 Register scratch = len;
2133
2134 __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
2135 __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
2136 __ dsll(scratch, len, kPointerSizeLog2);
2137 __ Dsubu(scratch, sp, Operand(scratch));
2138 __ LoadRoot(t1, RootIndex::kTheHoleValue);
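    // Any hole in the arguments array is replaced with undefined before being
    // pushed.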
2139 __ bind(&loop);
2140 __ Ld(a5, MemOperand(src));
2141 __ daddiu(src, src, kPointerSize);
2142 __ Branch(&push, ne, a5, Operand(t1));
2143 __ LoadRoot(a5, RootIndex::kUndefinedValue);
2144 __ bind(&push);
2145 __ Sd(a5, MemOperand(a7, 0));
2146 __ Daddu(a7, a7, Operand(kSystemPointerSize));
2147 __ Daddu(scratch, scratch, Operand(kSystemPointerSize));
2148 __ Branch(&loop, ne, scratch, Operand(sp));
2149 __ bind(&done);
2150 }
2151
2152 // Tail-call to the actual Call or Construct builtin.
2153 __ Jump(code, RelocInfo::CODE_TARGET);
2154
2155 __ bind(&stack_overflow);
2156 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2157 }
2158
2159 // static
2160 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2161 CallOrConstructMode mode,
2162 Handle<Code> code) {
2163 // ----------- S t a t e -------------
2164 // -- a0 : the number of arguments
2165 // -- a3 : the new.target (for [[Construct]] calls)
2166 // -- a1 : the target to call (can be any Object)
2167 // -- a2 : start index (to support rest parameters)
2168 // -----------------------------------
2169
2170 // Check if new.target has a [[Construct]] internal method.
2171 if (mode == CallOrConstructMode::kConstruct) {
2172 Label new_target_constructor, new_target_not_constructor;
2173 __ JumpIfSmi(a3, &new_target_not_constructor);
2174 __ ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
2175 __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2176 __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
2177 __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
2178 __ bind(&new_target_not_constructor);
2179 {
2180 FrameScope scope(masm, StackFrame::MANUAL);
2181 __ EnterFrame(StackFrame::INTERNAL);
2182 __ Push(a3);
2183 __ CallRuntime(Runtime::kThrowNotConstructor);
2184 }
2185 __ bind(&new_target_constructor);
2186 }
2187
2188 Label stack_done, stack_overflow;
2189 __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2190 __ Dsubu(a7, a7, Operand(kJSArgcReceiverSlots));
2191 __ Dsubu(a7, a7, a2);
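  // a7 = caller's argument count (without the receiver) minus the start index,
  // i.e. the number of arguments to forward.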
2192 __ Branch(&stack_done, le, a7, Operand(zero_reg));
2193 {
2194 // Check for stack overflow.
2195 __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
2196
2197 // Forward the arguments from the caller frame.
2198
2199 // Point to the first argument to copy (skipping the receiver).
2200 __ Daddu(a6, fp,
2201 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2202 kSystemPointerSize));
2203 __ Dlsa(a6, a6, a2, kSystemPointerSizeLog2);
2204
2205     // Move the arguments already on the stack (including the receiver) down
2206     // to make room for the forwarded arguments.
2207 // a7: Number of arguments to make room for.
2208 // a0: Number of arguments already on the stack.
2209 // a2: Points to first free slot on the stack after arguments were shifted.
2210 Generate_AllocateSpaceAndShiftExistingArguments(masm, a7, a0, a2, t0, t1,
2211 t2);
2212
2213 // Copy arguments from the caller frame.
2214 // TODO(victorgomes): Consider using forward order as potentially more cache
2215 // friendly.
2216 {
2217 Label loop;
2218 __ bind(&loop);
2219 {
2220 __ Subu(a7, a7, Operand(1));
2221 __ Dlsa(t0, a6, a7, kPointerSizeLog2);
2222 __ Ld(kScratchReg, MemOperand(t0));
2223 __ Dlsa(t0, a2, a7, kPointerSizeLog2);
2224 __ Sd(kScratchReg, MemOperand(t0));
2225 __ Branch(&loop, ne, a7, Operand(zero_reg));
2226 }
2227 }
2228 }
2229 __ Branch(&stack_done);
2230 __ bind(&stack_overflow);
2231 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2232 __ bind(&stack_done);
2233
2234 // Tail-call to the {code} handler.
2235 __ Jump(code, RelocInfo::CODE_TARGET);
2236 }
2237
2238 // static
2239 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2240 ConvertReceiverMode mode) {
2241 // ----------- S t a t e -------------
2242 // -- a0 : the number of arguments
2243 // -- a1 : the function to call (checked to be a JSFunction)
2244 // -----------------------------------
2245 __ AssertCallableFunction(a1);
2246
2247 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2248
2249 // Enter the context of the function; ToObject has to run in the function
2250 // context, and we also need to take the global proxy from the function
2251 // context in case of conversion.
2252 __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2253 // We need to convert the receiver for non-native sloppy mode functions.
2254 Label done_convert;
2255 __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2256 __ And(kScratchReg, a3,
2257 Operand(SharedFunctionInfo::IsNativeBit::kMask |
2258 SharedFunctionInfo::IsStrictBit::kMask));
2259 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2260 {
2261 // ----------- S t a t e -------------
2262 // -- a0 : the number of arguments
2263 // -- a1 : the function to call (checked to be a JSFunction)
2264 // -- a2 : the shared function info.
2265 // -- cp : the function context.
2266 // -----------------------------------
2267
2268 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2269 // Patch receiver to global proxy.
2270 __ LoadGlobalProxy(a3);
2271 } else {
2272 Label convert_to_object, convert_receiver;
2273 __ LoadReceiver(a3, a0);
2274 __ JumpIfSmi(a3, &convert_to_object);
2275 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2276 __ GetObjectType(a3, a4, a4);
2277 __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
2278 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2279 Label convert_global_proxy;
2280 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2281 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2282 __ bind(&convert_global_proxy);
2283 {
2284 // Patch receiver to global proxy.
2285 __ LoadGlobalProxy(a3);
2286 }
2287 __ Branch(&convert_receiver);
2288 }
2289 __ bind(&convert_to_object);
2290 {
2291 // Convert receiver using ToObject.
2292 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2293 // in the fast case? (fall back to AllocateInNewSpace?)
2294 FrameScope scope(masm, StackFrame::INTERNAL);
2295 __ SmiTag(a0);
2296 __ Push(a0, a1);
2297 __ mov(a0, a3);
2298 __ Push(cp);
2299 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2300 RelocInfo::CODE_TARGET);
2301 __ Pop(cp);
2302 __ mov(a3, v0);
2303 __ Pop(a0, a1);
2304 __ SmiUntag(a0);
2305 }
2306 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2307 __ bind(&convert_receiver);
2308 }
2309 __ StoreReceiver(a3, a0, kScratchReg);
2310 }
2311 __ bind(&done_convert);
2312
2313 // ----------- S t a t e -------------
2314 // -- a0 : the number of arguments
2315 // -- a1 : the function to call (checked to be a JSFunction)
2316 // -- a2 : the shared function info.
2317 // -- cp : the function context.
2318 // -----------------------------------
2319
2320 __ Lhu(a2,
2321 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2322 __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
2323 }
2324
2325 // static
2326 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2327 // ----------- S t a t e -------------
2328 // -- a0 : the number of arguments
2329 // -- a1 : the function to call (checked to be a JSBoundFunction)
2330 // -----------------------------------
2331 __ AssertBoundFunction(a1);
2332
2333 // Patch the receiver to [[BoundThis]].
2334 {
2335 __ Ld(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2336 __ StoreReceiver(t0, a0, kScratchReg);
2337 }
2338
2339 // Load [[BoundArguments]] into a2 and length of that into a4.
2340 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2341 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2342
2343 // ----------- S t a t e -------------
2344 // -- a0 : the number of arguments
2345 // -- a1 : the function to call (checked to be a JSBoundFunction)
2346 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2347 // -- a4 : the number of [[BoundArguments]]
2348 // -----------------------------------
2349
2350 // Reserve stack space for the [[BoundArguments]].
2351 {
2352 Label done;
2353 __ dsll(a5, a4, kPointerSizeLog2);
2354 __ Dsubu(t0, sp, Operand(a5));
2355 // Check the stack for overflow. We are not trying to catch interruptions
2356 // (i.e. debug break and preemption) here, so check the "real stack limit".
2357 __ LoadStackLimit(kScratchReg,
2358 MacroAssembler::StackLimitKind::kRealStackLimit);
2359 __ Branch(&done, hs, t0, Operand(kScratchReg));
2360 {
2361 FrameScope scope(masm, StackFrame::MANUAL);
2362 __ EnterFrame(StackFrame::INTERNAL);
2363 __ CallRuntime(Runtime::kThrowStackOverflow);
2364 }
2365 __ bind(&done);
2366 }
2367
2368 // Pop receiver.
2369 __ Pop(t0);
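  // The receiver is popped so that the [[BoundArguments]] can be pushed below
  // it; it is pushed back after the copy loop.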
2370
2371 // Push [[BoundArguments]].
2372 {
2373 Label loop, done_loop;
2374 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2375 __ Daddu(a0, a0, Operand(a4));
2376 __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2377 __ bind(&loop);
2378 __ Dsubu(a4, a4, Operand(1));
2379 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2380 __ Dlsa(a5, a2, a4, kPointerSizeLog2);
2381 __ Ld(kScratchReg, MemOperand(a5));
2382 __ Push(kScratchReg);
2383 __ Branch(&loop);
2384 __ bind(&done_loop);
2385 }
2386
2387 // Push receiver.
2388 __ Push(t0);
2389
2390 // Call the [[BoundTargetFunction]] via the Call builtin.
2391 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2392 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2393 RelocInfo::CODE_TARGET);
2394 }
2395
2396 // static
2397 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2398 // ----------- S t a t e -------------
2399 // -- a0 : the number of arguments
2400 // -- a1 : the target to call (can be any Object).
2401 // -----------------------------------
2402 Register argc = a0;
2403 Register target = a1;
2404 Register map = t1;
2405 Register instance_type = t2;
2406 Register scratch = t8;
2407 DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2408
2409 Label non_callable, class_constructor;
2410 __ JumpIfSmi(target, &non_callable);
2411 __ LoadMap(map, target);
2412 __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2413 scratch);
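  // scratch now holds the instance type biased by FIRST_CALLABLE_JS_FUNCTION_TYPE,
  // so a single unsigned (ls) comparison against the range width dispatches
  // every callable JSFunction type to CallFunction.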
2414 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2415 RelocInfo::CODE_TARGET, ls, scratch,
2416 Operand(LAST_CALLABLE_JS_FUNCTION_TYPE -
2417 FIRST_CALLABLE_JS_FUNCTION_TYPE));
2418 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2419 RelocInfo::CODE_TARGET, eq, instance_type,
2420 Operand(JS_BOUND_FUNCTION_TYPE));
2421
2422 // Check if target has a [[Call]] internal method.
2423 {
2424 Register flags = t1;
2425 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2426 map = no_reg;
2427 __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask));
2428 __ Branch(&non_callable, eq, flags, Operand(zero_reg));
2429 }
2430
2431 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
2432 instance_type, Operand(JS_PROXY_TYPE));
2433
2434   // Check if the target is a wrapped function and, if so, dispatch to the
2435   // CallWrappedFunction builtin.
2436 __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2437 RelocInfo::CODE_TARGET, eq, instance_type,
2438 Operand(JS_WRAPPED_FUNCTION_TYPE));
2439
2440 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2441 // Check that the function is not a "classConstructor".
2442 __ Branch(&class_constructor, eq, instance_type,
2443 Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2444
2445 // 2. Call to something else, which might have a [[Call]] internal method (if
2446 // not we raise an exception).
2447 // Overwrite the original receiver with the (original) target.
2448 __ StoreReceiver(target, argc, kScratchReg);
2449 // Let the "call_as_function_delegate" take care of the rest.
2450 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2451 __ Jump(masm->isolate()->builtins()->CallFunction(
2452 ConvertReceiverMode::kNotNullOrUndefined),
2453 RelocInfo::CODE_TARGET);
2454
2455 // 3. Call to something that is not callable.
2456 __ bind(&non_callable);
2457 {
2458 FrameScope scope(masm, StackFrame::INTERNAL);
2459 __ Push(target);
2460 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2461 }
2462
2463 // 4. The function is a "classConstructor", need to raise an exception.
2464 __ bind(&class_constructor);
2465 {
2466 FrameScope frame(masm, StackFrame::INTERNAL);
2467 __ Push(target);
2468 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2469 }
2470 }
2471
2472 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2473 // ----------- S t a t e -------------
2474 // -- a0 : the number of arguments
2475 // -- a1 : the constructor to call (checked to be a JSFunction)
2476 // -- a3 : the new target (checked to be a constructor)
2477 // -----------------------------------
2478 __ AssertConstructor(a1);
2479 __ AssertFunction(a1);
2480
2481 // Calling convention for function specific ConstructStubs require
2482 // a2 to contain either an AllocationSite or undefined.
2483 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2484
2485 Label call_generic_stub;
2486
2487 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2488 __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2489 __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2490 __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2491 __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2492
2493 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2494 RelocInfo::CODE_TARGET);
2495
2496 __ bind(&call_generic_stub);
2497 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2498 RelocInfo::CODE_TARGET);
2499 }
2500
2501 // static
2502 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2503 // ----------- S t a t e -------------
2504 // -- a0 : the number of arguments
2505 // -- a1 : the function to call (checked to be a JSBoundFunction)
2506 // -- a3 : the new target (checked to be a constructor)
2507 // -----------------------------------
2508 __ AssertConstructor(a1);
2509 __ AssertBoundFunction(a1);
2510
2511 // Load [[BoundArguments]] into a2 and length of that into a4.
2512 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2513 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2514
2515 // ----------- S t a t e -------------
2516 // -- a0 : the number of arguments
2517 // -- a1 : the function to call (checked to be a JSBoundFunction)
2518 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2519 // -- a3 : the new target (checked to be a constructor)
2520 // -- a4 : the number of [[BoundArguments]]
2521 // -----------------------------------
2522
2523 // Reserve stack space for the [[BoundArguments]].
2524 {
2525 Label done;
2526 __ dsll(a5, a4, kPointerSizeLog2);
2527 __ Dsubu(t0, sp, Operand(a5));
2528 // Check the stack for overflow. We are not trying to catch interruptions
2529 // (i.e. debug break and preemption) here, so check the "real stack limit".
2530 __ LoadStackLimit(kScratchReg,
2531 MacroAssembler::StackLimitKind::kRealStackLimit);
2532 __ Branch(&done, hs, t0, Operand(kScratchReg));
2533 {
2534 FrameScope scope(masm, StackFrame::MANUAL);
2535 __ EnterFrame(StackFrame::INTERNAL);
2536 __ CallRuntime(Runtime::kThrowStackOverflow);
2537 }
2538 __ bind(&done);
2539 }
2540
2541 // Pop receiver.
2542 __ Pop(t0);
2543
2544 // Push [[BoundArguments]].
2545 {
2546 Label loop, done_loop;
2547 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2548 __ Daddu(a0, a0, Operand(a4));
2549 __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2550 __ bind(&loop);
2551 __ Dsubu(a4, a4, Operand(1));
2552 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2553 __ Dlsa(a5, a2, a4, kPointerSizeLog2);
2554 __ Ld(kScratchReg, MemOperand(a5));
2555 __ Push(kScratchReg);
2556 __ Branch(&loop);
2557 __ bind(&done_loop);
2558 }
2559
2560 // Push receiver.
2561 __ Push(t0);
2562
2563 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2564 {
2565 Label skip_load;
2566 __ Branch(&skip_load, ne, a1, Operand(a3));
2567 __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2568 __ bind(&skip_load);
2569 }
2570
2571 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2572 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2573 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2574 }
2575
2576 // static
2577 void Builtins::Generate_Construct(MacroAssembler* masm) {
2578 // ----------- S t a t e -------------
2579 // -- a0 : the number of arguments
2580 // -- a1 : the constructor to call (can be any Object)
2581 // -- a3 : the new target (either the same as the constructor or
2582 // the JSFunction on which new was invoked initially)
2583 // -----------------------------------
2584
2585 Register argc = a0;
2586 Register target = a1;
2587 Register map = t1;
2588 Register instance_type = t2;
2589 Register scratch = t8;
2590 DCHECK(!AreAliased(argc, target, map, instance_type, scratch));
2591
2592 // Check if target is a Smi.
2593 Label non_constructor, non_proxy;
2594 __ JumpIfSmi(target, &non_constructor);
2595
2596 // Check if target has a [[Construct]] internal method.
2597 __ ld(map, FieldMemOperand(target, HeapObject::kMapOffset));
2598 {
2599 Register flags = t3;
2600 __ Lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2601 __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask));
2602 __ Branch(&non_constructor, eq, flags, Operand(zero_reg));
2603 }
2604
2605 // Dispatch based on instance type.
2606 __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch);
2607 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2608 RelocInfo::CODE_TARGET, ls, scratch,
2609 Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
2610
2611 // Only dispatch to bound functions after checking whether they are
2612 // constructors.
2613 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2614 RelocInfo::CODE_TARGET, eq, instance_type,
2615 Operand(JS_BOUND_FUNCTION_TYPE));
2616
2617 // Only dispatch to proxies after checking whether they are constructors.
2618 __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
2619 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2620 RelocInfo::CODE_TARGET);
2621
2622 // Called Construct on an exotic Object with a [[Construct]] internal method.
2623 __ bind(&non_proxy);
2624 {
2625 // Overwrite the original receiver with the (original) target.
2626 __ StoreReceiver(target, argc, kScratchReg);
2627 // Let the "call_as_constructor_delegate" take care of the rest.
2628 __ LoadNativeContextSlot(target,
2629 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2630 __ Jump(masm->isolate()->builtins()->CallFunction(),
2631 RelocInfo::CODE_TARGET);
2632 }
2633
2634 // Called Construct on an Object that doesn't have a [[Construct]] internal
2635 // method.
2636 __ bind(&non_constructor);
2637 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2638 RelocInfo::CODE_TARGET);
2639 }
2640
2641 #if V8_ENABLE_WEBASSEMBLY
2642 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2643 // The function index was put in t0 by the jump table trampoline.
2644 // Convert to Smi for the runtime call
2645 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2646
2647 // Compute register lists for parameters to be saved. We save all parameter
2648 // registers (see wasm-linkage.h). They might be overwritten in the runtime
2649 // call below. We don't have any callee-saved registers in wasm, so no need to
2650 // store anything else.
2651 constexpr RegList kSavedGpRegs = ([]() constexpr {
2652 RegList saved_gp_regs;
2653 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2654 saved_gp_regs.set(gp_param_reg);
2655 }
2656
2657 // All set registers were unique.
2658 CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2659 // The Wasm instance must be part of the saved registers.
2660 CHECK(saved_gp_regs.has(kWasmInstanceRegister));
2661 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2662 saved_gp_regs.Count());
2663 return saved_gp_regs;
2664 })();
2665
2666 constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
2667 DoubleRegList saved_fp_regs;
2668 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2669 saved_fp_regs.set(fp_param_reg);
2670 }
2671
2672 CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2673 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2674 saved_fp_regs.Count());
2675 return saved_fp_regs;
2676 })();
2677
2678 {
2679 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2680 FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2681
2682 // Save registers that we need to keep alive across the runtime call.
2683 __ MultiPush(kSavedGpRegs);
2684     // Check if the machine has SIMD enabled; if so, push the vector registers.
2685     // If not, push only the double registers.
2686 Label push_doubles, simd_pushed;
2687 __ li(a1, ExternalReference::supports_wasm_simd_128_address());
2688 // If > 0 then simd is available.
2689 __ Lbu(a1, MemOperand(a1));
2690 __ Branch(&push_doubles, le, a1, Operand(zero_reg));
2691 // Save vector registers.
2692 {
2693 CpuFeatureScope msa_scope(
2694 masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
2695 __ MultiPushMSA(kSavedFpRegs);
2696 }
2697 __ Branch(&simd_pushed);
2698 __ bind(&push_doubles);
2699 __ MultiPushFPU(kSavedFpRegs);
2700 // kFixedFrameSizeFromFp is hard coded to include space for Simd
2701 // registers, so we still need to allocate extra (unused) space on the stack
2702 // as if they were saved.
2703 __ Dsubu(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2704 __ bind(&simd_pushed);
2705 // Pass instance and function index as an explicit arguments to the runtime
2706 // function.
2707 __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2708 // Initialize the JavaScript context with 0. CEntry will use it to
2709 // set the current context on the isolate.
2710 __ Move(kContextRegister, Smi::zero());
2711 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2712
2713 // Restore registers.
2714 Label pop_doubles, simd_popped;
2715 __ li(a1, ExternalReference::supports_wasm_simd_128_address());
2716 // If > 0 then simd is available.
2717 __ Lbu(a1, MemOperand(a1));
2718 __ Branch(&pop_doubles, le, a1, Operand(zero_reg));
2719 // Pop vector registers.
2720 {
2721 CpuFeatureScope msa_scope(
2722 masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
2723 __ MultiPopMSA(kSavedFpRegs);
2724 }
2725 __ Branch(&simd_popped);
2726 __ bind(&pop_doubles);
2727 __ Daddu(sp, sp, kSavedFpRegs.Count() * kDoubleSize);
2728 __ MultiPopFPU(kSavedFpRegs);
2729 __ bind(&simd_popped);
2730 __ MultiPop(kSavedGpRegs);
2731 }
2732
2733 // Untag the returned Smi, for later use.
2734 static_assert(!kSavedGpRegs.has(v0));
2735 __ SmiUntag(v0);
2736
2737   // The runtime function returned the jump table slot offset as a Smi (now
2738   // untagged in v0). Add it to the jump table start to get the jump target.
2739 static_assert(!kSavedGpRegs.has(t8));
2740 __ Ld(t8,
2741 MemOperand(kWasmInstanceRegister,
2742 WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag));
2743 __ Daddu(t8, v0, t8);
2744
2745 // Finally, jump to the jump table slot for the function.
2746 __ Jump(t8);
2747 }
2748
2749 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2750 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2751 {
2752 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2753
2754 // Save all parameter registers. They might hold live values, we restore
2755 // them after the runtime call.
2756 __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2757 __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2758
2759 // Initialize the JavaScript context with 0. CEntry will use it to
2760 // set the current context on the isolate.
2761 __ Move(cp, Smi::zero());
2762 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2763
2764 // Restore registers.
2765 __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2766 __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2767 }
2768 __ Ret();
2769 }
2770
2771 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2772 __ Trap();
2773 }
2774
2775 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2776 // TODO(v8:12191): Implement for this platform.
2777 __ Trap();
2778 }
2779
2780 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2781 // TODO(v8:12191): Implement for this platform.
2782 __ Trap();
2783 }
2784
2785 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2786 // TODO(v8:12191): Implement for this platform.
2787 __ Trap();
2788 }
2789
2790 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2791 // Only needed on x64.
2792 __ Trap();
2793 }
2794
2795 #endif // V8_ENABLE_WEBASSEMBLY
2796
2797 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2798 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2799 bool builtin_exit_frame) {
2800 // Called from JavaScript; parameters are on stack as if calling JS function
2801 // a0: number of arguments including receiver
2802 // a1: pointer to builtin function
2803 // fp: frame pointer (restored after C call)
2804 // sp: stack pointer (restored as callee's sp after C call)
2805 // cp: current context (C callee-saved)
2806 //
2807 // If argv_mode == ArgvMode::kRegister:
2808 // a2: pointer to the first argument
2809
2810 if (argv_mode == ArgvMode::kRegister) {
2811 // Move argv into the correct register.
2812 __ mov(s1, a2);
2813 } else {
2814 // Compute the argv pointer in a callee-saved register.
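// s1 = sp + argc * kPointerSize - kPointerSize, i.e. the highest-addressed
// of the argc argument slots on the stack.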
2815 __ Dlsa(s1, sp, a0, kPointerSizeLog2);
2816 __ Dsubu(s1, s1, kPointerSize);
2817 }
2818
2819 // Enter the exit frame that transitions from JavaScript to C++.
2820 FrameScope scope(masm, StackFrame::MANUAL);
2821 __ EnterExitFrame(
2822 save_doubles == SaveFPRegsMode::kSave, 0,
2823 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2824
2825 // s0: number of arguments including receiver (C callee-saved)
2826 // s1: pointer to first argument (C callee-saved)
2827 // s2: pointer to builtin function (C callee-saved)
2828
2829 // Prepare arguments for C routine.
2830 // a0 = argc
2831 __ mov(s0, a0);
2832 __ mov(s2, a1);
2833
2834 // We are calling compiled C/C++ code; a0, a1 and a2 hold the three
2835 // arguments (argc, argv and the isolate). No stack argument slots are needed under the n64 ABI.
2836
2837 __ AssertStackIsAligned();
2838
2839 // a0 = argc, a1 = argv, a2 = isolate
2840 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2841 __ mov(a1, s1);
2842
2843 __ StoreReturnAddressAndCall(s2);
2844
2845 // Result returned in v0 or v1:v0 - do not destroy these registers!
2846
2847 // Check result for exception sentinel.
2848 Label exception_returned;
2849 __ LoadRoot(a4, RootIndex::kException);
2850 __ Branch(&exception_returned, eq, a4, Operand(v0));
2851
2852 // Check that there is no pending exception, otherwise we
2853 // should have returned the exception sentinel.
2854 if (FLAG_debug_code) {
2855 Label okay;
2856 ExternalReference pending_exception_address = ExternalReference::Create(
2857 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2858 __ li(a2, pending_exception_address);
2859 __ Ld(a2, MemOperand(a2));
2860 __ LoadRoot(a4, RootIndex::kTheHoleValue);
2861 // Cannot use Check here, as it attempts to generate a call into the runtime.
2862 __ Branch(&okay, eq, a4, Operand(a2));
2863 __ stop();
2864 __ bind(&okay);
2865 }
2866
2867 // Exit C frame and return.
2868 // v0:v1: result
2869 // sp: stack pointer
2870 // fp: frame pointer
2871 Register argc = argv_mode == ArgvMode::kRegister
2872 // We don't want to pop arguments so set argc to no_reg.
2873 ? no_reg
2874 // s0: still holds argc (callee-saved).
2875 : s0;
2876 __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
2877
2878 // Handling of exception.
2879 __ bind(&exception_returned);
2880
2881 ExternalReference pending_handler_context_address = ExternalReference::Create(
2882 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2883 ExternalReference pending_handler_entrypoint_address =
2884 ExternalReference::Create(
2885 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2886 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2887 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2888 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2889 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2890
2891 // Ask the runtime for help to determine the handler. This will set v0 to
2892 // contain the current pending exception; don't clobber it.
2893 ExternalReference find_handler =
2894 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2895 {
2896 FrameScope scope(masm, StackFrame::MANUAL);
2897 __ PrepareCallCFunction(3, 0, a0);
2898 __ mov(a0, zero_reg);
2899 __ mov(a1, zero_reg);
2900 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2901 __ CallCFunction(find_handler, 3);
2902 }
2903
2904 // Retrieve the handler context, SP and FP.
2905 __ li(cp, pending_handler_context_address);
2906 __ Ld(cp, MemOperand(cp));
2907 __ li(sp, pending_handler_sp_address);
2908 __ Ld(sp, MemOperand(sp));
2909 __ li(fp, pending_handler_fp_address);
2910 __ Ld(fp, MemOperand(fp));
2911
2912 // If the handler is a JS frame, restore the context to the frame. Note that
2913 // cp will be zero (and the store skipped) for non-JS frames.
2914 Label zero;
2915 __ Branch(&zero, eq, cp, Operand(zero_reg));
2916 __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2917 __ bind(&zero);
2918
2919 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
2920 {
2921 UseScratchRegisterScope temps(masm);
2922 Register scratch = temps.Acquire();
2923 __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
2924 masm->isolate()));
2925 __ Sd(zero_reg, MemOperand(scratch));
2926 }
2927
2928 // Compute the handler entry address and jump to it.
2929 __ li(t9, pending_handler_entrypoint_address);
2930 __ Ld(t9, MemOperand(t9));
2931 __ Jump(t9);
2932 }
2933
2934 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2935 Label done;
2936 Register result_reg = t0;
2937
2938 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2939 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2940 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2941 DoubleRegister double_scratch = kScratchDoubleReg;
2942
2943 // Account for saved regs.
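// (result_reg plus the three scratch registers are pushed below the double
// argument, hence 4 * kPointerSize.)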
2944 const int kArgumentOffset = 4 * kPointerSize;
2945
2946 __ Push(result_reg);
2947 __ Push(scratch, scratch2, scratch3);
2948
2949 // Load double input.
2950 __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
2951
2952 // Try a conversion to a signed integer.
2953 __ Trunc_w_d(double_scratch, double_scratch);
2954 // Move the converted value into the result register.
2955 __ mfc1(scratch3, double_scratch);
2956
2957 // Retrieve the FCSR.
2958 __ cfc1(scratch, FCSR);
2959
2960 // Check for overflow and NaNs.
2961 __ And(scratch, scratch,
2962 kFCSROverflowCauseMask | kFCSRUnderflowCauseMask |
2963 kFCSRInvalidOpCauseMask);
2964 // If we had no exceptions then set result_reg and we are done.
2965 Label error;
2966 __ Branch(&error, ne, scratch, Operand(zero_reg));
2967 __ Move(result_reg, scratch3);
2968 __ Branch(&done);
2969 __ bind(&error);
2970
2971 // Load the double value and perform a manual truncation.
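// A rough sketch of the slow path below, in C-like pseudocode (not the
// emitted code):
//   exponent = biased exponent extracted from input_high;
//   if (the value is an Infinity or NaN) result = 0;
//   else if (the exponent is so large that no mantissa bit lands in bits
//            0..31) result = 0;
//   else result = mantissa (with the implicit 1) shifted into the low 32
//                 bits, negated when the sign bit of input_high is set.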
2972 Register input_high = scratch2;
2973 Register input_low = scratch3;
2974
2975 __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2976 __ Lw(input_high,
2977 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2978
2979 Label normal_exponent;
2980 // Extract the biased exponent in result.
2981 __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
2982 HeapNumber::kExponentBits);
2983
2984 // Check for Infinity and NaNs, which should return 0.
2985 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
2986 __ Movz(result_reg, zero_reg, scratch);
2987 __ Branch(&done, eq, scratch, Operand(zero_reg));
2988
2989 // Express exponent as delta to (number of mantissa bits + 31).
2990 __ Subu(result_reg, result_reg,
2991 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2992
2993 // If the delta is strictly positive, all bits would be shifted away,
2994 // which means that we can return 0.
2995 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2996 __ mov(result_reg, zero_reg);
2997 __ Branch(&done);
2998
2999 __ bind(&normal_exponent);
3000 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
3001 // Calculate shift.
3002 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
3003
3004 // Save the sign.
3005 Register sign = result_reg;
3006 result_reg = no_reg;
3007 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
3008
3009 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
3010 // to check for this specific case.
3011 Label high_shift_needed, high_shift_done;
3012 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
3013 __ mov(input_high, zero_reg);
3014 __ Branch(&high_shift_done);
3015 __ bind(&high_shift_needed);
3016
3017 // Set the implicit 1 before the mantissa part in input_high.
3018 __ Or(input_high, input_high,
3019 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
3020 // Shift the mantissa bits to the correct position.
3021 // We don't need to clear non-mantissa bits as they will be shifted away.
3022 // If they weren't, it would mean that the answer is in the 32-bit range.
3023 __ sllv(input_high, input_high, scratch);
3024
3025 __ bind(&high_shift_done);
3026
3027 // Replace the shifted bits with bits from the lower mantissa word.
3028 Label pos_shift, shift_done;
3029 __ li(kScratchReg, 32);
3030 __ subu(scratch, kScratchReg, scratch);
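// scratch = 32 - shift; if negative, the low word must be shifted left by
// the excess instead of right.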
3031 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
3032
3033 // Negate scratch.
3034 __ Subu(scratch, zero_reg, scratch);
3035 __ sllv(input_low, input_low, scratch);
3036 __ Branch(&shift_done);
3037
3038 __ bind(&pos_shift);
3039 __ srlv(input_low, input_low, scratch);
3040
3041 __ bind(&shift_done);
3042 __ Or(input_high, input_high, Operand(input_low));
3043 // Restore sign if necessary.
3044 __ mov(scratch, sign);
3045 result_reg = sign;
3046 sign = no_reg;
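// result_reg = -input_high; Movz then overwrites it with +input_high when
// the saved sign bit (now in scratch) is zero.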
3047 __ Subu(result_reg, zero_reg, input_high);
3048 __ Movz(result_reg, input_high, scratch);
3049
3050 __ bind(&done);
3051
3052 __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
3053 __ Pop(scratch, scratch2, scratch3);
3054 __ Pop(result_reg);
3055 __ Ret();
3056 }
3057
3058 namespace {
3059
3060 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3061 int64_t offset = (ref0.address() - ref1.address());
3062 DCHECK(static_cast<int>(offset) == offset);
3063 return static_cast<int>(offset);
3064 }
3065
3066 // Calls an API function. Allocates a HandleScope, extracts the returned
3067 // value from the handle, and propagates exceptions. Restores the context.
3068 // stack_space - space to be unwound on exit (includes the call JS arguments
3069 // space and the additional space allocated for the fast call).
3070 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
3071 ExternalReference thunk_ref, int stack_space,
3072 MemOperand* stack_space_operand,
3073 MemOperand return_value_operand) {
3074 Isolate* isolate = masm->isolate();
3075 ExternalReference next_address =
3076 ExternalReference::handle_scope_next_address(isolate);
3077 const int kNextOffset = 0;
3078 const int kLimitOffset = AddressOffset(
3079 ExternalReference::handle_scope_limit_address(isolate), next_address);
3080 const int kLevelOffset = AddressOffset(
3081 ExternalReference::handle_scope_level_address(isolate), next_address);
3082
3083 DCHECK(function_address == a1 || function_address == a2);
3084
3085 Label profiler_enabled, end_profiler_check;
3086 __ li(t9, ExternalReference::is_profiling_address(isolate));
3087 __ Lb(t9, MemOperand(t9, 0));
3088 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
3089 __ li(t9, ExternalReference::address_of_runtime_stats_flag());
3090 __ Lw(t9, MemOperand(t9, 0));
3091 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
3092 {
3093 // Call the api function directly.
3094 __ mov(t9, function_address);
3095 __ Branch(&end_profiler_check);
3096 }
3097
3098 __ bind(&profiler_enabled);
3099 {
3100 // Additional parameter is the address of the actual callback.
3101 __ li(t9, thunk_ref);
3102 }
3103 __ bind(&end_profiler_check);
3104
3105 // Allocate HandleScope in callee-save registers.
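// s0 = previous handle scope 'next', s1 = previous 'limit', s2 = incremented
// 'level'; they are used below to restore the scope after the call.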
3106 __ li(s5, next_address);
3107 __ Ld(s0, MemOperand(s5, kNextOffset));
3108 __ Ld(s1, MemOperand(s5, kLimitOffset));
3109 __ Lw(s2, MemOperand(s5, kLevelOffset));
3110 __ Addu(s2, s2, Operand(1));
3111 __ Sw(s2, MemOperand(s5, kLevelOffset));
3112
3113 __ StoreReturnAddressAndCall(t9);
3114
3115 Label promote_scheduled_exception;
3116 Label delete_allocated_handles;
3117 Label leave_exit_frame;
3118 Label return_value_loaded;
3119
3120 // Load value from ReturnValue.
3121 __ Ld(v0, return_value_operand);
3122 __ bind(&return_value_loaded);
3123
3124 // No more valid handles (the result handle was the last one). Restore
3125 // previous handle scope.
3126 __ Sd(s0, MemOperand(s5, kNextOffset));
3127 if (FLAG_debug_code) {
3128 __ Lw(a1, MemOperand(s5, kLevelOffset));
3129 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
3130 Operand(s2));
3131 }
3132 __ Subu(s2, s2, Operand(1));
3133 __ Sw(s2, MemOperand(s5, kLevelOffset));
3134 __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
3135 __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
3136
3137 // Leave the API exit frame.
3138 __ bind(&leave_exit_frame);
3139
3140 if (stack_space_operand == nullptr) {
3141 DCHECK_NE(stack_space, 0);
3142 __ li(s0, Operand(stack_space));
3143 } else {
3144 DCHECK_EQ(stack_space, 0);
3145 STATIC_ASSERT(kCArgSlotCount == 0);
3146 __ Ld(s0, *stack_space_operand);
3147 }
3148
3149 static constexpr bool kDontSaveDoubles = false;
3150 static constexpr bool kRegisterContainsSlotCount = false;
3151 __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
3152 kRegisterContainsSlotCount);
3153
3154 // Check if the function scheduled an exception.
3155 __ LoadRoot(a4, RootIndex::kTheHoleValue);
3156 __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
3157 __ Ld(a5, MemOperand(kScratchReg));
3158 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
3159
3160 __ Ret();
3161
3162 // Re-throw by promoting a scheduled exception.
3163 __ bind(&promote_scheduled_exception);
3164 __ TailCallRuntime(Runtime::kPromoteScheduledException);
3165
3166 // HandleScope limit has changed. Delete allocated extensions.
3167 __ bind(&delete_allocated_handles);
3168 __ Sd(s1, MemOperand(s5, kLimitOffset));
3169 __ mov(s0, v0);
3170 __ mov(a0, v0);
3171 __ PrepareCallCFunction(1, s1);
3172 __ li(a0, ExternalReference::isolate_address(isolate));
3173 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3174 __ mov(v0, s0);
3175 __ jmp(&leave_exit_frame);
3176 }
3177
3178 } // namespace
3179
3180 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3181 // ----------- S t a t e -------------
3182 // -- cp : context
3183 // -- a1 : api function address
3184 // -- a2 : arguments count
3185 // -- a3 : call data
3186 // -- a0 : holder
3187 // -- sp[0] : receiver
3188 // -- sp[8] : first argument
3189 // -- ...
3190 // -- sp[(argc) * 8] : last argument
3191 // -----------------------------------
3192
3193 Register api_function_address = a1;
3194 Register argc = a2;
3195 Register call_data = a3;
3196 Register holder = a0;
3197 Register scratch = t0;
3198 Register base = t1; // For addressing MemOperands on the stack.
3199
3200 DCHECK(!AreAliased(api_function_address, argc, call_data,
3201 holder, scratch, base));
3202
3203 using FCA = FunctionCallbackArguments;
3204
3205 STATIC_ASSERT(FCA::kArgsLength == 6);
3206 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3207 STATIC_ASSERT(FCA::kDataIndex == 4);
3208 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3209 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3210 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3211 STATIC_ASSERT(FCA::kHolderIndex == 0);
3212
3213 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3214 //
3215 // Target state:
3216 // sp[0 * kPointerSize]: kHolder
3217 // sp[1 * kPointerSize]: kIsolate
3218 // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3219 // sp[3 * kPointerSize]: undefined (kReturnValue)
3220 // sp[4 * kPointerSize]: kData
3221 // sp[5 * kPointerSize]: undefined (kNewTarget)
3222
3223 // Set up the base register for addressing through MemOperands. It will point
3224 // at the receiver (located at sp + argc * kPointerSize).
3225 __ Dlsa(base, sp, argc, kPointerSizeLog2);
3226
3227 // Reserve space on the stack.
3228 __ Dsubu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
3229
3230 // kHolder.
3231 __ Sd(holder, MemOperand(sp, 0 * kPointerSize));
3232
3233 // kIsolate.
3234 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3235 __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
3236
3237 // kReturnValueDefaultValue and kReturnValue.
3238 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3239 __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
3240 __ Sd(scratch, MemOperand(sp, 3 * kPointerSize));
3241
3242 // kData.
3243 __ Sd(call_data, MemOperand(sp, 4 * kPointerSize));
3244
3245 // kNewTarget.
3246 __ Sd(scratch, MemOperand(sp, 5 * kPointerSize));
3247
3248 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3249 // We use it below to set up the FunctionCallbackInfo object.
3250 __ mov(scratch, sp);
3251
3252 // Allocate the v8::Arguments structure in the arguments' space since
3253 // it's not controlled by GC.
3254 static constexpr int kApiStackSpace = 4;
3255 static constexpr bool kDontSaveDoubles = false;
3256 FrameScope frame_scope(masm, StackFrame::MANUAL);
3257 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3258
3259 // EnterExitFrame may align the sp.
3260
3261 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3262 // Arguments are after the return address (pushed by EnterExitFrame()).
3263 __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
3264
3265 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3266 // on the stack).
3267 __ Daddu(scratch, scratch,
3268 Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3269
3270 __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
3271
3272 // FunctionCallbackInfo::length_.
3273 // Stored as an int field; 32-bit integers within a struct on the stack are
3274 // always left-justified by the n64 ABI.
3275 __ Sw(argc, MemOperand(sp, 3 * kPointerSize));
3276
3277 // We also store the number of bytes to drop from the stack after returning
3278 // from the API function here.
3279 // Note: Unlike on other architectures, this stores the number of slots to
3280 // drop, not the number of bytes.
3281 __ Daddu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3282 __ Sd(scratch, MemOperand(sp, 4 * kPointerSize));
3283
3284 // v8::InvocationCallback's argument.
3285 DCHECK(!AreAliased(api_function_address, scratch, a0));
3286 __ Daddu(a0, sp, Operand(1 * kPointerSize));
3287
3288 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3289
3290 // There are two stack slots above the arguments we constructed on the stack.
3291 // TODO(jgruber): Document what these arguments are.
3292 static constexpr int kStackSlotsAboveFCA = 2;
3293 MemOperand return_value_operand(
3294 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3295
3296 static constexpr int kUseStackSpaceOperand = 0;
3297 MemOperand stack_space_operand(sp, 4 * kPointerSize);
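// This addresses the slot (sp + 4 * kPointerSize) where the number of stack
// slots to drop was stored above.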
3298
3299 AllowExternalCallThatCantCauseGC scope(masm);
3300 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3301 kUseStackSpaceOperand, &stack_space_operand,
3302 return_value_operand);
3303 }
3304
3305 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3306 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3307 // name below the exit frame to make GC aware of them.
3308 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3309 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3310 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3311 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3312 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3313 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3314 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3315 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3316
3317 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3318 Register holder = ApiGetterDescriptor::HolderRegister();
3319 Register callback = ApiGetterDescriptor::CallbackRegister();
3320 Register scratch = a4;
3321 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3322
3323 Register api_function_address = a2;
3324
3325 // Here and below +1 is for name() pushed after the args_ array.
3326 using PCA = PropertyCallbackArguments;
3327 __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3328 __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3329 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3330 __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3331 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3332 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3333 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3334 kPointerSize));
3335 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3336 __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3337 __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3338 // should_throw_on_error -> false
3339 DCHECK_EQ(0, Smi::zero().ptr());
3340 __ Sd(zero_reg,
3341 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3342 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3343 __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
3344
3345 // v8::PropertyCallbackInfo::args_ array and name handle.
3346 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3347
3348 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3349 __ mov(a0, sp); // a0 = Handle<Name>
3350 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
3351
3352 const int kApiStackSpace = 1;
3353 FrameScope frame_scope(masm, StackFrame::MANUAL);
3354 __ EnterExitFrame(false, kApiStackSpace);
3355
3356 // Create the v8::PropertyCallbackInfo object on the stack and initialize
3357 // its args_ field.
3358 __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
3359 __ Daddu(a1, sp, Operand(1 * kPointerSize));
3360 // a1 = v8::PropertyCallbackInfo&
3361
3362 ExternalReference thunk_ref =
3363 ExternalReference::invoke_accessor_getter_callback();
3364
3365 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3366 __ Ld(api_function_address,
3367 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3368
3369 // +3 is to skip prolog, return address and name handle.
3370 MemOperand return_value_operand(
3371 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3372 MemOperand* const kUseStackSpaceConstant = nullptr;
3373 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3374 kStackUnwindSpace, kUseStackSpaceConstant,
3375 return_value_operand);
3376 }
3377
3378 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3379 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3380 // purpose Code object) to be able to call into C functions that may trigger
3381 // GC and thus move the caller.
3382 //
3383 // DirectCEntry places the return address on the stack (updated by the GC),
3384 // making the call GC safe. The irregexp backend relies on this.
3385
3386 // Make room for arguments to fit the C calling convention. Callers use
3387 // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3388 // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3389 // after the call.
3390 __ daddiu(sp, sp, -kCArgsSlotsSize);
3391
3392 __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
3393 __ Call(t9); // Call the C++ function.
3394 __ Ld(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
3395
3396 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3397 // In case of an error the return address may point to a memory area
3398 // filled with kZapValue by the GC. Dereference the address and check for
3399 // this.
3400 __ Uld(a4, MemOperand(t9));
3401 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3402 Operand(reinterpret_cast<uint64_t>(kZapValue)));
3403 }
3404
3405 __ Jump(t9);
3406 }
3407
3408 namespace {
3409
3410 // This code tries to be close to ia32 code so that any changes can be
3411 // easily ported.
3412 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3413 DeoptimizeKind deopt_kind) {
3414 Isolate* isolate = masm->isolate();
3415
3416 // Unlike on ARM we don't save all the registers, just the useful ones.
3417 // For the rest, there are gaps on the stack, so the offsets remain the same.
3418 const int kNumberOfRegisters = Register::kNumRegisters;
3419
3420 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3421 RegList saved_regs = restored_regs | sp | ra;
3422
3423 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3424
3425 // Save all double FPU registers before messing with them.
3426 __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
3427 const RegisterConfiguration* config = RegisterConfiguration::Default();
3428 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3429 int code = config->GetAllocatableDoubleCode(i);
3430 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3431 int offset = code * kDoubleSize;
3432 __ Sdc1(fpu_reg, MemOperand(sp, offset));
3433 }
3434
3435 // Push saved_regs (needed to populate FrameDescription::registers_).
3436 // Leave gaps for other registers.
3437 __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
3438 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3439 if ((saved_regs.bits() & (1 << i)) != 0) {
3440 __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
3441 }
3442 }
3443
3444 __ li(a2,
3445 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3446 __ Sd(fp, MemOperand(a2));
3447
3448 const int kSavedRegistersAreaSize =
3449 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
3450
3451 // Get the address of the location in the code object (a2) (return
3452 // address for lazy deoptimization) and compute the fp-to-sp delta in
3453 // register a3.
3454 __ mov(a2, ra);
3455 __ Daddu(a3, sp, Operand(kSavedRegistersAreaSize));
3456
3457 __ Dsubu(a3, fp, a3);
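// a3 = fp - (sp + kSavedRegistersAreaSize): the fp-to-sp delta of the frame
// being deoptimized, excluding the registers saved above.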
3458
3459 // Allocate a new deoptimizer object.
3460 __ PrepareCallCFunction(5, a4);
3461 // Pass five arguments, according to the n64 ABI.
3462 __ mov(a0, zero_reg);
3463 Label context_check;
3464 __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3465 __ JumpIfSmi(a1, &context_check);
3466 __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3467 __ bind(&context_check);
3468 __ li(a1, Operand(static_cast<int>(deopt_kind)));
3469 // a2: code address or 0 already loaded.
3470 // a3: already has fp-to-sp delta.
3471 __ li(a4, ExternalReference::isolate_address(isolate));
3472
3473 // Call Deoptimizer::New().
3474 {
3475 AllowExternalCallThatCantCauseGC scope(masm);
3476 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3477 }
3478
3479 // Preserve "deoptimizer" object in register v0 and get the input
3480 // frame descriptor pointer to a1 (deoptimizer->input_);
3481 // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3482 __ mov(a0, v0);
3483 __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
3484
3485 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3486 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3487 for (int i = 0; i < kNumberOfRegisters; i++) {
3488 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3489 if ((saved_regs.bits() & (1 << i)) != 0) {
3490 __ Ld(a2, MemOperand(sp, i * kPointerSize));
3491 __ Sd(a2, MemOperand(a1, offset));
3492 } else if (FLAG_debug_code) {
3493 __ li(a2, kDebugZapValue);
3494 __ Sd(a2, MemOperand(a1, offset));
3495 }
3496 }
3497
3498 int double_regs_offset = FrameDescription::double_registers_offset();
3499 // Copy FPU registers to
3500 // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3501 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3502 int code = config->GetAllocatableDoubleCode(i);
3503 int dst_offset = code * kDoubleSize + double_regs_offset;
3504 int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
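// Skip over the GP register area (pushed below the doubles) to reach the
// saved FPU registers.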
3505 __ Ldc1(f0, MemOperand(sp, src_offset));
3506 __ Sdc1(f0, MemOperand(a1, dst_offset));
3507 }
3508
3509 // Remove the saved registers from the stack.
3510 __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
3511
3512 // Compute a pointer to the unwinding limit in register a2; that is
3513 // the first stack slot not part of the input frame.
3514 __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3515 __ Daddu(a2, a2, sp);
3516
3517 // Unwind the stack down to - but not including - the unwinding
3518 // limit and copy the contents of the activation frame to the input
3519 // frame description.
3520 __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
3521 Label pop_loop;
3522 Label pop_loop_header;
3523 __ BranchShort(&pop_loop_header);
3524 __ bind(&pop_loop);
3525 __ pop(a4);
3526 __ Sd(a4, MemOperand(a3, 0));
3527 __ daddiu(a3, a3, sizeof(uint64_t));
3528 __ bind(&pop_loop_header);
3529 __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3530 // Compute the output frame in the deoptimizer.
3531 __ push(a0); // Preserve deoptimizer object across call.
3532 // a0: deoptimizer object; a1: scratch.
3533 __ PrepareCallCFunction(1, a1);
3534 // Call Deoptimizer::ComputeOutputFrames().
3535 {
3536 AllowExternalCallThatCantCauseGC scope(masm);
3537 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3538 }
3539 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
3540
3541 __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3542
3543 // Replace the current (input) frame with the output frames.
3544 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3545 // Outer loop state: a4 = current "FrameDescription** output_",
3546 // a1 = one past the last FrameDescription**.
3547 __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3548 __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
3549 __ Dlsa(a1, a4, a1, kPointerSizeLog2);
3550 __ BranchShort(&outer_loop_header);
3551 __ bind(&outer_push_loop);
3552 // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3553 __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
3554 __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3555 __ BranchShort(&inner_loop_header);
3556 __ bind(&inner_push_loop);
3557 __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
3558 __ Daddu(a6, a2, Operand(a3));
3559 __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3560 __ push(a7);
3561 __ bind(&inner_loop_header);
3562 __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
3563
3564 __ Daddu(a4, a4, Operand(kPointerSize));
3565 __ bind(&outer_loop_header);
3566 __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
3567
3568 __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3569 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3570 int code = config->GetAllocatableDoubleCode(i);
3571 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3572 int src_offset = code * kDoubleSize + double_regs_offset;
3573 __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
3574 }
3575
3576 // Push pc and continuation from the last output frame.
3577 __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
3578 __ push(a6);
3579 __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3580 __ push(a6);
3581
3582 // Technically, restoring 'at' should work unless zero_reg is also restored,
3583 // but it's safer to check for this.
3584 DCHECK(!(restored_regs.has(at)));
3585 // Restore the registers from the last output frame.
3586 __ mov(at, a2);
3587 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3588 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3589 if ((restored_regs.bits() & (1 << i)) != 0) {
3590 __ Ld(ToRegister(i), MemOperand(at, offset));
3591 }
3592 }
3593
3594 __ pop(at); // Get continuation, leave pc on stack.
3595 __ pop(ra);
3596 __ Jump(at);
3597 __ stop();
3598 }
3599
3600 } // namespace
3601
3602 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3603 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3604 }
3605
3606 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3607 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3608 }
3609
3610 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3611 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3612 }
3613
3614 namespace {
3615
3616 // Restarts execution either at the current or next (in execution order)
3617 // bytecode. If there is baseline code on the shared function info, converts an
3618 // interpreter frame into a baseline frame and continues execution in baseline
3619 // code. Otherwise execution continues with bytecode.
3620 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
3621 bool next_bytecode,
3622 bool is_osr = false) {
3623 Label start;
3624 __ bind(&start);
3625
3626 // Get function from the frame.
3627 Register closure = a1;
3628 __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3629
3630 // Get the Code object from the shared function info.
3631 Register code_obj = s1;
3632 __ Ld(code_obj,
3633 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
3634 __ Ld(code_obj,
3635 FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
3636
3637 // Check if we have baseline code. For OSR entry it is safe to assume we
3638 // always have baseline code.
3639 if (!is_osr) {
3640 Label start_with_baseline;
3641 __ GetObjectType(code_obj, t2, t2);
3642 __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
3643
3644 // Start with bytecode as there is no baseline code.
3645 Builtin builtin_id = next_bytecode
3646 ? Builtin::kInterpreterEnterAtNextBytecode
3647 : Builtin::kInterpreterEnterAtBytecode;
3648 __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
3649 RelocInfo::CODE_TARGET);
3650
3651 // Start with baseline code.
3652 __ bind(&start_with_baseline);
3653 } else if (FLAG_debug_code) {
3654 __ GetObjectType(code_obj, t2, t2);
3655 __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
3656 }
3657
3658 if (FLAG_debug_code) {
3659 AssertCodeIsBaseline(masm, code_obj, t2);
3660 }
3661
3662 // Replace BytecodeOffset with the feedback vector.
3663 Register feedback_vector = a2;
3664 __ Ld(feedback_vector,
3665 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
3666 __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
3667
3668 Label install_baseline_code;
3669 // Check if feedback vector is valid. If not, call prepare for baseline to
3670 // allocate it.
3671 __ GetObjectType(feedback_vector, t2, t2);
3672 __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
3673
3674 // Save BytecodeOffset from the stack frame.
3675 __ SmiUntag(kInterpreterBytecodeOffsetRegister,
3676 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3677 // Replace BytecodeOffset with the feedback vector.
3678 __ Sd(feedback_vector,
3679 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
3680 feedback_vector = no_reg;
3681
3682 // Compute baseline pc for bytecode offset.
3683 ExternalReference get_baseline_pc_extref;
3684 if (next_bytecode || is_osr) {
3685 get_baseline_pc_extref =
3686 ExternalReference::baseline_pc_for_next_executed_bytecode();
3687 } else {
3688 get_baseline_pc_extref =
3689 ExternalReference::baseline_pc_for_bytecode_offset();
3690 }
3691
3692 Register get_baseline_pc = a3;
3693 __ li(get_baseline_pc, get_baseline_pc_extref);
3694
3695 // If the code deoptimizes during the implicit function entry stack interrupt
3696 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
3697 // not a valid bytecode offset.
3698 // TODO(pthier): Investigate if it is feasible to handle this special case
3699 // in TurboFan instead of here.
3700 Label valid_bytecode_offset, function_entry_bytecode;
3701 if (!is_osr) {
3702 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
3703 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
3704 kFunctionEntryBytecodeOffset));
3705 }
3706
3707 __ Dsubu(kInterpreterBytecodeOffsetRegister,
3708 kInterpreterBytecodeOffsetRegister,
3709 (BytecodeArray::kHeaderSize - kHeapObjectTag));
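// The saved offset is biased by the BytecodeArray header; remove that bias
// before passing the offset to the helper below.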
3710
3711 __ bind(&valid_bytecode_offset);
3712 // Get bytecode array from the stack frame.
3713 __ Ld(kInterpreterBytecodeArrayRegister,
3714 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3715 // Save the accumulator register, since it's clobbered by the below call.
3716 __ Push(kInterpreterAccumulatorRegister);
3717 {
3718 Register arg_reg_1 = a0;
3719 Register arg_reg_2 = a1;
3720 Register arg_reg_3 = a2;
3721 __ Move(arg_reg_1, code_obj);
3722 __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
3723 __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
3724 FrameScope scope(masm, StackFrame::INTERNAL);
3725 __ PrepareCallCFunction(3, 0, a4);
3726 __ CallCFunction(get_baseline_pc, 3, 0);
3727 }
3728 __ Daddu(code_obj, code_obj, kReturnRegister0);
3729 __ Pop(kInterpreterAccumulatorRegister);
3730
3731 if (is_osr) {
3732 // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
3733 // Sparkplug here.
3734 // TODO(liuyu): Remove this Ld, as on arm64, after register reallocation.
3735 __ Ld(kInterpreterBytecodeArrayRegister,
3736 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
3737 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
3738 Generate_OSREntry(masm, code_obj,
3739 Operand(Code::kHeaderSize - kHeapObjectTag));
3740 } else {
3741 __ Daddu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
3742 __ Jump(code_obj);
3743 }
3744 __ Trap(); // Unreachable.
3745
3746 if (!is_osr) {
3747 __ bind(&function_entry_bytecode);
3748 // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
3749 // address of the first bytecode.
3750 __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
3751 if (next_bytecode) {
3752 __ li(get_baseline_pc,
3753 ExternalReference::baseline_pc_for_bytecode_offset());
3754 }
3755 __ Branch(&valid_bytecode_offset);
3756 }
3757
3758 __ bind(&install_baseline_code);
3759 {
3760 FrameScope scope(masm, StackFrame::INTERNAL);
3761 __ Push(kInterpreterAccumulatorRegister);
3762 __ Push(closure);
3763 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
3764 __ Pop(kInterpreterAccumulatorRegister);
3765 }
3766 // Retry from the start after installing baseline code.
3767 __ Branch(&start);
3768 }
3769
3770 } // namespace
3771
3772 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3773 MacroAssembler* masm) {
3774 Generate_BaselineOrInterpreterEntry(masm, false);
3775 }
3776
3777 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3778 MacroAssembler* masm) {
3779 Generate_BaselineOrInterpreterEntry(masm, true);
3780 }
3781
3782 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3783 MacroAssembler* masm) {
3784 Generate_BaselineOrInterpreterEntry(masm, false, true);
3785 }
3786
3787 #undef __
3788
3789 } // namespace internal
3790 } // namespace v8
3791
3792 #endif // V8_TARGET_ARCH_MIPS64
3793