1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_MIPS64
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/debug/debug.h"
10 #include "src/deoptimizer/deoptimizer.h"
11 #include "src/execution/frame-constants.h"
12 #include "src/execution/frames.h"
13 #include "src/logging/counters.h"
14 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
15 #include "src/codegen/macro-assembler-inl.h"
16 #include "src/codegen/mips64/constants-mips64.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/heap/heap-inl.h"
19 #include "src/objects/cell.h"
20 #include "src/objects/foreign.h"
21 #include "src/objects/heap-number.h"
22 #include "src/objects/js-generator.h"
23 #include "src/objects/objects-inl.h"
24 #include "src/objects/smi.h"
25 #include "src/runtime/runtime.h"
26 #include "src/wasm/wasm-linkage.h"
27 #include "src/wasm/wasm-objects.h"
28
29 namespace v8 {
30 namespace internal {
31
32 #define __ ACCESS_MASM(masm)
33
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
35 __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
36 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
37 RelocInfo::CODE_TARGET);
38 }
39
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
42 // ----------- S t a t e -------------
43 // -- a0 : actual argument count
44 // -- a1 : target function (preserved for callee)
45 // -- a3 : new target (preserved for callee)
46 // -----------------------------------
47 {
48 FrameScope scope(masm, StackFrame::INTERNAL);
49 // Push a copy of the target function, the new target and the actual
50 // argument count.
51 // Push function as parameter to the runtime call.
52 __ SmiTag(kJavaScriptCallArgCountRegister);
53 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
54 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
55
56 __ CallRuntime(function_id, 1);
57 // Restore target function, new target and actual argument count.
58 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
59 kJavaScriptCallArgCountRegister);
60 __ SmiUntag(kJavaScriptCallArgCountRegister);
61 }
62
63 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
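  // CallRuntime returned the Code object to execute in v0. Subtracting the
  // heap-object tag and skipping the Code header yields the address of its
  // first instruction, roughly: entry = code - kHeapObjectTag + kHeaderSize.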
64 __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
65 __ Jump(a2);
66 }
67
68 namespace {
69
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
71 // ----------- S t a t e -------------
72 // -- a0 : number of arguments
73 // -- a1 : constructor function
74 // -- a3 : new target
75 // -- cp : context
76 // -- ra : return address
77 // -- sp[...]: constructor arguments
78 // -----------------------------------
79
80 // Enter a construct frame.
81 {
82 FrameScope scope(masm, StackFrame::CONSTRUCT);
83
84 // Preserve the incoming parameters on the stack.
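    // The count is kept on the stack in Smi form so the frame only contains
    // tagged values; the untagged copy in a0 is still needed below.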
85 __ SmiTag(a0);
86 __ Push(cp, a0);
87 __ SmiUntag(a0);
88
89 // Set up pointer to last argument (skip receiver).
90 __ Daddu(
91 t2, fp,
92 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
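    // fp + kCallerSPOffset addresses the receiver slot in the caller's frame;
    // the extra kSystemPointerSize skips it, so t2 points at the arguments
    // that PushArray below copies onto the new expression stack.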
93 // Copy arguments and receiver to the expression stack.
94 __ PushArray(t2, a0, t3, t0);
95 // The receiver for the builtin/api call.
96 __ PushRoot(RootIndex::kTheHoleValue);
97
98 // Call the function.
99 // a0: number of arguments (untagged)
100 // a1: constructor function
101 // a3: new target
102 __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
103
104 // Restore context from the frame.
105 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
106 // Restore smi-tagged arguments count from the frame.
107 __ Ld(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
108 // Leave construct frame.
109 }
110
111 // Remove caller arguments from the stack and return.
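  // t3 holds the Smi-tagged argument count. On MIPS64 (no pointer compression)
  // a Smi keeps its payload in the upper 32 bits, so SmiScale turns the tagged
  // count into a byte size in one shift; the extra kPointerSize below drops
  // the receiver slot.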
112 __ SmiScale(t3, t3, kPointerSizeLog2);
113 __ Daddu(sp, sp, t3);
114 __ Daddu(sp, sp, kPointerSize);
115 __ Ret();
116 }
117
118 } // namespace
119
120 // The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
122 // ----------- S t a t e -------------
123 // -- a0: number of arguments (untagged)
124 // -- a1: constructor function
125 // -- a3: new target
126 // -- cp: context
127 // -- ra: return address
128 // -- sp[...]: constructor arguments
129 // -----------------------------------
130
131 // Enter a construct frame.
132 {
133 FrameScope scope(masm, StackFrame::CONSTRUCT);
134 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
135
136 // Preserve the incoming parameters on the stack.
137 __ SmiTag(a0);
138 __ Push(cp, a0, a1);
139 __ PushRoot(RootIndex::kUndefinedValue);
140 __ Push(a3);
141
142 // ----------- S t a t e -------------
143 // -- sp[0*kPointerSize]: new target
144 // -- sp[1*kPointerSize]: padding
145 // -- a1 and sp[2*kPointerSize]: constructor function
146 // -- sp[3*kPointerSize]: number of arguments (tagged)
147 // -- sp[4*kPointerSize]: context
148 // -----------------------------------
149
150 __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
151 __ lwu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
152 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
    __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
                       &not_create_implicit_receiver);
155
156 // If not derived class constructor: Allocate the new receiver object.
157 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
158 t2, t3);
159 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
160 RelocInfo::CODE_TARGET);
161 __ Branch(&post_instantiation_deopt_entry);
162
163 // Else: use TheHoleValue as receiver for constructor call
    __ bind(&not_create_implicit_receiver);
165 __ LoadRoot(v0, RootIndex::kTheHoleValue);
166
167 // ----------- S t a t e -------------
168 // -- v0: receiver
169 // -- Slot 4 / sp[0*kPointerSize]: new target
170 // -- Slot 3 / sp[1*kPointerSize]: padding
171 // -- Slot 2 / sp[2*kPointerSize]: constructor function
172 // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
173 // -- Slot 0 / sp[4*kPointerSize]: context
174 // -----------------------------------
175 // Deoptimizer enters here.
176 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
177 masm->pc_offset());
178 __ bind(&post_instantiation_deopt_entry);
179
180 // Restore new target.
181 __ Pop(a3);
182
183 // Push the allocated receiver to the stack.
184 __ Push(v0);
185
    // We need two copies because we may have to return the original one
    // and the calling conventions dictate that the called function pops the
    // receiver. The second copy is pushed after the arguments, so we save it
    // in a6, since v0 will store the return value of CallRuntime.
190 __ mov(a6, v0);
191
192 // Set up pointer to last argument.
193 __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset +
194 kSystemPointerSize));
195
196 // ----------- S t a t e -------------
    // -- a3: new target
198 // -- sp[0*kPointerSize]: implicit receiver
199 // -- sp[1*kPointerSize]: implicit receiver
200 // -- sp[2*kPointerSize]: padding
201 // -- sp[3*kPointerSize]: constructor function
202 // -- sp[4*kPointerSize]: number of arguments (tagged)
203 // -- sp[5*kPointerSize]: context
204 // -----------------------------------
205
206 // Restore constructor function and argument count.
207 __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
208 __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
209 __ SmiUntag(a0);
210
211 Label enough_stack_space, stack_overflow;
212 __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
213 __ Branch(&enough_stack_space);
214
215 __ bind(&stack_overflow);
216 // Restore the context from the frame.
217 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
218 __ CallRuntime(Runtime::kThrowStackOverflow);
219 // Unreachable code.
220 __ break_(0xCC);
221
222 __ bind(&enough_stack_space);
223
224 // TODO(victorgomes): When the arguments adaptor is completely removed, we
225 // should get the formal parameter count and copy the arguments in its
226 // correct position (including any undefined), instead of delaying this to
227 // InvokeFunction.
228
229 // Copy arguments and receiver to the expression stack.
230 __ PushArray(t2, a0, t0, t1);
    // We need two copies because we may have to return the original one
    // and the calling conventions dictate that the called function pops the
    // receiver. The second copy is pushed after the arguments.
234 __ Push(a6);
235
236 // Call the function.
237 __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
238
239 // ----------- S t a t e -------------
240 // -- v0: constructor result
241 // -- sp[0*kPointerSize]: implicit receiver
242 // -- sp[1*kPointerSize]: padding
243 // -- sp[2*kPointerSize]: constructor function
244 // -- sp[3*kPointerSize]: number of arguments
245 // -- sp[4*kPointerSize]: context
246 // -----------------------------------
247
248 // Store offset of return address for deoptimizer.
249 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
250 masm->pc_offset());
251
252 // Restore the context from the frame.
253 __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
254
255 // If the result is an object (in the ECMA sense), we should get rid
256 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
257 // on page 74.
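    // Illustrative JavaScript semantics (not generated code):
    //   function A() { return {x: 1}; }  // object result: 'new A()' is {x: 1}
    //   function B() { return 42; }      // primitive result: receiver is used
    //   function C() {}                  // undefined result: receiver is used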
258 Label use_receiver, do_throw, leave_frame;
259
260 // If the result is undefined, we jump out to using the implicit receiver.
261 __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver);
262
263 // Otherwise we do a smi check and fall through to check if the return value
264 // is a valid receiver.
265
266 // If the result is a smi, it is *not* an object in the ECMA sense.
267 __ JumpIfSmi(v0, &use_receiver);
268
269 // If the type of the result (stored in its map) is less than
270 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
271 __ GetObjectType(v0, t2, t2);
272 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
273 __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
274 __ Branch(&use_receiver);
275
276 __ bind(&do_throw);
277 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
278
279 // Throw away the result of the constructor invocation and use the
280 // on-stack receiver as the result.
281 __ bind(&use_receiver);
282 __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
283 __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
284
285 __ bind(&leave_frame);
286 // Restore smi-tagged arguments count from the frame.
287 __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
288 // Leave construct frame.
289 }
290 // Remove caller arguments from the stack and return.
291 __ SmiScale(a4, a1, kPointerSizeLog2);
292 __ Daddu(sp, sp, a4);
293 __ Daddu(sp, sp, kPointerSize);
294 __ Ret();
295 }
296
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
298 Generate_JSBuiltinsConstructStubHelper(masm);
299 }
300
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
304 Label done;
305
306 __ GetObjectType(sfi_data, scratch1, scratch1);
307 __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
308 __ Ld(sfi_data,
309 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
310
311 __ bind(&done);
312 }
313
314 // static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
316 // ----------- S t a t e -------------
317 // -- v0 : the value to pass to the generator
318 // -- a1 : the JSGeneratorObject to resume
319 // -- ra : return address
320 // -----------------------------------
321 __ AssertGeneratorObject(a1);
322
323 // Store input value into generator object.
324 __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
325 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
326 kRAHasNotBeenSaved, kDontSaveFPRegs);
327
328 // Load suspended function and context.
329 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
330 __ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
331
332 // Flood function if we are stepping.
333 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
334 Label stepping_prepared;
335 ExternalReference debug_hook =
336 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
337 __ li(a5, debug_hook);
338 __ Lb(a5, MemOperand(a5));
339 __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
340
341 // Flood function if we need to continue stepping in the suspended generator.
342 ExternalReference debug_suspended_generator =
343 ExternalReference::debug_suspended_generator_address(masm->isolate());
344 __ li(a5, debug_suspended_generator);
345 __ Ld(a5, MemOperand(a5));
346 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
347 __ bind(&stepping_prepared);
348
349 // Check the stack for overflow. We are not trying to catch interruptions
350 // (i.e. debug break and preemption) here, so check the "real stack limit".
351 Label stack_overflow;
352 __ LoadStackLimit(kScratchReg,
353 MacroAssembler::StackLimitKind::kRealStackLimit);
354 __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
355
356 // ----------- S t a t e -------------
357 // -- a1 : the JSGeneratorObject to resume
358 // -- a4 : generator function
359 // -- cp : generator context
360 // -- ra : return address
361 // -----------------------------------
362
363 // Push holes for arguments to generator function. Since the parser forced
364 // context allocation for any variables in generators, the actual argument
365 // values have already been copied into the context and these dummy values
366 // will never be used.
367 __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
368 __ Lhu(a3,
369 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
370 __ Ld(t1,
371 FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
372 {
373 Label done_loop, loop;
374 __ bind(&loop);
375 __ Dsubu(a3, a3, Operand(1));
376 __ Branch(&done_loop, lt, a3, Operand(zero_reg));
377 __ Dlsa(kScratchReg, t1, a3, kPointerSizeLog2);
378 __ Ld(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
379 __ Push(kScratchReg);
380 __ Branch(&loop);
381 __ bind(&done_loop);
382 // Push receiver.
383 __ Ld(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
384 __ Push(kScratchReg);
385 }
386
387 // Underlying function needs to have bytecode available.
388 if (FLAG_debug_code) {
389 __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
390 __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
391 GetSharedFunctionInfoBytecode(masm, a3, a0);
392 __ GetObjectType(a3, a3, a3);
393 __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
394 Operand(BYTECODE_ARRAY_TYPE));
395 }
396
397 // Resume (Ignition/TurboFan) generator object.
398 {
399 __ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
400 __ Lhu(a0, FieldMemOperand(
401 a0, SharedFunctionInfo::kFormalParameterCountOffset));
402 // We abuse new.target both to indicate that this is a resume call and to
403 // pass in the generator object. In ordinary calls, new.target is always
404 // undefined because generator functions are non-constructable.
405 __ Move(a3, a1);
406 __ Move(a1, a4);
407 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
408 __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
409 __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
410 __ Jump(a2);
411 }
412
413 __ bind(&prepare_step_in_if_stepping);
414 {
415 FrameScope scope(masm, StackFrame::INTERNAL);
416 __ Push(a1, a4);
417 // Push hole as receiver since we do not use it for stepping.
418 __ PushRoot(RootIndex::kTheHoleValue);
419 __ CallRuntime(Runtime::kDebugOnFunctionCall);
420 __ Pop(a1);
421 }
422 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
423 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
424
425 __ bind(&prepare_step_in_suspended_generator);
426 {
427 FrameScope scope(masm, StackFrame::INTERNAL);
428 __ Push(a1);
429 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
430 __ Pop(a1);
431 }
432 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
433 __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
434
435 __ bind(&stack_overflow);
436 {
437 FrameScope scope(masm, StackFrame::INTERNAL);
438 __ CallRuntime(Runtime::kThrowStackOverflow);
439 __ break_(0xCC); // This should be unreachable.
440 }
441 }
442
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
444 FrameScope scope(masm, StackFrame::INTERNAL);
445 __ Push(a1);
446 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
447 }
448
449 // Clobbers scratch1 and scratch2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
                                        Register scratch1, Register scratch2) {
452 // Check the stack for overflow. We are not trying to catch
453 // interruptions (e.g. debug break and preemption) here, so the "real stack
454 // limit" is checked.
455 Label okay;
456 __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
  // Make scratch1 the space we have left. The stack might already be
  // overflowed here, which will cause scratch1 to become negative.
459 __ dsubu(scratch1, sp, scratch1);
460 // Check if the arguments will overflow the stack.
461 __ dsll(scratch2, argc, kPointerSizeLog2);
462 __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
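  // Rough arithmetic of the check above: scratch1 = sp - limit is the number
  // of bytes left, scratch2 = argc * kPointerSize is the number of bytes
  // needed, and we only fall through to the runtime call when there is not
  // enough room (or the subtraction already went negative).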
463
464 // Out of stack space.
465 __ CallRuntime(Runtime::kThrowStackOverflow);
466
467 __ bind(&okay);
468 }
469
470 namespace {
471
472 // Called with the native C calling convention. The corresponding function
473 // signature is either:
474 //
475 // using JSEntryFunction = GeneratedCode<Address(
476 // Address root_register_value, Address new_target, Address target,
477 // Address receiver, intptr_t argc, Address** args)>;
478 // or
479 // using JSEntryFunction = GeneratedCode<Address(
480 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtins::Name entry_trampoline) {
483 Label invoke, handler_entry, exit;
484
485 {
486 NoRootArrayScope no_root_array(masm);
487
488 // TODO(plind): unify the ABI description here.
489 // Registers:
490 // either
491 // a0: root register value
492 // a1: entry address
493 // a2: function
494 // a3: receiver
495 // a4: argc
496 // a5: argv
497 // or
498 // a0: root register value
499 // a1: microtask_queue
500 //
501 // Stack:
502 // 0 arg slots on mips64 (4 args slots on mips)
503
504 // Save callee saved registers on the stack.
505 __ MultiPush(kCalleeSaved | ra.bit());
506
507 // Save callee-saved FPU registers.
508 __ MultiPushFPU(kCalleeSavedFPU);
509 // Set up the reserved register for 0.0.
510 __ Move(kDoubleRegZero, 0.0);
511
512 // Initialize the root register.
513 // C calling convention. The first argument is passed in a0.
514 __ mov(kRootRegister, a0);
515 }
516
517 // a1: entry address
518 // a2: function
519 // a3: receiver
520 // a4: argc
521 // a5: argv
522
523 // We build an EntryFrame.
524 __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
525 __ li(s2, Operand(StackFrame::TypeToMarker(type)));
526 __ li(s3, Operand(StackFrame::TypeToMarker(type)));
527 ExternalReference c_entry_fp = ExternalReference::Create(
528 IsolateAddressId::kCEntryFPAddress, masm->isolate());
529 __ li(s4, c_entry_fp);
530 __ Ld(s4, MemOperand(s4));
531 __ Push(s1, s2, s3, s4);
532 // Set up frame pointer for the frame to be pushed.
533 __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
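  // The four words pushed above (bad frame pointer, two copies of the frame
  // type marker and the saved c_entry_fp) form the fixed part of the entry
  // frame; fp is then placed so that EntryFrameConstants-relative accesses
  // find them (fp = sp - kCallerFPOffset, see the stack diagram below).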
534
535 // Registers:
536 // either
537 // a1: entry address
538 // a2: function
539 // a3: receiver
540 // a4: argc
541 // a5: argv
542 // or
543 // a1: microtask_queue
544 //
545 // Stack:
546 // caller fp |
547 // function slot | entry frame
548 // context slot |
549 // bad fp (0xFF...F) |
550 // callee saved registers + ra
551 // [ O32: 4 args slots]
552 // args
553
554 // If this is the outermost JS call, set js_entry_sp value.
555 Label non_outermost_js;
556 ExternalReference js_entry_sp = ExternalReference::Create(
557 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
558 __ li(s1, js_entry_sp);
559 __ Ld(s2, MemOperand(s1));
560 __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
561 __ Sd(fp, MemOperand(s1));
562 __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
563 Label cont;
564 __ b(&cont);
565 __ nop(); // Branch delay slot nop.
566 __ bind(&non_outermost_js);
567 __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
568 __ bind(&cont);
569 __ push(s3);
570
571 // Jump to a faked try block that does the invoke, with a faked catch
572 // block that sets the pending exception.
573 __ jmp(&invoke);
574 __ bind(&handler_entry);
575
576 // Store the current pc as the handler offset. It's used later to create the
577 // handler table.
578 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
579
580 // Caught exception: Store result (exception) in the pending exception
581 // field in the JSEnv and return a failure sentinel. Coming in here the
582 // fp will be invalid because the PushStackHandler below sets it to 0 to
583 // signal the existence of the JSEntry frame.
584 __ li(s1, ExternalReference::Create(
585 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
586 __ Sd(v0, MemOperand(s1)); // We come back from 'invoke'. result is in v0.
587 __ LoadRoot(v0, RootIndex::kException);
588 __ b(&exit); // b exposes branch delay slot.
589 __ nop(); // Branch delay slot nop.
590
591 // Invoke: Link this frame into the handler chain.
592 __ bind(&invoke);
593 __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the jump to &invoke above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.
598 //
599 // Registers:
600 // either
601 // a0: root register value
602 // a1: entry address
603 // a2: function
604 // a3: receiver
605 // a4: argc
606 // a5: argv
607 // or
608 // a0: root register value
609 // a1: microtask_queue
610 //
611 // Stack:
612 // handler frame
613 // entry frame
614 // callee saved registers + ra
615 // [ O32: 4 args slots]
616 // args
617 //
618 // Invoke the function by calling through JS entry trampoline builtin and
619 // pop the faked function when we return.
620
621 Handle<Code> trampoline_code =
622 masm->isolate()->builtins()->builtin_handle(entry_trampoline);
623 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
624
625 // Unlink this frame from the handler chain.
626 __ PopStackHandler();
627
628 __ bind(&exit); // v0 holds result
629 // Check if the current stack frame is marked as the outermost JS frame.
630 Label non_outermost_js_2;
631 __ pop(a5);
632 __ Branch(&non_outermost_js_2, ne, a5,
633 Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
634 __ li(a5, js_entry_sp);
635 __ Sd(zero_reg, MemOperand(a5));
636 __ bind(&non_outermost_js_2);
637
638 // Restore the top frame descriptors from the stack.
639 __ pop(a5);
640 __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
641 masm->isolate()));
642 __ Sd(a5, MemOperand(a4));
643
644 // Reset the stack to the callee saved registers.
645 __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
646
647 // Restore callee-saved fpu registers.
648 __ MultiPopFPU(kCalleeSavedFPU);
649
650 // Restore callee saved registers from the stack.
651 __ MultiPop(kCalleeSaved | ra.bit());
652 // Return.
653 __ Jump(ra);
654 }
655
656 } // namespace
657
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
659 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
660 Builtins::kJSEntryTrampoline);
661 }
662
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
664 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
665 Builtins::kJSConstructEntryTrampoline);
666 }
667
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
669 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
670 Builtins::kRunMicrotasksTrampoline);
671 }
672
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
675 // ----------- S t a t e -------------
676 // -- a1: new.target
677 // -- a2: function
678 // -- a3: receiver_pointer
679 // -- a4: argc
680 // -- a5: argv
681 // -----------------------------------
682
683 // Enter an internal frame.
684 {
685 FrameScope scope(masm, StackFrame::INTERNAL);
686
687 // Setup the context (we need to use the caller context from the isolate).
688 ExternalReference context_address = ExternalReference::Create(
689 IsolateAddressId::kContextAddress, masm->isolate());
690 __ li(cp, context_address);
691 __ Ld(cp, MemOperand(cp));
692
693 // Push the function onto the stack.
694 __ Push(a2);
695
696 // Check if we have enough stack space to push all arguments.
697 __ daddiu(a6, a4, 1);
698 Generate_CheckStackOverflow(masm, a6, a0, s2);
699
700 // Copy arguments to the stack in a loop.
701 // a4: argc
702 // a5: argv, i.e. points to first arg
703 Label loop, entry;
704 __ Dlsa(s1, a5, a4, kPointerSizeLog2);
705 __ b(&entry);
706 __ nop(); // Branch delay slot nop.
707 // s1 points past last arg.
708 __ bind(&loop);
709 __ daddiu(s1, s1, -kPointerSize);
710 __ Ld(s2, MemOperand(s1)); // Read next parameter.
711 __ Ld(s2, MemOperand(s2)); // Dereference handle.
712 __ push(s2); // Push parameter.
713 __ bind(&entry);
714 __ Branch(&loop, ne, a5, Operand(s1));
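    // Each argv slot holds a handle (an Address*), hence the double load in
    // the loop above: the first Ld reads the handle out of argv, the second
    // dereferences it to get the actual argument. Arguments are pushed from
    // last to first.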
715
    // Push the receiver.
717 __ Push(a3);
718
719 // a0: argc
720 // a1: function
721 // a3: new.target
722 __ mov(a3, a1);
723 __ mov(a1, a2);
724 __ mov(a0, a4);
725
726 // Initialize all JavaScript callee-saved registers, since they will be seen
727 // by the garbage collector as part of handlers.
728 __ LoadRoot(a4, RootIndex::kUndefinedValue);
729 __ mov(a5, a4);
730 __ mov(s1, a4);
731 __ mov(s2, a4);
732 __ mov(s3, a4);
733 __ mov(s4, a4);
734 __ mov(s5, a4);
735 // s6 holds the root address. Do not clobber.
736 // s7 is cp. Do not init.
737
738 // Invoke the code.
739 Handle<Code> builtin = is_construct
740 ? BUILTIN_CODE(masm->isolate(), Construct)
741 : masm->isolate()->builtins()->Call();
742 __ Call(builtin, RelocInfo::CODE_TARGET);
743
744 // Leave internal frame.
745 }
746 __ Jump(ra);
747 }
748
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
750 Generate_JSEntryTrampolineHelper(masm, false);
751 }
752
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
754 Generate_JSEntryTrampolineHelper(masm, true);
755 }
756
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
758 // a1: microtask_queue
759 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
760 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
761 }
762
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register scratch1,
                                                Register scratch2) {
768 // Store code entry in the closure.
769 __ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
770 __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
771 __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
772 kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
773 OMIT_SMI_CHECK);
774 }
775
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
778 Register params_size = scratch1;
779
780 // Get the size of the formal parameters + receiver (in bytes).
781 __ Ld(params_size,
782 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
783 __ Lw(params_size,
784 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
785
786 #ifdef V8_NO_ARGUMENTS_ADAPTOR
787 Register actual_params_size = scratch2;
788 // Compute the size of the actual parameters + receiver (in bytes).
789 __ Ld(actual_params_size,
790 MemOperand(fp, StandardFrameConstants::kArgCOffset));
791 __ dsll(actual_params_size, actual_params_size, kPointerSizeLog2);
792 __ Daddu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
793
794 // If actual is bigger than formal, then we should use it to free up the stack
795 // arguments.
796 __ slt(t2, params_size, actual_params_size);
797 __ movn(params_size, actual_params_size, t2);
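  // The slt/movn pair above is a branchless max: t2 = (formal < actual), and
  // movn replaces params_size with actual_params_size only when t2 is
  // non-zero, so whichever size is larger is the one dropped below.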
798 #endif
799
800 // Leave the frame (also dropping the register file).
801 __ LeaveFrame(StackFrame::INTERPRETED);
802
803 // Drop receiver + arguments.
804 __ Daddu(sp, sp, params_size);
805 }
806
807 // Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
                                          Register actual_marker,
                                          OptimizationMarker expected_marker,
                                          Runtime::FunctionId function_id) {
812 Label no_match;
813 __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
814 GenerateTailCallToReturnedCode(masm, function_id);
815 __ bind(&no_match);
816 }
817
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch1, Register scratch2) {
821 // ----------- S t a t e -------------
822 // -- a0 : actual argument count
823 // -- a3 : new target (preserved for callee if needed, and caller)
824 // -- a1 : target function (preserved for callee if needed, and caller)
825 // -----------------------------------
826 DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
827
828 Register closure = a1;
829 Label heal_optimized_code_slot;
830
831 // If the optimized code is cleared, go to runtime to update the optimization
832 // marker field.
833 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
834 &heal_optimized_code_slot);
835
836 // Check if the optimized code is marked for deopt. If it is, call the
837 // runtime to clear it.
838 __ Ld(a5,
839 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
840 __ Lw(a5, FieldMemOperand(a5, CodeDataContainer::kKindSpecificFlagsOffset));
841 __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
842 __ Branch(&heal_optimized_code_slot, ne, a5, Operand(zero_reg));
843
844 // Optimized code is good, get it into the closure and link the closure into
845 // the optimized functions list, then tail call the optimized code.
846 // The feedback vector is no longer used, so re-use it as a scratch
847 // register.
848 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
849 scratch1, scratch2);
850
851 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
852 __ Daddu(a2, optimized_code_entry,
853 Operand(Code::kHeaderSize - kHeapObjectTag));
854 __ Jump(a2);
855
856 // Optimized code slot contains deoptimized code or code is cleared and
857 // optimized code marker isn't updated. Evict the code, update the marker
858 // and re-enter the closure's code.
859 __ bind(&heal_optimized_code_slot);
860 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
861 }
862
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register optimization_marker) {
865 // ----------- S t a t e -------------
866 // -- a0 : actual argument count
867 // -- a3 : new target (preserved for callee if needed, and caller)
868 // -- a1 : target function (preserved for callee if needed, and caller)
869 // -- feedback vector (preserved for caller if needed)
  // -- optimization_marker : an int32 containing a non-zero optimization
  //                          marker.
872 // -----------------------------------
873 DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
874
875 // TODO(v8:8394): The logging of first execution will break if
876 // feedback vectors are not allocated. We need to find a different way of
877 // logging these events if required.
878 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
879 OptimizationMarker::kLogFirstExecution,
880 Runtime::kFunctionFirstExecution);
881 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
882 OptimizationMarker::kCompileOptimized,
883 Runtime::kCompileOptimized_NotConcurrent);
884 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
885 OptimizationMarker::kCompileOptimizedConcurrent,
886 Runtime::kCompileOptimized_Concurrent);
887
888 // Marker should be one of LogFirstExecution / CompileOptimized /
889 // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
890 // here.
891 if (FLAG_debug_code) {
892 __ stop();
893 }
894 }
895
896 // Advance the current bytecode offset. This simulates what all bytecode
897 // handlers do upon completion of the underlying operation. Will bail out to a
898 // label if the bytecode (without prefix) is a return bytecode. Will not advance
899 // the bytecode offset if the current bytecode is a JumpLoop, instead just
900 // re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Register scratch3,
                                          Label* if_return) {
907 Register bytecode_size_table = scratch1;
908
  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
  // will restore the original bytecode offset. To simplify the code, we keep
  // a backup of it in |original_bytecode_offset|.
913 Register original_bytecode_offset = scratch3;
914 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
915 bytecode_size_table, original_bytecode_offset));
916 __ Move(original_bytecode_offset, bytecode_offset);
917 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
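  // As used below, the size table appears to consist of three consecutive
  // int32 arrays of kBytecodeCount entries each (single, wide and extra-wide
  // operand scaling); bumping the table pointer by kIntSize * kBytecodeCount
  // selects the next scaling.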
918
919 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
920 Label process_bytecode, extra_wide;
921 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
922 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
923 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
924 STATIC_ASSERT(3 ==
925 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
926 __ Branch(&process_bytecode, hi, bytecode, Operand(3));
927 __ And(scratch2, bytecode, Operand(1));
928 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
929
930 // Load the next bytecode and update table to the wide scaled table.
931 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
932 __ Daddu(scratch2, bytecode_array, bytecode_offset);
933 __ Lbu(bytecode, MemOperand(scratch2));
934 __ Daddu(bytecode_size_table, bytecode_size_table,
935 Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
936 __ jmp(&process_bytecode);
937
938 __ bind(&extra_wide);
939 // Load the next bytecode and update table to the extra wide scaled table.
940 __ Daddu(bytecode_offset, bytecode_offset, Operand(1));
941 __ Daddu(scratch2, bytecode_array, bytecode_offset);
942 __ Lbu(bytecode, MemOperand(scratch2));
943 __ Daddu(bytecode_size_table, bytecode_size_table,
944 Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
945
946 __ bind(&process_bytecode);
947
948 // Bailout to the return label if this is a return bytecode.
949 #define JUMP_IF_EQUAL(NAME) \
950 __ Branch(if_return, eq, bytecode, \
951 Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
952 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
953 #undef JUMP_IF_EQUAL
954
955 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
956 // of the loop.
957 Label end, not_jump_loop;
  __ Branch(&not_jump_loop, ne, bytecode,
            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
960 // We need to restore the original bytecode_offset since we might have
961 // increased it to skip the wide / extra-wide prefix bytecode.
962 __ Move(bytecode_offset, original_bytecode_offset);
963 __ jmp(&end);
964
  __ bind(&not_jump_loop);
966 // Otherwise, load the size of the current bytecode and advance the offset.
967 __ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
968 __ Lw(scratch2, MemOperand(scratch2));
969 __ Daddu(bytecode_offset, bytecode_offset, scratch2);
970
971 __ bind(&end);
972 }
973
974 // Generate code for entering a JS function with the interpreter.
975 // On entry to the function the receiver and arguments have been pushed on the
976 // stack left to right.
977 //
978 // The live registers are:
979 // o a0 : actual argument count (not including the receiver)
980 // o a1: the JS function object being called.
981 // o a3: the incoming new target or generator object
982 // o cp: our context
983 // o fp: the caller's frame pointer
984 // o sp: stack pointer
985 // o ra: return address
986 //
987 // The function builds an interpreter frame. See InterpreterFrameConstants in
988 // frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
990 Register closure = a1;
991 Register feedback_vector = a2;
992
993 // Get the bytecode array from the function object and load it into
994 // kInterpreterBytecodeArrayRegister.
995 __ Ld(kScratchReg,
996 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
997 __ Ld(kInterpreterBytecodeArrayRegister,
998 FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
999 GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
1000 kScratchReg);
1001
1002 // The bytecode array could have been flushed from the shared function info,
1003 // if so, call into CompileLazy.
1004 Label compile_lazy;
1005 __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
1006 __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
1007
1008 // Load the feedback vector from the closure.
1009 __ Ld(feedback_vector,
1010 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1011 __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1012
1013 Label push_stack_frame;
1014 // Check if feedback vector is valid. If valid, check for optimized code
1015 // and update invocation count. Otherwise, setup the stack frame.
1016 __ Ld(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1017 __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
1018 __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
1019
1020 // Read off the optimization state in the feedback vector, and if there
1021 // is optimized code or an optimization marker, call that instead.
1022 Register optimization_state = a4;
1023 __ Lw(optimization_state,
1024 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1025
  // Check if the optimized code slot is not empty or has an optimization
  // marker.
1027 Label has_optimized_code_or_marker;
1028
1029 __ andi(t0, optimization_state,
1030 FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
1031 __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
1032
1033 Label not_optimized;
  __ bind(&not_optimized);
1035
1036 // Increment invocation count for the function.
1037 __ Lw(a4, FieldMemOperand(feedback_vector,
1038 FeedbackVector::kInvocationCountOffset));
1039 __ Addu(a4, a4, Operand(1));
1040 __ Sw(a4, FieldMemOperand(feedback_vector,
1041 FeedbackVector::kInvocationCountOffset));
1042
1043 // Open a frame scope to indicate that there is a frame on the stack. The
1044 // MANUAL indicates that the scope shouldn't actually generate code to set up
1045 // the frame (that is done below).
1046 __ bind(&push_stack_frame);
1047 FrameScope frame_scope(masm, StackFrame::MANUAL);
1048 __ PushStandardFrame(closure);
1049
  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
  // 8-bit fields next to each other, so we can optimize this by writing a
  // single 16-bit store. These static asserts guard that assumption.
1053 STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1054 BytecodeArray::kOsrNestingLevelOffset + kCharSize);
1055 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1056 __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1057 BytecodeArray::kOsrNestingLevelOffset));
1058
1059 // Load initial bytecode offset.
1060 __ li(kInterpreterBytecodeOffsetRegister,
1061 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1062
1063 // Push bytecode array and Smi tagged bytecode array offset.
1064 __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
1065 __ Push(kInterpreterBytecodeArrayRegister, a4);
1066
1067 // Allocate the local and temporary register file on the stack.
1068 Label stack_overflow;
1069 {
1070 // Load frame size (word) from the BytecodeArray object.
1071 __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1072 BytecodeArray::kFrameSizeOffset));
1073
1074 // Do a stack check to ensure we don't go over the limit.
1075 __ Dsubu(a5, sp, Operand(a4));
1076 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1077 __ Branch(&stack_overflow, lo, a5, Operand(a2));
1078
1079 // If ok, push undefined as the initial value for all register file entries.
1080 Label loop_header;
1081 Label loop_check;
1082 __ LoadRoot(a5, RootIndex::kUndefinedValue);
1083 __ Branch(&loop_check);
1084 __ bind(&loop_header);
1085 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1086 __ push(a5);
1087 // Continue loop if not done.
1088 __ bind(&loop_check);
1089 __ Dsubu(a4, a4, Operand(kPointerSize));
1090 __ Branch(&loop_header, ge, a4, Operand(zero_reg));
1091 }
1092
  // If the bytecode array has a valid incoming new target or generator object
  // register, initialize it with the incoming value, which was passed in a3.
1095 Label no_incoming_new_target_or_generator_register;
1096 __ Lw(a5, FieldMemOperand(
1097 kInterpreterBytecodeArrayRegister,
1098 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1099 __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
1100 Operand(zero_reg));
1101 __ Dlsa(a5, fp, a5, kPointerSizeLog2);
1102 __ Sd(a3, MemOperand(a5));
1103 __ bind(&no_incoming_new_target_or_generator_register);
1104
1105 // Perform interrupt stack check.
1106 // TODO(solanes): Merge with the real stack limit check above.
1107 Label stack_check_interrupt, after_stack_check_interrupt;
1108 __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1109 __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
1110 __ bind(&after_stack_check_interrupt);
1111
1112 // Load accumulator as undefined.
1113 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1114
1115 // Load the dispatch table into a register and dispatch to the bytecode
1116 // handler at the current bytecode offset.
1117 Label do_dispatch;
1118 __ bind(&do_dispatch);
1119 __ li(kInterpreterDispatchTableRegister,
1120 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
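  // Dispatch: read the current bytecode byte, scale it by the pointer size to
  // index the dispatch table, and call that handler's code entry, roughly
  //   handler = dispatch_table[bytecode_array[bytecode_offset]].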
1121 __ Daddu(a0, kInterpreterBytecodeArrayRegister,
1122 kInterpreterBytecodeOffsetRegister);
1123 __ Lbu(a7, MemOperand(a0));
1124 __ Dlsa(kScratchReg, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
1125 __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
1126 __ Call(kJavaScriptCallCodeStartRegister);
1127 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1128
1129 // Any returns to the entry trampoline are either due to the return bytecode
1130 // or the interpreter tail calling a builtin and then a dispatch.
1131
1132 // Get bytecode array and bytecode offset from the stack frame.
1133 __ Ld(kInterpreterBytecodeArrayRegister,
1134 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1135 __ Ld(kInterpreterBytecodeOffsetRegister,
1136 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1137 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1138
1139 // Either return, or advance to the next bytecode and dispatch.
1140 Label do_return;
1141 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1142 kInterpreterBytecodeOffsetRegister);
1143 __ Lbu(a1, MemOperand(a1));
1144 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1145 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1146 a4, &do_return);
1147 __ jmp(&do_dispatch);
1148
1149 __ bind(&do_return);
1150 // The return value is in v0.
1151 LeaveInterpreterFrame(masm, t0, t1);
1152 __ Jump(ra);
1153
1154 __ bind(&stack_check_interrupt);
1155 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1156 // for the call to the StackGuard.
1157 __ li(kInterpreterBytecodeOffsetRegister,
1158 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1159 kFunctionEntryBytecodeOffset)));
1160 __ Sd(kInterpreterBytecodeOffsetRegister,
1161 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1162 __ CallRuntime(Runtime::kStackGuard);
1163
1164 // After the call, restore the bytecode array, bytecode offset and accumulator
1165 // registers again. Also, restore the bytecode offset in the stack to its
1166 // previous value.
1167 __ Ld(kInterpreterBytecodeArrayRegister,
1168 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1169 __ li(kInterpreterBytecodeOffsetRegister,
1170 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1171 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1172
1173 __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
1174 __ Sd(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1175
1176 __ jmp(&after_stack_check_interrupt);
1177
1178 __ bind(&has_optimized_code_or_marker);
1179 Label maybe_has_optimized_code;
1180 // Check if optimized code marker is available
1181 __ andi(t0, optimization_state,
1182 FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
1183 __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
1184
1185 Register optimization_marker = optimization_state;
1186 __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
1187 MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
1188 // Fall through if there's no runnable optimized code.
  __ jmp(&not_optimized);
1190
1191 __ bind(&maybe_has_optimized_code);
1192 Register optimized_code_entry = optimization_state;
1193 __ Ld(optimization_marker,
1194 FieldMemOperand(feedback_vector,
1195 FeedbackVector::kMaybeOptimizedCodeOffset));
1196
1197 TailCallOptimizedCodeSlot(masm, optimized_code_entry, t3, a5);
1198
1199 __ bind(&compile_lazy);
1200 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1201 // Unreachable code.
1202 __ break_(0xCC);
1203
1204 __ bind(&stack_overflow);
1205 __ CallRuntime(Runtime::kThrowStackOverflow);
1206 // Unreachable code.
1207 __ break_(0xCC);
1208 }
1209
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
                                         Register num_args,
                                         Register start_address,
                                         Register scratch,
                                         Register scratch2) {
1215 // Find the address of the last argument.
1216 __ Dsubu(scratch, num_args, Operand(1));
1217 __ dsll(scratch, scratch, kPointerSizeLog2);
1218 __ Dsubu(start_address, start_address, scratch);
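  // start_address points at the first argument and the interpreter's argument
  // slots grow towards lower addresses, so the last argument sits at
  // start_address - (num_args - 1) * kPointerSize, which the subtraction above
  // computes.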
1219
1220 // Push the arguments.
1221 __ PushArray(start_address, num_args, scratch, scratch2,
1222 TurboAssembler::PushArrayOrder::kReverse);
1223 }
1224
1225 // static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
    InterpreterPushArgsMode mode) {
1229 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1230 // ----------- S t a t e -------------
1231 // -- a0 : the number of arguments (not including the receiver)
1232 // -- a2 : the address of the first argument to be pushed. Subsequent
1233 // arguments should be consecutive above this, in the same order as
1234 // they are to be pushed onto the stack.
1235 // -- a1 : the target to call (can be any Object).
1236 // -----------------------------------
1237 Label stack_overflow;
1238 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1239 // The spread argument should not be pushed.
1240 __ Dsubu(a0, a0, Operand(1));
1241 }
1242
1243 __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
1244
1245 __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
1246
1247 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1248 // Don't copy receiver.
1249 __ mov(a3, a0);
1250 }
1251
1252 // This function modifies a2, t0 and a4.
1253 Generate_InterpreterPushArgs(masm, a3, a2, a4, t0);
1254
1255 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1256 __ PushRoot(RootIndex::kUndefinedValue);
1257 }
1258
1259 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1260 // Pass the spread in the register a2.
    // a2 already points to the penultimate argument; the spread
    // is below that.
1263 __ Ld(a2, MemOperand(a2, -kSystemPointerSize));
1264 }
1265
1266 // Call the target.
1267 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1268 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1269 RelocInfo::CODE_TARGET);
1270 } else {
1271 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1272 RelocInfo::CODE_TARGET);
1273 }
1274
1275 __ bind(&stack_overflow);
1276 {
1277 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1278 // Unreachable code.
1279 __ break_(0xCC);
1280 }
1281 }
1282
1283 // static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
    MacroAssembler* masm, InterpreterPushArgsMode mode) {
1286 // ----------- S t a t e -------------
1287 // -- a0 : argument count (not including receiver)
1288 // -- a3 : new target
1289 // -- a1 : constructor to call
1290 // -- a2 : allocation site feedback if available, undefined otherwise.
1291 // -- a4 : address of the first argument
1292 // -----------------------------------
1293 Label stack_overflow;
1294 __ daddiu(a6, a0, 1);
1295 __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
1296
1297 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1298 // The spread argument should not be pushed.
1299 __ Dsubu(a0, a0, Operand(1));
1300 }
1301
  // Push the arguments. This function modifies t0, a4 and a5.
1303 Generate_InterpreterPushArgs(masm, a0, a4, a5, t0);
1304
1305 // Push a slot for the receiver.
1306 __ push(zero_reg);
1307
1308 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1309 // Pass the spread in the register a2.
1310 // a4 already points to the penultimate argument, the spread
1311 // lies in the next interpreter register.
1312 __ Ld(a2, MemOperand(a4, -kSystemPointerSize));
1313 } else {
1314 __ AssertUndefinedOrAllocationSite(a2, t0);
1315 }
1316
1317 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1318 __ AssertFunction(a1);
1319
1320 // Tail call to the function-specific construct stub (still in the caller
1321 // context at this point).
1322 __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1323 RelocInfo::CODE_TARGET);
1324 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1325 // Call the constructor with a0, a1, and a3 unmodified.
1326 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1327 RelocInfo::CODE_TARGET);
1328 } else {
1329 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1330 // Call the constructor with a0, a1, and a3 unmodified.
1331 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1332 }
1333
1334 __ bind(&stack_overflow);
1335 {
1336 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1337 // Unreachable code.
1338 __ break_(0xCC);
1339 }
1340 }
1341
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1343 // Set the return address to the correct point in the interpreter entry
1344 // trampoline.
1345 Label builtin_trampoline, trampoline_loaded;
1346 Smi interpreter_entry_return_pc_offset(
1347 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1348 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
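  // This offset was recorded by the interpreter entry trampoline itself (see
  // the SetInterpreterEntryReturnPCOffset call there), so adding it to the
  // trampoline's start address reconstructs the return point right after the
  // bytecode handler call.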
1349
1350 // If the SFI function_data is an InterpreterData, the function will have a
1351 // custom copy of the interpreter entry trampoline for profiling. If so,
1352 // get the custom trampoline, otherwise grab the entry address of the global
1353 // trampoline.
1354 __ Ld(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1355 __ Ld(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1356 __ Ld(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1357 __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1358 kInterpreterDispatchTableRegister);
1359 __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1360 Operand(INTERPRETER_DATA_TYPE));
1361
1362 __ Ld(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1363 __ Daddu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1364 __ Branch(&trampoline_loaded);
1365
1366 __ bind(&builtin_trampoline);
1367 __ li(t0, ExternalReference::
1368 address_of_interpreter_entry_trampoline_instruction_start(
1369 masm->isolate()));
1370 __ Ld(t0, MemOperand(t0));
1371
1372 __ bind(&trampoline_loaded);
1373 __ Daddu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
1374
1375 // Initialize the dispatch table register.
1376 __ li(kInterpreterDispatchTableRegister,
1377 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1378
1379 // Get the bytecode array pointer from the frame.
1380 __ Ld(kInterpreterBytecodeArrayRegister,
1381 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1382
1383 if (FLAG_debug_code) {
1384 // Check function data field is actually a BytecodeArray object.
1385 __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1386 __ Assert(ne,
1387 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1388 kScratchReg, Operand(zero_reg));
1389 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1390 __ Assert(eq,
1391 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1392 a1, Operand(BYTECODE_ARRAY_TYPE));
1393 }
1394
1395 // Get the target bytecode offset from the frame.
1396 __ SmiUntag(kInterpreterBytecodeOffsetRegister,
1397 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1398
1399 if (FLAG_debug_code) {
1400 Label okay;
1401 __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1402 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1403 // Unreachable code.
1404 __ break_(0xCC);
1405 __ bind(&okay);
1406 }
1407
1408 // Dispatch to the target bytecode.
1409 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1410 kInterpreterBytecodeOffsetRegister);
1411 __ Lbu(a7, MemOperand(a1));
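// a7 now holds the current bytecode. The next two instructions compute the
// address of its handler slot in the dispatch table (table base plus
// bytecode * kPointerSize) and load the handler's code start address.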
1412 __ Dlsa(a1, kInterpreterDispatchTableRegister, a7, kPointerSizeLog2);
1413 __ Ld(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1414 __ Jump(kJavaScriptCallCodeStartRegister);
1415 }
1416
1417 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1418 // Advance the current bytecode offset stored within the given interpreter
1419 // stack frame. This simulates what all bytecode handlers do upon completion
1420 // of the underlying operation.
1421 __ Ld(kInterpreterBytecodeArrayRegister,
1422 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1423 __ Ld(kInterpreterBytecodeOffsetRegister,
1424 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1425 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1426
1427 Label enter_bytecode, function_entry_bytecode;
1428 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1429 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1430 kFunctionEntryBytecodeOffset));
1431
1432 // Load the current bytecode.
1433 __ Daddu(a1, kInterpreterBytecodeArrayRegister,
1434 kInterpreterBytecodeOffsetRegister);
1435 __ Lbu(a1, MemOperand(a1));
1436
1437 // Advance to the next bytecode.
1438 Label if_return;
1439 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1440 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1441 a4, &if_return);
1442
1443 __ bind(&enter_bytecode);
1444 // Convert the new bytecode offset to a Smi and save it in the stack frame.
1445 __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1446 __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1447
1448 Generate_InterpreterEnterBytecode(masm);
1449
1450 __ bind(&function_entry_bytecode);
1451 // If the code deoptimizes during the implicit function entry stack interrupt
1452 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1453 // not a valid bytecode offset. Detect this case and advance to the first
1454 // actual bytecode.
1455 __ li(kInterpreterBytecodeOffsetRegister,
1456 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1457 __ Branch(&enter_bytecode);
1458
1459 // We should never take the if_return path.
1460 __ bind(&if_return);
1461 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1462 }
1463
1464 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1465 Generate_InterpreterEnterBytecode(masm);
1466 }
1467
1468 namespace {
1469 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1470 bool java_script_builtin,
1471 bool with_result) {
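// Overview of this helper: the deoptimizer has materialized a builtin
// continuation frame holding all allocatable registers, an optional
// return-value slot, and the builtin index as a Smi. The code below
// optionally plants the lazy-deopt return value, restores the saved
// registers, re-establishes fp/ra, and tail-calls the continuation builtin
// looked up via the builtin table.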
1472 const RegisterConfiguration* config(RegisterConfiguration::Default());
1473 int allocatable_register_count = config->num_allocatable_general_registers();
1474 Register scratch = t3;
1475 if (with_result) {
1476 if (java_script_builtin) {
1477 __ mov(scratch, v0);
1478 } else {
1479 // Overwrite the hole inserted by the deoptimizer with the return value from
1480 // the LAZY deopt point.
1481 __ Sd(v0,
1482 MemOperand(
1483 sp, config->num_allocatable_general_registers() * kPointerSize +
1484 BuiltinContinuationFrameConstants::kFixedFrameSize));
1485 }
1486 }
1487 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1488 int code = config->GetAllocatableGeneralCode(i);
1489 __ Pop(Register::from_code(code));
1490 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1491 __ SmiUntag(Register::from_code(code));
1492 }
1493 }
1494
1495 if (with_result && java_script_builtin) {
1496 // Overwrite the hole inserted by the deoptimizer with the return value from
1497 // the LAZY deopt point. a0 contains the arguments count; the return value
1498 // from LAZY is always the last argument.
1499 __ Daddu(a0, a0,
1500 Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1501 __ Dlsa(t0, sp, a0, kSystemPointerSizeLog2);
1502 __ Sd(scratch, MemOperand(t0));
1503 // Recover arguments count.
1504 __ Dsubu(a0, a0,
1505 Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1506 }
1507
1508 __ Ld(fp, MemOperand(
1509 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1510 // Load builtin index (stored as a Smi) and use it to get the builtin start
1511 // address from the builtins table.
1512 __ Pop(t0);
1513 __ Daddu(sp, sp,
1514 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1515 __ Pop(ra);
1516 __ LoadEntryFromBuiltinIndex(t0);
1517 __ Jump(t0);
1518 }
1519 } // namespace
1520
1521 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1522 Generate_ContinueToBuiltinHelper(masm, false, false);
1523 }
1524
1525 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1526 MacroAssembler* masm) {
1527 Generate_ContinueToBuiltinHelper(masm, false, true);
1528 }
1529
1530 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1531 Generate_ContinueToBuiltinHelper(masm, true, false);
1532 }
1533
1534 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1535 MacroAssembler* masm) {
1536 Generate_ContinueToBuiltinHelper(masm, true, true);
1537 }
1538
1539 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1540 {
1541 FrameScope scope(masm, StackFrame::INTERNAL);
1542 __ CallRuntime(Runtime::kNotifyDeoptimized);
1543 }
1544
1545 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
1546 __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
1547 __ Ret(USE_DELAY_SLOT);
1548 // Safe to fill the delay slot: Daddu will emit exactly one instruction.
1549 __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state.
1550 }
1551
1552 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1553 {
1554 FrameScope scope(masm, StackFrame::INTERNAL);
1555 __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1556 }
1557
1558 // If the code object is null, just return to the caller.
1559 __ Ret(eq, v0, Operand(Smi::zero()));
1560
1561 // Drop the handler frame that is sitting on top of the actual
1562 // JavaScript frame. This is the case when OSR is triggered from bytecode.
1563 __ LeaveFrame(StackFrame::STUB);
1564
1565 // Load deoptimization data from the code object.
1566 // <deopt_data> = <code>[#deoptimization_data_offset]
1567 __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
1568
1569 // Load the OSR entrypoint offset from the deoptimization data.
1570 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1571 __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1572 DeoptimizationData::kOsrPcOffsetIndex) -
1573 kHeapObjectTag));
1574
1575 // Compute the target address = code_obj + header_size + osr_offset
1576 // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1577 __ Daddu(v0, v0, a1);
1578 __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
1579
1580 // And "return" to the OSR entry point of the function.
1581 __ Ret();
1582 }
1583
1584 // static
1585 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1586 // ----------- S t a t e -------------
1587 // -- a0 : argc
1588 // -- sp[0] : receiver
1589 // -- sp[8] : thisArg
1590 // -- sp[16] : argArray
1591 // -----------------------------------
1592
1593 Register argc = a0;
1594 Register arg_array = a2;
1595 Register receiver = a1;
1596 Register this_arg = a5;
1597 Register undefined_value = a3;
1598 Register scratch = a4;
1599
1600 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1601
1602 // 1. Load receiver into a1, argArray into a2 (if present), remove all
1603 // arguments from the stack (including the receiver), and push thisArg (if
1604 // present) instead.
1605 {
1606 // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
1607 // consistent state for a simple pop operation.
1608
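// Movz(dst, src, cond) overwrites dst with src only when cond is zero.
// scratch starts out equal to argc, so thisArg and argArray default to
// undefined when argc == 0; after decrementing scratch, argArray also
// defaults to undefined when argc == 1.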
1609 __ mov(scratch, argc);
1610 __ Ld(this_arg, MemOperand(sp, kPointerSize));
1611 __ Ld(arg_array, MemOperand(sp, 2 * kPointerSize));
1612 __ Movz(arg_array, undefined_value, scratch); // if argc == 0
1613 __ Movz(this_arg, undefined_value, scratch); // if argc == 0
1614 __ Dsubu(scratch, scratch, Operand(1));
1615 __ Movz(arg_array, undefined_value, scratch); // if argc == 1
1616 __ Ld(receiver, MemOperand(sp));
1617 __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
1618 __ Sd(this_arg, MemOperand(sp));
1619 }
1620
1621 // ----------- S t a t e -------------
1622 // -- a2 : argArray
1623 // -- a1 : receiver
1624 // -- a3 : undefined root value
1625 // -- sp[0] : thisArg
1626 // -----------------------------------
1627
1628 // 2. We don't need to check explicitly for callable receiver here,
1629 // since that's the first thing the Call/CallWithArrayLike builtins
1630 // will do.
1631
1632 // 3. Tail call with no arguments if argArray is null or undefined.
1633 Label no_arguments;
1634 __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
1635 __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
1636
1637 // 4a. Apply the receiver to the given argArray.
1638 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1639 RelocInfo::CODE_TARGET);
1640
1641 // 4b. The argArray is either null or undefined, so we tail call without any
1642 // arguments to the receiver.
1643 __ bind(&no_arguments);
1644 {
1645 __ mov(a0, zero_reg);
1646 DCHECK(receiver == a1);
1647 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1648 }
1649 }
1650
1651 // static
1652 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1653 // 1. Get the callable to call (passed as receiver) from the stack.
1654 {
1655 __ Pop(a1);
1656 }
1657
1658 // 2. Make sure we have at least one argument.
1659 // a0: actual number of arguments
1660 {
1661 Label done;
1662 __ Branch(&done, ne, a0, Operand(zero_reg));
1663 __ PushRoot(RootIndex::kUndefinedValue);
1664 __ Daddu(a0, a0, Operand(1));
1665 __ bind(&done);
1666 }
1667
1668 // 3. Adjust the actual number of arguments.
1669 __ daddiu(a0, a0, -1);
1670
1671 // 4. Call the callable.
1672 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1673 }
1674
1675 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1676 // ----------- S t a t e -------------
1677 // -- a0 : argc
1678 // -- sp[0] : receiver
1679 // -- sp[8] : target (if argc >= 1)
1680 // -- sp[16] : thisArgument (if argc >= 2)
1681 // -- sp[24] : argumentsList (if argc == 3)
1682 // -----------------------------------
1683
1684 Register argc = a0;
1685 Register arguments_list = a2;
1686 Register target = a1;
1687 Register this_argument = a5;
1688 Register undefined_value = a3;
1689 Register scratch = a4;
1690
1691 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1692
1693 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1694 // remove all arguments from the stack (including the receiver), and push
1695 // thisArgument (if present) instead.
1696 {
1697 // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
1698 // consistent state for a simple pop operation.
1699
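// As in Generate_FunctionPrototypeApply, the Movz sequence below defaults
// the missing parameters to undefined: all three when argc == 0,
// thisArgument and argumentsList when argc == 1, and argumentsList alone
// when argc == 2.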
1700 __ mov(scratch, argc);
1701 __ Ld(target, MemOperand(sp, kPointerSize));
1702 __ Ld(this_argument, MemOperand(sp, 2 * kPointerSize));
1703 __ Ld(arguments_list, MemOperand(sp, 3 * kPointerSize));
1704 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
1705 __ Movz(this_argument, undefined_value, scratch); // if argc == 0
1706 __ Movz(target, undefined_value, scratch); // if argc == 0
1707 __ Dsubu(scratch, scratch, Operand(1));
1708 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
1709 __ Movz(this_argument, undefined_value, scratch); // if argc == 1
1710 __ Dsubu(scratch, scratch, Operand(1));
1711 __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
1712
1713 __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
1714 __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
1715 }
1716
1717 // ----------- S t a t e -------------
1718 // -- a2 : argumentsList
1719 // -- a1 : target
1720 // -- a3 : undefined root value
1721 // -- sp[0] : thisArgument
1722 // -----------------------------------
1723
1724 // 2. We don't need to check explicitly for callable target here,
1725 // since that's the first thing the Call/CallWithArrayLike builtins
1726 // will do.
1727
1728 // 3. Apply the target to the given argumentsList.
1729 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1730 RelocInfo::CODE_TARGET);
1731 }
1732
1733 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1734 // ----------- S t a t e -------------
1735 // -- a0 : argc
1736 // -- sp[0] : receiver
1737 // -- sp[8] : target
1738 // -- sp[16] : argumentsList
1739 // -- sp[24] : new.target (optional)
1740 // -----------------------------------
1741
1742 Register argc = a0;
1743 Register arguments_list = a2;
1744 Register target = a1;
1745 Register new_target = a3;
1746 Register undefined_value = a4;
1747 Register scratch = a5;
1748
1749 __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
1750
1751 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1752 // new.target into a3 (if present, otherwise use target), remove all
1753 // arguments from the stack (including the receiver), and leave undefined
1754 // on the stack as the receiver instead.
1755 {
1756 // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
1757 // consistent state for a simple pop operation.
1758
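// Same Movz-based defaulting as above: everything defaults to undefined
// when argc == 0; when fewer than three arguments are passed (argc == 1 or
// argc == 2), new.target falls back to the target itself, matching the
// "otherwise use target" rule described above.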
1759 __ mov(scratch, argc);
1760 __ Ld(target, MemOperand(sp, kPointerSize));
1761 __ Ld(arguments_list, MemOperand(sp, 2 * kPointerSize));
1762 __ Ld(new_target, MemOperand(sp, 3 * kPointerSize));
1763 __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
1764 __ Movz(new_target, undefined_value, scratch); // if argc == 0
1765 __ Movz(target, undefined_value, scratch); // if argc == 0
1766 __ Dsubu(scratch, scratch, Operand(1));
1767 __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
1768 __ Movz(new_target, target, scratch); // if argc == 1
1769 __ Dsubu(scratch, scratch, Operand(1));
1770 __ Movz(new_target, target, scratch); // if argc == 2
1771
1772 __ Dlsa(sp, sp, argc, kSystemPointerSizeLog2);
1773 __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
1774 }
1775
1776 // ----------- S t a t e -------------
1777 // -- a2 : argumentsList
1778 // -- a1 : target
1779 // -- a3 : new.target
1780 // -- sp[0] : receiver (undefined)
1781 // -----------------------------------
1782
1783 // 2. We don't need to check explicitly for constructor target here,
1784 // since that's the first thing the Construct/ConstructWithArrayLike
1785 // builtins will do.
1786
1787 // 3. We don't need to check explicitly for constructor new.target here,
1788 // since that's the second thing the Construct/ConstructWithArrayLike
1789 // builtins will do.
1790
1791 // 4. Construct the target with the given new.target and argumentsList.
1792 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1793 RelocInfo::CODE_TARGET);
1794 }
1795
1796 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
1797 __ SmiTag(a0);
1798 __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1799 __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
1800 __ Push(Smi::zero()); // Padding.
1801 __ Daddu(fp, sp,
1802 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
1803 }
1804
1805 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1806 // ----------- S t a t e -------------
1807 // -- v0 : result being passed through
1808 // -----------------------------------
1809 // Get the number of arguments passed (as a smi), tear down the frame and
1810 // then drop the parameters from the stack.
1811 __ Ld(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1812 __ mov(sp, fp);
1813 __ MultiPop(fp.bit() | ra.bit());
1814 __ SmiScale(a4, a1, kPointerSizeLog2);
1815 __ Daddu(sp, sp, a4);
1816 // Adjust for the receiver.
1817 __ Daddu(sp, sp, Operand(kPointerSize));
1818 }
1819
1820 // static
1821 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1822 Handle<Code> code) {
1823 // ----------- S t a t e -------------
1824 // -- a1 : target
1825 // -- a0 : number of parameters on the stack (not including the receiver)
1826 // -- a2 : arguments list (a FixedArray)
1827 // -- a4 : len (number of elements to push from args)
1828 // -- a3 : new.target (for [[Construct]])
1829 // -----------------------------------
1830 if (masm->emit_debug_code()) {
1831 // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
1832 Label ok, fail;
1833 __ AssertNotSmi(a2);
1834 __ GetObjectType(a2, t8, t8);
1835 __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
1836 __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
1837 __ Branch(&ok, eq, a4, Operand(zero_reg));
1838 // Fall through.
1839 __ bind(&fail);
1840 __ Abort(AbortReason::kOperandIsNotAFixedArray);
1841
1842 __ bind(&ok);
1843 }
1844
1845 Register args = a2;
1846 Register len = a4;
1847
1848 // Check for stack overflow.
1849 Label stack_overflow;
1850 __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
1851
1852 // Move the arguments already in the stack,
1853 // including the receiver and the return address.
1854 {
1855 Label copy;
1856 Register src = a6, dest = a7;
1857 __ mov(src, sp);
1858 __ dsll(t0, a4, kSystemPointerSizeLog2);
1859 __ Dsubu(sp, sp, Operand(t0));
1860 // Update stack pointer.
1861 __ mov(dest, sp);
1862 __ Daddu(t0, a0, Operand(zero_reg));
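// t0 is initialized to the argument count (a0); the loop below runs while
// t0 >= 0, so it moves a0 + 1 slots (the existing arguments plus the
// receiver) down into the space just allocated for the new arguments.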
1863
1864 __ bind(&copy);
1865 __ Ld(t1, MemOperand(src, 0));
1866 __ Sd(t1, MemOperand(dest, 0));
1867 __ Dsubu(t0, t0, Operand(1));
1868 __ Daddu(src, src, Operand(kSystemPointerSize));
1869 __ Daddu(dest, dest, Operand(kSystemPointerSize));
1870 __ Branch(&copy, ge, t0, Operand(zero_reg));
1871 }
1872
1873 // Push arguments onto the stack (thisArgument is already on the stack).
1874 {
1875 Label done, push, loop;
1876 Register src = a6;
1877 Register scratch = len;
1878
1879 __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
1880 __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
1881 __ Daddu(a0, a0, len); // The 'len' argument for Call() or Construct().
1882 __ dsll(scratch, len, kPointerSizeLog2);
1883 __ Dsubu(scratch, sp, Operand(scratch));
1884 __ LoadRoot(t1, RootIndex::kTheHoleValue);
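// t1 caches the_hole. While pushing the arguments below, any hole read
// from the FixedArray is replaced with undefined before being stored.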
1885 __ bind(&loop);
1886 __ Ld(a5, MemOperand(src));
1887 __ daddiu(src, src, kPointerSize);
1888 __ Branch(&push, ne, a5, Operand(t1));
1889 __ LoadRoot(a5, RootIndex::kUndefinedValue);
1890 __ bind(&push);
1891 __ Sd(a5, MemOperand(a7, 0));
1892 __ Daddu(a7, a7, Operand(kSystemPointerSize));
1893 __ Daddu(scratch, scratch, Operand(kSystemPointerSize));
1894 __ Branch(&loop, ne, scratch, Operand(sp));
1895 __ bind(&done);
1896 }
1897
1898 // Tail-call to the actual Call or Construct builtin.
1899 __ Jump(code, RelocInfo::CODE_TARGET);
1900
1901 __ bind(&stack_overflow);
1902 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1903 }
1904
1905 // static
1906 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
1907 CallOrConstructMode mode,
1908 Handle<Code> code) {
1909 // ----------- S t a t e -------------
1910 // -- a0 : the number of arguments (not including the receiver)
1911 // -- a3 : the new.target (for [[Construct]] calls)
1912 // -- a1 : the target to call (can be any Object)
1913 // -- a2 : start index (to support rest parameters)
1914 // -----------------------------------
1915
1916 // Check if new.target has a [[Construct]] internal method.
1917 if (mode == CallOrConstructMode::kConstruct) {
1918 Label new_target_constructor, new_target_not_constructor;
1919 __ JumpIfSmi(a3, &new_target_not_constructor);
1920 __ ld(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
1921 __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
1922 __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
1923 __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
1924 __ bind(&new_target_not_constructor);
1925 {
1926 FrameScope scope(masm, StackFrame::MANUAL);
1927 __ EnterFrame(StackFrame::INTERNAL);
1928 __ Push(a3);
1929 __ CallRuntime(Runtime::kThrowNotConstructor);
1930 }
1931 __ bind(&new_target_constructor);
1932 }
1933
1934 #ifdef V8_NO_ARGUMENTS_ADAPTOR
1935 // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
1936 // code is erased.
1937 __ mov(a6, fp);
1938 __ Ld(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
1939 #else
1940
1941 // Check if we have an arguments adaptor frame below the function frame.
1942 Label arguments_adaptor, arguments_done;
1943 __ Ld(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1944 __ Ld(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset));
1945 __ Branch(&arguments_adaptor, eq, a7,
1946 Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1947 {
1948 __ Ld(a7, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1949 __ Ld(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset));
1950 __ Lhu(a7, FieldMemOperand(
1951 a7, SharedFunctionInfo::kFormalParameterCountOffset));
1952 __ mov(a6, fp);
1953 }
1954 __ Branch(&arguments_done);
1955 __ bind(&arguments_adaptor);
1956 {
1957 // Just get the length from the ArgumentsAdaptorFrame.
1958 __ SmiUntag(a7,
1959 MemOperand(a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
1960 }
1961 __ bind(&arguments_done);
1962 #endif
1963
1964 Label stack_done, stack_overflow;
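// a7 holds the caller's argument count (set up above); subtracting the
// start index in a2 yields the number of arguments still to forward. If
// nothing remains, skip the copy entirely.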
1965 __ Subu(a7, a7, a2);
1966 __ Branch(&stack_done, le, a7, Operand(zero_reg));
1967 {
1968 // Check for stack overflow.
1969 __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
1970
1971 // Forward the arguments from the caller frame.
1972
1973 // Point to the first argument to copy (skipping the receiver).
1974 __ Daddu(a6, a6,
1975 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
1976 kSystemPointerSize));
1977 __ Dlsa(a6, a6, a2, kSystemPointerSizeLog2);
1978
1979 // Move the arguments already in the stack,
1980 // including the receiver and the return address.
1981 {
1982 Label copy;
1983 Register src = t0, dest = a2;
1984 __ mov(src, sp);
1985 // Update stack pointer.
1986 __ dsll(t1, a7, kSystemPointerSizeLog2);
1987 __ Dsubu(sp, sp, Operand(t1));
1988 __ mov(dest, sp);
1989 __ Daddu(t2, a0, Operand(zero_reg));
1990
1991 __ bind(&copy);
1992 __ Ld(t1, MemOperand(src, 0));
1993 __ Sd(t1, MemOperand(dest, 0));
1994 __ Dsubu(t2, t2, Operand(1));
1995 __ Daddu(src, src, Operand(kSystemPointerSize));
1996 __ Daddu(dest, dest, Operand(kSystemPointerSize));
1997 __ Branch(&copy, ge, t2, Operand(zero_reg));
1998 }
1999
2000 // Copy arguments from the caller frame.
2001 // TODO(victorgomes): Consider using forward order as potentially more cache
2002 // friendly.
2003 {
2004 Label loop;
2005 __ Daddu(a0, a0, a7);
2006 __ bind(&loop);
2007 {
2008 __ Subu(a7, a7, Operand(1));
2009 __ Dlsa(t0, a6, a7, kPointerSizeLog2);
2010 __ Ld(kScratchReg, MemOperand(t0));
2011 __ Dlsa(t0, a2, a7, kPointerSizeLog2);
2012 __ Sd(kScratchReg, MemOperand(t0));
2013 __ Branch(&loop, ne, a7, Operand(zero_reg));
2014 }
2015 }
2016 }
2017 __ Branch(&stack_done);
2018 __ bind(&stack_overflow);
2019 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2020 __ bind(&stack_done);
2021
2022 // Tail-call to the {code} handler.
2023 __ Jump(code, RelocInfo::CODE_TARGET);
2024 }
2025
2026 // static
2027 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2028 ConvertReceiverMode mode) {
2029 // ----------- S t a t e -------------
2030 // -- a0 : the number of arguments (not including the receiver)
2031 // -- a1 : the function to call (checked to be a JSFunction)
2032 // -----------------------------------
2033 __ AssertFunction(a1);
2034
2035 // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2036 // Check that function is not a "classConstructor".
2037 Label class_constructor;
2038 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2039 __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2040 __ And(kScratchReg, a3,
2041 Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
2042 __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
2043
2044 // Enter the context of the function; ToObject has to run in the function
2045 // context, and we also need to take the global proxy from the function
2046 // context in case of conversion.
2047 __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2048 // We need to convert the receiver for non-native sloppy mode functions.
2049 Label done_convert;
2050 __ Lwu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
2051 __ And(kScratchReg, a3,
2052 Operand(SharedFunctionInfo::IsNativeBit::kMask |
2053 SharedFunctionInfo::IsStrictBit::kMask));
2054 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
2055 {
2056 // ----------- S t a t e -------------
2057 // -- a0 : the number of arguments (not including the receiver)
2058 // -- a1 : the function to call (checked to be a JSFunction)
2059 // -- a2 : the shared function info.
2060 // -- cp : the function context.
2061 // -----------------------------------
2062
2063 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2064 // Patch receiver to global proxy.
2065 __ LoadGlobalProxy(a3);
2066 } else {
2067 Label convert_to_object, convert_receiver;
2068 __ LoadReceiver(a3, a0);
2069 __ JumpIfSmi(a3, &convert_to_object);
2070 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2071 __ GetObjectType(a3, a4, a4);
2072 __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
2073 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2074 Label convert_global_proxy;
2075 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2076 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2077 __ bind(&convert_global_proxy);
2078 {
2079 // Patch receiver to global proxy.
2080 __ LoadGlobalProxy(a3);
2081 }
2082 __ Branch(&convert_receiver);
2083 }
2084 __ bind(&convert_to_object);
2085 {
2086 // Convert receiver using ToObject.
2087 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2088 // in the fast case? (fall back to AllocateInNewSpace?)
2089 FrameScope scope(masm, StackFrame::INTERNAL);
2090 __ SmiTag(a0);
2091 __ Push(a0, a1);
2092 __ mov(a0, a3);
2093 __ Push(cp);
2094 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2095 RelocInfo::CODE_TARGET);
2096 __ Pop(cp);
2097 __ mov(a3, v0);
2098 __ Pop(a0, a1);
2099 __ SmiUntag(a0);
2100 }
2101 __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2102 __ bind(&convert_receiver);
2103 }
2104 __ StoreReceiver(a3, a0, kScratchReg);
2105 }
2106 __ bind(&done_convert);
2107
2108 // ----------- S t a t e -------------
2109 // -- a0 : the number of arguments (not including the receiver)
2110 // -- a1 : the function to call (checked to be a JSFunction)
2111 // -- a2 : the shared function info.
2112 // -- cp : the function context.
2113 // -----------------------------------
2114
2115 __ Lhu(a2,
2116 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2117 __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
2118
2119 // The function is a "classConstructor", need to raise an exception.
2120 __ bind(&class_constructor);
2121 {
2122 FrameScope frame(masm, StackFrame::INTERNAL);
2123 __ Push(a1);
2124 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2125 }
2126 }
2127
2128 // static
2129 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2130 // ----------- S t a t e -------------
2131 // -- a0 : the number of arguments (not including the receiver)
2132 // -- a1 : the function to call (checked to be a JSBoundFunction)
2133 // -----------------------------------
2134 __ AssertBoundFunction(a1);
2135
2136 // Patch the receiver to [[BoundThis]].
2137 {
2138 __ Ld(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2139 __ StoreReceiver(t0, a0, kScratchReg);
2140 }
2141
2142 // Load [[BoundArguments]] into a2 and length of that into a4.
2143 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2144 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2145
2146 // ----------- S t a t e -------------
2147 // -- a0 : the number of arguments (not including the receiver)
2148 // -- a1 : the function to call (checked to be a JSBoundFunction)
2149 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2150 // -- a4 : the number of [[BoundArguments]]
2151 // -----------------------------------
2152
2153 // Reserve stack space for the [[BoundArguments]].
2154 {
2155 Label done;
2156 __ dsll(a5, a4, kPointerSizeLog2);
2157 __ Dsubu(t0, sp, Operand(a5));
2158 // Check the stack for overflow. We are not trying to catch interruptions
2159 // (i.e. debug break and preemption) here, so check the "real stack limit".
2160 __ LoadStackLimit(kScratchReg,
2161 MacroAssembler::StackLimitKind::kRealStackLimit);
2162 __ Branch(&done, hs, t0, Operand(kScratchReg));
2163 {
2164 FrameScope scope(masm, StackFrame::MANUAL);
2165 __ EnterFrame(StackFrame::INTERNAL);
2166 __ CallRuntime(Runtime::kThrowStackOverflow);
2167 }
2168 __ bind(&done);
2169 }
2170
2171 // Pop receiver.
2172 __ Pop(t0);
2173
2174 // Push [[BoundArguments]].
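// The elements of the [[BoundArguments]] FixedArray are pushed starting
// from the last index down to index 0; the receiver saved in t0 is pushed
// back on top afterwards.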
2175 {
2176 Label loop, done_loop;
2177 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2178 __ Daddu(a0, a0, Operand(a4));
2179 __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2180 __ bind(&loop);
2181 __ Dsubu(a4, a4, Operand(1));
2182 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2183 __ Dlsa(a5, a2, a4, kPointerSizeLog2);
2184 __ Ld(kScratchReg, MemOperand(a5));
2185 __ Push(kScratchReg);
2186 __ Branch(&loop);
2187 __ bind(&done_loop);
2188 }
2189
2190 // Push receiver.
2191 __ Push(t0);
2192
2193 // Call the [[BoundTargetFunction]] via the Call builtin.
2194 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2195 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2196 RelocInfo::CODE_TARGET);
2197 }
2198
2199 // static
2200 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2201 // ----------- S t a t e -------------
2202 // -- a0 : the number of arguments (not including the receiver)
2203 // -- a1 : the target to call (can be any Object).
2204 // -----------------------------------
2205
2206 Label non_callable, non_smi;
2207 __ JumpIfSmi(a1, &non_callable);
2208 __ bind(&non_smi);
2209 __ GetObjectType(a1, t1, t2);
2210 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2211 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
2212 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2213 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2214
2215 // Check if target has a [[Call]] internal method.
2216 __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2217 __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
2218 __ Branch(&non_callable, eq, t1, Operand(zero_reg));
2219
2220 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy),
2221 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE));
2222
2223 // 2. Call to something else, which might have a [[Call]] internal method (if
2224 // not we raise an exception).
2225 // Overwrite the original receiver with the (original) target.
2226 __ StoreReceiver(a1, a0, kScratchReg);
2227 // Let the "call_as_function_delegate" take care of the rest.
2228 __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
2229 __ Jump(masm->isolate()->builtins()->CallFunction(
2230 ConvertReceiverMode::kNotNullOrUndefined),
2231 RelocInfo::CODE_TARGET);
2232
2233 // 3. Call to something that is not callable.
2234 __ bind(&non_callable);
2235 {
2236 FrameScope scope(masm, StackFrame::INTERNAL);
2237 __ Push(a1);
2238 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2239 }
2240 }
2241
2242 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2243 // ----------- S t a t e -------------
2244 // -- a0 : the number of arguments (not including the receiver)
2245 // -- a1 : the constructor to call (checked to be a JSFunction)
2246 // -- a3 : the new target (checked to be a constructor)
2247 // -----------------------------------
2248 __ AssertConstructor(a1);
2249 __ AssertFunction(a1);
2250
2251 // The calling convention for function-specific ConstructStubs requires
2252 // a2 to contain either an AllocationSite or undefined.
2253 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2254
2255 Label call_generic_stub;
2256
2257 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2258 __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2259 __ lwu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
2260 __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2261 __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
2262
2263 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2264 RelocInfo::CODE_TARGET);
2265
2266 __ bind(&call_generic_stub);
2267 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2268 RelocInfo::CODE_TARGET);
2269 }
2270
2271 // static
2272 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2273 // ----------- S t a t e -------------
2274 // -- a0 : the number of arguments (not including the receiver)
2275 // -- a1 : the function to call (checked to be a JSBoundFunction)
2276 // -- a3 : the new target (checked to be a constructor)
2277 // -----------------------------------
2278 __ AssertConstructor(a1);
2279 __ AssertBoundFunction(a1);
2280
2281 // Load [[BoundArguments]] into a2 and length of that into a4.
2282 __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2283 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2284
2285 // ----------- S t a t e -------------
2286 // -- a0 : the number of arguments (not including the receiver)
2287 // -- a1 : the function to call (checked to be a JSBoundFunction)
2288 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2289 // -- a3 : the new target (checked to be a constructor)
2290 // -- a4 : the number of [[BoundArguments]]
2291 // -----------------------------------
2292
2293 // Reserve stack space for the [[BoundArguments]].
2294 {
2295 Label done;
2296 __ dsll(a5, a4, kPointerSizeLog2);
2297 __ Dsubu(t0, sp, Operand(a5));
2298 // Check the stack for overflow. We are not trying to catch interruptions
2299 // (i.e. debug break and preemption) here, so check the "real stack limit".
2300 __ LoadStackLimit(kScratchReg,
2301 MacroAssembler::StackLimitKind::kRealStackLimit);
2302 __ Branch(&done, hs, t0, Operand(kScratchReg));
2303 {
2304 FrameScope scope(masm, StackFrame::MANUAL);
2305 __ EnterFrame(StackFrame::INTERNAL);
2306 __ CallRuntime(Runtime::kThrowStackOverflow);
2307 }
2308 __ bind(&done);
2309 }
2310
2311 // Pop receiver.
2312 __ Pop(t0);
2313
2314 // Push [[BoundArguments]].
2315 {
2316 Label loop, done_loop;
2317 __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
2318 __ Daddu(a0, a0, Operand(a4));
2319 __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2320 __ bind(&loop);
2321 __ Dsubu(a4, a4, Operand(1));
2322 __ Branch(&done_loop, lt, a4, Operand(zero_reg));
2323 __ Dlsa(a5, a2, a4, kPointerSizeLog2);
2324 __ Ld(kScratchReg, MemOperand(a5));
2325 __ Push(kScratchReg);
2326 __ Branch(&loop);
2327 __ bind(&done_loop);
2328 }
2329
2330 // Push receiver.
2331 __ Push(t0);
2332
2333 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2334 {
2335 Label skip_load;
2336 __ Branch(&skip_load, ne, a1, Operand(a3));
2337 __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2338 __ bind(&skip_load);
2339 }
2340
2341 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2342 __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2343 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2344 }
2345
2346 // static
2347 void Builtins::Generate_Construct(MacroAssembler* masm) {
2348 // ----------- S t a t e -------------
2349 // -- a0 : the number of arguments (not including the receiver)
2350 // -- a1 : the constructor to call (can be any Object)
2351 // -- a3 : the new target (either the same as the constructor or
2352 // the JSFunction on which new was invoked initially)
2353 // -----------------------------------
2354
2355 // Check if target is a Smi.
2356 Label non_constructor, non_proxy;
2357 __ JumpIfSmi(a1, &non_constructor);
2358
2359 // Check if target has a [[Construct]] internal method.
2360 __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
2361 __ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
2362 __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
2363 __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
2364
2365 // Dispatch based on instance type.
2366 __ Lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
2367 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2368 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
2369
2370 // Only dispatch to bound functions after checking whether they are
2371 // constructors.
2372 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2373 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2374
2375 // Only dispatch to proxies after checking whether they are constructors.
2376 __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
2377 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2378 RelocInfo::CODE_TARGET);
2379
2380 // Called Construct on an exotic Object with a [[Construct]] internal method.
2381 __ bind(&non_proxy);
2382 {
2383 // Overwrite the original receiver with the (original) target.
2384 __ StoreReceiver(a1, a0, kScratchReg);
2385 // Let the "call_as_constructor_delegate" take care of the rest.
2386 __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
2387 __ Jump(masm->isolate()->builtins()->CallFunction(),
2388 RelocInfo::CODE_TARGET);
2389 }
2390
2391 // Called Construct on an Object that doesn't have a [[Construct]] internal
2392 // method.
2393 __ bind(&non_constructor);
2394 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2395 RelocInfo::CODE_TARGET);
2396 }
2397
2398 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2399 // State setup as expected by MacroAssembler::InvokePrologue.
2400 // ----------- S t a t e -------------
2401 // -- a0: actual arguments count
2402 // -- a1: function (passed through to callee)
2403 // -- a2: expected arguments count
2404 // -- a3: new target (passed through to callee)
2405 // -----------------------------------
2406
2407 Label invoke, dont_adapt_arguments, stack_overflow;
2408
2409 Label enough, too_few;
2410 __ Branch(&dont_adapt_arguments, eq, a2,
2411 Operand(kDontAdaptArgumentsSentinel));
2412 // We use Uless as the number of arguments should always be greater than 0.
2413 __ Branch(&too_few, Uless, a0, Operand(a2));
2414
2415 { // Enough parameters: actual >= expected.
2416 // a0: actual number of arguments as a smi
2417 // a1: function
2418 // a2: expected number of arguments
2419 // a3: new target (passed through to callee)
2420 __ bind(&enough);
2421 EnterArgumentsAdaptorFrame(masm);
2422 __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
2423
2424 // Calculate copy start address into a0 and copy end address into a4.
2425 __ dsll(a0, a2, kPointerSizeLog2);
2426 __ Daddu(a0, fp, a0);
2427
2428 // Adjust for return address and receiver.
2429 __ Daddu(a0, a0, Operand(2 * kPointerSize));
2430 // Compute copy end address.
2431 __ dsll(a4, a2, kPointerSizeLog2);
2432 __ dsubu(a4, a0, a4);
2433
2434 // Copy the arguments (including the receiver) to the new stack frame.
2435 // a0: copy start address
2436 // a1: function
2437 // a2: expected number of arguments
2438 // a3: new target (passed through to callee)
2439 // a4: copy end address
2440
2441 Label copy;
2442 __ bind(&copy);
2443 __ Ld(a5, MemOperand(a0));
2444 __ push(a5);
2445 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
2446 __ daddiu(a0, a0, -kPointerSize); // In delay slot.
2447
2448 __ jmp(&invoke);
2449 }
2450
2451 { // Too few parameters: Actual < expected.
2452 __ bind(&too_few);
2453 EnterArgumentsAdaptorFrame(masm);
2454 __ StackOverflowCheck(a2, a5, kScratchReg, &stack_overflow);
2455
2456 // Fill the remaining expected arguments with undefined.
2457 __ LoadRoot(t0, RootIndex::kUndefinedValue);
2458 __ SmiUntag(t1, a0);
2459 __ Dsubu(t2, a2, Operand(t1));
2460 __ dsll(a4, t2, kSystemPointerSizeLog2);
2461 __ Dsubu(a4, fp, a4);
2462 // Adjust for frame.
2463 __ Dsubu(a4, a4,
2464 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2465 kSystemPointerSize));
2466
2467 Label fill;
2468 __ bind(&fill);
2469 __ push(t0);
2470 __ Branch(&fill, ne, sp, Operand(a4));
2471
2472 // Calculate the copy start address into a0; the copy end address is fp.
2473 __ SmiScale(a0, a0, kPointerSizeLog2);
2474 __ Daddu(a0, fp, a0);
2475
2476 // Copy the arguments (including the receiver) to the new stack frame.
2477 Label copy;
2478 __ bind(&copy);
2479
2480 // Adjust load for return address and receiver.
2481 __ Ld(t0, MemOperand(a0, 2 * kSystemPointerSize));
2482 __ push(t0);
2483
2484 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
2485 __ Dsubu(a0, a0, Operand(kSystemPointerSize));
2486 }
2487
2488 // Call the entry point.
2489 __ bind(&invoke);
2490 __ mov(a0, a2);
2491 // a0 : expected number of arguments
2492 // a1 : function (passed through to callee)
2493 // a3: new target (passed through to callee)
2494 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2495 __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2496 __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
2497 __ Call(a2);
2498
2499 // Store offset of return address for deoptimizer.
2500 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2501
2502 // Exit frame and return.
2503 LeaveArgumentsAdaptorFrame(masm);
2504 __ Ret();
2505
2506 // -------------------------------------------
2507 // Don't adapt arguments.
2508 // -------------------------------------------
2509 __ bind(&dont_adapt_arguments);
2510 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2511 __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2512 __ Daddu(a2, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
2513 __ Jump(a2);
2514
2515 __ bind(&stack_overflow);
2516 {
2517 FrameScope frame(masm, StackFrame::MANUAL);
2518 __ CallRuntime(Runtime::kThrowStackOverflow);
2519 __ break_(0xCC);
2520 }
2521 }
2522
2523 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2524 // The function index was put in t0 by the jump table trampoline.
2525 // Convert it to a Smi for the runtime call.
2526 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2527 {
2528 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2529 FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2530
2531 // Save all parameter registers (see wasm-linkage.cc). They might be
2532 // overwritten in the runtime call below. We don't have any callee-saved
2533 // registers in wasm, so no need to store anything else.
2534 constexpr RegList gp_regs =
2535 Register::ListOf(a0, a2, a3, a4, a5, a6, a7);
2536 constexpr RegList fp_regs =
2537 DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
2538 constexpr int16_t num_to_push = base::bits::CountPopulation(gp_regs) +
2539 base::bits::CountPopulation(fp_regs);
2540 // The number of regs to be pushed before kWasmInstanceRegister should be
2541 // equal to kNumberOfSavedAllParamRegs.
2542 STATIC_ASSERT(num_to_push ==
2543 WasmCompileLazyFrameConstants::kNumberOfSavedAllParamRegs);
2544 __ MultiPush(gp_regs);
2545 __ MultiPushFPU(fp_regs);
2546
2547 // Pass the instance and the function index as explicit arguments to the
2548 // runtime function.
2549 __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2550 // Initialize the JavaScript context with 0. CEntry will use it to
2551 // set the current context on the isolate.
2552 __ Move(kContextRegister, Smi::zero());
2553 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2554
2555 // Restore registers.
2556 __ MultiPopFPU(fp_regs);
2557 __ MultiPop(gp_regs);
2558 }
2559 // Finally, jump to the entrypoint.
2560 __ Jump(v0);
2561 }
2562
2563 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2564 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2565 {
2566 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2567
2568 // Save all parameter registers. They might hold live values, we restore
2569 // them after the runtime call.
2570 __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2571 __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2572
2573 // Initialize the JavaScript context with 0. CEntry will use it to
2574 // set the current context on the isolate.
2575 __ Move(cp, Smi::zero());
2576 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2577
2578 // Restore registers.
2579 __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2580 __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2581 }
2582 __ Ret();
2583 }
2584
2585 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2586 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2587 bool builtin_exit_frame) {
2588 // Called from JavaScript; parameters are on stack as if calling JS function
2589 // a0: number of arguments including receiver
2590 // a1: pointer to builtin function
2591 // fp: frame pointer (restored after C call)
2592 // sp: stack pointer (restored as callee's sp after C call)
2593 // cp: current context (C callee-saved)
2594 //
2595 // If argv_mode == kArgvInRegister:
2596 // a2: pointer to the first argument
2597
2598 if (argv_mode == kArgvInRegister) {
2599 // Move argv into the correct register.
2600 __ mov(s1, a2);
2601 } else {
2602 // Compute the argv pointer in a callee-saved register.
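// s1 = sp + argc * kPointerSize - kPointerSize, i.e. the address of the
// last (highest-addressed) argument slot; it is handed to the C function
// as argv further below.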
2603 __ Dlsa(s1, sp, a0, kPointerSizeLog2);
2604 __ Dsubu(s1, s1, kPointerSize);
2605 }
2606
2607 // Enter the exit frame that transitions from JavaScript to C++.
2608 FrameScope scope(masm, StackFrame::MANUAL);
2609 __ EnterExitFrame(
2610 save_doubles == kSaveFPRegs, 0,
2611 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2612
2613 // s0: number of arguments including receiver (C callee-saved)
2614 // s1: pointer to first argument (C callee-saved)
2615 // s2: pointer to builtin function (C callee-saved)
2616
2617 // Prepare arguments for C routine.
2618 // a0 = argc
2619 __ mov(s0, a0);
2620 __ mov(s2, a1);
2621
2622 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
2623 // also need to reserve the 4 argument slots on the stack.
2624
2625 __ AssertStackIsAligned();
2626
2627 // a0 = argc, a1 = argv, a2 = isolate
2628 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2629 __ mov(a1, s1);
2630
2631 __ StoreReturnAddressAndCall(s2);
2632
2633 // Result returned in v0 or v1:v0 - do not destroy these registers!
2634
2635 // Check result for exception sentinel.
2636 Label exception_returned;
2637 __ LoadRoot(a4, RootIndex::kException);
2638 __ Branch(&exception_returned, eq, a4, Operand(v0));
2639
2640 // Check that there is no pending exception, otherwise we
2641 // should have returned the exception sentinel.
2642 if (FLAG_debug_code) {
2643 Label okay;
2644 ExternalReference pending_exception_address = ExternalReference::Create(
2645 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2646 __ li(a2, pending_exception_address);
2647 __ Ld(a2, MemOperand(a2));
2648 __ LoadRoot(a4, RootIndex::kTheHoleValue);
2649 // Cannot use Check here, as it attempts to generate a call into the runtime.
2650 __ Branch(&okay, eq, a4, Operand(a2));
2651 __ stop();
2652 __ bind(&okay);
2653 }
2654
2655 // Exit C frame and return.
2656 // v0:v1: result
2657 // sp: stack pointer
2658 // fp: frame pointer
2659 Register argc = argv_mode == kArgvInRegister
2660 // We don't want to pop arguments so set argc to no_reg.
2661 ? no_reg
2662 // s0: still holds argc (callee-saved).
2663 : s0;
2664 __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
2665
2666 // Handling of exception.
2667 __ bind(&exception_returned);
2668
2669 ExternalReference pending_handler_context_address = ExternalReference::Create(
2670 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2671 ExternalReference pending_handler_entrypoint_address =
2672 ExternalReference::Create(
2673 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2674 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2675 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2676 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2677 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2678
2679 // Ask the runtime for help to determine the handler. This will set v0 to
2680 // contain the current pending exception, don't clobber it.
2681 ExternalReference find_handler =
2682 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2683 {
2684 FrameScope scope(masm, StackFrame::MANUAL);
2685 __ PrepareCallCFunction(3, 0, a0);
2686 __ mov(a0, zero_reg);
2687 __ mov(a1, zero_reg);
2688 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2689 __ CallCFunction(find_handler, 3);
2690 }
2691
2692 // Retrieve the handler context, SP and FP.
2693 __ li(cp, pending_handler_context_address);
2694 __ Ld(cp, MemOperand(cp));
2695 __ li(sp, pending_handler_sp_address);
2696 __ Ld(sp, MemOperand(sp));
2697 __ li(fp, pending_handler_fp_address);
2698 __ Ld(fp, MemOperand(fp));
2699
2700 // If the handler is a JS frame, restore the context to the frame. Note that
2701 // the context will be set to (cp == 0) for non-JS frames.
2702 Label zero;
2703 __ Branch(&zero, eq, cp, Operand(zero_reg));
2704 __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2705 __ bind(&zero);
2706
2707 // Reset the masking register. This is done independent of the underlying
2708 // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
2709 // with both configurations. It is safe to always do this, because the
2710 // underlying register is caller-saved and can be arbitrarily clobbered.
2711 __ ResetSpeculationPoisonRegister();
2712
2713 // Compute the handler entry address and jump to it.
2714 __ li(t9, pending_handler_entrypoint_address);
2715 __ Ld(t9, MemOperand(t9));
2716 __ Jump(t9);
2717 }
2718
2719 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2720 Label done;
2721 Register result_reg = t0;
2722
2723 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2724 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2725 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2726 DoubleRegister double_scratch = kScratchDoubleReg;
2727
2728 // Account for saved regs.
2729 const int kArgumentOffset = 4 * kPointerSize;
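// The double argument lives at sp + kArgumentOffset once the four scratch
// registers below have been saved; the truncated integer result is written
// back to that same stack slot before returning.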
2730
2731 __ Push(result_reg);
2732 __ Push(scratch, scratch2, scratch3);
2733
2734 // Load double input.
2735 __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
2736
2737 // Clear cumulative exception flags and save the FCSR.
2738 __ cfc1(scratch2, FCSR);
2739 __ ctc1(zero_reg, FCSR);
2740
2741 // Try a conversion to a signed integer.
2742 __ Trunc_w_d(double_scratch, double_scratch);
2743 // Move the converted value into the result register.
2744 __ mfc1(scratch3, double_scratch);
2745
2746 // Retrieve and restore the FCSR.
2747 __ cfc1(scratch, FCSR);
2748 __ ctc1(scratch2, FCSR);
2749
2750 // Check for overflow and NaNs.
2751 __ And(
2752 scratch, scratch,
2753 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2754 // If we had no exceptions then set result_reg and we are done.
2755 Label error;
2756 __ Branch(&error, ne, scratch, Operand(zero_reg));
2757 __ Move(result_reg, scratch3);
2758 __ Branch(&done);
2759 __ bind(&error);
2760
2761 // Load the double value and perform a manual truncation.
2762 Register input_high = scratch2;
2763 Register input_low = scratch3;
2764
2765 __ Lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2766 __ Lw(input_high,
2767 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2768
2769 Label normal_exponent;
2770 // Extract the biased exponent in result.
2771 __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
2772 HeapNumber::kExponentBits);
2773
2774 // Check for Infinity and NaNs, which should return 0.
2775 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
2776 __ Movz(result_reg, zero_reg, scratch);
2777 __ Branch(&done, eq, scratch, Operand(zero_reg));
2778
2779 // Express exponent as delta to (number of mantissa bits + 31).
2780 __ Subu(result_reg, result_reg,
2781 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2782
2783 // If the delta is strictly positive, all bits would be shifted away,
2784 // which means that we can return 0.
2785 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2786 __ mov(result_reg, zero_reg);
2787 __ Branch(&done);
2788
2789 __ bind(&normal_exponent);
2790 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2791 // Calculate shift.
2792 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
2793
2794 // Save the sign.
2795 Register sign = result_reg;
2796 result_reg = no_reg;
2797 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2798
2799 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
2800 // to check for this specific case.
2801 Label high_shift_needed, high_shift_done;
2802 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2803 __ mov(input_high, zero_reg);
2804 __ Branch(&high_shift_done);
2805 __ bind(&high_shift_needed);
2806
2807 // Set the implicit 1 before the mantissa part in input_high.
2808 __ Or(input_high, input_high,
2809 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2810 // Shift the mantissa bits to the correct position.
2811 // We don't need to clear non-mantissa bits as they will be shifted away.
2812 // If they weren't, it would mean that the answer is in the 32-bit range.
2813 __ sllv(input_high, input_high, scratch);
2814
2815 __ bind(&high_shift_done);
2816
2817 // Replace the shifted bits with bits from the lower mantissa word.
2818 Label pos_shift, shift_done;
2819 __ li(kScratchReg, 32);
2820 __ subu(scratch, kScratchReg, scratch);
2821 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
2822
2823 // Negate scratch.
2824 __ Subu(scratch, zero_reg, scratch);
2825 __ sllv(input_low, input_low, scratch);
2826 __ Branch(&shift_done);
2827
2828 __ bind(&pos_shift);
2829 __ srlv(input_low, input_low, scratch);
2830
2831 __ bind(&shift_done);
2832 __ Or(input_high, input_high, Operand(input_low));
2833 // Restore sign if necessary.
2834 __ mov(scratch, sign);
2835 result_reg = sign;
2836 sign = no_reg;
2837 __ Subu(result_reg, zero_reg, input_high);
2838 __ Movz(result_reg, input_high, scratch);
2839
2840 __ bind(&done);
2841
2842 __ Sd(result_reg, MemOperand(sp, kArgumentOffset));
2843 __ Pop(scratch, scratch2, scratch3);
2844 __ Pop(result_reg);
2845 __ Ret();
2846 }
2847
2848 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2849 // TODO(v8:10701): Implement for this platform.
2850 __ Trap();
2851 }
2852
2853 namespace {
2854
2855 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2856 int64_t offset = (ref0.address() - ref1.address());
2857 DCHECK(static_cast<int>(offset) == offset);
2858 return static_cast<int>(offset);
2859 }
2860
2861 // Calls an API function. Allocates a HandleScope, extracts the returned
2862 // value from the handle, and propagates exceptions. Restores the context.
2863 // stack_space is the space to be unwound on exit (it includes the call JS
2864 // arguments space and the additional space allocated for the fast call).
2865 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2866 ExternalReference thunk_ref, int stack_space,
2867 MemOperand* stack_space_operand,
2868 MemOperand return_value_operand) {
2869 Isolate* isolate = masm->isolate();
2870 ExternalReference next_address =
2871 ExternalReference::handle_scope_next_address(isolate);
2872 const int kNextOffset = 0;
2873 const int kLimitOffset = AddressOffset(
2874 ExternalReference::handle_scope_limit_address(isolate), next_address);
2875 const int kLevelOffset = AddressOffset(
2876 ExternalReference::handle_scope_level_address(isolate), next_address);
2877
2878 DCHECK(function_address == a1 || function_address == a2);
2879
2880 Label profiler_enabled, end_profiler_check;
2881 __ li(t9, ExternalReference::is_profiling_address(isolate));
2882 __ Lb(t9, MemOperand(t9, 0));
2883 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
2884 __ li(t9, ExternalReference::address_of_runtime_stats_flag());
2885 __ Lw(t9, MemOperand(t9, 0));
2886 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
2887 {
2888 // Call the api function directly.
2889 __ mov(t9, function_address);
2890 __ Branch(&end_profiler_check);
2891 }
2892
2893 __ bind(&profiler_enabled);
2894 {
2895 // Additional parameter is the address of the actual callback.
2896 __ li(t9, thunk_ref);
2897 }
2898 __ bind(&end_profiler_check);
2899
2900 // Allocate HandleScope in callee-save registers.
2901 __ li(s5, next_address);
2902 __ Ld(s0, MemOperand(s5, kNextOffset));
2903 __ Ld(s1, MemOperand(s5, kLimitOffset));
2904 __ Lw(s2, MemOperand(s5, kLevelOffset));
2905 __ Addu(s2, s2, Operand(1));
2906 __ Sw(s2, MemOperand(s5, kLevelOffset));
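  // What the HandleScope bookkeeping above and below amounts to, sketched in
  // C++ (illustrative only; field names follow HandleScopeData):
  //
  //   HandleScopeData* data = isolate->handle_scope_data();
  //   Address* prev_next = data->next;    // saved in s0
  //   Address* prev_limit = data->limit;  // saved in s1
  //   data->level++;                      // saved in s2
  //   ... call the API function ...
  //   data->next = prev_next;
  //   data->level--;
  //   if (data->limit != prev_limit) DeleteExtensions(isolate);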
2907
2908 __ StoreReturnAddressAndCall(t9);
2909
2910 Label promote_scheduled_exception;
2911 Label delete_allocated_handles;
2912 Label leave_exit_frame;
2913 Label return_value_loaded;
2914
2915 // Load value from ReturnValue.
2916 __ Ld(v0, return_value_operand);
2917 __ bind(&return_value_loaded);
2918
2919 // No more valid handles (the result handle was the last one). Restore
2920 // previous handle scope.
2921 __ Sd(s0, MemOperand(s5, kNextOffset));
2922 if (__ emit_debug_code()) {
2923 __ Lw(a1, MemOperand(s5, kLevelOffset));
2924 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
2925 Operand(s2));
2926 }
2927 __ Subu(s2, s2, Operand(1));
2928 __ Sw(s2, MemOperand(s5, kLevelOffset));
2929 __ Ld(kScratchReg, MemOperand(s5, kLimitOffset));
2930 __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
2931
2932 // Leave the API exit frame.
2933 __ bind(&leave_exit_frame);
2934
2935 if (stack_space_operand == nullptr) {
2936 DCHECK_NE(stack_space, 0);
2937 __ li(s0, Operand(stack_space));
2938 } else {
2939 DCHECK_EQ(stack_space, 0);
2940 STATIC_ASSERT(kCArgSlotCount == 0);
2941 __ Ld(s0, *stack_space_operand);
2942 }
2943
2944 static constexpr bool kDontSaveDoubles = false;
2945 static constexpr bool kRegisterContainsSlotCount = false;
2946 __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
2947 kRegisterContainsSlotCount);
2948
2949 // Check if the function scheduled an exception.
2950 __ LoadRoot(a4, RootIndex::kTheHoleValue);
2951 __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
2952 __ Ld(a5, MemOperand(kScratchReg));
2953 __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
2954
2955 __ Ret();
2956
2957 // Re-throw by promoting a scheduled exception.
2958 __ bind(&promote_scheduled_exception);
2959 __ TailCallRuntime(Runtime::kPromoteScheduledException);
2960
2961 // HandleScope limit has changed. Delete allocated extensions.
2962 __ bind(&delete_allocated_handles);
2963 __ Sd(s1, MemOperand(s5, kLimitOffset));
2964 __ mov(s0, v0);
2965 __ mov(a0, v0);
2966 __ PrepareCallCFunction(1, s1);
2967 __ li(a0, ExternalReference::isolate_address(isolate));
2968 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
2969 __ mov(v0, s0);
2970 __ jmp(&leave_exit_frame);
2971 }
2972
2973 } // namespace
2974
2975 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
2976 // ----------- S t a t e -------------
2977 // -- cp : context
2978 // -- a1 : api function address
2979 // -- a2 : arguments count (not including the receiver)
2980 // -- a3 : call data
2981 // -- a0 : holder
2982 // -- sp[0] : receiver
2983 // -- sp[8] : first argument
2984 // -- ...
2985 // -- sp[(argc) * 8] : last argument
2986 // -----------------------------------
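  // The target being invoked is a v8::FunctionCallback, i.e. a C++ function
  // of roughly this shape (illustrative only):
  //
  //   void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  //     info.GetReturnValue().Set(...);
  //   }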
2987
2988 Register api_function_address = a1;
2989 Register argc = a2;
2990 Register call_data = a3;
2991 Register holder = a0;
2992 Register scratch = t0;
2993 Register base = t1; // For addressing MemOperands on the stack.
2994
2995 DCHECK(!AreAliased(api_function_address, argc, call_data,
2996 holder, scratch, base));
2997
2998 using FCA = FunctionCallbackArguments;
2999
3000 STATIC_ASSERT(FCA::kArgsLength == 6);
3001 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3002 STATIC_ASSERT(FCA::kDataIndex == 4);
3003 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3004 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3005 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3006 STATIC_ASSERT(FCA::kHolderIndex == 0);
3007
3008 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3009 //
3010 // Target state:
3011 // sp[0 * kPointerSize]: kHolder
3012 // sp[1 * kPointerSize]: kIsolate
3013 // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
3014 // sp[3 * kPointerSize]: undefined (kReturnValue)
3015 // sp[4 * kPointerSize]: kData
3016 // sp[5 * kPointerSize]: undefined (kNewTarget)
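  // For orientation (illustrative only): the C++ side later reads this block
  // through FunctionCallbackInfo, roughly as
  //   info.Holder()          <- implicit_args[kHolderIndex]
  //   info.GetIsolate()      <- implicit_args[kIsolateIndex]
  //   info.GetReturnValue()  -> implicit_args[kReturnValueOffset]
  //   info.Data()            <- implicit_args[kDataIndex]
  //   info.NewTarget()       <- implicit_args[kNewTargetIndex]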
3017
3018 // Set up the base register for addressing through MemOperands. It will point
3019 // at the receiver (located at sp + argc * kPointerSize).
3020 __ Dlsa(base, sp, argc, kPointerSizeLog2);
3021
3022 // Reserve space on the stack.
3023 __ Dsubu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
3024
3025 // kHolder.
3026 __ Sd(holder, MemOperand(sp, 0 * kPointerSize));
3027
3028 // kIsolate.
3029 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3030 __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
3031
3032 // kReturnValueDefaultValue and kReturnValue.
3033 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3034 __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
3035 __ Sd(scratch, MemOperand(sp, 3 * kPointerSize));
3036
3037 // kData.
3038 __ Sd(call_data, MemOperand(sp, 4 * kPointerSize));
3039
3040 // kNewTarget.
3041 __ Sd(scratch, MemOperand(sp, 5 * kPointerSize));
3042
3043 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3044 // We use it below to set up the FunctionCallbackInfo object.
3045 __ mov(scratch, sp);
3046
3047 // Allocate the v8::Arguments structure in the arguments' space since
3048 // it's not controlled by GC.
3049 static constexpr int kApiStackSpace = 4;
3050 static constexpr bool kDontSaveDoubles = false;
3051 FrameScope frame_scope(masm, StackFrame::MANUAL);
3052 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3053
3054 // EnterExitFrame may align the sp.
3055
3056 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3057 // Arguments are after the return address (pushed by EnterExitFrame()).
3058 __ Sd(scratch, MemOperand(sp, 1 * kPointerSize));
3059
3060 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3061 // on the stack).
3062 __ Daddu(scratch, scratch,
3063 Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3064
3065 __ Sd(scratch, MemOperand(sp, 2 * kPointerSize));
3066
3067 // FunctionCallbackInfo::length_.
3068 // Stored as an int field; per the n64 ABI, 32-bit integers within a struct
3069 // on the stack are always left-justified.
3070 __ Sw(argc, MemOperand(sp, 3 * kPointerSize));
3071
3072 // We also store the number of bytes to drop from the stack after returning
3073 // from the API function here.
3074 // Note: Unlike on other architectures, this stores the number of slots to
3075 // drop, not the number of bytes.
3076 __ Daddu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3077 __ Sd(scratch, MemOperand(sp, 4 * kPointerSize));
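  // For example, a call with argc == 2 later drops
  // 2 + FCA::kArgsLength + 1 (receiver) = 9 slots.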
3078
3079 // v8::InvocationCallback's argument.
3080 DCHECK(!AreAliased(api_function_address, scratch, a0));
3081 __ Daddu(a0, sp, Operand(1 * kPointerSize));
3082
3083 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3084
3085 // There are two stack slots above the arguments we constructed on the stack.
3086 // TODO(jgruber): Document what these arguments are.
3087 static constexpr int kStackSlotsAboveFCA = 2;
3088 MemOperand return_value_operand(
3089 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3090
3091 static constexpr int kUseStackSpaceOperand = 0;
3092 MemOperand stack_space_operand(sp, 4 * kPointerSize);
3093
3094 AllowExternalCallThatCantCauseGC scope(masm);
3095 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3096 kUseStackSpaceOperand, &stack_space_operand,
3097 return_value_operand);
3098 }
3099
3100 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3101 // Build the v8::PropertyCallbackInfo::args_ array on the stack and push the
3102 // property name below the exit frame to make the GC aware of them.
3103 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3104 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3105 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3106 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3107 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3108 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3109 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3110 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
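  // The getter being invoked is a v8::AccessorNameGetterCallback, i.e. a C++
  // function of roughly this shape (illustrative only):
  //
  //   void MyGetter(v8::Local<v8::Name> name,
  //                 const v8::PropertyCallbackInfo<v8::Value>& info) {
  //     info.GetReturnValue().Set(...);
  //   }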
3111
3112 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3113 Register holder = ApiGetterDescriptor::HolderRegister();
3114 Register callback = ApiGetterDescriptor::CallbackRegister();
3115 Register scratch = a4;
3116 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3117
3118 Register api_function_address = a2;
3119
3120 // Here and below +1 is for name() pushed after the args_ array.
3121 using PCA = PropertyCallbackArguments;
3122 __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3123 __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3124 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3125 __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3126 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3127 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3128 __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3129 kPointerSize));
3130 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3131 __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3132 __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3133 // should_throw_on_error -> false
3134 DCHECK_EQ(0, Smi::zero().ptr());
3135 __ Sd(zero_reg,
3136 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3137 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3138 __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
3139
3140 // v8::PropertyCallbackInfo::args_ array and name handle.
3141 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3142
3143 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3144 __ mov(a0, sp); // a0 = Handle<Name>
3145 __ Daddu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
3146
3147 const int kApiStackSpace = 1;
3148 FrameScope frame_scope(masm, StackFrame::MANUAL);
3149 __ EnterExitFrame(false, kApiStackSpace);
3150
3151 // Create the v8::PropertyCallbackInfo object on the stack and initialize
3152 // its args_ field.
3153 __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
3154 __ Daddu(a1, sp, Operand(1 * kPointerSize));
3155 // a1 = v8::PropertyCallbackInfo&
3156
3157 ExternalReference thunk_ref =
3158 ExternalReference::invoke_accessor_getter_callback();
3159
3160 __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3161 __ Ld(api_function_address,
3162 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3163
3164 // +3 is to skip the prologue, return address and name handle.
3165 MemOperand return_value_operand(
3166 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3167 MemOperand* const kUseStackSpaceConstant = nullptr;
3168 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3169 kStackUnwindSpace, kUseStackSpaceConstant,
3170 return_value_operand);
3171 }
3172
3173 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3174 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3175 // purpose Code object) to be able to call into C functions that may trigger
3176 // GC and thus move the caller.
3177 //
3178 // DirectCEntry places the return address on the stack (updated by the GC),
3179 // making the call GC safe. The irregexp backend relies on this.
3180
3181 // Make room for arguments to fit the C calling convention. Callers use
3182 // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3183 // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3184 // after the call.
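  // A typical caller-side pattern looks roughly like this (illustrative only;
  // |some_c_function| is a placeholder):
  //
  //   __ li(t9, ExternalReference::Create(&some_c_function));
  //   __ Call(BUILTIN_CODE(isolate, DirectCEntry), RelocInfo::CODE_TARGET);
  //   __ daddiu(sp, sp, kCArgsSlotsSize);  // drop the argument slots again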
3185 __ daddiu(sp, sp, -kCArgsSlotsSize);
3186
3187 __ Sd(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
3188 __ Call(t9); // Call the C++ function.
3189 __ Ld(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
3190
3191 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3192 // In case of an error the return address may point to a memory area
3193 // filled with kZapValue by the GC. Dereference the address and check for
3194 // this.
3195 __ Uld(a4, MemOperand(t9));
3196 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
3197 Operand(reinterpret_cast<uint64_t>(kZapValue)));
3198 }
3199
3200 __ Jump(t9);
3201 }
3202
3203 namespace {
3204
3205 // This code tries to be close to ia32 code so that any changes can be
3206 // easily ported.
3207 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3208 DeoptimizeKind deopt_kind) {
3209 Isolate* isolate = masm->isolate();
3210
3211 // Unlike on ARM we don't save all the registers, just the useful ones.
3212 // For the rest, there are gaps on the stack, so the offsets remain the same.
3213 const int kNumberOfRegisters = Register::kNumRegisters;
3214
3215 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3216 RegList saved_regs = restored_regs | sp.bit() | ra.bit();
3217
3218 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3219
3220 // Save all double FPU registers before messing with them.
3221 __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
3222 const RegisterConfiguration* config = RegisterConfiguration::Default();
3223 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3224 int code = config->GetAllocatableDoubleCode(i);
3225 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3226 int offset = code * kDoubleSize;
3227 __ Sdc1(fpu_reg, MemOperand(sp, offset));
3228 }
3229
3230 // Push saved_regs (needed to populate FrameDescription::registers_).
3231 // Leave gaps for other registers.
3232 __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
3233 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3234 if ((saved_regs & (1 << i)) != 0) {
3235 __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
3236 }
3237 }
3238
3239 __ li(a2,
3240 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3241 __ Sd(fp, MemOperand(a2));
3242
3243 const int kSavedRegistersAreaSize =
3244 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
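  // At this point the stack looks like this (higher addresses first):
  //   [ incoming frame ]
  //   [ FPU registers, kDoubleRegsSize bytes ]
  //   [ GP registers, kNumberOfRegisters slots (with gaps) ]  <- sp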
3245
3246 __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
3247 // Get the address of the location in the code object (a3) (return
3248 // address for lazy deoptimization) and compute the fp-to-sp delta in
3249 // register a4.
3250 __ mov(a3, ra);
3251 __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize));
3252
3253 __ Dsubu(a4, fp, a4);
3254
3255 // Allocate a new deoptimizer object.
3256 __ PrepareCallCFunction(6, a5);
3257 // Pass six arguments, according to n64 ABI.
3258 __ mov(a0, zero_reg);
3259 Label context_check;
3260 __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3261 __ JumpIfSmi(a1, &context_check);
3262 __ Ld(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3263 __ bind(&context_check);
3264 __ li(a1, Operand(static_cast<int>(deopt_kind)));
3265 // a2: bailout id already loaded.
3266 // a3: code address or 0 already loaded.
3267 // a4: already has fp-to-sp delta.
3268 __ li(a5, ExternalReference::isolate_address(isolate));
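  // The arguments are now laid out for the C++ call, roughly matching
  // Deoptimizer::New(function, kind, bailout_id, from, fp_to_sp_delta,
  // isolate):
  //   a0 = function (or 0), a1 = deopt_kind, a2 = bailout id marker,
  //   a3 = return address, a4 = fp-to-sp delta, a5 = isolate.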
3269
3270 // Call Deoptimizer::New().
3271 {
3272 AllowExternalCallThatCantCauseGC scope(masm);
3273 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
3274 }
3275
3276 // Preserve "deoptimizer" object in register v0 and get the input
3277 // frame descriptor pointer to a1 (deoptimizer->input_);
3278 // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3279 __ mov(a0, v0);
3280 __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
3281
3282 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3283 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3284 for (int i = 0; i < kNumberOfRegisters; i++) {
3285 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3286 if ((saved_regs & (1 << i)) != 0) {
3287 __ Ld(a2, MemOperand(sp, i * kPointerSize));
3288 __ Sd(a2, MemOperand(a1, offset));
3289 } else if (FLAG_debug_code) {
3290 __ li(a2, kDebugZapValue);
3291 __ Sd(a2, MemOperand(a1, offset));
3292 }
3293 }
3294
3295 int double_regs_offset = FrameDescription::double_registers_offset();
3296 // Copy FPU registers to
3297 // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3298 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3299 int code = config->GetAllocatableDoubleCode(i);
3300 int dst_offset = code * kDoubleSize + double_regs_offset;
3301 int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
3302 __ Ldc1(f0, MemOperand(sp, src_offset));
3303 __ Sdc1(f0, MemOperand(a1, dst_offset));
3304 }
3305
3306 // Remove the saved registers from the stack.
3307 __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize));
3308
3309 // Compute a pointer to the unwinding limit in register a2; that is
3310 // the first stack slot not part of the input frame.
3311 __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3312 __ Daddu(a2, a2, sp);
3313
3314 // Unwind the stack down to - but not including - the unwinding
3315 // limit and copy the contents of the activation frame to the input
3316 // frame description.
3317 __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
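  // Conceptually (illustrative only):
  //   uint64_t* dst = &input->frame_content_[0];
  //   while (sp != unwinding_limit) *dst++ = Pop();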
3318 Label pop_loop;
3319 Label pop_loop_header;
3320 __ BranchShort(&pop_loop_header);
3321 __ bind(&pop_loop);
3322 __ pop(a4);
3323 __ Sd(a4, MemOperand(a3, 0));
3324 __ daddiu(a3, a3, sizeof(uint64_t));
3325 __ bind(&pop_loop_header);
3326 __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3327 // Compute the output frame in the deoptimizer.
3328 __ push(a0); // Preserve deoptimizer object across call.
3329 // a0: deoptimizer object; a1: scratch.
3330 __ PrepareCallCFunction(1, a1);
3331 // Call Deoptimizer::ComputeOutputFrames().
3332 {
3333 AllowExternalCallThatCantCauseGC scope(masm);
3334 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3335 }
3336 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
3337
3338 __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3339
3340 // Replace the current (input) frame with the output frames.
3341 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3342 // Outer loop state: a4 = current "FrameDescription** output_",
3343 // a1 = one past the last FrameDescription**.
3344 __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3345 __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
3346 __ Dlsa(a1, a4, a1, kPointerSizeLog2);
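  // Conceptually (illustrative only):
  //   for (FrameDescription** out = output_; out != output_ + count; ++out) {
  //     for (intptr_t off = (*out)->frame_size_; off > 0;) {
  //       off -= sizeof(uint64_t);
  //       Push((*out)->frame_content_[off / sizeof(uint64_t)]);
  //     }
  //   }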
3347 __ BranchShort(&outer_loop_header);
3348 __ bind(&outer_push_loop);
3349 // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3350 __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
3351 __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3352 __ BranchShort(&inner_loop_header);
3353 __ bind(&inner_push_loop);
3354 __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
3355 __ Daddu(a6, a2, Operand(a3));
3356 __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
3357 __ push(a7);
3358 __ bind(&inner_loop_header);
3359 __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
3360
3361 __ Daddu(a4, a4, Operand(kPointerSize));
3362 __ bind(&outer_loop_header);
3363 __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
3364
3365 __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
3366 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3367 int code = config->GetAllocatableDoubleCode(i);
3368 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3369 int src_offset = code * kDoubleSize + double_regs_offset;
3370 __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
3371 }
3372
3373 // Push pc and continuation from the last output frame.
3374 __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
3375 __ push(a6);
3376 __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
3377 __ push(a6);
3378
3379 // Technically restoring 'at' should work unless zero_reg is also restored
3380 // but it's safer to check for this.
3381 DCHECK(!(at.bit() & restored_regs));
3382 // Restore the registers from the last output frame.
3383 __ mov(at, a2);
3384 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3385 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3386 if ((restored_regs & (1 << i)) != 0) {
3387 __ Ld(ToRegister(i), MemOperand(at, offset));
3388 }
3389 }
3390
3391 __ pop(at); // Get continuation, leave pc on stack.
3392 __ pop(ra);
3393 __ Jump(at);
3394 __ stop();
3395 }
3396
3397 } // namespace
3398
3399 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3400 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3401 }
3402
3403 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
3404 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
3405 }
3406
3407 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
3408 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
3409 }
3410
3411 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3412 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3413 }
3414
3415 #undef __
3416
3417 } // namespace internal
3418 } // namespace v8
3419
3420 #endif // V8_TARGET_ARCH_MIPS64
3421