1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_MIPS
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/debug/debug.h"
10 #include "src/deoptimizer/deoptimizer.h"
11 #include "src/execution/frame-constants.h"
12 #include "src/execution/frames.h"
13 #include "src/logging/counters.h"
14 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
15 #include "src/codegen/macro-assembler-inl.h"
16 #include "src/codegen/mips/constants-mips.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/heap/heap-inl.h"
19 #include "src/objects/cell.h"
20 #include "src/objects/foreign.h"
21 #include "src/objects/heap-number.h"
22 #include "src/objects/js-generator.h"
23 #include "src/objects/objects-inl.h"
24 #include "src/objects/smi.h"
25 #include "src/runtime/runtime.h"
26 #include "src/wasm/wasm-linkage.h"
27 #include "src/wasm/wasm-objects.h"
28
29 namespace v8 {
30 namespace internal {
31
32 #define __ ACCESS_MASM(masm)
33
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
35 __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
36 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
37 RelocInfo::CODE_TARGET);
38 }
39
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
41 Runtime::FunctionId function_id) {
42 // ----------- S t a t e -------------
43 // -- a0 : actual argument count
44 // -- a1 : target function (preserved for callee)
45 // -- a3 : new target (preserved for callee)
46 // -----------------------------------
47 {
48 FrameScope scope(masm, StackFrame::INTERNAL);
49 // Push a copy of the target function, the new target and the actual
50 // argument count.
51 // Push function as parameter to the runtime call.
52 __ SmiTag(kJavaScriptCallArgCountRegister);
53 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
54 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
55
56 __ CallRuntime(function_id, 1);
57
58 // Restore target function, new target and actual argument count.
59 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
60 kJavaScriptCallArgCountRegister);
61 __ SmiUntag(kJavaScriptCallArgCountRegister);
62 }
63
64 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
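// Note: the runtime function called above returns the Code object to enter
// in v0 (the MIPS return-value register). Adding Code::kHeaderSize and
// dropping the heap-object tag yields the address of its first instruction,
// which is then jumped to (descriptive note based on the Code layout at this
// revision).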
65 __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag);
66 __ Jump(a2);
67 }
68
69 namespace {
70
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
72 // ----------- S t a t e -------------
73 // -- a0 : number of arguments
74 // -- a1 : constructor function
75 // -- a3 : new target
76 // -- cp : context
77 // -- ra : return address
78 // -- sp[...]: constructor arguments
79 // -----------------------------------
80
81 // Enter a construct frame.
82 {
83 FrameScope scope(masm, StackFrame::CONSTRUCT);
84
85 // Preserve the incoming parameters on the stack.
86 __ SmiTag(a0);
87 __ Push(cp, a0);
88 __ SmiUntag(a0);
89 // Set up pointer to last argument (skip receiver).
90 __ Addu(
91 t2, fp,
92 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
93 // Copy arguments and receiver to the expression stack.
94 __ PushArray(t2, a0, t3, t0);
95 // The receiver for the builtin/api call.
96 __ PushRoot(RootIndex::kTheHoleValue);
97
98 // Call the function.
99 // a0: number of arguments (untagged)
100 // a1: constructor function
101 // a3: new target
102 __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
103
104 // Restore context from the frame.
105 __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
106 // Restore smi-tagged arguments count from the frame.
107 __ lw(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
108 // Leave construct frame.
109 }
110
111 // Remove caller arguments from the stack and return.
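// Note on the arithmetic below: t3 holds the Smi-tagged argument count. With
// 32-bit Smis (one tag bit, as on mips32) the tagged value is argc * 2, so
// shifting it left by kPointerSizeLog2 - 1 == 1 gives argc * kPointerSize
// bytes; e.g. argc == 3 is stored as 6 and frees 24 bytes. The extra
// kPointerSize then drops the receiver.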
112 __ Lsa(sp, sp, t3, kPointerSizeLog2 - 1);
113 __ Addu(sp, sp, kPointerSize);
114 __ Ret();
115 }
116
117 } // namespace
118
119 // The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
121 // ----------- S t a t e -------------
122 // -- a0: number of arguments (untagged)
123 // -- a1: constructor function
124 // -- a3: new target
125 // -- cp: context
126 // -- ra: return address
127 // -- sp[...]: constructor arguments
128 // -----------------------------------
129
130 // Enter a construct frame.
131 {
132 FrameScope scope(masm, StackFrame::CONSTRUCT);
133 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
134
135 // Preserve the incoming parameters on the stack.
136 __ SmiTag(a0);
137 __ Push(cp, a0, a1);
138 __ PushRoot(RootIndex::kTheHoleValue);
139 __ Push(a3);
140
141 // ----------- S t a t e -------------
142 // -- sp[0*kPointerSize]: new target
143 // -- sp[1*kPointerSize]: padding
144 // -- a1 and sp[2*kPointerSize]: constructor function
145 // -- sp[3*kPointerSize]: number of arguments (tagged)
146 // -- sp[4*kPointerSize]: context
147 // -----------------------------------
148
149 __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
150 __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
151 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
152 __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
&not_create_implicit_receiver);
154
155 // If not derived class constructor: Allocate the new receiver object.
156 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
157 t2, t3);
158 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject),
159 RelocInfo::CODE_TARGET);
160 __ Branch(&post_instantiation_deopt_entry);
161
162 // Else: use TheHoleValue as receiver for constructor call
__ bind(&not_create_implicit_receiver);
164 __ LoadRoot(v0, RootIndex::kTheHoleValue);
165
166 // ----------- S t a t e -------------
167 // -- v0: receiver
168 // -- Slot 4 / sp[0*kPointerSize]: new target
169 // -- Slot 3 / sp[1*kPointerSize]: padding
170 // -- Slot 2 / sp[2*kPointerSize]: constructor function
171 // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
172 // -- Slot 0 / sp[4*kPointerSize]: context
173 // -----------------------------------
174 // Deoptimizer enters here.
175 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
176 masm->pc_offset());
177 __ bind(&post_instantiation_deopt_entry);
178
179 // Restore new target.
180 __ Pop(a3);
181
182 // Push the allocated receiver to the stack.
183 __ Push(v0);
184 // We need two copies because we may have to return the original one
185 // and the calling conventions dictate that the called function pops the
// receiver. The second copy is pushed after the arguments; we save it in
// s0 since v0 will be clobbered by the call's return value.
188 __ mov(s0, v0);
189
190 // Set up pointer to last argument.
191 __ Addu(
192 t2, fp,
193 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
194
195 // ----------- S t a t e -------------
// -- a3: new target
197 // -- sp[0*kPointerSize]: implicit receiver
198 // -- sp[1*kPointerSize]: implicit receiver
199 // -- sp[2*kPointerSize]: padding
200 // -- sp[3*kPointerSize]: constructor function
201 // -- sp[4*kPointerSize]: number of arguments (tagged)
202 // -- sp[5*kPointerSize]: context
203 // -----------------------------------
204
205 // Restore constructor function and argument count.
206 __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
207 __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
208 __ SmiUntag(a0);
209
210 Label enough_stack_space, stack_overflow;
211 __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
212 __ Branch(&enough_stack_space);
213
214 __ bind(&stack_overflow);
215 // Restore the context from the frame.
216 __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
217 __ CallRuntime(Runtime::kThrowStackOverflow);
218 // Unreachable code.
219 __ break_(0xCC);
220
221 __ bind(&enough_stack_space);
222
223 // TODO(victorgomes): When the arguments adaptor is completely removed, we
224 // should get the formal parameter count and copy the arguments in its
225 // correct position (including any undefined), instead of delaying this to
226 // InvokeFunction.
227
228 // Copy arguments and receiver to the expression stack.
229 __ PushArray(t2, a0, t0, t1);
230 // We need two copies because we may have to return the original one
231 // and the calling conventions dictate that the called function pops the
232 // receiver. The second copy is pushed after the arguments.
233 __ Push(s0);
234
235 // Call the function.
236 __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
237
238 // ----------- S t a t e -------------
239 // -- v0: constructor result
240 // -- sp[0*kPointerSize]: implicit receiver
241 // -- sp[1*kPointerSize]: padding
242 // -- sp[2*kPointerSize]: constructor function
243 // -- sp[3*kPointerSize]: number of arguments
244 // -- sp[4*kPointerSize]: context
245 // -----------------------------------
246
247 // Store offset of return address for deoptimizer.
248 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
249 masm->pc_offset());
250
251 // Restore the context from the frame.
252 __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
253
254 // If the result is an object (in the ECMA sense), we should get rid
255 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
256 // on page 74.
257 Label use_receiver, do_throw, leave_frame;
258
259 // If the result is undefined, we jump out to using the implicit receiver.
260 __ JumpIfRoot(v0, RootIndex::kUndefinedValue, &use_receiver);
261
262 // Otherwise we do a smi check and fall through to check if the return value
263 // is a valid receiver.
264
265 // If the result is a smi, it is *not* an object in the ECMA sense.
266 __ JumpIfSmi(v0, &use_receiver);
267
268 // If the type of the result (stored in its map) is less than
269 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
270 __ GetObjectType(v0, t2, t2);
271 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
272 __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
273 __ Branch(&use_receiver);
274
275 __ bind(&do_throw);
276 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
277
278 // Throw away the result of the constructor invocation and use the
279 // on-stack receiver as the result.
280 __ bind(&use_receiver);
281 __ lw(v0, MemOperand(sp, 0 * kPointerSize));
282 __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw);
283
284 __ bind(&leave_frame);
285 // Restore smi-tagged arguments count from the frame.
286 __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
287 // Leave construct frame.
288 }
289 // Remove caller arguments from the stack and return.
290 __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
291 __ Addu(sp, sp, kPointerSize);
292 __ Ret();
293 }
294
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
296 Generate_JSBuiltinsConstructStubHelper(masm);
297 }
298
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
300 FrameScope scope(masm, StackFrame::INTERNAL);
301 __ Push(a1);
302 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
303 }
304
305 // Clobbers scratch1 and scratch2; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
307 Register scratch1, Register scratch2) {
308 // Check the stack for overflow. We are not trying to catch
309 // interruptions (e.g. debug break and preemption) here, so the "real stack
310 // limit" is checked.
311 Label okay;
312 __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
// Make scratch1 the space we have left. The stack might already be
// overflowed here, which will cause scratch1 to become negative.
315 __ Subu(scratch1, sp, scratch1);
316 // Check if the arguments will overflow the stack.
317 __ sll(scratch2, argc, kPointerSizeLog2);
318 // Signed comparison.
319 __ Branch(&okay, gt, scratch1, Operand(scratch2));
320
321 // Out of stack space.
322 __ CallRuntime(Runtime::kThrowStackOverflow);
323
324 __ bind(&okay);
325 }
326
327 namespace {
328
// Used by JSEntryTrampoline to locate the C++ parameters passed to JSEntryVariant.
330 constexpr int kPushedStackSpace =
331 kCArgsSlotsSize + (kNumCalleeSaved + 1) * kPointerSize +
332 kNumCalleeSavedFPU * kDoubleSize + 4 * kPointerSize +
333 EntryFrameConstants::kCallerFPOffset;
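// Note: this total is expected to mirror every push performed by
// Generate_JSEntryVariant below (C argument slots, callee-saved GPRs plus ra,
// callee-saved FPU registers, the four EntryFrame words, and the frame
// pointer adjustment); the DCHECK_EQ in that function verifies the
// bookkeeping, and JSEntryTrampoline adds it when locating the argc/argv
// passed by the C++ caller.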
334
335 // Called with the native C calling convention. The corresponding function
336 // signature is either:
337 //
338 // using JSEntryFunction = GeneratedCode<Address(
339 // Address root_register_value, Address new_target, Address target,
340 // Address receiver, intptr_t argc, Address** argv)>;
341 // or
342 // using JSEntryFunction = GeneratedCode<Address(
343 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
344 //
345 // Passes through a0, a1, a2, a3 and stack to JSEntryTrampoline.
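// Note on the register assignment under the MIPS O32 convention used here:
// a0 carries root_register_value (consumed below to set up kRootRegister),
// a1/a2/a3 carry new_target/target/receiver (or a1 the microtask queue for
// the RunMicrotasks variant), and argc/argv arrive on the stack, as spelled
// out in Generate_JSEntryTrampolineHelper.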
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
347 Builtins::Name entry_trampoline) {
348 Label invoke, handler_entry, exit;
349
350 int pushed_stack_space = kCArgsSlotsSize;
351 {
352 NoRootArrayScope no_root_array(masm);
353
354 // Registers:
355 // a0: root_register_value
356
357 // Save callee saved registers on the stack.
358 __ MultiPush(kCalleeSaved | ra.bit());
359 pushed_stack_space +=
360 kNumCalleeSaved * kPointerSize + kPointerSize /* ra */;
361
362 // Save callee-saved FPU registers.
363 __ MultiPushFPU(kCalleeSavedFPU);
364 pushed_stack_space += kNumCalleeSavedFPU * kDoubleSize;
365
366 // Set up the reserved register for 0.0.
367 __ Move(kDoubleRegZero, 0.0);
368
369 // Initialize the root register.
370 // C calling convention. The first argument is passed in a0.
371 __ mov(kRootRegister, a0);
372 }
373
374 // We build an EntryFrame.
375 __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
376 __ li(t2, Operand(StackFrame::TypeToMarker(type)));
377 __ li(t1, Operand(StackFrame::TypeToMarker(type)));
378 __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
379 masm->isolate()));
380 __ lw(t0, MemOperand(t0));
381 __ Push(t3, t2, t1, t0);
382 pushed_stack_space += 4 * kPointerSize;
383
384 // Set up frame pointer for the frame to be pushed.
385 __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
386 pushed_stack_space += EntryFrameConstants::kCallerFPOffset;
387
388 // Registers:
389 // a0: root_register_value
390 //
391 // Stack:
392 // caller fp |
393 // function slot | entry frame
394 // context slot |
395 // bad fp (0xFF...F) |
396 // callee saved registers + ra
397 // 4 args slots
398
399 // If this is the outermost JS call, set js_entry_sp value.
400 Label non_outermost_js;
401 ExternalReference js_entry_sp = ExternalReference::Create(
402 IsolateAddressId::kJSEntrySPAddress, masm->isolate());
403 __ li(t1, js_entry_sp);
404 __ lw(t2, MemOperand(t1));
405 __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
406 __ sw(fp, MemOperand(t1));
407 __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
408 Label cont;
409 __ b(&cont);
410 __ nop(); // Branch delay slot nop.
411 __ bind(&non_outermost_js);
412 __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
413 __ bind(&cont);
414 __ push(t0);
415
416 // Jump to a faked try block that does the invoke, with a faked catch
417 // block that sets the pending exception.
418 __ jmp(&invoke);
419 __ bind(&handler_entry);
420
421 // Store the current pc as the handler offset. It's used later to create the
422 // handler table.
423 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
424
425 // Caught exception: Store result (exception) in the pending exception
426 // field in the JSEnv and return a failure sentinel. Coming in here the
427 // fp will be invalid because the PushStackHandler below sets it to 0 to
428 // signal the existence of the JSEntry frame.
429 __ li(t0, ExternalReference::Create(
430 IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
431 __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
432 __ LoadRoot(v0, RootIndex::kException);
433 __ b(&exit); // b exposes branch delay slot.
434 __ nop(); // Branch delay slot nop.
435
436 // Invoke: Link this frame into the handler chain.
437 __ bind(&invoke);
438 __ PushStackHandler();
439 // If an exception not caught by another handler occurs, this handler
// returns control to the code after the jump to &invoke above, which
441 // restores all kCalleeSaved registers (including cp and fp) to their
442 // saved values before returning a failure to C.
443 //
444 // Preserve a1, a2 and a3 passed by C++ and pass them to the trampoline.
445 //
446 // Stack:
447 // handler frame
448 // entry frame
449 // callee saved registers + ra
450 // 4 args slots
451 //
452 // Invoke the function by calling through JS entry trampoline builtin and
453 // pop the faked function when we return.
454 Handle<Code> trampoline_code =
455 masm->isolate()->builtins()->builtin_handle(entry_trampoline);
456 DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
457 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
458
459 // Unlink this frame from the handler chain.
460 __ PopStackHandler();
461
462 __ bind(&exit); // v0 holds result
463 // Check if the current stack frame is marked as the outermost JS frame.
464 Label non_outermost_js_2;
465 __ pop(t1);
466 __ Branch(&non_outermost_js_2, ne, t1,
467 Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
468 __ li(t1, ExternalReference(js_entry_sp));
469 __ sw(zero_reg, MemOperand(t1));
470 __ bind(&non_outermost_js_2);
471
472 // Restore the top frame descriptors from the stack.
473 __ pop(t1);
474 __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
475 masm->isolate()));
476 __ sw(t1, MemOperand(t0));
477
478 // Reset the stack to the callee saved registers.
479 __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
480
481 // Restore callee-saved fpu registers.
482 __ MultiPopFPU(kCalleeSavedFPU);
483
484 // Restore callee saved registers from the stack.
485 __ MultiPop(kCalleeSaved | ra.bit());
486 // Return.
487 __ Jump(ra);
488 }
489
490 } // namespace
491
void Builtins::Generate_JSEntry(MacroAssembler* masm) {
493 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
494 Builtins::kJSEntryTrampoline);
495 }
496
void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
498 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
499 Builtins::kJSConstructEntryTrampoline);
500 }
501
void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
503 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
504 Builtins::kRunMicrotasksTrampoline);
505 }
506
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
508 bool is_construct) {
509 // ----------- S t a t e -------------
510 // -- a0: root_register_value (unused)
511 // -- a1: new.target
512 // -- a2: function
513 // -- a3: receiver_pointer
514 // -- [fp + kPushedStackSpace + 0 * kPointerSize]: argc
515 // -- [fp + kPushedStackSpace + 1 * kPointerSize]: argv
516 // -----------------------------------
517
518 // Enter an internal frame.
519 {
520 FrameScope scope(masm, StackFrame::INTERNAL);
521
522 // Setup the context (we need to use the caller context from the isolate).
523 ExternalReference context_address = ExternalReference::Create(
524 IsolateAddressId::kContextAddress, masm->isolate());
525 __ li(cp, context_address);
526 __ lw(cp, MemOperand(cp));
527
528 // Push the function onto the stack.
529 __ Push(a2);
530
531 __ lw(s0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
532 __ lw(a0,
533 MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgcOffset));
534 __ lw(s0,
535 MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
536
// Check if we have enough stack space to push all arguments; t1 below is
// argc + 1 to account for the receiver. Clobbers t0 and t2.
539 __ addiu(t1, a0, 1);
540 Generate_CheckStackOverflow(masm, t1, t0, t2);
541
542 // Copy arguments to the stack in a loop.
543 // a0: argc
544 // s0: argv, i.e. points to first arg
545 Label loop, entry;
546 __ Lsa(t2, s0, a0, kPointerSizeLog2);
547 __ b(&entry);
548 __ nop(); // Branch delay slot nop.
549 // t2 points past last arg.
550 __ bind(&loop);
551 __ addiu(t2, t2, -kPointerSize);
552 __ lw(t0, MemOperand(t2)); // Read next parameter.
553 __ lw(t0, MemOperand(t0)); // Dereference handle.
554 __ push(t0); // Push parameter.
555 __ bind(&entry);
556 __ Branch(&loop, ne, s0, Operand(t2));
557
558 // Push the receiver.
559 __ Push(a3);
560
561 // a0: argc
562 // a1: function
563 // a3: new.target
564 __ mov(a3, a1);
565 __ mov(a1, a2);
566
567 // Initialize all JavaScript callee-saved registers, since they will be seen
568 // by the garbage collector as part of handlers.
569 __ LoadRoot(t0, RootIndex::kUndefinedValue);
570 __ mov(s0, t0);
571 __ mov(s1, t0);
572 __ mov(s2, t0);
573 __ mov(s3, t0);
574 __ mov(s4, t0);
575 __ mov(s5, t0);
576 // s6 holds the root address. Do not clobber.
577 // s7 is cp. Do not init.
578
579 // Invoke the code.
580 Handle<Code> builtin = is_construct
581 ? BUILTIN_CODE(masm->isolate(), Construct)
582 : masm->isolate()->builtins()->Call();
583 __ Call(builtin, RelocInfo::CODE_TARGET);
584
585 // Leave internal frame.
586 }
587
588 __ Jump(ra);
589 }
590
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
592 Generate_JSEntryTrampolineHelper(masm, false);
593 }
594
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
596 Generate_JSEntryTrampolineHelper(masm, true);
597 }
598
void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
600 // a1: microtask_queue
601 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
602 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
603 }
604
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
606 Register sfi_data,
607 Register scratch1) {
608 Label done;
609
610 __ GetObjectType(sfi_data, scratch1, scratch1);
611 __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
612 __ lw(sfi_data,
613 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
614
615 __ bind(&done);
616 }
617
618 // static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
620 // ----------- S t a t e -------------
621 // -- v0 : the value to pass to the generator
622 // -- a1 : the JSGeneratorObject to resume
623 // -- ra : return address
624 // -----------------------------------
625
626 __ AssertGeneratorObject(a1);
627
628 // Store input value into generator object.
629 __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
630 __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
631 kRAHasNotBeenSaved, kDontSaveFPRegs);
632
633 // Load suspended function and context.
634 __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
635 __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
636
637 // Flood function if we are stepping.
638 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
639 Label stepping_prepared;
640 ExternalReference debug_hook =
641 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
642 __ li(t1, debug_hook);
643 __ lb(t1, MemOperand(t1));
644 __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
645
646 // Flood function if we need to continue stepping in the suspended generator.
647 ExternalReference debug_suspended_generator =
648 ExternalReference::debug_suspended_generator_address(masm->isolate());
649 __ li(t1, debug_suspended_generator);
650 __ lw(t1, MemOperand(t1));
651 __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1));
652 __ bind(&stepping_prepared);
653
654 // Check the stack for overflow. We are not trying to catch interruptions
655 // (i.e. debug break and preemption) here, so check the "real stack limit".
656 Label stack_overflow;
657 __ LoadStackLimit(kScratchReg,
658 MacroAssembler::StackLimitKind::kRealStackLimit);
659 __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
660
661 // ----------- S t a t e -------------
662 // -- a1 : the JSGeneratorObject to resume
663 // -- t0 : generator function
664 // -- cp : generator context
665 // -- ra : return address
666 // -----------------------------------
667
668 // Copy the function arguments from the generator object's register file.
669
670 __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
671 __ lhu(a3,
672 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
673 __ lw(t1,
674 FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
675 {
676 Label done_loop, loop;
677 __ bind(&loop);
678 __ Subu(a3, a3, Operand(1));
679 __ Branch(&done_loop, lt, a3, Operand(zero_reg));
680 __ Lsa(kScratchReg, t1, a3, kPointerSizeLog2);
681 __ Lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
682 __ Push(kScratchReg);
683 __ Branch(&loop);
684 __ bind(&done_loop);
685 // Push receiver.
686 __ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
687 __ Push(kScratchReg);
688 }
689
690 // Underlying function needs to have bytecode available.
691 if (FLAG_debug_code) {
692 __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
693 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
694 GetSharedFunctionInfoBytecode(masm, a3, a0);
695 __ GetObjectType(a3, a3, a3);
696 __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
697 Operand(BYTECODE_ARRAY_TYPE));
698 }
699
700 // Resume (Ignition/TurboFan) generator object.
701 {
702 __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
703 __ lhu(a0, FieldMemOperand(
704 a0, SharedFunctionInfo::kFormalParameterCountOffset));
705 // We abuse new.target both to indicate that this is a resume call and to
706 // pass in the generator object. In ordinary calls, new.target is always
707 // undefined because generator functions are non-constructable.
708 __ Move(a3, a1);
709 __ Move(a1, t0);
710 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
711 __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
712 __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
713 __ Jump(a2);
714 }
715
716 __ bind(&prepare_step_in_if_stepping);
717 {
718 FrameScope scope(masm, StackFrame::INTERNAL);
719 __ Push(a1, t0);
720 // Push hole as receiver since we do not use it for stepping.
721 __ PushRoot(RootIndex::kTheHoleValue);
722 __ CallRuntime(Runtime::kDebugOnFunctionCall);
723 __ Pop(a1);
724 }
725 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
726 __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
727
728 __ bind(&prepare_step_in_suspended_generator);
729 {
730 FrameScope scope(masm, StackFrame::INTERNAL);
731 __ Push(a1);
732 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
733 __ Pop(a1);
734 }
735 __ Branch(USE_DELAY_SLOT, &stepping_prepared);
736 __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
737
738 __ bind(&stack_overflow);
739 {
740 FrameScope scope(masm, StackFrame::INTERNAL);
741 __ CallRuntime(Runtime::kThrowStackOverflow);
742 __ break_(0xCC); // This should be unreachable.
743 }
744 }
745
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
747 Register optimized_code,
748 Register closure,
749 Register scratch1,
750 Register scratch2) {
751 // Store code entry in the closure.
752 __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
753 __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
754 __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
755 kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
756 OMIT_SMI_CHECK);
757 }
758
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
760 Register scratch2) {
761 Register params_size = scratch1;
762
763 // Get the size of the formal parameters + receiver (in bytes).
764 __ lw(params_size,
765 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
766 __ lw(params_size,
767 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
768
769 #ifdef V8_NO_ARGUMENTS_ADAPTOR
770 Register actual_params_size = scratch2;
771 // Compute the size of the actual parameters + receiver (in bytes).
772 __ Lw(actual_params_size,
773 MemOperand(fp, StandardFrameConstants::kArgCOffset));
774 __ sll(actual_params_size, actual_params_size, kPointerSizeLog2);
775 __ Addu(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
776
777 // If actual is bigger than formal, then we should use it to free up the stack
778 // arguments.
779 __ slt(t2, params_size, actual_params_size);
780 __ movn(params_size, actual_params_size, t2);
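// Note: the slt/movn pair above is a branchless max. slt sets t2 to 1 when
// params_size < actual_params_size, and movn copies actual_params_size into
// params_size only when t2 is non-zero, leaving params_size ==
// max(formal, actual).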
781 #endif
782
783 // Leave the frame (also dropping the register file).
784 __ LeaveFrame(StackFrame::INTERPRETED);
785
786 // Drop receiver + arguments.
787 __ Addu(sp, sp, params_size);
788 }
789
790 // Tail-call |function_id| if |actual_marker| == |expected_marker|
static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
792 Register actual_marker,
793 OptimizationMarker expected_marker,
794 Runtime::FunctionId function_id) {
795 Label no_match;
796 __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
797 GenerateTailCallToReturnedCode(masm, function_id);
798 __ bind(&no_match);
799 }
800
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
802 Register optimized_code_entry,
803 Register scratch1, Register scratch2) {
804 // ----------- S t a t e -------------
805 // -- a0 : actual argument count
806 // -- a3 : new target (preserved for callee if needed, and caller)
807 // -- a1 : target function (preserved for callee if needed, and caller)
808 // -----------------------------------
809 DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2));
810
811 Register closure = a1;
812 Label heal_optimized_code_slot;
813
814 // If the optimized code is cleared, go to runtime to update the optimization
815 // marker field.
816 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
817 &heal_optimized_code_slot);
818
819 // Check if the optimized code is marked for deopt. If it is, call the
820 // runtime to clear it.
821 __ Lw(scratch1,
822 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
823 __ Lw(scratch1,
824 FieldMemOperand(scratch1, CodeDataContainer::kKindSpecificFlagsOffset));
825 __ And(scratch1, scratch1, Operand(1 << Code::kMarkedForDeoptimizationBit));
826 __ Branch(&heal_optimized_code_slot, ne, scratch1, Operand(zero_reg));
827
828 // Optimized code is good, get it into the closure and link the closure into
829 // the optimized functions list, then tail call the optimized code.
830 // The feedback vector is no longer used, so re-use it as a scratch
831 // register.
832 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
833 scratch1, scratch2);
834 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
835 __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag);
836 __ Jump(a2);
837
838 // Optimized code slot contains deoptimized code or code is cleared and
839 // optimized code marker isn't updated. Evict the code, update the marker
840 // and re-enter the closure's code.
841 __ bind(&heal_optimized_code_slot);
842 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
843 }
844
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
846 Register optimization_marker) {
847 // ----------- S t a t e -------------
848 // -- a0 : actual argument count
849 // -- a3 : new target (preserved for callee if needed, and caller)
850 // -- a1 : target function (preserved for callee if needed, and caller)
851 // -- feedback vector (preserved for caller if needed)
// -- optimization_marker : an int32 containing a non-zero optimization
853 // marker.
854 // -----------------------------------
855 DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
856
857 // TODO(v8:8394): The logging of first execution will break if
858 // feedback vectors are not allocated. We need to find a different way of
859 // logging these events if required.
860 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
861 OptimizationMarker::kLogFirstExecution,
862 Runtime::kFunctionFirstExecution);
863 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
864 OptimizationMarker::kCompileOptimized,
865 Runtime::kCompileOptimized_NotConcurrent);
866 TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
867 OptimizationMarker::kCompileOptimizedConcurrent,
868 Runtime::kCompileOptimized_Concurrent);
869
870 // Marker should be one of LogFirstExecution / CompileOptimized /
871 // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
872 // here.
873 if (FLAG_debug_code) {
874 __ stop();
875 }
876 }
877
878 // Advance the current bytecode offset. This simulates what all bytecode
879 // handlers do upon completion of the underlying operation. Will bail out to a
880 // label if the bytecode (without prefix) is a return bytecode. Will not advance
881 // the bytecode offset if the current bytecode is a JumpLoop, instead just
882 // re-executing the JumpLoop to jump to the correct bytecode.
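// Worked example (descriptive note): if bytecode_offset points at a Wide
// prefix, the code below bumps the offset by one, reloads the real bytecode
// and switches to the wide size table before advancing. If that bytecode is
// JumpLoop, the offset is reset to the saved original so the next dispatch
// re-executes the (possibly prefixed) JumpLoop, which performs the backward
// jump itself.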
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
884 Register bytecode_array,
885 Register bytecode_offset,
886 Register bytecode, Register scratch1,
887 Register scratch2, Register scratch3,
888 Label* if_return) {
889 Register bytecode_size_table = scratch1;
890
// The bytecode offset value will be increased by one in wide and extra wide
// cases. In the case of having a wide or extra wide JumpLoop bytecode, we
// will restore the original bytecode offset. To simplify the code, we keep
// a backup of it in original_bytecode_offset.
895 Register original_bytecode_offset = scratch3;
896 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
897 bytecode_size_table, original_bytecode_offset));
898 __ Move(original_bytecode_offset, bytecode_offset);
899 __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
900
901 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
902 Label process_bytecode, extra_wide;
903 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
904 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
905 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
906 STATIC_ASSERT(3 ==
907 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
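// Note: the four values asserted above are exactly the prefix/debug-break
// prefix bytecodes, so anything greater than 3 is an ordinary bytecode.
// Among 0..3 the odd values are the extra-wide variants, which is what the
// And with 1 below tests.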
908 __ Branch(&process_bytecode, hi, bytecode, Operand(3));
909 __ And(scratch2, bytecode, Operand(1));
910 __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
911
912 // Load the next bytecode and update table to the wide scaled table.
913 __ Addu(bytecode_offset, bytecode_offset, Operand(1));
914 __ Addu(scratch2, bytecode_array, bytecode_offset);
915 __ lbu(bytecode, MemOperand(scratch2));
916 __ Addu(bytecode_size_table, bytecode_size_table,
917 Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
918 __ jmp(&process_bytecode);
919
920 __ bind(&extra_wide);
921 // Load the next bytecode and update table to the extra wide scaled table.
922 __ Addu(bytecode_offset, bytecode_offset, Operand(1));
923 __ Addu(scratch2, bytecode_array, bytecode_offset);
924 __ lbu(bytecode, MemOperand(scratch2));
925 __ Addu(bytecode_size_table, bytecode_size_table,
926 Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
927
928 __ bind(&process_bytecode);
929
930 // Bailout to the return label if this is a return bytecode.
931 #define JUMP_IF_EQUAL(NAME) \
932 __ Branch(if_return, eq, bytecode, \
933 Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
934 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
935 #undef JUMP_IF_EQUAL
936
937 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
938 // of the loop.
939 Label end, not_jump_loop;
__ Branch(&not_jump_loop, ne, bytecode,
941 Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
942 // We need to restore the original bytecode_offset since we might have
943 // increased it to skip the wide / extra-wide prefix bytecode.
944 __ Move(bytecode_offset, original_bytecode_offset);
945 __ jmp(&end);
946
__ bind(&not_jump_loop);
948 // Otherwise, load the size of the current bytecode and advance the offset.
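// Note: the size table stores one int32 entry per bytecode, hence the scale
// factor of 2 (log2 of kIntSize) in the Lsa below; the wide/extra-wide paths
// above already advanced the table base by one or two tables of
// kBytecodeCount entries.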
949 __ Lsa(scratch2, bytecode_size_table, bytecode, 2);
950 __ lw(scratch2, MemOperand(scratch2));
951 __ Addu(bytecode_offset, bytecode_offset, scratch2);
952
953 __ bind(&end);
954 }
955
956 // Generate code for entering a JS function with the interpreter.
957 // On entry to the function the receiver and arguments have been pushed on the
958 // stack left to right.
959 //
960 // The live registers are:
961 // o a0 : actual argument count (not including the receiver)
962 // o a1: the JS function object being called.
963 // o a3: the incoming new target or generator object
964 // o cp: our context
965 // o fp: the caller's frame pointer
966 // o sp: stack pointer
967 // o ra: return address
968 //
969 // The function builds an interpreter frame. See InterpreterFrameConstants in
970 // frames.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
972 Register closure = a1;
973 Register feedback_vector = a2;
974
975 // Get the bytecode array from the function object and load it into
976 // kInterpreterBytecodeArrayRegister.
977 __ lw(kScratchReg,
978 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
979 __ lw(kInterpreterBytecodeArrayRegister,
980 FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
981 GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
982 kScratchReg);
983
984 // The bytecode array could have been flushed from the shared function info,
985 // if so, call into CompileLazy.
986 Label compile_lazy;
987 __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
988 __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
989
990 // Load the feedback vector from the closure.
991 __ lw(feedback_vector,
992 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
993 __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
994
995 Label push_stack_frame;
996 // Check if feedback vector is valid. If valid, check for optimized code
// and update the invocation count. Otherwise, set up the stack frame.
998 __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
999 __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
1000 __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
1001
1002 // Read off the optimization state in the feedback vector, and if there
1003 // is optimized code or an optimization marker, call that instead.
1004 Register optimization_state = t0;
1005 __ Lw(optimization_state,
1006 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1007
// Check if the optimized code slot is not empty or has an optimization marker.
1009 Label has_optimized_code_or_marker;
1010
1011 __ andi(t1, optimization_state,
1012 FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
1013 __ Branch(&has_optimized_code_or_marker, ne, t1, Operand(zero_reg));
1014
1015 Label not_optimized;
__ bind(&not_optimized);
1017
1018 // Increment invocation count for the function.
1019 __ lw(t0, FieldMemOperand(feedback_vector,
1020 FeedbackVector::kInvocationCountOffset));
1021 __ Addu(t0, t0, Operand(1));
1022 __ sw(t0, FieldMemOperand(feedback_vector,
1023 FeedbackVector::kInvocationCountOffset));
1024
1025 // Open a frame scope to indicate that there is a frame on the stack. The
1026 // MANUAL indicates that the scope shouldn't actually generate code to set up
1027 // the frame (that is done below).
1028 __ bind(&push_stack_frame);
1029 FrameScope frame_scope(masm, StackFrame::MANUAL);
1030 __ PushStandardFrame(closure);
1031
1032 // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we can optimize by writing a single
// 16-bit value. These static asserts guard that this assumption is valid.
1035 STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
1036 BytecodeArray::kOsrNestingLevelOffset + kCharSize);
1037 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
1038 __ sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1039 BytecodeArray::kOsrNestingLevelOffset));
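// Note: the halfword store above writes zero across both adjacent byte
// fields, clearing the OSR nesting level and the bytecode age in one 16-bit
// access, which is exactly the optimization the static asserts license.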
1040
1041 // Load initial bytecode offset.
1042 __ li(kInterpreterBytecodeOffsetRegister,
1043 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1044
1045 // Push bytecode array and Smi tagged bytecode array offset.
1046 __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
1047 __ Push(kInterpreterBytecodeArrayRegister, t0);
1048
1049 // Allocate the local and temporary register file on the stack.
1050 Label stack_overflow;
1051 {
1052 // Load frame size from the BytecodeArray object.
1053 __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1054 BytecodeArray::kFrameSizeOffset));
1055
1056 // Do a stack check to ensure we don't go over the limit.
1057 __ Subu(t1, sp, Operand(t0));
1058 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
1059 __ Branch(&stack_overflow, lo, t1, Operand(a2));
1060
1061 // If ok, push undefined as the initial value for all register file entries.
1062 Label loop_header;
1063 Label loop_check;
1064 __ LoadRoot(t1, RootIndex::kUndefinedValue);
1065 __ Branch(&loop_check);
1066 __ bind(&loop_header);
1067 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1068 __ push(t1);
1069 // Continue loop if not done.
1070 __ bind(&loop_check);
1071 __ Subu(t0, t0, Operand(kPointerSize));
1072 __ Branch(&loop_header, ge, t0, Operand(zero_reg));
1073 }
1074
1075 // If the bytecode array has a valid incoming new target or generator object
// register, initialize it with the incoming value, which was passed in a3.
1077 Label no_incoming_new_target_or_generator_register;
1078 __ lw(t1, FieldMemOperand(
1079 kInterpreterBytecodeArrayRegister,
1080 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1081 __ Branch(&no_incoming_new_target_or_generator_register, eq, t1,
1082 Operand(zero_reg));
1083 __ Lsa(t1, fp, t1, kPointerSizeLog2);
1084 __ sw(a3, MemOperand(t1));
1085 __ bind(&no_incoming_new_target_or_generator_register);
1086
1087 // Perform interrupt stack check.
1088 // TODO(solanes): Merge with the real stack limit check above.
1089 Label stack_check_interrupt, after_stack_check_interrupt;
1090 __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit);
1091 __ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
1092 __ bind(&after_stack_check_interrupt);
1093
1094 // Load accumulator with undefined.
1095 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1096
1097 // Load the dispatch table into a register and dispatch to the bytecode
1098 // handler at the current bytecode offset.
1099 Label do_dispatch;
1100 __ bind(&do_dispatch);
1101 __ li(kInterpreterDispatchTableRegister,
1102 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1103 __ Addu(a0, kInterpreterBytecodeArrayRegister,
1104 kInterpreterBytecodeOffsetRegister);
1105 __ lbu(t3, MemOperand(a0));
1106 __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
1107 __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
1108 __ Call(kJavaScriptCallCodeStartRegister);
1109 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
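// Note: the pc offset recorded above marks the instruction right after the
// Call; Generate_InterpreterEnterBytecode later adds this offset to the
// trampoline's start address to fabricate a return address that resumes at
// the advance-and-dispatch code below.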
1110
1111 // Any returns to the entry trampoline are either due to the return bytecode
1112 // or the interpreter tail calling a builtin and then a dispatch.
1113
1114 // Get bytecode array and bytecode offset from the stack frame.
1115 __ lw(kInterpreterBytecodeArrayRegister,
1116 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1117 __ lw(kInterpreterBytecodeOffsetRegister,
1118 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1119 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1120 // Either return, or advance to the next bytecode and dispatch.
1121 Label do_return;
1122 __ Addu(a1, kInterpreterBytecodeArrayRegister,
1123 kInterpreterBytecodeOffsetRegister);
1124 __ lbu(a1, MemOperand(a1));
1125 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1126 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1127 t0, &do_return);
1128 __ jmp(&do_dispatch);
1129
1130 __ bind(&do_return);
1131 // The return value is in v0.
1132 LeaveInterpreterFrame(masm, t0, t1);
1133 __ Jump(ra);
1134
1135 __ bind(&stack_check_interrupt);
1136 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1137 // for the call to the StackGuard.
1138 __ li(kInterpreterBytecodeOffsetRegister,
1139 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1140 kFunctionEntryBytecodeOffset)));
1141 __ Sw(kInterpreterBytecodeOffsetRegister,
1142 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1143 __ CallRuntime(Runtime::kStackGuard);
1144
1145 // After the call, restore the bytecode array, bytecode offset and accumulator
1146 // registers again. Also, restore the bytecode offset in the stack to its
1147 // previous value.
1148 __ Lw(kInterpreterBytecodeArrayRegister,
1149 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1150 __ li(kInterpreterBytecodeOffsetRegister,
1151 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1152 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1153
1154 __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1155 __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1156
1157 __ jmp(&after_stack_check_interrupt);
1158
1159 __ bind(&has_optimized_code_or_marker);
1160
1161 Label maybe_has_optimized_code;
1162 // Check if optimized code marker is available
1163 __ andi(t1, optimization_state,
1164 FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker);
1165 __ Branch(&maybe_has_optimized_code, eq, t1, Operand(zero_reg));
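// Note: falling through here means the flags hold a compile/log optimization
// marker rather than cached code, so the marker bits are decoded and handed
// to MaybeOptimizeCode; the branch above covers the case where the slot
// holds a weak reference to optimized code instead.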
1166
1167 Register optimization_marker = optimization_state;
1168 __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
1169 MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
1170 // Fall through if there's no runnable optimized code.
__ jmp(&not_optimized);
1172
1173 __ bind(&maybe_has_optimized_code);
1174 Register optimized_code_entry = optimization_state;
1175 __ Lw(optimization_marker,
1176 FieldMemOperand(feedback_vector,
1177 FeedbackVector::kMaybeOptimizedCodeOffset));
1178
1179 TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
1180
1181 __ bind(&compile_lazy);
1182 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1183 // Unreachable code.
1184 __ break_(0xCC);
1185
1186 __ bind(&stack_overflow);
1187 __ CallRuntime(Runtime::kThrowStackOverflow);
1188 // Unreachable code.
1189 __ break_(0xCC);
1190 }
1191
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
1193 Register num_args,
1194 Register start_address,
1195 Register scratch, Register scratch2) {
1196 // Find the address of the last argument.
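// (Descriptive note: interpreter register-file slots for later arguments sit
// at lower addresses, so the last argument lives num_args - 1 slots below
// start_address; PushArray is then told to push the range in reverse order.)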
1197 __ Subu(scratch, num_args, Operand(1));
1198 __ sll(scratch, scratch, kPointerSizeLog2);
1199 __ Subu(start_address, start_address, scratch);
1200
1201 // Push the arguments.
1202 __ PushArray(start_address, num_args, scratch, scratch2,
1203 TurboAssembler::PushArrayOrder::kReverse);
1204 }
1205
1206 // static
void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1208 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1209 InterpreterPushArgsMode mode) {
1210 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1211 // ----------- S t a t e -------------
1212 // -- a0 : the number of arguments (not including the receiver)
1213 // -- a2 : the address of the first argument to be pushed. Subsequent
1214 // arguments should be consecutive above this, in the same order as
1215 // they are to be pushed onto the stack.
1216 // -- a1 : the target to call (can be any Object).
1217 // -----------------------------------
1218 Label stack_overflow;
1219 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1220 // The spread argument should not be pushed.
1221 __ Subu(a0, a0, Operand(1));
1222 }
1223
1224 __ Addu(t0, a0, Operand(1)); // Add one for receiver.
1225
1226 __ StackOverflowCheck(t0, t4, t1, &stack_overflow);
1227
1228 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1229 // Don't copy receiver.
1230 __ mov(t0, a0);
1231 }
1232
1233 // This function modifies a2, t4 and t1.
1234 Generate_InterpreterPushArgs(masm, t0, a2, t4, t1);
1235
1236 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1237 __ PushRoot(RootIndex::kUndefinedValue);
1238 }
1239
1240 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1241 // Pass the spread in the register a2.
// a2 already points to the penultimate argument; the spread
1243 // is below that.
1244 __ Lw(a2, MemOperand(a2, -kSystemPointerSize));
1245 }
1246
1247 // Call the target.
1248 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1249 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1250 RelocInfo::CODE_TARGET);
1251 } else {
1252 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1253 RelocInfo::CODE_TARGET);
1254 }
1255
1256 __ bind(&stack_overflow);
1257 {
1258 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1259 // Unreachable code.
1260 __ break_(0xCC);
1261 }
1262 }
1263
1264 // static
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1266 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1267 // ----------- S t a t e -------------
1268 // -- a0 : argument count (not including receiver)
1269 // -- a3 : new target
1270 // -- a1 : constructor to call
1271 // -- a2 : allocation site feedback if available, undefined otherwise.
1272 // -- t4 : address of the first argument
1273 // -----------------------------------
1274 Label stack_overflow;
1275 __ addiu(t2, a0, 1);
1276 __ StackOverflowCheck(t2, t1, t0, &stack_overflow);
1277
1278 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1279 // The spread argument should not be pushed.
1280 __ Subu(a0, a0, Operand(1));
1281 }
1282
1283 Generate_InterpreterPushArgs(masm, a0, t4, t1, t0);
1284
1285 // Push a slot for the receiver.
1286 __ push(zero_reg);
1287
1288 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1289 // Pass the spread in the register a2.
1290 // t4 already points to the penultimate argument, the spread
1291 // lies in the next interpreter register.
1292 // __ Subu(t4, t4, Operand(kSystemPointerSize));
1293 __ Lw(a2, MemOperand(t4, -kSystemPointerSize));
1294 } else {
1295 __ AssertUndefinedOrAllocationSite(a2, t0);
1296 }
1297
1298 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1299 __ AssertFunction(a1);
1300
1301 // Tail call to the array construct stub (still in the caller
1302 // context at this point).
1303 __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
1304 RelocInfo::CODE_TARGET);
1305 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1306 // Call the constructor with a0, a1, and a3 unmodified.
1307 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1308 RelocInfo::CODE_TARGET);
1309 } else {
1310 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1311 // Call the constructor with a0, a1, and a3 unmodified.
1312 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1313 }
1314
1315 __ bind(&stack_overflow);
1316 {
1317 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1318 // Unreachable code.
1319 __ break_(0xCC);
1320 }
1321 }
1322
static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1324 // Set the return address to the correct point in the interpreter entry
1325 // trampoline.
1326 Label builtin_trampoline, trampoline_loaded;
1327 Smi interpreter_entry_return_pc_offset(
1328 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1329 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1330
1331 // If the SFI function_data is an InterpreterData, the function will have a
1332 // custom copy of the interpreter entry trampoline for profiling. If so,
1333 // get the custom trampoline, otherwise grab the entry address of the global
1334 // trampoline.
1335 __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1336 __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
1337 __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
1338 __ GetObjectType(t0, kInterpreterDispatchTableRegister,
1339 kInterpreterDispatchTableRegister);
1340 __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
1341 Operand(INTERPRETER_DATA_TYPE));
1342
1343 __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
1344 __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
1345 __ Branch(&trampoline_loaded);
1346
1347 __ bind(&builtin_trampoline);
1348 __ li(t0, ExternalReference::
1349 address_of_interpreter_entry_trampoline_instruction_start(
1350 masm->isolate()));
1351 __ lw(t0, MemOperand(t0));
1352
1353 __ bind(&trampoline_loaded);
1354 __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
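// Note: ra now points into the interpreter entry trampoline just past its
// call to the bytecode handler (the offset recorded via
// SetInterpreterEntryReturnPCOffset), so when the dispatched handler returns
// it lands on the trampoline's advance-and-dispatch code.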
1355
1356 // Initialize the dispatch table register.
1357 __ li(kInterpreterDispatchTableRegister,
1358 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1359
1360 // Get the bytecode array pointer from the frame.
1361 __ lw(kInterpreterBytecodeArrayRegister,
1362 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1363
1364 if (FLAG_debug_code) {
1365 // Check function data field is actually a BytecodeArray object.
1366 __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
1367 __ Assert(ne,
1368 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1369 kScratchReg, Operand(zero_reg));
1370 __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
1371 __ Assert(eq,
1372 AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1373 a1, Operand(BYTECODE_ARRAY_TYPE));
1374 }
1375
1376 // Get the target bytecode offset from the frame.
1377 __ lw(kInterpreterBytecodeOffsetRegister,
1378 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1379 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1380
1381 if (FLAG_debug_code) {
1382 Label okay;
1383 __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
1384 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1385 // Unreachable code.
1386 __ break_(0xCC);
1387 __ bind(&okay);
1388 }
1389
1390 // Dispatch to the target bytecode.
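  // Load the current bytecode (one byte), use it to index into the dispatch
  // table, and jump to the corresponding bytecode handler.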
1391 __ Addu(a1, kInterpreterBytecodeArrayRegister,
1392 kInterpreterBytecodeOffsetRegister);
1393 __ lbu(t3, MemOperand(a1));
1394 __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
1395 __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1));
1396 __ Jump(kJavaScriptCallCodeStartRegister);
1397 }
1398
1399 void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
1400 // Advance the current bytecode offset stored within the given interpreter
1401 // stack frame. This simulates what all bytecode handlers do upon completion
1402 // of the underlying operation.
1403 __ lw(kInterpreterBytecodeArrayRegister,
1404 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1405 __ lw(kInterpreterBytecodeOffsetRegister,
1406 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1407 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1408
1409 Label enter_bytecode, function_entry_bytecode;
1410 __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
1411 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1412 kFunctionEntryBytecodeOffset));
1413
1414 // Load the current bytecode.
1415 __ Addu(a1, kInterpreterBytecodeArrayRegister,
1416 kInterpreterBytecodeOffsetRegister);
1417 __ lbu(a1, MemOperand(a1));
1418
1419 // Advance to the next bytecode.
1420 Label if_return;
1421 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1422 kInterpreterBytecodeOffsetRegister, a1, a2, a3,
1423 t0, &if_return);
1424
1425 __ bind(&enter_bytecode);
1426   // Convert the new bytecode offset to a Smi and save it in the stack frame.
1427 __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
1428 __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1429
1430 Generate_InterpreterEnterBytecode(masm);
1431
1432 __ bind(&function_entry_bytecode);
1433 // If the code deoptimizes during the implicit function entry stack interrupt
1434 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1435 // not a valid bytecode offset. Detect this case and advance to the first
1436 // actual bytecode.
1437 __ li(kInterpreterBytecodeOffsetRegister,
1438 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1439 __ Branch(&enter_bytecode);
1440
1441 // We should never take the if_return path.
1442 __ bind(&if_return);
1443 __ Abort(AbortReason::kInvalidBytecodeAdvance);
1444 }
1445
1446 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
1447 Generate_InterpreterEnterBytecode(masm);
1448 }
1449
1450 namespace {
1451 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1452 bool java_script_builtin,
1453 bool with_result) {
1454 const RegisterConfiguration* config(RegisterConfiguration::Default());
1455 int allocatable_register_count = config->num_allocatable_general_registers();
1456 UseScratchRegisterScope temps(masm);
1457 Register scratch = temps.Acquire(); // Temp register is not allocatable.
1458 // Register scratch = t3;
1459 if (with_result) {
1460 if (java_script_builtin) {
1461 __ mov(scratch, v0);
1462 } else {
1463 // Overwrite the hole inserted by the deoptimizer with the return value
1464 // from the LAZY deopt point.
1465 __ sw(v0,
1466 MemOperand(
1467 sp, config->num_allocatable_general_registers() * kPointerSize +
1468 BuiltinContinuationFrameConstants::kFixedFrameSize));
1469 }
1470 }
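  // Restore all allocatable general registers that the deoptimizer pushed
  // when building the continuation frame, in reverse allocation order; the
  // argument count register was stored as a Smi and is untagged here.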
1471 for (int i = allocatable_register_count - 1; i >= 0; --i) {
1472 int code = config->GetAllocatableGeneralCode(i);
1473 __ Pop(Register::from_code(code));
1474 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1475 __ SmiUntag(Register::from_code(code));
1476 }
1477 }
1478
1479 if (with_result && java_script_builtin) {
1480     // Overwrite the hole inserted by the deoptimizer with the return value from
1481     // the LAZY deopt point. a0 contains the arguments count; the return value
1482     // from LAZY is always the last argument.
1483 __ Addu(a0, a0,
1484 Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1485 __ Lsa(t0, sp, a0, kSystemPointerSizeLog2);
1486 __ Sw(scratch, MemOperand(t0));
1487 // Recover arguments count.
1488 __ Subu(a0, a0,
1489 Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
1490 }
1491
1492 __ lw(fp, MemOperand(
1493 sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1494 // Load builtin index (stored as a Smi) and use it to get the builtin start
1495 // address from the builtins table.
1496 __ Pop(t0);
1497 __ Addu(sp, sp,
1498 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
1499 __ Pop(ra);
1500 __ LoadEntryFromBuiltinIndex(t0);
1501 __ Jump(t0);
1502 }
1503 } // namespace
1504
1505 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
1506 Generate_ContinueToBuiltinHelper(masm, false, false);
1507 }
1508
1509 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
1510 MacroAssembler* masm) {
1511 Generate_ContinueToBuiltinHelper(masm, false, true);
1512 }
1513
1514 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
1515 Generate_ContinueToBuiltinHelper(masm, true, false);
1516 }
1517
1518 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
1519 MacroAssembler* masm) {
1520 Generate_ContinueToBuiltinHelper(masm, true, true);
1521 }
1522
1523 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
1524 {
1525 FrameScope scope(masm, StackFrame::INTERNAL);
1526 __ CallRuntime(Runtime::kNotifyDeoptimized);
1527 }
1528
1529 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
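  // The deoptimizer leaves the accumulator value on top of the stack; reload
  // it into the accumulator register (v0) and drop that slot while returning.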
1530 __ lw(v0, MemOperand(sp, 0 * kPointerSize));
1531 __ Ret(USE_DELAY_SLOT);
1532   // Safe to fill the delay slot; Addu will emit one instruction.
1533 __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove accumulator.
1534 }
1535
1536 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
1537 {
1538 FrameScope scope(masm, StackFrame::INTERNAL);
1539 __ CallRuntime(Runtime::kCompileForOnStackReplacement);
1540 }
1541
1542 // If the code object is null, just return to the caller.
1543 __ Ret(eq, v0, Operand(Smi::zero()));
1544
1545   // Drop the handler frame that is sitting on top of the actual
1546   // JavaScript frame. This is the case when OSR is triggered from bytecode.
1547 __ LeaveFrame(StackFrame::STUB);
1548
1549 // Load deoptimization data from the code object.
1550 // <deopt_data> = <code>[#deoptimization_data_offset]
1551 __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
1552
1553 // Load the OSR entrypoint offset from the deoptimization data.
1554 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
1555 __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
1556 DeoptimizationData::kOsrPcOffsetIndex) -
1557 kHeapObjectTag));
1558 __ SmiUntag(a1);
1559
1560 // Compute the target address = code_obj + header_size + osr_offset
1561 // <entry_addr> = <code_obj> + #header_size + <osr_offset>
1562 __ Addu(v0, v0, a1);
1563 __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
1564
1565 // And "return" to the OSR entry point of the function.
1566 __ Ret();
1567 }
1568
1569 // static
1570 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
1571 // ----------- S t a t e -------------
1572 // -- a0 : argc
1573 // -- sp[0] : receiver
1574 // -- sp[4] : thisArg
1575 // -- sp[8] : argArray
1576 // -----------------------------------
1577
1578 // 1. Load receiver into a1, argArray into a2 (if present), remove all
1579 // arguments from the stack (including the receiver), and push thisArg (if
1580 // present) instead.
1581 {
1582 Label no_arg;
1583 __ LoadRoot(a2, RootIndex::kUndefinedValue);
1584 __ mov(a3, a2);
1585     // Lsa() cannot be used here as the scratch value is used later.
1586 __ lw(a1, MemOperand(sp)); // receiver
1587 __ Branch(&no_arg, eq, a0, Operand(zero_reg));
1588 __ lw(a3, MemOperand(sp, kSystemPointerSize)); // thisArg
1589 __ Branch(&no_arg, eq, a0, Operand(1));
1590 __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
1591 __ bind(&no_arg);
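    // Drop all arguments (a0 slots) from the stack and store thisArg in the
    // receiver slot, leaving it as sp[0] for the tail calls below.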
1592 __ Lsa(sp, sp, a0, kPointerSizeLog2);
1593 __ sw(a3, MemOperand(sp));
1594 }
1595
1596 // ----------- S t a t e -------------
1597 // -- a2 : argArray
1598 // -- a1 : receiver
1599 // -- sp[0] : thisArg
1600 // -----------------------------------
1601
1602 // 2. We don't need to check explicitly for callable receiver here,
1603 // since that's the first thing the Call/CallWithArrayLike builtins
1604 // will do.
1605
1606 // 3. Tail call with no arguments if argArray is null or undefined.
1607 Label no_arguments;
1608 __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
1609 __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
1610
1611 // 4a. Apply the receiver to the given argArray.
1612 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1613 RelocInfo::CODE_TARGET);
1614
1615 // 4b. The argArray is either null or undefined, so we tail call without any
1616 // arguments to the receiver.
1617 __ bind(&no_arguments);
1618 {
1619 __ mov(a0, zero_reg);
1620 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1621 }
1622 }
1623
1624 // static
1625 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
1626 // 1. Get the callable to call (passed as receiver) from the stack.
1627 __ Pop(a1);
1628
1629 // 2. Make sure we have at least one argument.
1630 // a0: actual number of arguments
1631 {
1632 Label done;
1633 __ Branch(&done, ne, a0, Operand(zero_reg));
1634 __ PushRoot(RootIndex::kUndefinedValue);
1635 __ Addu(a0, a0, Operand(1));
1636 __ bind(&done);
1637 }
1638
1639 // 3. Adjust the actual number of arguments.
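  // The original receiver (the callable) was popped above, so the first
  // argument (thisArg) now occupies the receiver slot and no longer counts
  // as an argument.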
1640 __ addiu(a0, a0, -1);
1641
1642 // 4. Call the callable.
1643 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
1644 }
1645
1646 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
1647 // ----------- S t a t e -------------
1648 // -- a0 : argc
1649 // -- sp[0] : receiver
1650 // -- sp[4] : target (if argc >= 1)
1651 // -- sp[8] : thisArgument (if argc >= 2)
1652 // -- sp[12] : argumentsList (if argc == 3)
1653 // -----------------------------------
1654
1655 // 1. Load target into a1 (if present), argumentsList into a0 (if present),
1656 // remove all arguments from the stack (including the receiver), and push
1657 // thisArgument (if present) instead.
1658 {
1659 Label no_arg;
1660 __ LoadRoot(a1, RootIndex::kUndefinedValue);
1661 __ mov(a2, a1);
1662 __ mov(a3, a1);
1663 __ Branch(&no_arg, eq, a0, Operand(zero_reg));
1664 __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
1665 __ Branch(&no_arg, eq, a0, Operand(1));
1666 __ lw(a3, MemOperand(sp, 2 * kSystemPointerSize)); // thisArgument
1667 __ Branch(&no_arg, eq, a0, Operand(2));
1668 __ lw(a2, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList
1669 __ bind(&no_arg);
1670 __ Lsa(sp, sp, a0, kPointerSizeLog2);
1671 __ sw(a3, MemOperand(sp));
1672 }
1673
1674 // ----------- S t a t e -------------
1675 // -- a2 : argumentsList
1676 // -- a1 : target
1677 // -- sp[0] : thisArgument
1678 // -----------------------------------
1679
1680 // 2. We don't need to check explicitly for callable target here,
1681 // since that's the first thing the Call/CallWithArrayLike builtins
1682 // will do.
1683
1684 // 3. Apply the target to the given argumentsList.
1685 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
1686 RelocInfo::CODE_TARGET);
1687 }
1688
1689 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
1690 // ----------- S t a t e -------------
1691 // -- a0 : argc
1692 // -- sp[0] : receiver
1693 // -- sp[4] : target
1694 // -- sp[8] : argumentsList
1695 // -- sp[12] : new.target (optional)
1696 // -----------------------------------
1697
1698 // 1. Load target into a1 (if present), argumentsList into a2 (if present),
1699 // new.target into a3 (if present, otherwise use target), remove all
1700 // arguments from the stack (including the receiver), and push thisArgument
1701 // (if present) instead.
1702 {
1703 Label no_arg;
1704 __ LoadRoot(a1, RootIndex::kUndefinedValue);
1705 __ mov(a2, a1);
1706 __ mov(t0, a1);
1707 __ Branch(&no_arg, eq, a0, Operand(zero_reg));
1708 __ lw(a1, MemOperand(sp, kSystemPointerSize)); // target
1709 __ mov(a3, a1); // new.target defaults to target
1710 __ Branch(&no_arg, eq, a0, Operand(1));
1711 __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize)); // argumentsList
1712 __ Branch(&no_arg, eq, a0, Operand(2));
1713 __ lw(a3, MemOperand(sp, 3 * kSystemPointerSize)); // new.target
1714 __ bind(&no_arg);
1715 __ Lsa(sp, sp, a0, kPointerSizeLog2);
1716     __ sw(t0, MemOperand(sp));  // store undefined as the receiver
1717 }
1718
1719 // ----------- S t a t e -------------
1720 // -- a2 : argumentsList
1721 // -- a3 : new.target
1722 // -- a1 : target
1723 // -- sp[0] : receiver (undefined)
1724 // -----------------------------------
1725
1726 // 2. We don't need to check explicitly for constructor target here,
1727 // since that's the first thing the Construct/ConstructWithArrayLike
1728 // builtins will do.
1729
1730 // 3. We don't need to check explicitly for constructor new.target here,
1731 // since that's the second thing the Construct/ConstructWithArrayLike
1732 // builtins will do.
1733
1734 // 4. Construct the target with the given new.target and argumentsList.
1735 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
1736 RelocInfo::CODE_TARGET);
1737 }
1738
1739 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
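  // Builds an arguments adaptor frame: saves ra and the caller fp, preserves
  // the ARGUMENTS_ADAPTOR frame marker, the function (a1), the smi-tagged
  // actual argument count (a0) and one padding slot, then points fp at the
  // standard frame position.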
1740 __ sll(a0, a0, kSmiTagSize);
1741 __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1742 __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
1743 __ Push(Smi::zero()); // Padding.
1744 __ Addu(fp, sp,
1745 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp));
1746 }
1747
1748 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
1749 // ----------- S t a t e -------------
1750 // -- v0 : result being passed through
1751 // -----------------------------------
1752   // Get the number of arguments passed (as a smi), tear down the frame and
1753   // then drop the parameters from the stack.
1754 __ lw(a1, MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
1755 __ mov(sp, fp);
1756 __ MultiPop(fp.bit() | ra.bit());
1757 __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
1758 // Adjust for the receiver.
1759 __ Addu(sp, sp, Operand(kPointerSize));
1760 }
1761
1762 // static
1763 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
1764 Handle<Code> code) {
1765 // ----------- S t a t e -------------
1766 // -- a1 : target
1767 // -- a0 : number of parameters on the stack (not including the receiver)
1768 // -- a2 : arguments list (a FixedArray)
1769 // -- t0 : len (number of elements to push from args)
1770 // -- a3 : new.target (for [[Construct]])
1771 // -----------------------------------
1772 if (masm->emit_debug_code()) {
1773 // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
1774 Label ok, fail;
1775 __ AssertNotSmi(a2);
1776 __ GetObjectType(a2, t8, t8);
1777 __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
1778 __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
1779 __ Branch(&ok, eq, t0, Operand(0));
1780 // Fall through.
1781 __ bind(&fail);
1782 __ Abort(AbortReason::kOperandIsNotAFixedArray);
1783
1784 __ bind(&ok);
1785 }
1786
1787 // Check for stack overflow.
1788 Label stack_overflow;
1789 __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
1790
1791 // Move the arguments already in the stack,
1792 // including the receiver and the return address.
1793 {
1794 Label copy;
1795 Register src = t3, dest = t4;
1796 __ mov(src, sp);
1797 __ sll(t1, t0, kSystemPointerSizeLog2);
1798 __ Subu(sp, sp, Operand(t1));
1799 // Update stack pointer.
1800 __ mov(dest, sp);
1801 __ Addu(t1, a0, Operand(zero_reg));
1802
1803     __ bind(&copy);
1804 __ Lw(t2, MemOperand(src, 0));
1805 __ Sw(t2, MemOperand(dest, 0));
1806 __ Subu(t1, t1, Operand(1));
1807 __ Addu(src, src, Operand(kSystemPointerSize));
1808 __ Addu(dest, dest, Operand(kSystemPointerSize));
1809     __ Branch(&copy, ge, t1, Operand(zero_reg));
1810 }
1811
1812 // Push arguments onto the stack (thisArgument is already on the stack).
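  // Elements read from the FixedArray may be the-hole (e.g. missing entries
  // in an arguments object); such holes are replaced with undefined before
  // being pushed.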
1813 {
1814 __ mov(t2, zero_reg);
1815 Label done, push, loop;
1816 __ LoadRoot(t1, RootIndex::kTheHoleValue);
1817 __ bind(&loop);
1818 __ Branch(&done, eq, t2, Operand(t0));
1819 __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
1820 __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
1821 __ Addu(t2, t2, Operand(1));
1822 __ Branch(&push, ne, t1, Operand(kScratchReg));
1823 __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
1824 __ bind(&push);
1825 __ Sw(kScratchReg, MemOperand(t4, 0));
1826 __ Addu(t4, t4, Operand(kSystemPointerSize));
1827 __ Branch(&loop);
1828 __ bind(&done);
1829 __ Addu(a0, a0, t2);
1830 }
1831
1832 // Tail-call to the actual Call or Construct builtin.
1833 __ Jump(code, RelocInfo::CODE_TARGET);
1834
1835 __ bind(&stack_overflow);
1836 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1837 }
1838
1839 // static
1840 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
1841 CallOrConstructMode mode,
1842 Handle<Code> code) {
1843 // ----------- S t a t e -------------
1844 // -- a0 : the number of arguments (not including the receiver)
1845 // -- a3 : the new.target (for [[Construct]] calls)
1846 // -- a1 : the target to call (can be any Object)
1847 // -- a2 : start index (to support rest parameters)
1848 // -----------------------------------
1849
1850 // Check if new.target has a [[Construct]] internal method.
1851 if (mode == CallOrConstructMode::kConstruct) {
1852 Label new_target_constructor, new_target_not_constructor;
1853 __ JumpIfSmi(a3, &new_target_not_constructor);
1854 __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
1855 __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
1856 __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
1857 __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
1858 __ bind(&new_target_not_constructor);
1859 {
1860 FrameScope scope(masm, StackFrame::MANUAL);
1861 __ EnterFrame(StackFrame::INTERNAL);
1862 __ Push(a3);
1863 __ CallRuntime(Runtime::kThrowNotConstructor);
1864 }
1865 __ bind(&new_target_constructor);
1866 }
1867
1868 #ifdef V8_NO_ARGUMENTS_ADAPTOR
1869 // TODO(victorgomes): Remove this copy when all the arguments adaptor frame
1870 // code is erased.
1871 __ mov(t3, fp);
1872 __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
1873 #else
1874
1875 // Check if we have an arguments adaptor frame below the function frame.
1876 Label arguments_adaptor, arguments_done;
1877 __ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1878 __ lw(t2, MemOperand(t3, CommonFrameConstants::kContextOrFrameTypeOffset));
1879 __ Branch(&arguments_adaptor, eq, t2,
1880 Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
1881 {
1882 __ lw(t2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1883 __ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
1884 __ lhu(t2, FieldMemOperand(
1885 t2, SharedFunctionInfo::kFormalParameterCountOffset));
1886 __ mov(t3, fp);
1887 }
1888 __ Branch(&arguments_done);
1889 __ bind(&arguments_adaptor);
1890 {
1891 // Just get the length from the ArgumentsAdaptorFrame.
1892 __ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
1893 __ SmiUntag(t2);
1894 }
1895 __ bind(&arguments_done);
1896 #endif
1897
1898 Label stack_done, stack_overflow;
1899 __ Subu(t2, t2, a2);
1900 __ Branch(&stack_done, le, t2, Operand(zero_reg));
1901 {
1902 // Check for stack overflow.
1903 __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
1904
1905 // Forward the arguments from the caller frame.
1906 // Point to the first argument to copy (skipping the receiver).
1907 __ Addu(t3, t3,
1908 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
1909 kSystemPointerSize));
1910 __ Lsa(t3, t3, a2, kSystemPointerSizeLog2);
1911
1912 // Move the arguments already in the stack,
1913 // including the receiver and the return address.
1914 {
1915 Label copy;
1916 Register src = t5, dest = a2;
1917 __ mov(src, sp);
1918 // Update stack pointer.
1919 __ sll(t6, t2, kSystemPointerSizeLog2);
1920 __ Subu(sp, sp, Operand(t6));
1921 __ mov(dest, sp);
1922 __ Addu(t7, a0, Operand(zero_reg));
1923
1924       __ bind(&copy);
1925 __ Lw(t6, MemOperand(src, 0));
1926 __ Sw(t6, MemOperand(dest, 0));
1927 __ Subu(t7, t7, Operand(1));
1928 __ Addu(src, src, Operand(kSystemPointerSize));
1929 __ Addu(dest, dest, Operand(kSystemPointerSize));
1930       __ Branch(&copy, ge, t7, Operand(zero_reg));
1931 }
1932
1933 // Copy arguments from the caller frame.
1934 // TODO(victorgomes): Consider using forward order as potentially more cache
1935 // friendly.
1936 {
1937 Label loop;
1938 __ Addu(a0, a0, t2);
1939 __ bind(&loop);
1940 {
1941 __ Subu(t2, t2, Operand(1));
1942 __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
1943 __ lw(kScratchReg, MemOperand(kScratchReg));
1944 __ Lsa(t0, a2, t2, kPointerSizeLog2);
1945 __ Sw(kScratchReg, MemOperand(t0));
1946 __ Branch(&loop, ne, t2, Operand(zero_reg));
1947 }
1948 }
1949 }
1950 __ Branch(&stack_done);
1951 __ bind(&stack_overflow);
1952 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1953 __ bind(&stack_done);
1954
1955 // Tail-call to the {code} handler.
1956 __ Jump(code, RelocInfo::CODE_TARGET);
1957 }
1958
1959 // static
1960 void Builtins::Generate_CallFunction(MacroAssembler* masm,
1961 ConvertReceiverMode mode) {
1962 // ----------- S t a t e -------------
1963 // -- a0 : the number of arguments (not including the receiver)
1964 // -- a1 : the function to call (checked to be a JSFunction)
1965 // -----------------------------------
1966 __ AssertFunction(a1);
1967
1968 // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
1969 // Check that the function is not a "classConstructor".
1970 Label class_constructor;
1971 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1972 __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1973 __ And(kScratchReg, a3,
1974 Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
1975 __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
1976
1977 // Enter the context of the function; ToObject has to run in the function
1978 // context, and we also need to take the global proxy from the function
1979 // context in case of conversion.
1980 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
1981 // We need to convert the receiver for non-native sloppy mode functions.
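  // Functions that are native or in strict mode take the receiver as-is;
  // only sloppy-mode, non-native functions fall through to the ToObject /
  // global-proxy conversion below.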
1982 Label done_convert;
1983 __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
1984 __ And(kScratchReg, a3,
1985 Operand(SharedFunctionInfo::IsNativeBit::kMask |
1986 SharedFunctionInfo::IsStrictBit::kMask));
1987 __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
1988 {
1989 // ----------- S t a t e -------------
1990 // -- a0 : the number of arguments (not including the receiver)
1991 // -- a1 : the function to call (checked to be a JSFunction)
1992 // -- a2 : the shared function info.
1993 // -- cp : the function context.
1994 // -----------------------------------
1995
1996 if (mode == ConvertReceiverMode::kNullOrUndefined) {
1997 // Patch receiver to global proxy.
1998 __ LoadGlobalProxy(a3);
1999 } else {
2000 Label convert_to_object, convert_receiver;
2001 __ LoadReceiver(a3, a0);
2002 __ JumpIfSmi(a3, &convert_to_object);
2003 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2004 __ GetObjectType(a3, t0, t0);
2005 __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
2006 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2007 Label convert_global_proxy;
2008 __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
2009 __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
2010 __ bind(&convert_global_proxy);
2011 {
2012 // Patch receiver to global proxy.
2013 __ LoadGlobalProxy(a3);
2014 }
2015 __ Branch(&convert_receiver);
2016 }
2017 __ bind(&convert_to_object);
2018 {
2019 // Convert receiver using ToObject.
2020 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2021 // in the fast case? (fall back to AllocateInNewSpace?)
2022 FrameScope scope(masm, StackFrame::INTERNAL);
2023 __ sll(a0, a0, kSmiTagSize); // Smi tagged.
2024 __ Push(a0, a1);
2025 __ mov(a0, a3);
2026 __ Push(cp);
2027 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2028 RelocInfo::CODE_TARGET);
2029 __ Pop(cp);
2030 __ mov(a3, v0);
2031 __ Pop(a0, a1);
2032 __ sra(a0, a0, kSmiTagSize); // Un-tag.
2033 }
2034 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2035 __ bind(&convert_receiver);
2036 }
2037 __ StoreReceiver(a3, a0, kScratchReg);
2038 }
2039 __ bind(&done_convert);
2040
2041 // ----------- S t a t e -------------
2042 // -- a0 : the number of arguments (not including the receiver)
2043 // -- a1 : the function to call (checked to be a JSFunction)
2044 // -- a2 : the shared function info.
2045 // -- cp : the function context.
2046 // -----------------------------------
2047
2048 __ lhu(a2,
2049 FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
2050 __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
2051
2052 // The function is a "classConstructor", need to raise an exception.
2053 __ bind(&class_constructor);
2054 {
2055 FrameScope frame(masm, StackFrame::INTERNAL);
2056 __ Push(a1);
2057 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2058 }
2059 }
2060
2061 // static
2062 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2063 // ----------- S t a t e -------------
2064 // -- a0 : the number of arguments (not including the receiver)
2065 // -- a1 : the function to call (checked to be a JSBoundFunction)
2066 // -----------------------------------
2067 __ AssertBoundFunction(a1);
2068
2069 // Patch the receiver to [[BoundThis]].
2070 {
2071 __ lw(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
2072 __ StoreReceiver(t0, a0, kScratchReg);
2073 }
2074
2075 // Load [[BoundArguments]] into a2 and length of that into t0.
2076 __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2077 __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
2078 __ SmiUntag(t0);
2079
2080 // ----------- S t a t e -------------
2081 // -- a0 : the number of arguments (not including the receiver)
2082 // -- a1 : the function to call (checked to be a JSBoundFunction)
2083 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2084 // -- t0 : the number of [[BoundArguments]]
2085 // -----------------------------------
2086
2087 // Reserve stack space for the [[BoundArguments]].
2088 {
2089 Label done;
2090 __ sll(t1, t0, kPointerSizeLog2);
2091 __ Subu(t1, sp, Operand(t1));
2092 // Check the stack for overflow. We are not trying to catch interruptions
2093 // (i.e. debug break and preemption) here, so check the "real stack limit".
2094 __ LoadStackLimit(kScratchReg,
2095 MacroAssembler::StackLimitKind::kRealStackLimit);
2096 __ Branch(&done, hs, t1, Operand(kScratchReg));
2097 {
2098 FrameScope scope(masm, StackFrame::MANUAL);
2099 __ EnterFrame(StackFrame::INTERNAL);
2100 __ CallRuntime(Runtime::kThrowStackOverflow);
2101 }
2102 __ bind(&done);
2103 }
2104
2105 // Pop receiver.
2106 __ Pop(t1);
2107
2108 // Push [[BoundArguments]].
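  // The bound arguments are pushed from the highest index down so that, once
  // the receiver is re-pushed on top, they sit between the receiver and the
  // original call arguments, i.e. they are passed before the call's own
  // arguments as bound-function semantics require.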
2109 {
2110 Label loop, done_loop;
2111 __ Addu(a0, a0, Operand(t0));
2112 __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2113 __ bind(&loop);
2114 __ Subu(t0, t0, Operand(1));
2115 __ Branch(&done_loop, lt, t0, Operand(zero_reg));
2116 __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2);
2117 __ Lw(kScratchReg, MemOperand(kScratchReg));
2118 __ Push(kScratchReg);
2119 __ Branch(&loop);
2120 __ bind(&done_loop);
2121 }
2122
2123 // Push receiver.
2124 __ Push(t1);
2125
2126 // Call the [[BoundTargetFunction]] via the Call builtin.
2127 __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2128 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2129 RelocInfo::CODE_TARGET);
2130 }
2131
2132 // static
2133 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2134 // ----------- S t a t e -------------
2135 // -- a0 : the number of arguments (not including the receiver)
2136 // -- a1 : the target to call (can be any Object).
2137 // -----------------------------------
2138
2139 Label non_callable, non_smi;
2140 __ JumpIfSmi(a1, &non_callable);
2141 __ bind(&non_smi);
2142 __ GetObjectType(a1, t1, t2);
2143 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2144 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
2145 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2146 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2147
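  // Anything that is neither a JSFunction nor a JSBoundFunction is dispatched
  // on its map: it must at least be callable, proxies go to CallProxy, and
  // the rest is routed through the call-as-function delegate.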
2148 // Check if target has a [[Call]] internal method.
2149 __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
2150 __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
2151 __ Branch(&non_callable, eq, t1, Operand(zero_reg));
2152
2153   // Check if the target is a proxy and, if so, call the CallProxy external builtin.
2154 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy),
2155 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_PROXY_TYPE));
2156
2157 // 2. Call to something else, which might have a [[Call]] internal method (if
2158 // not we raise an exception).
2159 // Overwrite the original receiver with the (original) target.
2160 __ StoreReceiver(a1, a0, kScratchReg);
2161 // Let the "call_as_function_delegate" take care of the rest.
2162 __ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
2163 __ Jump(masm->isolate()->builtins()->CallFunction(
2164 ConvertReceiverMode::kNotNullOrUndefined),
2165 RelocInfo::CODE_TARGET);
2166
2167 // 3. Call to something that is not callable.
2168 __ bind(&non_callable);
2169 {
2170 FrameScope scope(masm, StackFrame::INTERNAL);
2171 __ Push(a1);
2172 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2173 }
2174 }
2175
2176 // static
2177 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2178 // ----------- S t a t e -------------
2179 // -- a0 : the number of arguments (not including the receiver)
2180 // -- a1 : the constructor to call (checked to be a JSFunction)
2181 // -- a3 : the new target (checked to be a constructor)
2182 // -----------------------------------
2183 __ AssertConstructor(a1);
2184 __ AssertFunction(a1);
2185
2186   // The calling convention for function-specific ConstructStubs requires
2187   // a2 to contain either an AllocationSite or undefined.
2188 __ LoadRoot(a2, RootIndex::kUndefinedValue);
2189
2190 Label call_generic_stub;
2191
2192 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2193 __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
2194 __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset));
2195 __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2196 __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg));
2197
2198 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2199 RelocInfo::CODE_TARGET);
2200
2201 __ bind(&call_generic_stub);
2202 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2203 RelocInfo::CODE_TARGET);
2204 }
2205
2206 // static
2207 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2208 // ----------- S t a t e -------------
2209 // -- a0 : the number of arguments (not including the receiver)
2210 // -- a1 : the function to call (checked to be a JSBoundFunction)
2211 // -- a3 : the new target (checked to be a constructor)
2212 // -----------------------------------
2213 __ AssertConstructor(a1);
2214 __ AssertBoundFunction(a1);
2215
2216 // Load [[BoundArguments]] into a2 and length of that into t0.
2217 __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
2218 __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset));
2219 __ SmiUntag(t0);
2220
2221 // ----------- S t a t e -------------
2222 // -- a0 : the number of arguments (not including the receiver)
2223 // -- a1 : the function to call (checked to be a JSBoundFunction)
2224 // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
2225 // -- a3 : the new target (checked to be a constructor)
2226 // -- t0 : the number of [[BoundArguments]]
2227 // -----------------------------------
2228
2229 // Reserve stack space for the [[BoundArguments]].
2230 {
2231 Label done;
2232 __ sll(t1, t0, kPointerSizeLog2);
2233 __ Subu(t1, sp, Operand(t1));
2234 // Check the stack for overflow. We are not trying to catch interruptions
2235 // (i.e. debug break and preemption) here, so check the "real stack limit".
2236 __ LoadStackLimit(kScratchReg,
2237 MacroAssembler::StackLimitKind::kRealStackLimit);
2238 __ Branch(&done, hs, t1, Operand(kScratchReg));
2239 {
2240 FrameScope scope(masm, StackFrame::MANUAL);
2241 __ EnterFrame(StackFrame::INTERNAL);
2242 __ CallRuntime(Runtime::kThrowStackOverflow);
2243 }
2244 __ bind(&done);
2245 }
2246
2247 // Pop receiver
2248 __ Pop(t1);
2249
2250 // Push [[BoundArguments]].
2251 {
2252 Label loop, done_loop;
2253 __ Addu(a0, a0, Operand(t0));
2254 __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2255 __ bind(&loop);
2256 __ Subu(t0, t0, Operand(1));
2257 __ Branch(&done_loop, lt, t0, Operand(zero_reg));
2258 __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2);
2259 __ Lw(kScratchReg, MemOperand(kScratchReg));
2260 __ Push(kScratchReg);
2261 __ Branch(&loop);
2262 __ bind(&done_loop);
2263 }
2264
2265 // Push receiver.
2266 __ Push(t1);
2267
2268 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2269 {
2270 Label skip_load;
2271 __ Branch(&skip_load, ne, a1, Operand(a3));
2272 __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2273 __ bind(&skip_load);
2274 }
2275
2276 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2277 __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
2278 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2279 }
2280
2281 // static
2282 void Builtins::Generate_Construct(MacroAssembler* masm) {
2283 // ----------- S t a t e -------------
2284 // -- a0 : the number of arguments (not including the receiver)
2285 // -- a1 : the constructor to call (can be any Object)
2286 // -- a3 : the new target (either the same as the constructor or
2287 // the JSFunction on which new was invoked initially)
2288 // -----------------------------------
2289
2290 // Check if target is a Smi.
2291 Label non_constructor, non_proxy;
2292 __ JumpIfSmi(a1, &non_constructor);
2293
2294 // Check if target has a [[Construct]] internal method.
2295 __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
2296 __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
2297 __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
2298 __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
2299
2300 // Dispatch based on instance type.
2301 __ lhu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
2302 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2303 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
2304
2305 // Only dispatch to bound functions after checking whether they are
2306 // constructors.
2307 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2308 RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
2309
2310 // Only dispatch to proxies after checking whether they are constructors.
2311 __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
2312 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2313 RelocInfo::CODE_TARGET);
2314
2315 // Called Construct on an exotic Object with a [[Construct]] internal method.
2316 __ bind(&non_proxy);
2317 {
2318 // Overwrite the original receiver with the (original) target.
2319 __ StoreReceiver(a1, a0, kScratchReg);
2320 // Let the "call_as_constructor_delegate" take care of the rest.
2321 __ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
2322 __ Jump(masm->isolate()->builtins()->CallFunction(),
2323 RelocInfo::CODE_TARGET);
2324 }
2325
2326 // Called Construct on an Object that doesn't have a [[Construct]] internal
2327 // method.
2328 __ bind(&non_constructor);
2329 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2330 RelocInfo::CODE_TARGET);
2331 }
2332
2333 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
2334 // State setup as expected by MacroAssembler::InvokePrologue.
2335 // ----------- S t a t e -------------
2336 // -- a0: actual arguments count
2337 // -- a1: function (passed through to callee)
2338 // -- a2: expected arguments count
2339 // -- a3: new target (passed through to callee)
2340 // -----------------------------------
2341
2342 Label invoke, dont_adapt_arguments, stack_overflow;
2343
2344 Label enough, too_few;
2345 __ Branch(&dont_adapt_arguments, eq, a2,
2346 Operand(kDontAdaptArgumentsSentinel));
2347   // We use Uless as the number of arguments should always be greater than 0.
2348 __ Branch(&too_few, Uless, a0, Operand(a2));
2349
2350 { // Enough parameters: actual >= expected.
2351 // a0: actual number of arguments as a smi
2352 // a1: function
2353 // a2: expected number of arguments
2354 // a3: new target (passed through to callee)
2355 __ bind(&enough);
2356 EnterArgumentsAdaptorFrame(masm);
2357 __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
2358
2359 // Calculate copy start address into a0 and copy end address into t1.
2360 __ Lsa(a0, fp, a2, kPointerSizeLog2);
2361 // Adjust for return address and receiver.
2362 __ Addu(a0, a0, Operand(2 * kPointerSize));
2363 // Compute copy end address.
2364 __ sll(t1, a2, kPointerSizeLog2);
2365 __ subu(t1, a0, t1);
2366
2367 // Copy the arguments (including the receiver) to the new stack frame.
2368 // a0: copy start address
2369 // a1: function
2370 // a2: expected number of arguments
2371 // a3: new target (passed through to callee)
2372 // t1: copy end address
2373
2374 Label copy;
2375     __ bind(&copy);
2376 __ lw(t0, MemOperand(a0));
2377 __ push(t0);
2378     __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t1));
2379 __ addiu(a0, a0, -kPointerSize); // In delay slot.
2380
2381 __ jmp(&invoke);
2382 }
2383
2384 { // Too few parameters: Actual < expected.
2385 __ bind(&too_few);
2386 EnterArgumentsAdaptorFrame(masm);
2387 __ StackOverflowCheck(a2, t1, kScratchReg, &stack_overflow);
2388
2389 // Fill the remaining expected arguments with undefined.
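    // (expected - actual) undefined values are pushed first; the actual
    // arguments and the receiver are then copied from the caller's frame on
    // top of them.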
2390 __ LoadRoot(t0, RootIndex::kUndefinedValue);
2391 __ SmiUntag(t2, a0);
2392 __ Subu(t2, a2, Operand(t2));
2393 __ sll(t1, t2, kSystemPointerSizeLog2);
2394 __ Subu(t1, fp, t1);
2395 // Adjust for frame.
2396 __ Subu(t1, t1,
2397 Operand(ArgumentsAdaptorFrameConstants::kFixedFrameSizeFromFp +
2398 kSystemPointerSize));
2399
2400 Label fill;
2401 __ bind(&fill);
2402 __ push(t0);
2403 __ Branch(&fill, ne, sp, Operand(t1));
2404
2405     // Calculate the copy start address into a0; the copy end address is fp.
2406 __ Lsa(a0, fp, a0, kPointerSizeLog2 - kSmiTagSize);
2407
2408 // Copy the arguments (including the receiver) to the new stack frame.
2409 Label copy;
2410     __ bind(&copy);
2411
2412 // Adjust load for return address and receiver.
2413 __ Lw(t0, MemOperand(a0, 2 * kSystemPointerSize));
2414 __ push(t0);
2415
2416     __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(fp));
2417 __ Subu(a0, a0, Operand(kSystemPointerSize));
2418 }
2419
2420 // Call the entry point.
2421 __ bind(&invoke);
2422 __ mov(a0, a2);
2423 // a0 : expected number of arguments
2424 // a1 : function (passed through to callee)
2425 // a3 : new target (passed through to callee)
2426 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2427 __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2428 __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
2429 __ Call(a2);
2430
2431 // Store offset of return address for deoptimizer.
2432 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
2433
2434 // Exit frame and return.
2435 LeaveArgumentsAdaptorFrame(masm);
2436 __ Ret();
2437
2438 // -------------------------------------------
2439 // Don't adapt arguments.
2440 // -------------------------------------------
2441 __ bind(&dont_adapt_arguments);
2442 static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
2443 __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
2444 __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
2445 __ Jump(a2);
2446
2447 __ bind(&stack_overflow);
2448 {
2449 FrameScope frame(masm, StackFrame::MANUAL);
2450 __ CallRuntime(Runtime::kThrowStackOverflow);
2451 __ break_(0xCC);
2452 }
2453 }
2454
2455 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2456 // The function index was put in t0 by the jump table trampoline.
2457 // Convert to Smi for the runtime call.
2458 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2459 {
2460 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2461 FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2462
2463 // Save all parameter registers (see wasm-linkage.cc). They might be
2464 // overwritten in the runtime call below. We don't have any callee-saved
2465 // registers in wasm, so no need to store anything else.
2466 constexpr RegList gp_regs = Register::ListOf(a0, a2, a3);
2467 constexpr RegList fp_regs =
2468 DoubleRegister::ListOf(f2, f4, f6, f8, f10, f12, f14);
2469 constexpr int16_t num_to_push = base::bits::CountPopulation(gp_regs) +
2470 base::bits::CountPopulation(fp_regs);
2471 // The number of regs to be pushed before kWasmInstanceRegister should be
2472 // equal to kNumberOfSavedAllParamRegs.
2473 STATIC_ASSERT(num_to_push ==
2474 WasmCompileLazyFrameConstants::kNumberOfSavedAllParamRegs);
2475 __ MultiPush(gp_regs);
2476 __ MultiPushFPU(fp_regs);
2477
2478     // Pass the instance and the function index as explicit arguments to the
2479     // runtime function.
2480 __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
2481 // Initialize the JavaScript context with 0. CEntry will use it to
2482 // set the current context on the isolate.
2483 __ Move(kContextRegister, Smi::zero());
2484 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2485
2486 // Restore registers.
2487 __ MultiPopFPU(fp_regs);
2488 __ MultiPop(gp_regs);
2489 }
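  // The runtime call left the native entry address of the (now compiled)
  // wasm function in v0; v0 is not part of the saved register sets above.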
2490 // Finally, jump to the entrypoint.
2491 __ Jump(kScratchReg, v0, 0);
2492 }
2493
2494 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2495 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2496 {
2497 FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2498
2499 // Save all parameter registers. They might hold live values, we restore
2500 // them after the runtime call.
2501 __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2502 __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2503
2504 // Initialize the JavaScript context with 0. CEntry will use it to
2505 // set the current context on the isolate.
2506 __ Move(cp, Smi::zero());
2507 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2508
2509 // Restore registers.
2510 __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
2511 __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2512 }
2513 __ Ret();
2514 }
2515
2516 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2517 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2518 bool builtin_exit_frame) {
2519 // Called from JavaScript; parameters are on stack as if calling JS function
2520 // a0: number of arguments including receiver
2521 // a1: pointer to builtin function
2522 // fp: frame pointer (restored after C call)
2523 // sp: stack pointer (restored as callee's sp after C call)
2524 // cp: current context (C callee-saved)
2525 //
2526 // If argv_mode == kArgvInRegister:
2527 // a2: pointer to the first argument
2528
2529 if (argv_mode == kArgvInRegister) {
2530 // Move argv into the correct register.
2531 __ mov(s1, a2);
2532 } else {
2533 // Compute the argv pointer in a callee-saved register.
2534 __ Lsa(s1, sp, a0, kPointerSizeLog2);
2535 __ Subu(s1, s1, kPointerSize);
2536 }
2537
2538 // Enter the exit frame that transitions from JavaScript to C++.
2539 FrameScope scope(masm, StackFrame::MANUAL);
2540 __ EnterExitFrame(
2541 save_doubles == kSaveFPRegs, 0,
2542 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2543
2544 // s0: number of arguments including receiver (C callee-saved)
2545 // s1: pointer to first argument (C callee-saved)
2546 // s2: pointer to builtin function (C callee-saved)
2547
2548 // Prepare arguments for C routine.
2549 // a0 = argc
2550 __ mov(s0, a0);
2551 __ mov(s2, a1);
2552
2553 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
2554 // also need to reserve the 4 argument slots on the stack.
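  // (Per the MIPS O32 ABI, the caller reserves four words of stack for a0-a3
  // even when the arguments are passed in registers.)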
2555
2556 __ AssertStackIsAligned();
2557
2558 // a0 = argc, a1 = argv, a2 = isolate
2559 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2560 __ mov(a1, s1);
2561
2562 __ StoreReturnAddressAndCall(s2);
2563
2564 // Result returned in v0 or v1:v0 - do not destroy these registers!
2565
2566 // Check result for exception sentinel.
2567 Label exception_returned;
2568 __ LoadRoot(t0, RootIndex::kException);
2569 __ Branch(&exception_returned, eq, t0, Operand(v0));
2570
2571 // Check that there is no pending exception, otherwise we
2572 // should have returned the exception sentinel.
2573 if (FLAG_debug_code) {
2574 Label okay;
2575 ExternalReference pending_exception_address = ExternalReference::Create(
2576 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
2577 __ li(a2, pending_exception_address);
2578 __ lw(a2, MemOperand(a2));
2579 __ LoadRoot(t0, RootIndex::kTheHoleValue);
2580     // Cannot use Check here as it attempts to generate a call into the runtime.
2581 __ Branch(&okay, eq, t0, Operand(a2));
2582 __ stop();
2583 __ bind(&okay);
2584 }
2585
2586 // Exit C frame and return.
2587 // v0:v1: result
2588 // sp: stack pointer
2589 // fp: frame pointer
2590 Register argc = argv_mode == kArgvInRegister
2591 // We don't want to pop arguments so set argc to no_reg.
2592 ? no_reg
2593 // s0: still holds argc (callee-saved).
2594 : s0;
2595 __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
2596
2597 // Handling of exception.
2598 __ bind(&exception_returned);
2599
2600 ExternalReference pending_handler_context_address = ExternalReference::Create(
2601 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
2602 ExternalReference pending_handler_entrypoint_address =
2603 ExternalReference::Create(
2604 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
2605 ExternalReference pending_handler_fp_address = ExternalReference::Create(
2606 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
2607 ExternalReference pending_handler_sp_address = ExternalReference::Create(
2608 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
2609
2610 // Ask the runtime for help to determine the handler. This will set v0 to
2611   // contain the current pending exception; don't clobber it.
2612 ExternalReference find_handler =
2613 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
2614 {
2615 FrameScope scope(masm, StackFrame::MANUAL);
2616 __ PrepareCallCFunction(3, 0, a0);
2617 __ mov(a0, zero_reg);
2618 __ mov(a1, zero_reg);
2619 __ li(a2, ExternalReference::isolate_address(masm->isolate()));
2620 __ CallCFunction(find_handler, 3);
2621 }
2622
2623 // Retrieve the handler context, SP and FP.
2624 __ li(cp, pending_handler_context_address);
2625 __ lw(cp, MemOperand(cp));
2626 __ li(sp, pending_handler_sp_address);
2627 __ lw(sp, MemOperand(sp));
2628 __ li(fp, pending_handler_fp_address);
2629 __ lw(fp, MemOperand(fp));
2630
2631 // If the handler is a JS frame, restore the context to the frame. Note that
2632 // the context will be set to (cp == 0) for non-JS frames.
2633 Label zero;
2634 __ Branch(&zero, eq, cp, Operand(zero_reg));
2635 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2636 __ bind(&zero);
2637
2638 // Reset the masking register. This is done independent of the underlying
2639 // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
2640 // with both configurations. It is safe to always do this, because the
2641 // underlying register is caller-saved and can be arbitrarily clobbered.
2642 __ ResetSpeculationPoisonRegister();
2643
2644 // Compute the handler entry address and jump to it.
2645 __ li(t9, pending_handler_entrypoint_address);
2646 __ lw(t9, MemOperand(t9));
2647 __ Jump(t9);
2648 }
2649
2650 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
2651 Label done;
2652 Register result_reg = t0;
2653
2654 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
2655 Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
2656 Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
2657 DoubleRegister double_scratch = kScratchDoubleReg;
2658
2659 // Account for saved regs.
2660 const int kArgumentOffset = 4 * kPointerSize;
2661
2662 __ Push(result_reg);
2663 __ Push(scratch, scratch2, scratch3);
2664
2665 // Load double input.
2666 __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset));
2667
2668 // Clear cumulative exception flags and save the FCSR.
2669 __ cfc1(scratch2, FCSR);
2670 __ ctc1(zero_reg, FCSR);
2671
2672 // Try a conversion to a signed integer.
2673 __ Trunc_w_d(double_scratch, double_scratch);
2674 // Move the converted value into the result register.
2675 __ mfc1(scratch3, double_scratch);
2676
2677 // Retrieve and restore the FCSR.
2678 __ cfc1(scratch, FCSR);
2679 __ ctc1(scratch2, FCSR);
2680
2681 // Check for overflow and NaNs.
2682 __ And(
2683 scratch, scratch,
2684 kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2685 // If we had no exceptions then set result_reg and we are done.
2686 Label error;
2687 __ Branch(&error, ne, scratch, Operand(zero_reg));
2688 __ Move(result_reg, scratch3);
2689 __ Branch(&done);
2690 __ bind(&error);
2691
2692 // Load the double value and perform a manual truncation.
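  // The double is decomposed into its exponent and mantissa words, and the
  // mantissa is shifted so that only the low 32 bits of the integer value
  // remain, which is the truncation JavaScript requires for out-of-range
  // inputs.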
2693 Register input_high = scratch2;
2694 Register input_low = scratch3;
2695
2696 __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
2697 __ lw(input_high,
2698 MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
2699
2700 Label normal_exponent;
2701 // Extract the biased exponent in result.
2702 __ Ext(result_reg, input_high, HeapNumber::kExponentShift,
2703 HeapNumber::kExponentBits);
2704
2705 // Check for Infinity and NaNs, which should return 0.
2706 __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
2707 __ Movz(result_reg, zero_reg, scratch);
2708 __ Branch(&done, eq, scratch, Operand(zero_reg));
2709
2710 // Express exponent as delta to (number of mantissa bits + 31).
2711 __ Subu(result_reg, result_reg,
2712 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
2713
2714 // If the delta is strictly positive, all bits would be shifted away,
2715 // which means that we can return 0.
2716 __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
2717 __ mov(result_reg, zero_reg);
2718 __ Branch(&done);
2719
2720 __ bind(&normal_exponent);
2721 const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
2722 // Calculate shift.
2723 __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
2724
2725 // Save the sign.
2726 Register sign = result_reg;
2727 result_reg = no_reg;
2728 __ And(sign, input_high, Operand(HeapNumber::kSignMask));
2729
2730 // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
2731 // to check for this specific case.
2732 Label high_shift_needed, high_shift_done;
2733 __ Branch(&high_shift_needed, lt, scratch, Operand(32));
2734 __ mov(input_high, zero_reg);
2735 __ Branch(&high_shift_done);
2736 __ bind(&high_shift_needed);
2737
2738 // Set the implicit 1 before the mantissa part in input_high.
2739 __ Or(input_high, input_high,
2740 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
2741 // Shift the mantissa bits to the correct position.
2742 // We don't need to clear non-mantissa bits as they will be shifted away.
2743   // If they weren't, it would mean that the answer is in the 32-bit range.
2744 __ sllv(input_high, input_high, scratch);
2745
2746 __ bind(&high_shift_done);
2747
2748 // Replace the shifted bits with bits from the lower mantissa word.
2749 Label pos_shift, shift_done;
2750 __ li(kScratchReg, 32);
2751 __ subu(scratch, kScratchReg, scratch);
2752 __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
2753
2754 // Negate scratch.
2755 __ Subu(scratch, zero_reg, scratch);
2756 __ sllv(input_low, input_low, scratch);
2757 __ Branch(&shift_done);
2758
2759 __ bind(&pos_shift);
2760 __ srlv(input_low, input_low, scratch);
2761
2762 __ bind(&shift_done);
2763 __ Or(input_high, input_high, Operand(input_low));
2764 // Restore sign if necessary.
2765 __ mov(scratch, sign);
2766 result_reg = sign;
2767 sign = no_reg;
2768 __ Subu(result_reg, zero_reg, input_high);
2769 __ Movz(result_reg, input_high, scratch);
2770
2771 __ bind(&done);
2772 __ sw(result_reg, MemOperand(sp, kArgumentOffset));
2773 __ Pop(scratch, scratch2, scratch3);
2774 __ Pop(result_reg);
2775 __ Ret();
2776 }
2777
2778 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2779 // TODO(v8:10701): Implement for this platform.
2780 __ Trap();
2781 }
2782
2783 namespace {
2784
2785 int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2786 return ref0.address() - ref1.address();
2787 }
2788
2789 // Calls an API function. Allocates a HandleScope, extracts the returned value
2790 // from the handle, and propagates exceptions. Restores the context. stack_space
2791 // is the space to be unwound on exit (it includes the JS call arguments space
2792 // and the additional space allocated for the fast call).
2793 void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
2794 ExternalReference thunk_ref, int stack_space,
2795 MemOperand* stack_space_operand,
2796 MemOperand return_value_operand) {
2797 Isolate* isolate = masm->isolate();
2798 ExternalReference next_address =
2799 ExternalReference::handle_scope_next_address(isolate);
2800 const int kNextOffset = 0;
2801 const int kLimitOffset = AddressOffset(
2802 ExternalReference::handle_scope_limit_address(isolate), next_address);
2803 const int kLevelOffset = AddressOffset(
2804 ExternalReference::handle_scope_level_address(isolate), next_address);
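// A sketch (not the actual declaration) of the HandleScopeData layout these
// offsets assume: 'next' first, then 'limit', then 'level'.
//
//   struct HandleScopeDataSketch {
//     Address* next;   // kNextOffset == 0
//     Address* limit;  // kLimitOffset
//     int level;       // kLevelOffset
//   };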
2805
2806 DCHECK(function_address == a1 || function_address == a2);
2807
2808 Label profiler_enabled, end_profiler_check;
2809 __ li(t9, ExternalReference::is_profiling_address(isolate));
2810 __ lb(t9, MemOperand(t9, 0));
2811 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
2812 __ li(t9, ExternalReference::address_of_runtime_stats_flag());
2813 __ lw(t9, MemOperand(t9, 0));
2814 __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg));
2815 {
2816 // Call the api function directly.
2817 __ mov(t9, function_address);
2818 __ Branch(&end_profiler_check);
2819 }
2820 __ bind(&profiler_enabled);
2821 {
2822 // Additional parameter is the address of the actual callback.
2823 __ li(t9, thunk_ref);
2824 }
2825 __ bind(&end_profiler_check);
2826
2827 // Allocate HandleScope in callee-save registers.
2828 __ li(s5, next_address);
2829 __ lw(s0, MemOperand(s5, kNextOffset));
2830 __ lw(s1, MemOperand(s5, kLimitOffset));
2831 __ lw(s2, MemOperand(s5, kLevelOffset));
2832 __ Addu(s2, s2, Operand(1));
2833 __ sw(s2, MemOperand(s5, kLevelOffset));
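// In C++ terms the loads/stores above correspond roughly to:
//   prev_next  = data->next;   // kept in s0
//   prev_limit = data->limit;  // kept in s1
//   ++data->level;             // incremented copy kept in s2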
2834
2835 __ StoreReturnAddressAndCall(t9);
2836
2837 Label promote_scheduled_exception;
2838 Label delete_allocated_handles;
2839 Label leave_exit_frame;
2840 Label return_value_loaded;
2841
2842 // Load value from ReturnValue.
2843 __ lw(v0, return_value_operand);
2844 __ bind(&return_value_loaded);
2845
2846 // No more valid handles (the result handle was the last one). Restore
2847 // previous handle scope.
2848 __ sw(s0, MemOperand(s5, kNextOffset));
2849 if (__ emit_debug_code()) {
2850 __ lw(a1, MemOperand(s5, kLevelOffset));
2851 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
2852 Operand(s2));
2853 }
2854 __ Subu(s2, s2, Operand(1));
2855 __ sw(s2, MemOperand(s5, kLevelOffset));
2856 __ lw(kScratchReg, MemOperand(s5, kLimitOffset));
2857 __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
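// Roughly the inverse of the HandleScope bookkeeping above:
//   data->next = prev_next;
//   --data->level;
//   if (data->limit != prev_limit) goto delete_allocated_handles;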
2858
2859 // Leave the API exit frame.
2860 __ bind(&leave_exit_frame);
2861
2862 if (stack_space_operand == nullptr) {
2863 DCHECK_NE(stack_space, 0);
2864 __ li(s0, Operand(stack_space));
2865 } else {
2866 DCHECK_EQ(stack_space, 0);
2867 // The ExitFrame contains four MIPS argument slots after the call so this
2868 // must be accounted for.
2869 // TODO(jgruber): Investigate if this is needed by the direct call.
2870 __ Drop(kCArgSlotCount);
2871 __ lw(s0, *stack_space_operand);
2872 }
2873
2874 static constexpr bool kDontSaveDoubles = false;
2875 static constexpr bool kRegisterContainsSlotCount = false;
2876 __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
2877 kRegisterContainsSlotCount);
2878
2879 // Check if the function scheduled an exception.
2880 __ LoadRoot(t0, RootIndex::kTheHoleValue);
2881 __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
2882 __ lw(t1, MemOperand(kScratchReg));
2883 __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
2884
2885 __ Ret();
2886
2887 // Re-throw by promoting a scheduled exception.
2888 __ bind(&promote_scheduled_exception);
2889 __ TailCallRuntime(Runtime::kPromoteScheduledException);
2890
2891 // HandleScope limit has changed. Delete allocated extensions.
2892 __ bind(&delete_allocated_handles);
2893 __ sw(s1, MemOperand(s5, kLimitOffset));
2894 __ mov(s0, v0);
2895 __ mov(a0, v0);
2896 __ PrepareCallCFunction(1, s1);
2897 __ li(a0, ExternalReference::isolate_address(isolate));
2898 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
2899 __ mov(v0, s0);
2900 __ jmp(&leave_exit_frame);
2901 }
2902
2903 } // namespace
2904
2905 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
2906 // ----------- S t a t e -------------
2907 // -- cp : context
2908 // -- a1 : api function address
2909 // -- a2 : arguments count (not including the receiver)
2910 // -- a3 : call data
2911 // -- a0 : holder
2912 // -- sp[0] : receiver
2913 // -- sp[4] : first argument
2914 // -- ...
2915 // -- sp[(argc) * 4] : last argument
2916 // -----------------------------------
2917
2918 Register api_function_address = a1;
2919 Register argc = a2;
2920 Register call_data = a3;
2921 Register holder = a0;
2922 Register scratch = t0;
2923 Register base = t1; // For addressing MemOperands on the stack.
2924
2925 DCHECK(!AreAliased(api_function_address, argc, call_data,
2926 holder, scratch, base));
2927
2928 using FCA = FunctionCallbackArguments;
2929
2930 STATIC_ASSERT(FCA::kArgsLength == 6);
2931 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
2932 STATIC_ASSERT(FCA::kDataIndex == 4);
2933 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
2934 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
2935 STATIC_ASSERT(FCA::kIsolateIndex == 1);
2936 STATIC_ASSERT(FCA::kHolderIndex == 0);
2937
2938 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
2939 //
2940 // Target state:
2941 // sp[0 * kPointerSize]: kHolder
2942 // sp[1 * kPointerSize]: kIsolate
2943 // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
2944 // sp[3 * kPointerSize]: undefined (kReturnValue)
2945 // sp[4 * kPointerSize]: kData
2946 // sp[5 * kPointerSize]: undefined (kNewTarget)
2947
2948 // Set up the base register for addressing through MemOperands. It will point
2949 // at the receiver (located at sp + argc * kPointerSize).
2950 __ Lsa(base, sp, argc, kPointerSizeLog2);
2951
2952 // Reserve space on the stack.
2953 __ Subu(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
2954
2955 // kHolder.
2956 __ sw(holder, MemOperand(sp, 0 * kPointerSize));
2957
2958 // kIsolate.
2959 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
2960 __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
2961
2962 // kReturnValueDefaultValue and kReturnValue.
2963 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2964 __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
2965 __ sw(scratch, MemOperand(sp, 3 * kPointerSize));
2966
2967 // kData.
2968 __ sw(call_data, MemOperand(sp, 4 * kPointerSize));
2969
2970 // kNewTarget.
2971 __ sw(scratch, MemOperand(sp, 5 * kPointerSize));
2972
2973 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
2974 // We use it below to set up the FunctionCallbackInfo object.
2975 __ mov(scratch, sp);
2976
2977 // Allocate the v8::Arguments structure in the arguments' space since
2978 // it's not controlled by GC.
2979 static constexpr int kApiStackSpace = 4;
2980 static constexpr bool kDontSaveDoubles = false;
2981 FrameScope frame_scope(masm, StackFrame::MANUAL);
2982 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
2983
2984 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
2985 // Arguments are after the return address (pushed by EnterExitFrame()).
2986 __ sw(scratch, MemOperand(sp, 1 * kPointerSize));
2987
2988 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
2989 // on the stack).
2990 __ Addu(scratch, scratch,
2991 Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
2992 __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
2993
2994 // FunctionCallbackInfo::length_.
2995 __ sw(argc, MemOperand(sp, 3 * kPointerSize));
2996
2997 // We also store the number of stack slots to drop after returning from the
2998 // API function here.
2999 // Note: Unlike on other architectures, this stores the number of slots to
3000 // drop, not the number of bytes.
3001 __ Addu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
3002 __ sw(scratch, MemOperand(sp, 4 * kPointerSize));
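// For example, with argc == 2 this stores 2 + FCA::kArgsLength (6) +
// 1 (receiver) == 9 slots.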
3003
3004 // v8::InvocationCallback's argument.
3005 DCHECK(!AreAliased(api_function_address, scratch, a0));
3006 __ Addu(a0, sp, Operand(1 * kPointerSize));
3007
3008 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3009
3010 // There are two stack slots above the arguments we constructed on the stack.
3011 // TODO(jgruber): Document what these arguments are.
3012 static constexpr int kStackSlotsAboveFCA = 2;
3013 MemOperand return_value_operand(
3014 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
3015
3016 static constexpr int kUseStackSpaceOperand = 0;
3017 MemOperand stack_space_operand(sp, 4 * kPointerSize);
3018
3019 AllowExternalCallThatCantCauseGC scope(masm);
3020 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3021 kUseStackSpaceOperand, &stack_space_operand,
3022 return_value_operand);
3023 }
3024
3025 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3026 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3027 // name below the exit frame to make GC aware of them.
3028 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3029 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3030 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3031 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3032 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3033 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3034 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3035 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3036
3037 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3038 Register holder = ApiGetterDescriptor::HolderRegister();
3039 Register callback = ApiGetterDescriptor::CallbackRegister();
3040 Register scratch = t0;
3041 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3042
3043 Register api_function_address = a2;
3044
3045 // Here and below +1 is for name() pushed after the args_ array.
3046 using PCA = PropertyCallbackArguments;
3047 __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
3048 __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
3049 __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3050 __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
3051 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3052 __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
3053 __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
3054 kPointerSize));
3055 __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
3056 __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
3057 __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
3058 // should_throw_on_error -> false
3059 DCHECK_EQ(0, Smi::zero().ptr());
3060 __ sw(zero_reg,
3061 MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
3062 __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3063 __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
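// The resulting stack layout, as a sketch (offsets in units of kPointerSize):
//   sp[0] : name
//   sp[1] : ShouldThrowOnError (false, i.e. Smi::zero())
//   sp[2] : holder
//   sp[3] : isolate
//   sp[4] : undefined (ReturnValueDefaultValue)
//   sp[5] : undefined (ReturnValue)
//   sp[6] : data
//   sp[7] : receiver (this)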
3064
3065 // v8::PropertyCallbackInfo::args_ array and name handle.
3066 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3067
3068 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3069 __ mov(a0, sp); // a0 = Handle<Name>
3070 __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
3071
3072 const int kApiStackSpace = 1;
3073 FrameScope frame_scope(masm, StackFrame::MANUAL);
3074 __ EnterExitFrame(false, kApiStackSpace);
3075
3076 // Create v8::PropertyCallbackInfo object on the stack and initialize
3077 // its args_ field.
3078 __ sw(a1, MemOperand(sp, 1 * kPointerSize));
3079 __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
3080
3081 ExternalReference thunk_ref =
3082 ExternalReference::invoke_accessor_getter_callback();
3083
3084 __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3085 __ lw(api_function_address,
3086 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3087
3088 // +3 is to skip prolog, return address and name handle.
3089 MemOperand return_value_operand(
3090 fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3091 MemOperand* const kUseStackSpaceConstant = nullptr;
3092 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3093 kStackUnwindSpace, kUseStackSpaceConstant,
3094 return_value_operand);
3095 }
3096
3097 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3098 // The sole purpose of DirectCEntry is for movable callers (e.g. any general
3099 // purpose Code object) to be able to call into C functions that may trigger
3100 // GC and thus move the caller.
3101 //
3102 // DirectCEntry places the return address on the stack (updated by the GC),
3103 // making the call GC safe. The irregexp backend relies on this.
3104
3105 // Make room for arguments to fit the C calling convention. Callers use
3106 // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
3107 // have to do that here. Any caller must drop kCArgsSlotsSize stack space
3108 // after the call.
3109 __ Subu(sp, sp, Operand(kCArgsSlotsSize));
3110
3111 __ sw(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
3112 __ Call(t9); // Call the C++ function.
3113 __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); // Reload the (possibly updated) return address.
3114
3115 if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3116 // In case of an error the return address may point to a memory area
3117 // filled with kZapValue by the GC. Dereference the address and check for
3118 // this.
3119 __ lw(t0, MemOperand(t9));
3120 __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
3121 Operand(reinterpret_cast<uint32_t>(kZapValue)));
3122 }
3123
3124 __ Jump(t9);
3125 }
3126
3127 void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
3128 // This code assumes that cache lines are 32 bytes and if the cache line is
3129 // larger it will not work correctly.
3130 {
3131 Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
3132 skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
3133 ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
3134
3135 // The size of each prefetch.
3136 uint32_t pref_chunk = 32;
3137 // The maximum size of a prefetch; it must not be less than pref_chunk.
3138 // If the real size of a prefetch is greater than max_pref_size and
3139 // the kPrefHintPrepareForStore hint is used, the code will not work
3140 // correctly.
3141 uint32_t max_pref_size = 128;
3142 DCHECK(pref_chunk < max_pref_size);
3143
3144 // pref_limit is set based on the fact that we never use an offset
3145 // greater than 5 on a store pref and that a single pref can
3146 // never be larger than max_pref_size.
3147 uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
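// With pref_chunk == 32 and max_pref_size == 128 this gives
// pref_limit == 5 * 32 + 128 == 288 bytes.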
3148 int32_t pref_hint_load = kPrefHintLoadStreamed;
3149 int32_t pref_hint_store = kPrefHintPrepareForStore;
3150 uint32_t loadstore_chunk = 4;
3151
3152 // The initial prefetches may fetch bytes that are before the buffer being
3153 // copied. Start copies with an offset of 4 to avoid this situation when
3154 // using kPrefHintPrepareForStore.
3155 DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
3156 pref_chunk * 4 >= max_pref_size);
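// Copy strategy, roughly: copies of fewer than 8 bytes are done byte by byte
// (lastb). Otherwise the destination is word-aligned first, then 64-byte
// chunks are copied with prefetching (loop16w / ua_loop16w), followed by one
// optional 32-byte chunk, then single words, and finally any trailing bytes.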
3157
3158 // If the size is less than 8, go to lastb. Regardless of size,
3159 // copy the dst pointer to v0 for the return value.
3160 __ slti(t2, a2, 2 * loadstore_chunk);
3161 __ bne(t2, zero_reg, &lastb);
3162 __ mov(v0, a0); // In delay slot.
3163
3164 // If src and dst have different alignments, go to unaligned. If they
3165 // have the same alignment (but are not actually word-aligned), do a partial
3166 // load/store to make them aligned. If they are both already aligned,
3167 // we can start copying at aligned.
3168 __ xor_(t8, a1, a0);
3169 __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
3170 __ bne(t8, zero_reg, &unaligned);
3171 __ subu(a3, zero_reg, a0); // In delay slot.
3172
3173 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
3174 __ beq(a3, zero_reg, &aligned); // Already aligned.
3175 __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
3176
3177 if (kArchEndian == kLittle) {
3178 __ lwr(t8, MemOperand(a1));
3179 __ addu(a1, a1, a3);
3180 __ swr(t8, MemOperand(a0));
3181 __ addu(a0, a0, a3);
3182 } else {
3183 __ lwl(t8, MemOperand(a1));
3184 __ addu(a1, a1, a3);
3185 __ swl(t8, MemOperand(a0));
3186 __ addu(a0, a0, a3);
3187 }
3188 // Now dst and src are both word-aligned. Set a2 to
3189 // count how many bytes we have to copy after all the 64 byte chunks are
3190 // copied and a3 to the dst pointer after all the 64 byte chunks have been
3191 // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
3192 __ bind(&aligned);
3193 __ andi(t8, a2, 0x3F);
3194 __ beq(a2, t8, &chkw); // Less than 64?
3195 __ subu(a3, a2, t8); // In delay slot.
3196 __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
3197
3198 // When in the loop we prefetch with kPrefHintPrepareForStore hint,
3199 // in this case the a0+x should be past the "t0-32" address. This means:
3200 // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
3201 // x=64 the last "safe" a0 address is "t0-96". In the current version we
3202 // will use "pref hint, 128(a0)", so "t0-160" is the limit.
3203 if (pref_hint_store == kPrefHintPrepareForStore) {
3204 __ addu(t0, a0, a2); // t0 is the "past the end" address.
3205 __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address.
3206 }
3207
3208 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3209 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
3210 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
3211 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
3212
3213 if (pref_hint_store != kPrefHintPrepareForStore) {
3214 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
3215 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
3216 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
3217 }
3218 __ bind(&loop16w);
3219 __ lw(t0, MemOperand(a1));
3220
3221 if (pref_hint_store == kPrefHintPrepareForStore) {
3222 __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
3223 __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
3224 }
3225 __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
3226
3227 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3228 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3229
3230 __ bind(&skip_pref);
3231 __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
3232 __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
3233 __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
3234 __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
3235 __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
3236 __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
3237 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
3238
3239 __ sw(t0, MemOperand(a0));
3240 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3241 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3242 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3243 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3244 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3245 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3246 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3247
3248 __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
3249 __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
3250 __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
3251 __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
3252 __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
3253 __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
3254 __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
3255 __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
3256 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
3257
3258 __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
3259 __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
3260 __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
3261 __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
3262 __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
3263 __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
3264 __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
3265 __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
3266 __ addiu(a0, a0, 16 * loadstore_chunk);
3267 __ bne(a0, a3, &loop16w);
3268 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
3269 __ mov(a2, t8);
3270
3271 // Here we have src and dest word-aligned but less than 64 bytes to go.
3272 // Check for a 32 bytes chunk and copy if there is one. Otherwise jump
3273 // down to chk1w to handle the tail end of the copy.
3274 __ bind(&chkw);
3275 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3276 __ andi(t8, a2, 0x1F);
3277 __ beq(a2, t8, &chk1w); // Less than 32?
3278 __ nop(); // In delay slot.
3279 __ lw(t0, MemOperand(a1));
3280 __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
3281 __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
3282 __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
3283 __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
3284 __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
3285 __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
3286 __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
3287 __ addiu(a1, a1, 8 * loadstore_chunk);
3288 __ sw(t0, MemOperand(a0));
3289 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3290 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3291 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3292 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3293 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3294 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3295 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3296 __ addiu(a0, a0, 8 * loadstore_chunk);
3297
3298 // Here we have less than 32 bytes to copy. Set up for a loop to copy
3299 // one word at a time. Set a2 to count how many bytes we have to copy
3300 // after all the word chunks are copied and a3 to the dst pointer after
3301 // all the word chunks have been copied. We will loop, incrementing a0
3302 // and a1 until a0 equals a3.
3303 __ bind(&chk1w);
3304 __ andi(a2, t8, loadstore_chunk - 1);
3305 __ beq(a2, t8, &lastb);
3306 __ subu(a3, t8, a2); // In delay slot.
3307 __ addu(a3, a0, a3);
3308
3309 __ bind(&wordCopy_loop);
3310 __ lw(t3, MemOperand(a1));
3311 __ addiu(a0, a0, loadstore_chunk);
3312 __ addiu(a1, a1, loadstore_chunk);
3313 __ bne(a0, a3, &wordCopy_loop);
3314 __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
3315
3316 __ bind(&lastb);
3317 __ Branch(&leave, le, a2, Operand(zero_reg));
3318 __ addu(a3, a0, a2);
3319
3320 __ bind(&lastbloop);
3321 __ lb(v1, MemOperand(a1));
3322 __ addiu(a0, a0, 1);
3323 __ addiu(a1, a1, 1);
3324 __ bne(a0, a3, &lastbloop);
3325 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
3326
3327 __ bind(&leave);
3328 __ jr(ra);
3329 __ nop();
3330
3331 // Unaligned case. Only the dst gets aligned so we need to do partial
3332 // loads of the source followed by normal stores to the dst (once we
3333 // have aligned the destination).
3334 __ bind(&unaligned);
3335 __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
3336 __ beq(a3, zero_reg, &ua_chk16w);
3337 __ subu(a2, a2, a3); // In delay slot.
3338
3339 if (kArchEndian == kLittle) {
3340 __ lwr(v1, MemOperand(a1));
3341 __ lwl(v1,
3342 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3343 __ addu(a1, a1, a3);
3344 __ swr(v1, MemOperand(a0));
3345 __ addu(a0, a0, a3);
3346 } else {
3347 __ lwl(v1, MemOperand(a1));
3348 __ lwr(v1,
3349 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3350 __ addu(a1, a1, a3);
3351 __ swl(v1, MemOperand(a0));
3352 __ addu(a0, a0, a3);
3353 }
3354
3355 // Now the dst (but not the source) is aligned. Set a2 to count how many
3356 // bytes we have to copy after all the 64 byte chunks are copied and a3 to
3357 // the dst pointer after all the 64 byte chunks have been copied. We will
3358 // loop, incrementing a0 and a1 until a0 equals a3.
3359 __ bind(&ua_chk16w);
3360 __ andi(t8, a2, 0x3F);
3361 __ beq(a2, t8, &ua_chkw);
3362 __ subu(a3, a2, t8); // In delay slot.
3363 __ addu(a3, a0, a3);
3364
3365 if (pref_hint_store == kPrefHintPrepareForStore) {
3366 __ addu(t0, a0, a2);
3367 __ Subu(t9, t0, pref_limit);
3368 }
3369
3370 __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
3371 __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
3372 __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
3373
3374 if (pref_hint_store != kPrefHintPrepareForStore) {
3375 __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
3376 __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
3377 __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
3378 }
3379
3380 __ bind(&ua_loop16w);
3381 __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
3382 if (kArchEndian == kLittle) {
3383 __ lwr(t0, MemOperand(a1));
3384 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
3385 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
3386
3387 if (pref_hint_store == kPrefHintPrepareForStore) {
3388 __ sltu(v1, t9, a0);
3389 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
3390 }
3391 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
3392
3393 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3394 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3395
3396 __ bind(&ua_skip_pref);
3397 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
3398 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
3399 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
3400 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
3401 __ lwl(t0,
3402 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3403 __ lwl(t1,
3404 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3405 __ lwl(t2,
3406 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3407 __ lwl(t3,
3408 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3409 __ lwl(t4,
3410 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3411 __ lwl(t5,
3412 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3413 __ lwl(t6,
3414 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3415 __ lwl(t7,
3416 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3417 } else {
3418 __ lwl(t0, MemOperand(a1));
3419 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
3420 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
3421
3422 if (pref_hint_store == kPrefHintPrepareForStore) {
3423 __ sltu(v1, t9, a0);
3424 __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
3425 }
3426 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
3427
3428 __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
3429 __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
3430
3431 __ bind(&ua_skip_pref);
3432 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
3433 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
3434 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
3435 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
3436 __ lwr(t0,
3437 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3438 __ lwr(t1,
3439 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3440 __ lwr(t2,
3441 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3442 __ lwr(t3,
3443 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3444 __ lwr(t4,
3445 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3446 __ lwr(t5,
3447 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3448 __ lwr(t6,
3449 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3450 __ lwr(t7,
3451 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3452 }
3453 __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
3454 __ sw(t0, MemOperand(a0));
3455 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3456 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3457 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3458 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3459 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3460 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3461 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3462 if (kArchEndian == kLittle) {
3463 __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
3464 __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
3465 __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
3466 __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
3467 __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
3468 __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
3469 __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
3470 __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
3471 __ lwl(t0,
3472 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
3473 __ lwl(t1,
3474 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
3475 __ lwl(t2,
3476 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
3477 __ lwl(t3,
3478 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
3479 __ lwl(t4,
3480 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
3481 __ lwl(t5,
3482 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
3483 __ lwl(t6,
3484 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
3485 __ lwl(t7,
3486 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
3487 } else {
3488 __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
3489 __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
3490 __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
3491 __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
3492 __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
3493 __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
3494 __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
3495 __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
3496 __ lwr(t0,
3497 MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
3498 __ lwr(t1,
3499 MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
3500 __ lwr(t2,
3501 MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
3502 __ lwr(t3,
3503 MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
3504 __ lwr(t4,
3505 MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
3506 __ lwr(t5,
3507 MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
3508 __ lwr(t6,
3509 MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
3510 __ lwr(t7,
3511 MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
3512 }
3513 __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
3514 __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
3515 __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
3516 __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
3517 __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
3518 __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
3519 __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
3520 __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
3521 __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
3522 __ addiu(a0, a0, 16 * loadstore_chunk);
3523 __ bne(a0, a3, &ua_loop16w);
3524 __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
3525 __ mov(a2, t8);
3526
3527 // Here we have less than 64 bytes to copy. Check for
3528 // a 32-byte chunk and copy it if there is one. Otherwise jump down to
3529 // ua_chk1w to handle the tail end of the copy.
3530 __ bind(&ua_chkw);
3531 __ Pref(pref_hint_load, MemOperand(a1));
3532 __ andi(t8, a2, 0x1F);
3533
3534 __ beq(a2, t8, &ua_chk1w);
3535 __ nop(); // In delay slot.
3536 if (kArchEndian == kLittle) {
3537 __ lwr(t0, MemOperand(a1));
3538 __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
3539 __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
3540 __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
3541 __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
3542 __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
3543 __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
3544 __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
3545 __ lwl(t0,
3546 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3547 __ lwl(t1,
3548 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3549 __ lwl(t2,
3550 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3551 __ lwl(t3,
3552 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3553 __ lwl(t4,
3554 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3555 __ lwl(t5,
3556 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3557 __ lwl(t6,
3558 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3559 __ lwl(t7,
3560 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3561 } else {
3562 __ lwl(t0, MemOperand(a1));
3563 __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
3564 __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
3565 __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
3566 __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
3567 __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
3568 __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
3569 __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
3570 __ lwr(t0,
3571 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3572 __ lwr(t1,
3573 MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
3574 __ lwr(t2,
3575 MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
3576 __ lwr(t3,
3577 MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
3578 __ lwr(t4,
3579 MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
3580 __ lwr(t5,
3581 MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
3582 __ lwr(t6,
3583 MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
3584 __ lwr(t7,
3585 MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
3586 }
3587 __ addiu(a1, a1, 8 * loadstore_chunk);
3588 __ sw(t0, MemOperand(a0));
3589 __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
3590 __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
3591 __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
3592 __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
3593 __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
3594 __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
3595 __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
3596 __ addiu(a0, a0, 8 * loadstore_chunk);
3597
3598 // Less than 32 bytes to copy. Set up for a loop to
3599 // copy one word at a time.
3600 __ bind(&ua_chk1w);
3601 __ andi(a2, t8, loadstore_chunk - 1);
3602 __ beq(a2, t8, &ua_smallCopy);
3603 __ subu(a3, t8, a2); // In delay slot.
3604 __ addu(a3, a0, a3);
3605
3606 __ bind(&ua_wordCopy_loop);
3607 if (kArchEndian == kLittle) {
3608 __ lwr(v1, MemOperand(a1));
3609 __ lwl(v1,
3610 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3611 } else {
3612 __ lwl(v1, MemOperand(a1));
3613 __ lwr(v1,
3614 MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
3615 }
3616 __ addiu(a0, a0, loadstore_chunk);
3617 __ addiu(a1, a1, loadstore_chunk);
3618 __ bne(a0, a3, &ua_wordCopy_loop);
3619 __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
3620
3621 // Copy the remaining bytes one byte at a time.
3622 __ bind(&ua_smallCopy);
3623 __ beq(a2, zero_reg, &leave);
3624 __ addu(a3, a0, a2); // In delay slot.
3625
3626 __ bind(&ua_smallCopy_loop);
3627 __ lb(v1, MemOperand(a1));
3628 __ addiu(a0, a0, 1);
3629 __ addiu(a1, a1, 1);
3630 __ bne(a0, a3, &ua_smallCopy_loop);
3631 __ sb(v1, MemOperand(a0, -1)); // In delay slot.
3632
3633 __ jr(ra);
3634 __ nop();
3635 }
3636 }
3637
3638 namespace {
3639
3640 // This code tries to be close to ia32 code so that any changes can be
3641 // easily ported.
3642 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3643 DeoptimizeKind deopt_kind) {
3644 Isolate* isolate = masm->isolate();
3645
3646 // Unlike on ARM we don't save all the registers, just the useful ones.
3647 // For the rest, there are gaps on the stack, so the offsets remain the same.
3648 static constexpr int kNumberOfRegisters = Register::kNumRegisters;
3649
3650 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3651 RegList saved_regs = restored_regs | sp.bit() | ra.bit();
3652
3653 static constexpr int kDoubleRegsSize =
3654 kDoubleSize * DoubleRegister::kNumRegisters;
3655
3656 // Save all FPU registers before messing with them.
3657 __ Subu(sp, sp, Operand(kDoubleRegsSize));
3658 const RegisterConfiguration* config = RegisterConfiguration::Default();
3659 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3660 int code = config->GetAllocatableDoubleCode(i);
3661 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3662 int offset = code * kDoubleSize;
3663 __ Sdc1(fpu_reg, MemOperand(sp, offset));
3664 }
3665
3666 // Push saved_regs (needed to populate FrameDescription::registers_).
3667 // Leave gaps for other registers.
3668 __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
3669 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3670 if ((saved_regs & (1 << i)) != 0) {
3671 __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
3672 }
3673 }
3674
3675 __ li(a2,
3676 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
3677 __ sw(fp, MemOperand(a2));
3678
3679 static constexpr int kSavedRegistersAreaSize =
3680 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
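// Assuming 32 core registers of 4 bytes and 32 double registers of 8 bytes
// on MIPS32, this is 128 + 256 == 384 bytes.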
3681
3682 __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
3683 // Get the address of the location in the code object (a3) (return
3684 // address for lazy deoptimization) and compute the fp-to-sp delta in
3685 // register t0.
3686 __ mov(a3, ra);
3687 __ Addu(t0, sp, Operand(kSavedRegistersAreaSize));
3688 __ Subu(t0, fp, t0);
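// That is, t0 = fp - (sp + kSavedRegistersAreaSize): the fp-to-sp delta of
// the deoptimizing frame before the registers were saved above.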
3689
3690 // Allocate a new deoptimizer object.
3691 __ PrepareCallCFunction(6, t1);
3692 // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
3693 __ mov(a0, zero_reg);
3694 Label context_check;
3695 __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3696 __ JumpIfSmi(a1, &context_check);
3697 __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3698 __ bind(&context_check);
3699 __ li(a1, Operand(static_cast<int>(deopt_kind)));
3700 // a2: bailout id already loaded.
3701 // a3: code address or 0 already loaded.
3702 __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
3703 __ li(t1, ExternalReference::isolate_address(isolate));
3704 __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
3705 // Call Deoptimizer::New().
3706 {
3707 AllowExternalCallThatCantCauseGC scope(masm);
3708 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
3709 }
3710
3711 // Preserve "deoptimizer" object in register v0 and get the input
3712 // frame descriptor pointer to a1 (deoptimizer->input_);
3713 // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
3714 __ mov(a0, v0);
3715 __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
3716
3717 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3718 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3719 for (int i = 0; i < kNumberOfRegisters; i++) {
3720 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3721 if ((saved_regs & (1 << i)) != 0) {
3722 __ lw(a2, MemOperand(sp, i * kPointerSize));
3723 __ sw(a2, MemOperand(a1, offset));
3724 } else if (FLAG_debug_code) {
3725 __ li(a2, kDebugZapValue);
3726 __ sw(a2, MemOperand(a1, offset));
3727 }
3728 }
3729
3730 int double_regs_offset = FrameDescription::double_registers_offset();
3731 // Copy FPU registers to
3732 // double_registers_[DoubleRegister::kNumAllocatableRegisters]
3733 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3734 int code = config->GetAllocatableDoubleCode(i);
3735 int dst_offset = code * kDoubleSize + double_regs_offset;
3736 int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
3737 __ Ldc1(f0, MemOperand(sp, src_offset));
3738 __ Sdc1(f0, MemOperand(a1, dst_offset));
3739 }
3740
3741 // Remove the saved registers from the stack.
3742 __ Addu(sp, sp, Operand(kSavedRegistersAreaSize));
3743
3744 // Compute a pointer to the unwinding limit in register a2; that is
3745 // the first stack slot not part of the input frame.
3746 __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
3747 __ Addu(a2, a2, sp);
3748
3749 // Unwind the stack down to - but not including - the unwinding
3750 // limit and copy the contents of the activation frame to the input
3751 // frame description.
3752 __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
3753 Label pop_loop;
3754 Label pop_loop_header;
3755 __ BranchShort(&pop_loop_header);
3756 __ bind(&pop_loop);
3757 __ pop(t0);
3758 __ sw(t0, MemOperand(a3, 0));
3759 __ addiu(a3, a3, sizeof(uint32_t));
3760 __ bind(&pop_loop_header);
3761 __ BranchShort(&pop_loop, ne, a2, Operand(sp));
3762
3763 // Compute the output frame in the deoptimizer.
3764 __ push(a0); // Preserve deoptimizer object across call.
3765 // a0: deoptimizer object; a1: scratch.
3766 __ PrepareCallCFunction(1, a1);
3767 // Call Deoptimizer::ComputeOutputFrames().
3768 {
3769 AllowExternalCallThatCantCauseGC scope(masm);
3770 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3771 }
3772 __ pop(a0); // Restore deoptimizer object (class Deoptimizer).
3773
3774 __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
3775
3776 // Replace the current (input) frame with the output frames.
3777 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3778 // Outer loop state: t0 = current "FrameDescription** output_",
3779 // a1 = one past the last FrameDescription**.
3780 __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
3781 __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
3782 __ Lsa(a1, t0, a1, kPointerSizeLog2);
3783 __ BranchShort(&outer_loop_header);
3784 __ bind(&outer_push_loop);
3785 // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
3786 __ lw(a2, MemOperand(t0, 0)); // output_[ix]
3787 __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
3788 __ BranchShort(&inner_loop_header);
3789 __ bind(&inner_push_loop);
3790 __ Subu(a3, a3, Operand(sizeof(uint32_t)));
3791 __ Addu(t2, a2, Operand(a3));
3792 __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
3793 __ push(t3);
3794 __ bind(&inner_loop_header);
3795 __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
3796
3797 __ Addu(t0, t0, Operand(kPointerSize));
3798 __ bind(&outer_loop_header);
3799 __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));
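// Roughly, in C++ terms (a sketch of the loop structure, not actual
// Deoptimizer code; frame_size()/frame_content() are illustrative names):
//   for (FrameDescription** frame = output_; frame < output_ + count; ++frame) {
//     for (uint32_t off = (*frame)->frame_size(); off != 0; ) {
//       off -= sizeof(uint32_t);
//       push(*reinterpret_cast<uint32_t*>((*frame)->frame_content() + off));
//     }
//   }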
3800
3801 __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
3802 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3803 int code = config->GetAllocatableDoubleCode(i);
3804 const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
3805 int src_offset = code * kDoubleSize + double_regs_offset;
3806 __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
3807 }
3808
3809 // Push pc and continuation from the last output frame.
3810 __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
3811 __ push(t2);
3812 __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
3813 __ push(t2);
3814
3815 // Technically restoring 'at' should work unless zero_reg is also restored
3816 // but it's safer to check for this.
3817 DCHECK(!(at.bit() & restored_regs));
3818 // Restore the registers from the last output frame.
3819 __ mov(at, a2);
3820 for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3821 int offset = (i * kPointerSize) + FrameDescription::registers_offset();
3822 if ((restored_regs & (1 << i)) != 0) {
3823 __ lw(ToRegister(i), MemOperand(at, offset));
3824 }
3825 }
3826
3827 __ pop(at); // Get continuation, leave pc on stack.
3828 __ pop(ra);
3829 __ Jump(at);
3830 __ stop();
3831 }
3832
3833 } // namespace
3834
3835 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3836 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3837 }
3838
3839 void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
3840 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
3841 }
3842
3843 void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
3844 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
3845 }
3846
3847 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3848 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3849 }
3850
3851 #undef __
3852
3853 } // namespace internal
3854 } // namespace v8
3855
3856 #endif // V8_TARGET_ARCH_MIPS
3857