// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64

#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
#include "src/objects/js-generator.h"
#include "src/objects/smi.h"
#include "src/runtime/runtime.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
namespace {

static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
                                 Register scratch) {
  DCHECK(!AreAliased(code, scratch));
  // Verify that the code kind is baseline code via the CodeKind.
  __ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
  __ DecodeField<Code::KindField>(scratch);
  __ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)), r0);
  __ Assert(eq, AbortReason::kExpectedBaselineData);
}

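// Given the SharedFunctionInfo's function_data in sfi_data, jumps to
// is_baseline if the data is baseline code, and unwraps an InterpreterData
// to its BytecodeArray. Any other kind of function data is left untouched.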
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
                                                    Register sfi_data,
                                                    Register scratch1,
                                                    Label* is_baseline) {
  USE(GetSharedFunctionInfoBytecodeOrBaseline);
  ASM_CODE_COMMENT(masm);
  Label done;
  __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
  if (FLAG_debug_code) {
    Label not_baseline;
    __ b(ne, &not_baseline);
    AssertCodeIsBaseline(masm, sfi_data, scratch1);
    __ beq(is_baseline);
    __ bind(&not_baseline);
  } else {
    __ beq(is_baseline);
  }
  __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
  __ bne(&done);
  __ LoadTaggedPointerField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

  __ bind(&done);
}

void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
                       intptr_t offset) {
  __ AddS64(ip, entry_address, Operand(offset), r0);
  __ mtlr(ip);

  // "return" to the OSR entry point of the function.
  __ Ret();
}

void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
                                 Register scratch) {
  // Reset the bytecode age and OSR state (optimized to a single write).
  static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  __ mov(scratch, Operand(0));
  __ StoreU32(scratch,
              FieldMemOperand(bytecode_array,
                              BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
              r0);
}

// Restarts execution either at the current or next (in execution order)
// bytecode. If there is baseline code on the shared function info, converts an
// interpreter frame into a baseline frame and continues execution in baseline
// code. Otherwise execution continues with bytecode.
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
                                         bool next_bytecode,
                                         bool is_osr = false) {
  Label start;
  __ bind(&start);

  // Get function from the frame.
  Register closure = r4;
  __ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset),
             r0);

  // Get the Code object from the shared function info.
  Register code_obj = r9;
  __ LoadTaggedPointerField(
      code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
      r0);
  __ LoadTaggedPointerField(
      code_obj,
      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);

  // Check if we have baseline code. For OSR entry it is safe to assume we
  // always have baseline code.
  if (!is_osr) {
    Label start_with_baseline;
    __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
    __ b(eq, &start_with_baseline);

    // Start with bytecode as there is no baseline code.
    Builtin builtin_id = next_bytecode
                             ? Builtin::kInterpreterEnterAtNextBytecode
                             : Builtin::kInterpreterEnterAtBytecode;
    __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
            RelocInfo::CODE_TARGET);

    // Start with baseline code.
    __ bind(&start_with_baseline);
  } else if (FLAG_debug_code) {
    __ CompareObjectType(code_obj, r6, r6, CODET_TYPE);
    __ Assert(eq, AbortReason::kExpectedBaselineData);
  }

  if (FLAG_debug_code) {
    AssertCodeIsBaseline(masm, code_obj, r6);
  }

  // Load the feedback vector.
  Register feedback_vector = r5;
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
      r0);

  Label install_baseline_code;
  // Check if the feedback vector is valid. If not, call
  // Runtime::kInstallBaselineCode to set one up.
  __ CompareObjectType(feedback_vector, r6, r6, FEEDBACK_VECTOR_TYPE);
  __ b(ne, &install_baseline_code);

  // Save BytecodeOffset from the stack frame.
  __ LoadU64(kInterpreterBytecodeOffsetRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
  // Replace BytecodeOffset with the feedback vector.
  __ StoreU64(feedback_vector,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  feedback_vector = no_reg;

  // Compute baseline pc for bytecode offset.
  ExternalReference get_baseline_pc_extref;
  if (next_bytecode || is_osr) {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_next_executed_bytecode();
  } else {
    get_baseline_pc_extref =
        ExternalReference::baseline_pc_for_bytecode_offset();
  }
  Register get_baseline_pc = r6;
  __ Move(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  if (!is_osr) {
    __ CmpS64(kInterpreterBytecodeOffsetRegister,
              Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                      kFunctionEntryBytecodeOffset),
              r0);
    __ b(eq, &function_entry_bytecode);
  }

  __ SubS64(kInterpreterBytecodeOffsetRegister,
            kInterpreterBytecodeOffsetRegister,
            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  __ bind(&valid_bytecode_offset);
  // Get bytecode array from the stack frame.
  __ LoadU64(kInterpreterBytecodeArrayRegister,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  // Save the accumulator register, since it's clobbered by the call below.
  __ Push(kInterpreterAccumulatorRegister);
  {
    Register arg_reg_1 = r3;
    Register arg_reg_2 = r4;
    Register arg_reg_3 = r5;
    __ mr(arg_reg_1, code_obj);
    __ mr(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ mr(arg_reg_3, kInterpreterBytecodeArrayRegister);
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(4, 0, ip);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ AddS64(code_obj, code_obj, kReturnRegister0);
  __ Pop(kInterpreterAccumulatorRegister);

  if (is_osr) {
    Register scratch = ip;
    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
                                scratch);
    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
  } else {
    __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Jump(code_obj);
  }
  __ Trap();  // Unreachable.

  if (!is_osr) {
    __ bind(&function_entry_bytecode);
    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
    // address of the first bytecode.
    __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
    if (next_bytecode) {
      __ Move(get_baseline_pc,
              ExternalReference::baseline_pc_for_bytecode_offset());
    }
    __ b(&valid_bytecode_offset);
  }

  __ bind(&install_baseline_code);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(kInterpreterAccumulatorRegister);
    __ Push(closure);
    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
    __ Pop(kInterpreterAccumulatorRegister);
  }
  // Retry from the start after installing baseline code.
  __ b(&start);
}

}  // namespace

void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
  __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
  __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
          RelocInfo::CODE_TARGET);
}

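// Calls the runtime function |function_id| with the target function as its
// argument, then tail-calls the Code object the runtime returns. The JS
// calling convention registers are preserved across the runtime call.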
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
                                           Runtime::FunctionId function_id) {
  // ----------- S t a t e -------------
  //  -- r3 : actual argument count
  //  -- r4 : target function (preserved for callee)
  //  -- r6 : new target (preserved for callee)
  // -----------------------------------
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    // Push a copy of the target function, the new target and the actual
    // argument count.
    // Push function as parameter to the runtime call.
    __ SmiTag(kJavaScriptCallArgCountRegister);
    __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
            kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);

    __ CallRuntime(function_id, 1);
    __ mr(r5, r3);

    // Restore target function, new target and actual argument count.
    __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
           kJavaScriptCallArgCountRegister);
    __ SmiUntag(kJavaScriptCallArgCountRegister);
  }
  static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
  __ JumpCodeObject(r5);
}

namespace {

enum class ArgumentsElementType {
  kRaw,    // Push arguments as they are.
  kHandle  // Dereference arguments before pushing.
};

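// Pushes argc - kJSArgcReceiverSlots arguments from |array| onto the stack,
// from last to first. The loop is driven by the PPC count register: mtctr
// seeds the trip count and bdnz decrements it and branches while non-zero.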
void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
                            Register scratch,
                            ArgumentsElementType element_type) {
  DCHECK(!AreAliased(array, argc, scratch));
  Label loop, done;
  __ subi(scratch, argc, Operand(kJSArgcReceiverSlots));
  __ cmpi(scratch, Operand::Zero());
  __ beq(&done);
  __ mtctr(scratch);
  __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
  __ add(scratch, array, scratch);

  __ bind(&loop);
  __ LoadU64WithUpdate(ip, MemOperand(scratch, -kSystemPointerSize));
  if (element_type == ArgumentsElementType::kHandle) {
    __ LoadU64(ip, MemOperand(ip));
  }
  __ push(ip);
  __ bdnz(&loop);
  __ bind(&done);
}

void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3     : number of arguments
  //  -- r4     : constructor function
  //  -- r6     : new target
  //  -- cp     : context
  //  -- lr     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  Register scratch = r5;

  Label stack_overflow;

  __ StackOverflowCheck(r3, scratch, &stack_overflow);
  // Enter a construct frame.
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the incoming parameters on the stack.

    __ SmiTag(r3);
    __ Push(cp, r3);
    __ SmiUntag(r3, SetRC);

    // TODO(victorgomes): When the arguments adaptor is completely removed, we
    // should get the formal parameter count and copy the arguments in its
    // correct position (including any undefined), instead of delaying this to
    // InvokeFunction.

    // Set up pointer to first argument (skip receiver).
    __ addi(
        r7, fp,
        Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
    // Copy arguments and receiver to the expression stack.
    // r7: Pointer to start of arguments.
    // r3: Number of arguments.
    Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw);

    // The receiver for the builtin/api call.
    __ PushRoot(RootIndex::kTheHoleValue);

    // Call the function.
    // r3: number of arguments (untagged)
    // r4: constructor function
    // r6: new target
    {
      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
      __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
    }

    // Restore context from the frame.
    __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
    // Restore smi-tagged arguments count from the frame.
    __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));

    // Leave construct frame.
  }
  // Remove caller arguments from the stack and return.
  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
                   TurboAssembler::kCountIncludesReceiver);
  __ blr();

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // Unreachable code.
  }
}

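// Tiers up the current frame via on-stack replacement: calls
// Runtime::kCompileOptimizedOSR and, if it returns a code object, "returns"
// into that code at the OSR entry offset recorded in its deoptimization data.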
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
  ASM_CODE_COMMENT(masm);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileOptimizedOSR);
  }

  // If the code object is null, just return to the caller.
  Label skip;
  __ CmpSmiLiteral(r3, Smi::zero(), r0);
  __ bne(&skip);
  __ Ret();

  __ bind(&skip);

  if (is_interpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ LeaveFrame(StackFrame::STUB);
  }

  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
  __ LoadTaggedPointerField(
      r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
      r0);

  {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
    __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start

    if (FLAG_enable_embedded_constant_pool) {
      __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3);
    }

    // Load the OSR entrypoint offset from the deoptimization data.
    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
    __ SmiUntag(r4,
                FieldMemOperand(r4, FixedArray::OffsetOfElementAt(
                                        DeoptimizationData::kOsrPcOffsetIndex)),
                LeaveRC, r0);

    // Compute the target address = code start + osr_offset
    __ add(r0, r3, r4);

    // And "return" to the OSR entry point of the function.
    __ mtlr(r0);
    __ blr();
  }
}

}  // namespace

// The construct stub for ES5 constructor functions and ES6 class constructors.
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  --      r3: number of arguments (untagged)
  //  --      r4: constructor function
  //  --      r6: new target
  //  --      cp: context
  //  --      lr: return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  FrameScope scope(masm, StackFrame::MANUAL);
  // Enter a construct frame.
  Label post_instantiation_deopt_entry, not_create_implicit_receiver;
  __ EnterFrame(StackFrame::CONSTRUCT);

  // Preserve the incoming parameters on the stack.
  __ SmiTag(r3);
  __ Push(cp, r3, r4);
  __ PushRoot(RootIndex::kUndefinedValue);
  __ Push(r6);

  // ----------- S t a t e -------------
  //  --        sp[0*kSystemPointerSize]: new target
  //  --        sp[1*kSystemPointerSize]: padding
  //  -- r4 and sp[2*kSystemPointerSize]: constructor function
  //  --        sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  --        sp[4*kSystemPointerSize]: context
  // -----------------------------------

  __ LoadTaggedPointerField(
      r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
  __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
  __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
  __ JumpIfIsInRange(
      r7, static_cast<uint32_t>(FunctionKind::kDefaultDerivedConstructor),
      static_cast<uint32_t>(FunctionKind::kDerivedConstructor),
      &not_create_implicit_receiver);

  // If not derived class constructor: Allocate the new receiver object.
  __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r7,
                      r8);
  __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
  __ b(&post_instantiation_deopt_entry);

  // Else: use TheHoleValue as receiver for constructor call
  __ bind(&not_create_implicit_receiver);
  __ LoadRoot(r3, RootIndex::kTheHoleValue);

  // ----------- S t a t e -------------
  //  --                          r3: receiver
  //  -- Slot 4 / sp[0*kSystemPointerSize]: new target
  //  -- Slot 3 / sp[1*kSystemPointerSize]: padding
  //  -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
  //  -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
  //  -- Slot 0 / sp[4*kSystemPointerSize]: context
  // -----------------------------------
  // Deoptimizer enters here.
  masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
      masm->pc_offset());
  __ bind(&post_instantiation_deopt_entry);

  // Restore new target.
  __ Pop(r6);

  // Push the allocated receiver to the stack.
  __ Push(r3);
  // We need two copies because we may have to return the original one
  // and the calling conventions dictate that the called function pops the
  // receiver. The second copy is pushed after the arguments; we keep it in
  // r9 since r3 needs to hold the number of arguments before invoking the
  // function.
  __ mr(r9, r3);

  // Set up pointer to first argument (skip receiver).
  __ addi(
      r7, fp,
      Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));

  // ----------- S t a t e -------------
  //  --                 r6: new target
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: implicit receiver
  //  -- sp[2*kSystemPointerSize]: padding
  //  -- sp[3*kSystemPointerSize]: constructor function
  //  -- sp[4*kSystemPointerSize]: number of arguments (tagged)
  //  -- sp[5*kSystemPointerSize]: context
  // -----------------------------------

  // Restore constructor function and argument count.
  __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
  __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  __ SmiUntag(r3);

  Label stack_overflow;
  __ StackOverflowCheck(r3, r8, &stack_overflow);

  // Copy arguments to the expression stack.
  // r7: Pointer to start of argument.
  // r3: Number of arguments.
  Generate_PushArguments(masm, r7, r3, r8, ArgumentsElementType::kRaw);

  // Push implicit receiver.
  __ Push(r9);

  // Call the function.
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
    __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
  }

  // ----------- S t a t e -------------
  //  --                 r0: constructor result
  //  -- sp[0*kSystemPointerSize]: implicit receiver
  //  -- sp[1*kSystemPointerSize]: padding
  //  -- sp[2*kSystemPointerSize]: constructor function
  //  -- sp[3*kSystemPointerSize]: number of arguments
  //  -- sp[4*kSystemPointerSize]: context
  // -----------------------------------

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
      masm->pc_offset());

  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
  // on page 74.
  Label use_receiver, do_throw, leave_and_return, check_receiver;

  // If the result is undefined, we jump out to using the implicit receiver.
  __ JumpIfNotRoot(r3, RootIndex::kUndefinedValue, &check_receiver);

  // Otherwise we do a smi check and fall through to check if the return value
  // is a valid receiver.

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
  __ LoadU64(r3, MemOperand(sp));
  __ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);

  __ bind(&leave_and_return);
  // Restore smi-tagged arguments count from the frame.
  __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
  // Leave construct frame.
  __ LeaveFrame(StackFrame::CONSTRUCT);

  // Remove caller arguments from the stack and return.
  __ DropArguments(r4, TurboAssembler::kCountIsSmi,
                   TurboAssembler::kCountIncludesReceiver);
  __ blr();

  __ bind(&check_receiver);
  // If the result is a smi, it is *not* an object in the ECMA sense.
  __ JumpIfSmi(r3, &use_receiver);

  // If the type of the result (stored in its map) is less than
  // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
  __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
  __ bge(&leave_and_return);
  __ b(&use_receiver);

  __ bind(&do_throw);
  // Restore the context from the frame.
  __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
  __ bkpt(0);

  __ bind(&stack_overflow);
  // Restore the context from the frame.
  __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
  __ CallRuntime(Runtime::kThrowStackOverflow);
  // Unreachable code.
  __ bkpt(0);
}

void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
  Generate_JSBuiltinsConstructStubHelper(masm);
}

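// If sfi_data holds an InterpreterData, replaces it with the wrapped
// BytecodeArray; any other kind of function data is left as-is.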
static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
                                          Register sfi_data,
                                          Register scratch1) {
  Label done;

  __ CompareObjectType(sfi_data, scratch1, scratch1, INTERPRETER_DATA_TYPE);
  __ bne(&done);
  __ LoadTaggedPointerField(
      sfi_data,
      FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
  __ bind(&done);
}

// static
void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r3 : the value to pass to the generator
  //  -- r4 : the JSGeneratorObject to resume
  //  -- lr : return address
  // -----------------------------------
  // Store input value into generator object.
  __ StoreTaggedField(
      r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
  __ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
  // Check that r4 is still valid, RecordWrite might have clobbered it.
  __ AssertGeneratorObject(r4);

  // Load suspended function and context.
  __ LoadTaggedPointerField(
      r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
                            r0);

  // Flood function if we are stepping.
  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
  Label stepping_prepared;
  Register scratch = r8;
  ExternalReference debug_hook =
      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
  __ Move(scratch, debug_hook);
  __ LoadU8(scratch, MemOperand(scratch), r0);
  __ extsb(scratch, scratch);
  __ CmpSmiLiteral(scratch, Smi::zero(), r0);
  __ bne(&prepare_step_in_if_stepping);

  // Flood function if we need to continue stepping in the suspended generator.

  ExternalReference debug_suspended_generator =
      ExternalReference::debug_suspended_generator_address(masm->isolate());

  __ Move(scratch, debug_suspended_generator);
  __ LoadU64(scratch, MemOperand(scratch));
  __ CmpS64(scratch, r4);
  __ beq(&prepare_step_in_suspended_generator);
  __ bind(&stepping_prepared);

  // Check the stack for overflow. We are not trying to catch interruptions
  // (i.e. debug break and preemption) here, so check the "real stack limit".
  Label stack_overflow;
  __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
  __ CmpU64(sp, scratch);
  __ blt(&stack_overflow);

  // ----------- S t a t e -------------
  //  -- r4    : the JSGeneratorObject to resume
  //  -- r7    : generator function
  //  -- cp    : generator context
  //  -- lr    : return address
  // -----------------------------------

  // Copy the function arguments from the generator object's register file.
  __ LoadTaggedPointerField(
      r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
  __ LoadU16(
      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
  __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
  __ LoadTaggedPointerField(
      r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
      r0);
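  // Push the arguments from the parameters-and-registers store, from the
  // last parameter down to the first, followed by the receiver.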
  {
    Label done_loop, loop;
    __ bind(&loop);
    __ subi(r6, r6, Operand(1));
    __ cmpi(r6, Operand::Zero());
    __ blt(&done_loop);
    __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
    __ add(scratch, r5, r10);
    __ LoadAnyTaggedField(
        scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
    __ Push(scratch);
    __ b(&loop);
    __ bind(&done_loop);

    // Push receiver.
    __ LoadAnyTaggedField(
        scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
    __ Push(scratch);
  }

  // Underlying function needs to have bytecode available.
  if (FLAG_debug_code) {
    __ LoadTaggedPointerField(
        r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
    __ LoadTaggedPointerField(
        r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
    GetSharedFunctionInfoBytecode(masm, r6, r3);
    __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
    __ Assert(eq, AbortReason::kMissingBytecodeArray);
  }

  // Resume (Ignition/TurboFan) generator object.
  {
    __ LoadTaggedPointerField(
        r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
    __ LoadU16(r3, FieldMemOperand(
                       r3, SharedFunctionInfo::kFormalParameterCountOffset));
    // We abuse new.target both to indicate that this is a resume call and to
    // pass in the generator object.  In ordinary calls, new.target is always
    // undefined because generator functions are non-constructable.
    __ mr(r6, r4);
    __ mr(r4, r7);
    static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
    __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
                              r0);
    __ JumpCodeObject(r5);
  }

  __ bind(&prepare_step_in_if_stepping);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r4, r7);
    // Push hole as receiver since we do not use it for stepping.
    __ PushRoot(RootIndex::kTheHoleValue);
    __ CallRuntime(Runtime::kDebugOnFunctionCall);
    __ Pop(r4);
    __ LoadTaggedPointerField(
        r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  }
  __ b(&stepping_prepared);

  __ bind(&prepare_step_in_suspended_generator);
  {
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    __ Push(r4);
    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
    __ Pop(r4);
    __ LoadTaggedPointerField(
        r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
  }
  __ b(&stepping_prepared);

  __ bind(&stack_overflow);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    __ bkpt(0);  // This should be unreachable.
  }
}

void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
  __ push(r4);
  __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
  __ Trap();  // Unreachable.
}

namespace {

// Called with the native C calling convention. The corresponding function
// signature is either:
//
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, Address new_target, Address target,
//       Address receiver, intptr_t argc, Address** args)>;
// or
//   using JSEntryFunction = GeneratedCode<Address(
//       Address root_register_value, MicrotaskQueue* microtask_queue)>;
void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
                             Builtin entry_trampoline) {
  // The register state is either:
  //   r3: root_register_value
  //   r4: code entry
  //   r5: function
  //   r6: receiver
  //   r7: argc
  //   r8: argv
  // or
  //   r3: root_register_value
  //   r4: microtask_queue

  Label invoke, handler_entry, exit;

  {
    NoRootArrayScope no_root_array(masm);

    // PPC LINUX ABI:
    // preserve LR in pre-reserved slot in caller's frame
    __ mflr(r0);
    __ StoreU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));

    // Save callee saved registers on the stack.
    __ MultiPush(kCalleeSaved);

    // Save callee-saved double registers.
    __ MultiPushDoubles(kCalleeSavedDoubles);
    // Set up the reserved register for 0.0.
    __ LoadDoubleLiteral(kDoubleRegZero, base::Double(0.0), r0);

    // Initialize the root register.
    // C calling convention. The first argument is passed in r3.
    __ mr(kRootRegister, r3);
  }

  // Push a frame with special values setup to mark it as an entry frame.
  // r4: code entry
  // r5: function
  // r6: receiver
  // r7: argc
  // r8: argv
  __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ push(r0);
  if (FLAG_enable_embedded_constant_pool) {
    __ li(kConstantPoolRegister, Operand::Zero());
    __ push(kConstantPoolRegister);
  }
  __ mov(r0, Operand(StackFrame::TypeToMarker(type)));
  __ push(r0);
  __ push(r0);
  // Save copies of the top frame descriptor on the stack.
  __ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                        masm->isolate()));
  __ LoadU64(r0, MemOperand(r3));
  __ push(r0);

  // Clear c_entry_fp now that we've pushed its previous value to the stack.
  // If the c_entry_fp is not already zero and we don't clear it, the
  // SafeStackFrameIterator will assume we are executing C++ and miss the JS
  // frames on top.
  __ li(r0, Operand::Zero());
  __ StoreU64(r0, MemOperand(r3));

  Register scratch = r9;
  // Set up frame pointer for the frame to be pushed.
  __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp =
      ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
                                masm->isolate());
  __ Move(r3, js_entry_sp);
  __ LoadU64(scratch, MemOperand(r3));
  __ cmpi(scratch, Operand::Zero());
  __ bne(&non_outermost_js);
  __ StoreU64(fp, MemOperand(r3));
  __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);
  __ push(scratch);  // frame-type

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ b(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
    __ bind(&handler_entry);

    // Store the current pc as the handler offset. It's used later to create the
    // handler table.
    masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());

    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel.  Coming in here the
    // fp will be invalid because the PushStackHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ Move(scratch,
            ExternalReference::Create(
                IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
  }

  __ StoreU64(r3, MemOperand(scratch));
  __ LoadRoot(r3, RootIndex::kException);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r4-r8.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the b(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return.
  Handle<Code> trampoline_code =
      masm->isolate()->builtins()->code_handle(entry_trampoline);
  __ Call(trampoline_code, RelocInfo::CODE_TARGET);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r3 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r8);
  __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ bne(&non_outermost_js_2);
  __ mov(scratch, Operand::Zero());
  __ Move(r8, js_entry_sp);
  __ StoreU64(scratch, MemOperand(r8));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r6);
  __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
                                             masm->isolate()));
  __ StoreU64(r6, MemOperand(scratch));

  // Reset the stack to the callee saved registers.
  __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved double registers.
  __ MultiPopDoubles(kCalleeSavedDoubles);

  // Restore callee-saved registers.
  __ MultiPop(kCalleeSaved);

  // Return
  __ LoadU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
  __ mtlr(r0);
  __ blr();
}

}  // namespace

void Builtins::Generate_JSEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
}

void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
                          Builtin::kJSConstructEntryTrampoline);
}

void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
  Generate_JSEntryVariant(masm, StackFrame::ENTRY,
                          Builtin::kRunMicrotasksTrampoline);
}

static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Called from Generate_JS_Entry
  // r4: new.target
  // r5: function
  // r6: receiver
  // r7: argc
  // r8: argv
  // r0,r3,r9, cp may be clobbered

  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Setup the context (we need to use the caller context from the isolate).
    ExternalReference context_address = ExternalReference::Create(
        IsolateAddressId::kContextAddress, masm->isolate());
    __ Move(cp, context_address);
    __ LoadU64(cp, MemOperand(cp));

    // Push the function.
    __ Push(r5);

    // Check if we have enough stack space to push all arguments.
    Label enough_stack_space, stack_overflow;
    __ mr(r3, r7);
    __ StackOverflowCheck(r3, r9, &stack_overflow);
    __ b(&enough_stack_space);
    __ bind(&stack_overflow);
    __ CallRuntime(Runtime::kThrowStackOverflow);
    // Unreachable code.
    __ bkpt(0);

    __ bind(&enough_stack_space);

    // Copy arguments to the stack.
    // r5: function
    // r7: argc
    // r8: argv, i.e. points to first arg
    Generate_PushArguments(masm, r8, r7, r9, ArgumentsElementType::kHandle);

    // Push the receiver.
    __ Push(r6);

    // r3: argc
    // r4: function
    // r6: new.target
    __ mr(r3, r7);
    __ mr(r6, r4);
    __ mr(r4, r5);

    // Initialize all JavaScript callee-saved registers, since they will be seen
    // by the garbage collector as part of handlers.
    __ LoadRoot(r7, RootIndex::kUndefinedValue);
    __ mr(r8, r7);
    __ mr(r14, r7);
    __ mr(r15, r7);
    __ mr(r16, r7);
    __ mr(r17, r7);

    // Invoke the code.
    Handle<Code> builtin = is_construct
                               ? BUILTIN_CODE(masm->isolate(), Construct)
                               : masm->isolate()->builtins()->Call();
    __ Call(builtin, RelocInfo::CODE_TARGET);

    // Exit the JS frame, remove the parameters (except the function), and
    // return.
  }
  __ blr();

  // r3: result
}

void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}

void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}

void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
  // This expects two C++ function parameters passed by Invoke() in
  // execution.cc.
  //   r3: root_register_value
  //   r4: microtask_queue

  __ mr(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r4);
  __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}

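// Installs |optimized_code| in the closure's code field and records the
// write with the GC write barrier.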
static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
                                                Register optimized_code,
                                                Register closure,
                                                Register scratch1,
                                                Register slot_address) {
  DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
  DCHECK_EQ(closure, kJSFunctionRegister);
  DCHECK(!AreAliased(optimized_code, closure));
  // Store code entry in the closure.
  __ StoreTaggedField(optimized_code,
                      FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
  // Write barrier clobbers scratch1 below.
  Register value = scratch1;
  __ mr(value, optimized_code);

  __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
                      kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
                      RememberedSetAction::kOmit, SmiCheck::kOmit);
}

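// Tears down the interpreter frame and drops the arguments, using the larger
// of the formal and actual parameter counts so that over-application is also
// popped off the stack.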
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
                                  Register scratch2) {
  Register params_size = scratch1;
  // Get the size of the formal parameters + receiver (in bytes).
  __ LoadU64(params_size,
             MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ lwz(params_size,
         FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));

  Register actual_params_size = scratch2;
  // Compute the size of the actual parameters + receiver (in bytes).
  __ LoadU64(actual_params_size,
             MemOperand(fp, StandardFrameConstants::kArgCOffset));
  __ ShiftLeftU64(actual_params_size, actual_params_size,
                  Operand(kSystemPointerSizeLog2));

  // If actual is bigger than formal, then we should use it to free up the stack
  // arguments.
  Label corrected_args_count;
  __ CmpS64(params_size, actual_params_size);
  __ bge(&corrected_args_count);
  __ mr(params_size, actual_params_size);
  __ bind(&corrected_args_count);
  // Leave the frame (also dropping the register file).
  __ LeaveFrame(StackFrame::INTERPRETED);

  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
                   TurboAssembler::kCountIncludesReceiver);
}

// Tail-call |function_id| if |actual_state| == |expected_state|
static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
                                         Register actual_state,
                                         TieringState expected_state,
                                         Runtime::FunctionId function_id) {
  Label no_match;
  __ cmpi(actual_state, Operand(static_cast<int>(expected_state)));
  __ bne(&no_match);
  GenerateTailCallToReturnedCode(masm, function_id);
  __ bind(&no_match);
}

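// Tail-calls the code held in |optimized_code_entry| after installing it in
// the closure. If the weak reference has been cleared or the code is marked
// for deoptimization, falls back to Runtime::kHealOptimizedCodeSlot.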
static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
                                      Register optimized_code_entry,
                                      Register scratch) {
  // ----------- S t a t e -------------
  //  -- r3 : actual argument count
  //  -- r6 : new target (preserved for callee if needed, and caller)
  //  -- r4 : target function (preserved for callee if needed, and caller)
  // -----------------------------------
  DCHECK(!AreAliased(r4, r6, optimized_code_entry, scratch));

  Register closure = r4;
  Label heal_optimized_code_slot;

  // If the optimized code is cleared, go to runtime to update the optimization
  // marker field.
  __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
                   &heal_optimized_code_slot);

  // Check if the optimized code is marked for deopt. If it is, call the
  // runtime to clear it.
  __ LoadTaggedPointerField(
      scratch,
      FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset),
      r0);
  __ LoadS32(
      scratch,
      FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
      r0);
  __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
  __ bne(&heal_optimized_code_slot, cr0);

  // Optimized code is good, get it into the closure and link the closure
  // into the optimized functions list, then tail call the optimized code.
  ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
                                      scratch, r8);
  static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
  __ LoadCodeObjectEntry(r5, optimized_code_entry);
  __ Jump(r5);

  // Optimized code slot contains deoptimized code or code is cleared and
  // optimized code marker isn't updated. Evict the code, update the marker
  // and re-enter the closure's code.
  __ bind(&heal_optimized_code_slot);
  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
}

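// Tail-calls the TurboFan compilation runtime function matching the tiering
// state. The state is expected to be one of the request values, so control
// never falls through (__ stop()).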
static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
                              Register tiering_state) {
  // ----------- S t a t e -------------
  //  -- r3 : actual argument count
  //  -- r6 : new target (preserved for callee if needed, and caller)
  //  -- r4 : target function (preserved for callee if needed, and caller)
  //  -- feedback vector (preserved for caller if needed)
  //  -- tiering_state : an int32 containing a non-zero optimization
  //  marker.
  // -----------------------------------
  DCHECK(!AreAliased(feedback_vector, r4, r6, tiering_state));

  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Synchronous,
                               Runtime::kCompileTurbofan_Synchronous);
  TailCallRuntimeIfStateEquals(masm, tiering_state,
                               TieringState::kRequestTurbofan_Concurrent,
                               Runtime::kCompileTurbofan_Concurrent);

  __ stop();
}

// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode. Will not advance
// the bytecode offset if the current bytecode is a JumpLoop, instead just
// re-executing the JumpLoop to jump to the correct bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
                                          Register bytecode_array,
                                          Register bytecode_offset,
                                          Register bytecode, Register scratch1,
                                          Register scratch2, Label* if_return) {
  Register bytecode_size_table = scratch1;
  Register scratch3 = bytecode;

  // The bytecode offset value will be increased by one in wide and extra wide
  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
  // will restore the original bytecode. In order to simplify the code, we have
  // a backup of it.
  Register original_bytecode_offset = scratch2;
  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                     bytecode, original_bytecode_offset));
  __ Move(bytecode_size_table,
          ExternalReference::bytecode_size_table_address());
  __ Move(original_bytecode_offset, bytecode_offset);

  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
  Label process_bytecode, extra_wide;
  STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
  STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
  STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
  STATIC_ASSERT(3 ==
                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
  __ cmpi(bytecode, Operand(0x3));
  __ bgt(&process_bytecode);
  __ andi(r0, bytecode, Operand(0x1));
  __ bne(&extra_wide, cr0);

  // Load the next bytecode and update table to the wide scaled table.
  __ addi(bytecode_offset, bytecode_offset, Operand(1));
  __ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ addi(bytecode_size_table, bytecode_size_table,
          Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
  __ b(&process_bytecode);

  __ bind(&extra_wide);
  // Load the next bytecode and update table to the extra wide scaled table.
  __ addi(bytecode_offset, bytecode_offset, Operand(1));
  __ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
  __ addi(bytecode_size_table, bytecode_size_table,
          Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));

  // Load the size of the current bytecode.
  __ bind(&process_bytecode);

  // Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME)                                           \
  __ cmpi(bytecode,                                                   \
          Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
  __ beq(if_return);
  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL

  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
  // of the loop.
  Label end, not_jump_loop;
  __ cmpi(bytecode,
          Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
  __ bne(&not_jump_loop);
  // We need to restore the original bytecode_offset since we might have
  // increased it to skip the wide / extra-wide prefix bytecode.
  __ Move(bytecode_offset, original_bytecode_offset);
  __ b(&end);

  __ bind(&not_jump_loop);
  // Otherwise, load the size of the current bytecode and advance the offset.
  __ lbzx(scratch3, MemOperand(bytecode_size_table, bytecode));
  __ add(bytecode_offset, bytecode_offset, scratch3);

  __ bind(&end);
}

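// Dispatches on |optimization_state|: if a tiering request is pending, goes
// to compilation via MaybeOptimizeCode; otherwise loads the cached optimized
// code from the feedback vector and tail-calls it.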
static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
    MacroAssembler* masm, Register optimization_state,
    Register feedback_vector) {
  DCHECK(!AreAliased(optimization_state, feedback_vector));
  Label maybe_has_optimized_code;
  // Check if optimized code is available
  __ TestBitMask(optimization_state,
                 FeedbackVector::kTieringStateIsAnyRequestMask, r0);
  __ beq(&maybe_has_optimized_code, cr0);

  Register tiering_state = optimization_state;
  __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
  MaybeOptimizeCode(masm, feedback_vector, tiering_state);

  __ bind(&maybe_has_optimized_code);
  Register optimized_code_entry = optimization_state;
  __ LoadAnyTaggedField(
      tiering_state,
      FieldMemOperand(feedback_vector,
                      FeedbackVector::kMaybeOptimizedCodeOffset),
      r0);
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r9);
}

1310 // Read off the optimization state in the feedback vector and check if there
1311 // is optimized code or a tiering state that needs to be processed.
1312 static void LoadTieringStateAndJumpIfNeedsProcessing(
1313     MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1314     Label* has_optimized_code_or_state) {
1315   ASM_CODE_COMMENT(masm);
1316   USE(LoadTieringStateAndJumpIfNeedsProcessing);
1317   DCHECK(!AreAliased(optimization_state, feedback_vector));
1318   __ LoadU32(optimization_state,
1319              FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1320   CHECK(is_uint16(
1321       FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1322   __ mov(
1323       r0,
1324       Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1325   __ AndU32(r0, optimization_state, r0, SetRC);
1326   __ bne(has_optimized_code_or_state);
1327 }
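// In effect, the check above amounts to this sketch (using the field names
// from this file):
//
//   uint32_t flags = feedback_vector->flags();
//   if (flags & FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask)
//     goto has_optimized_code_or_state;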
1328 
1329 #if ENABLE_SPARKPLUG
1330 // static
1331 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1332   auto descriptor =
1333       Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1334   Register closure = descriptor.GetRegisterParameter(
1335       BaselineOutOfLinePrologueDescriptor::kClosure);
1336   // Load the feedback vector from the closure.
1337   Register feedback_vector = ip;
1338   __ LoadTaggedPointerField(
1339       feedback_vector,
1340       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
1341   __ LoadTaggedPointerField(
1342       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
1343       r0);
1344 
1345   if (FLAG_debug_code) {
1346     Register scratch = r11;
1347     __ CompareObjectType(feedback_vector, scratch, scratch,
1348                          FEEDBACK_VECTOR_TYPE);
1349     __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1350   }
1351 
1352   // Check for a tiering state.
1353   Label has_optimized_code_or_state;
1354   Register optimization_state = r11;
1355   {
1356     LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1357                                              feedback_vector,
1358                                              &has_optimized_code_or_state);
1359   }
1360 
1361   // Increment invocation count for the function.
1362   {
1363     Register invocation_count = r13;
1364     __ LoadU64(invocation_count,
1365                FieldMemOperand(feedback_vector,
1366                                FeedbackVector::kInvocationCountOffset),
1367                r0);
1368     __ AddS64(invocation_count, invocation_count, Operand(1));
1369     __ StoreU64(invocation_count,
1370                 FieldMemOperand(feedback_vector,
1371                                 FeedbackVector::kInvocationCountOffset),
1372                 r0);
1373   }
1374 
1375   FrameScope frame_scope(masm, StackFrame::MANUAL);
1376   {
1377     ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1378     // Normally the first thing we'd do here is Push(lr, fp), but we already
1379     // entered the frame in BaselineCompiler::Prologue, as we had to use the
1380     // value lr before the call to this BaselineOutOfLinePrologue builtin.
1381 
1382     Register callee_context = descriptor.GetRegisterParameter(
1383         BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1384     Register callee_js_function = descriptor.GetRegisterParameter(
1385         BaselineOutOfLinePrologueDescriptor::kClosure);
1386     __ Push(callee_context, callee_js_function);
1387     DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1388     DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1389 
1390     Register argc = descriptor.GetRegisterParameter(
1391         BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1392     // We'll use the bytecode array both for resetting the code age/OSR state
1393     // and for pushing onto the frame, so load it into a register.
1394     Register bytecodeArray = descriptor.GetRegisterParameter(
1395         BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1396     ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r13);
1397 
1398     __ Push(argc, bytecodeArray);
1399 
1400     // Baseline code frames store the feedback vector where interpreter would
1401     // store the bytecode offset.
1402     if (FLAG_debug_code) {
1403       Register scratch = r13;
1404       __ CompareObjectType(feedback_vector, scratch, scratch,
1405                            FEEDBACK_VECTOR_TYPE);
1406       __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1407     }
1408     __ Push(feedback_vector);
1409   }
1410 
1411   Label call_stack_guard;
1412   Register frame_size = descriptor.GetRegisterParameter(
1413       BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1414   {
1415     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1416     // Stack check. This folds the checks for both the interrupt stack limit
1417     // check and the real stack limit into one by just checking for the
1418     // interrupt limit. The interrupt limit is either equal to the real stack
1419     // limit or tighter. By ensuring we have space until that limit after
1420     // building the frame we can quickly precheck both at once.
1421 
1422     Register sp_minus_frame_size = r13;
1423     Register interrupt_limit = r0;
1424     __ SubS64(sp_minus_frame_size, sp, frame_size);
1425     __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1426     __ CmpU64(sp_minus_frame_size, interrupt_limit);
1427     __ blt(&call_stack_guard);
1428   }
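  // The folded check above amounts to (sketch):
  //   if (sp - frame_size < interrupt_limit) goto call_stack_guard;
  // and, because the interrupt limit is never below the real limit, passing
  // it also guarantees the real stack limit is respected.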
1429 
1430   // Do "fast" return to the caller pc in lr.
1431   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1432   __ Ret();
1433 
1434   __ bind(&has_optimized_code_or_state);
1435   {
1436     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1437 
1438     // Drop the frame created by the baseline call.
1439     __ Pop(r0, fp);
1440     __ mtlr(r0);
1441     MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1442                                                  feedback_vector);
1443     __ Trap();
1444   }
1445 
1446   __ bind(&call_stack_guard);
1447   {
1448     ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1449     FrameScope frame_scope(masm, StackFrame::INTERNAL);
1450     // Save incoming new target or generator.
1451     __ Push(kJavaScriptCallNewTargetRegister);
1452     __ SmiTag(frame_size);
1453     __ Push(frame_size);
1454     __ CallRuntime(Runtime::kStackGuardWithGap);
1455     __ Pop(kJavaScriptCallNewTargetRegister);
1456   }
1457 
1458   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1459   __ Ret();
1460 }
1461 #endif  // ENABLE_SPARKPLUG
1462 
1463 // Generate code for entering a JS function with the interpreter.
1464 // On entry to the function the receiver and arguments have been pushed on the
1465 // stack left to right.
1466 //
1467 // The live registers are:
1468 //   o r3: actual argument count
1469 //   o r4: the JS function object being called.
1470 //   o r6: the incoming new target or generator object
1471 //   o cp: our context
1472 //   o pp: the caller's constant pool pointer (if enabled)
1473 //   o fp: the caller's frame pointer
1474 //   o sp: stack pointer
1475 //   o lr: return address
1476 //
1477 // The function builds an interpreter frame.  See InterpreterFrameConstants in
1478 // frame-constants.h for its layout.
1479 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1480   Register closure = r4;
1481   Register feedback_vector = r5;
1482 
1483   // Get the bytecode array from the function object and load it into
1484   // kInterpreterBytecodeArrayRegister.
1485   __ LoadTaggedPointerField(
1486       r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
1487   // Load original bytecode array or the debug copy.
1488   __ LoadTaggedPointerField(
1489       kInterpreterBytecodeArrayRegister,
1490       FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
1491   GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, ip);
1492 
1493   // The bytecode array could have been flushed from the shared function info,
1494   // if so, call into CompileLazy.
1495   Label compile_lazy;
1496   __ CompareObjectType(kInterpreterBytecodeArrayRegister, r7, no_reg,
1497                        BYTECODE_ARRAY_TYPE);
1498   __ bne(&compile_lazy);
1499 
1500   // Load the feedback vector from the closure.
1501   __ LoadTaggedPointerField(
1502       feedback_vector,
1503       FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
1504   __ LoadTaggedPointerField(
1505       feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
1506       r0);
1507 
1508   Label push_stack_frame;
1509   // Check if feedback vector is valid. If valid, check for optimized code
1510   // and update invocation count. Otherwise, set up the stack frame.
1511   __ LoadTaggedPointerField(
1512       r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
1513   __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
1514   __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
1515   __ bne(&push_stack_frame);
1516 
1517   Register optimization_state = r7;
1518 
1519   // Read off the optimization state in the feedback vector.
1520   __ LoadU32(optimization_state,
1521              FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
1522              r0);
1523 
1524   // Check if the optimized code slot is non-empty or a tiering request is
1525   // pending.
1525   Label has_optimized_code_or_state;
1526   __ TestBitMask(
1527       optimization_state,
1528       FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask, r0);
1529   __ bne(&has_optimized_code_or_state, cr0);
1530 
1531   Label not_optimized;
1532   __ bind(&not_optimized);
1533 
1534   // Increment invocation count for the function.
1535   __ LoadU32(
1536       r8,
1537       FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
1538       r0);
1539   __ addi(r8, r8, Operand(1));
1540   __ StoreU32(
1541       r8,
1542       FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
1543       r0);
1544 
1545   // Open a frame scope to indicate that there is a frame on the stack.  The
1546   // MANUAL indicates that the scope shouldn't actually generate code to set up
1547   // the frame (that is done below).
1548 
1549   __ bind(&push_stack_frame);
1550 
1551   FrameScope frame_scope(masm, StackFrame::MANUAL);
1552   __ PushStandardFrame(closure);
1553 
1554   ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r8);
1555 
1556   // Load initial bytecode offset.
1557   __ mov(kInterpreterBytecodeOffsetRegister,
1558          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1559 
1560   // Push bytecode array and Smi tagged bytecode array offset.
1561   __ SmiTag(r7, kInterpreterBytecodeOffsetRegister);
1562   __ Push(kInterpreterBytecodeArrayRegister, r7);
1563 
1564   // Allocate the local and temporary register file on the stack.
1565   Label stack_overflow;
1566   {
1567     // Load frame size (word) from the BytecodeArray object.
1568     __ lwz(r5, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1569                                BytecodeArray::kFrameSizeOffset));
1570 
1571     // Do a stack check to ensure we don't go over the limit.
1572     __ sub(r8, sp, r5);
1573     __ LoadStackLimit(r0, StackLimitKind::kRealStackLimit);
1574     __ CmpU64(r8, r0);
1575     __ blt(&stack_overflow);
1576 
1577     // If ok, push undefined as the initial value for all register file entries.
1578     // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1579     Label loop, no_args;
1580     __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1581     __ ShiftRightU64(r5, r5, Operand(kSystemPointerSizeLog2), SetRC);
1582     __ beq(&no_args, cr0);
1583     __ mtctr(r5);
1584     __ bind(&loop);
1585     __ push(kInterpreterAccumulatorRegister);
1586     __ bdnz(&loop);
1587     __ bind(&no_args);
1588   }
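  // Conceptually (sketch), the CTR-based loop above performs:
  //   for (int i = 0; i < frame_size / kSystemPointerSize; ++i) {
  //     Push(kUndefinedValue);
  //   }
  // so every interpreter register starts out holding undefined.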
1589 
1590   // If the bytecode array has a valid incoming new target or generator object
1591   // register, initialize it with incoming value which was passed in r6.
1592   Label no_incoming_new_target_or_generator_register;
1593   __ LoadS32(r8,
1594              FieldMemOperand(
1595                  kInterpreterBytecodeArrayRegister,
1596                  BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset),
1597              r0);
1598   __ cmpi(r8, Operand::Zero());
1599   __ beq(&no_incoming_new_target_or_generator_register);
1600   __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
1601   __ StoreU64(r6, MemOperand(fp, r8));
1602   __ bind(&no_incoming_new_target_or_generator_register);
1603 
1604   // Perform interrupt stack check.
1605   // TODO(solanes): Merge with the real stack limit check above.
1606   Label stack_check_interrupt, after_stack_check_interrupt;
1607   __ LoadStackLimit(r0, StackLimitKind::kInterruptStackLimit);
1608   __ CmpU64(sp, r0);
1609   __ blt(&stack_check_interrupt);
1610   __ bind(&after_stack_check_interrupt);
1611 
1612   // The accumulator is already loaded with undefined.
1613 
1614   // Load the dispatch table into a register and dispatch to the bytecode
1615   // handler at the current bytecode offset.
1616   Label do_dispatch;
1617   __ bind(&do_dispatch);
1618   __ Move(
1619       kInterpreterDispatchTableRegister,
1620       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1621   __ lbzx(r6, MemOperand(kInterpreterBytecodeArrayRegister,
1622                          kInterpreterBytecodeOffsetRegister));
1623   __ ShiftLeftU64(r6, r6, Operand(kSystemPointerSizeLog2));
1624   __ LoadU64(kJavaScriptCallCodeStartRegister,
1625              MemOperand(kInterpreterDispatchTableRegister, r6));
1626   __ Call(kJavaScriptCallCodeStartRegister);
1627 
1628   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1629 
1630   // Any returns to the entry trampoline are either due to the return bytecode
1631   // or the interpreter tail calling a builtin and then a dispatch.
1632 
1633   // Get bytecode array and bytecode offset from the stack frame.
1634   __ LoadU64(kInterpreterBytecodeArrayRegister,
1635              MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1636   __ LoadU64(kInterpreterBytecodeOffsetRegister,
1637              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1638   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1639 
1640   // Either return, or advance to the next bytecode and dispatch.
1641   Label do_return;
1642   __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
1643                          kInterpreterBytecodeOffsetRegister));
1644   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1645                                 kInterpreterBytecodeOffsetRegister, r4, r5, r6,
1646                                 &do_return);
1647   __ b(&do_dispatch);
1648 
1649   __ bind(&do_return);
1650   // The return value is in r3.
1651   LeaveInterpreterFrame(masm, r5, r7);
1652   __ blr();
1653 
1654   __ bind(&stack_check_interrupt);
1655   // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1656   // for the call to the StackGuard.
1657   __ mov(kInterpreterBytecodeOffsetRegister,
1658          Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1659                               kFunctionEntryBytecodeOffset)));
1660   __ StoreU64(kInterpreterBytecodeOffsetRegister,
1661               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1662   __ CallRuntime(Runtime::kStackGuard);
1663 
1664   // After the call, restore the bytecode array, bytecode offset and accumulator
1665   // registers again. Also, restore the bytecode offset in the stack to its
1666   // previous value.
1667   __ LoadU64(kInterpreterBytecodeArrayRegister,
1668              MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1669   __ mov(kInterpreterBytecodeOffsetRegister,
1670          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1671   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1672 
1673   __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
1674   __ StoreU64(r0,
1675               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1676 
1677   __ jmp(&after_stack_check_interrupt);
1678 
1679   __ bind(&has_optimized_code_or_state);
1680   MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1681                                                feedback_vector);
1682 
1683   __ bind(&compile_lazy);
1684   GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1685 
1686   __ bind(&stack_overflow);
1687   __ CallRuntime(Runtime::kThrowStackOverflow);
1688   __ bkpt(0);  // Should not return.
1689 }
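// Taken together, the trampoline's dispatch behaves like this C++ sketch
// (hypothetical names; handlers return here via the return PC recorded above):
//
//   while (true) {
//     uint8_t bytecode = bytecode_array[offset];
//     CallHandler(dispatch_table[bytecode]);  // Executes one bytecode.
//     bool is_return = false;
//     offset = AdvanceOrReturn(bytecode_array, size_table, offset, &is_return);
//     if (is_return) break;                   // LeaveInterpreterFrame + blr.
//   }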
1690 
1691 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1692                                         Register start_address,
1693                                         Register scratch) {
1694   ASM_CODE_COMMENT(masm);
1695   __ subi(scratch, num_args, Operand(1));
1696   __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
1697   __ sub(start_address, start_address, scratch);
1698   // Push the arguments.
1699   __ PushArray(start_address, num_args, scratch, r0,
1700                TurboAssembler::PushArrayOrder::kReverse);
1701 }
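// The address arithmetic above, as a sketch: the caller passes the address of
// the first argument, and the interpreter register file grows downwards, so
//   start_address -= (num_args - 1) * kSystemPointerSize;
// rewinds to the last argument before PushArray copies all num_args slots
// onto the stack in reverse order.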
1702 
1703 // static
1704 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1705     MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1706     InterpreterPushArgsMode mode) {
1707   DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1708   // ----------- S t a t e -------------
1709   //  -- r3 : the number of arguments
1710   //  -- r5 : the address of the first argument to be pushed. Subsequent
1711   //          arguments should be consecutive above this, in the same order as
1712   //          they are to be pushed onto the stack.
1713   //  -- r4 : the target to call (can be any Object).
1714   // -----------------------------------
1715   Label stack_overflow;
1716 
1717   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1718     // The spread argument should not be pushed.
1719     __ subi(r3, r3, Operand(1));
1720   }
1721 
1722   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1723     __ subi(r6, r3, Operand(kJSArgcReceiverSlots));
1724   } else {
1725     __ mr(r6, r3);
1726   }
1727 
1728   __ StackOverflowCheck(r6, ip, &stack_overflow);
1729 
1730   // Push the arguments.
1731   GenerateInterpreterPushArgs(masm, r6, r5, r7);
1732 
1733   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1734     __ PushRoot(RootIndex::kUndefinedValue);
1735   }
1736 
1737   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1738     // Pass the spread in the register r5.
1739     // r5 already points to the penultimate argument; the spread
1740     // lies in the next interpreter register.
1741     __ LoadU64(r5, MemOperand(r5, -kSystemPointerSize));
1742   }
1743 
1744   // Call the target.
1745   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1746     __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1747             RelocInfo::CODE_TARGET);
1748   } else {
1749     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1750             RelocInfo::CODE_TARGET);
1751   }
1752 
1753   __ bind(&stack_overflow);
1754   {
1755     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1756     // Unreachable Code.
1757     __ bkpt(0);
1758   }
1759 }
1760 
1761 // static
1762 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1763     MacroAssembler* masm, InterpreterPushArgsMode mode) {
1764   // ----------- S t a t e -------------
1765   // -- r3 : argument count
1766   // -- r6 : new target
1767   // -- r4 : constructor to call
1768   // -- r5 : allocation site feedback if available, undefined otherwise.
1769   // -- r7 : address of the first argument
1770   // -----------------------------------
1771   Label stack_overflow;
1772   __ StackOverflowCheck(r3, ip, &stack_overflow);
1773 
1774   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1775     // The spread argument should not be pushed.
1776     __ subi(r3, r3, Operand(1));
1777   }
1778 
1779   Register argc_without_receiver = ip;
1780   __ subi(argc_without_receiver, r3, Operand(kJSArgcReceiverSlots));
1781 
1782   // Push the arguments.
1783   GenerateInterpreterPushArgs(masm, argc_without_receiver, r7, r8);
1784 
1785   // Push a slot for the receiver to be constructed.
1786   __ li(r0, Operand::Zero());
1787   __ push(r0);
1788 
1789   if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1790     // Pass the spread in the register r5.
1791     // r7 already points to the penultimate argument; the spread
1792     // lies in the next interpreter register.
1793     __ subi(r7, r7, Operand(kSystemPointerSize));
1794     __ LoadU64(r5, MemOperand(r7));
1795   } else {
1796     __ AssertUndefinedOrAllocationSite(r5, r8);
1797   }
1798 
1799   if (mode == InterpreterPushArgsMode::kArrayFunction) {
1800     __ AssertFunction(r4);
1801 
1802     // Tail call to the array construct stub (still in the caller
1803     // context at this point).
1804     Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1805     __ Jump(code, RelocInfo::CODE_TARGET);
1806   } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1807     // Call the constructor with r3, r4, and r6 unmodified.
1808     __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1809             RelocInfo::CODE_TARGET);
1810   } else {
1811     DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1812     // Call the constructor with r3, r4, and r6 unmodified.
1813     __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1814   }
1815 
1816   __ bind(&stack_overflow);
1817   {
1818     __ TailCallRuntime(Runtime::kThrowStackOverflow);
1819     // Unreachable Code.
1820     __ bkpt(0);
1821   }
1822 }
1823 
1824 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1825   // Set the return address to the correct point in the interpreter entry
1826   // trampoline.
1827   Label builtin_trampoline, trampoline_loaded;
1828   Smi interpreter_entry_return_pc_offset(
1829       masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1830   DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
1831 
1832   // If the SFI function_data is an InterpreterData, the function will have a
1833   // custom copy of the interpreter entry trampoline for profiling. If so,
1834   // get the custom trampoline, otherwise grab the entry address of the global
1835   // trampoline.
1836   __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1837   __ LoadTaggedPointerField(
1838       r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
1839   __ LoadTaggedPointerField(
1840       r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
1841   __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
1842                        kInterpreterDispatchTableRegister,
1843                        INTERPRETER_DATA_TYPE);
1844   __ bne(&builtin_trampoline);
1845 
1846   __ LoadTaggedPointerField(
1847       r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
1848       r0);
1849   __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
1850   __ b(&trampoline_loaded);
1851 
1852   __ bind(&builtin_trampoline);
1853   __ Move(r5, ExternalReference::
1854                   address_of_interpreter_entry_trampoline_instruction_start(
1855                       masm->isolate()));
1856   __ LoadU64(r5, MemOperand(r5));
1857 
1858   __ bind(&trampoline_loaded);
1859   __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value()));
1860   __ mtlr(r0);
1861 
1862   // Initialize the dispatch table register.
1863   __ Move(
1864       kInterpreterDispatchTableRegister,
1865       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1866 
1867   // Get the bytecode array pointer from the frame.
1868   __ LoadU64(kInterpreterBytecodeArrayRegister,
1869              MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1870 
1871   if (FLAG_debug_code) {
1872     // Check function data field is actually a BytecodeArray object.
1873     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
1874     __ Assert(ne,
1875               AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
1876               cr0);
1877     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
1878                          BYTECODE_ARRAY_TYPE);
1879     __ Assert(
1880         eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1881   }
1882 
1883   // Get the target bytecode offset from the frame.
1884   __ LoadU64(kInterpreterBytecodeOffsetRegister,
1885              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1886   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1887 
1888   if (FLAG_debug_code) {
1889     Label okay;
1890     __ cmpi(kInterpreterBytecodeOffsetRegister,
1891             Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1892                     kFunctionEntryBytecodeOffset));
1893     __ bge(&okay);
1894     __ bkpt(0);
1895     __ bind(&okay);
1896   }
1897 
1898   // Dispatch to the target bytecode.
1899   UseScratchRegisterScope temps(masm);
1900   Register scratch = temps.Acquire();
1901   __ lbzx(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
1902                          kInterpreterBytecodeOffsetRegister));
1903   __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
1904   __ LoadU64(kJavaScriptCallCodeStartRegister,
1905              MemOperand(kInterpreterDispatchTableRegister, scratch));
1906   __ Jump(kJavaScriptCallCodeStartRegister);
1907 }
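// Return-address selection above, sketched:
//
//   Address base = sfi_has_interpreter_data
//                      ? interpreter_data->interpreter_trampoline_entry
//                      : global_interpreter_entry_trampoline_start;
//   lr = base + interpreter_entry_return_pc_offset;
//
// so profiled functions return into their custom trampoline copy.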
1908 
1909 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1910   // Get bytecode array and bytecode offset from the stack frame.
1911   __ LoadU64(kInterpreterBytecodeArrayRegister,
1912              MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1913   __ LoadU64(kInterpreterBytecodeOffsetRegister,
1914              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1915   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1916 
1917   Label enter_bytecode, function_entry_bytecode;
1918   __ cmpi(kInterpreterBytecodeOffsetRegister,
1919           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1920                   kFunctionEntryBytecodeOffset));
1921   __ beq(&function_entry_bytecode);
1922 
1923   // Load the current bytecode.
1924   __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
1925                          kInterpreterBytecodeOffsetRegister));
1926 
1927   // Advance to the next bytecode.
1928   Label if_return;
1929   AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1930                                 kInterpreterBytecodeOffsetRegister, r4, r5, r6,
1931                                 &if_return);
1932 
1933   __ bind(&enter_bytecode);
1934   // Convert new bytecode offset to a Smi and save in the stackframe.
1935   __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
1936   __ StoreU64(r5,
1937               MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1938 
1939   Generate_InterpreterEnterBytecode(masm);
1940 
1941   __ bind(&function_entry_bytecode);
1942   // If the code deoptimizes during the implicit function entry stack interrupt
1943   // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1944   // not a valid bytecode offset. Detect this case and advance to the first
1945   // actual bytecode.
1946   __ mov(kInterpreterBytecodeOffsetRegister,
1947          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1948   __ b(&enter_bytecode);
1949 
1950   // We should never take the if_return path.
1951   __ bind(&if_return);
1952   __ Abort(AbortReason::kInvalidBytecodeAdvance);
1953 }
1954 
1955 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
1956   Generate_InterpreterEnterBytecode(masm);
1957 }
1958 
1959 namespace {
1960 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
1961                                       bool java_script_builtin,
1962                                       bool with_result) {
1963   const RegisterConfiguration* config(RegisterConfiguration::Default());
1964   int allocatable_register_count = config->num_allocatable_general_registers();
1965   Register scratch = ip;
1966   if (with_result) {
1967     if (java_script_builtin) {
1968       __ mr(scratch, r3);
1969     } else {
1970       // Overwrite the hole inserted by the deoptimizer with the return value
1971       // from the LAZY deopt point.
1972       __ StoreU64(
1973           r3, MemOperand(
1974                   sp, config->num_allocatable_general_registers() *
1975                               kSystemPointerSize +
1976                           BuiltinContinuationFrameConstants::kFixedFrameSize));
1977     }
1978   }
1979   for (int i = allocatable_register_count - 1; i >= 0; --i) {
1980     int code = config->GetAllocatableGeneralCode(i);
1981     __ Pop(Register::from_code(code));
1982     if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
1983       __ SmiUntag(Register::from_code(code));
1984     }
1985   }
1986   if (java_script_builtin && with_result) {
1987     // Overwrite the hole inserted by the deoptimizer with the return value from
1988     // the LAZY deopt point. r3 contains the arguments count, the return value
1989     // from LAZY is always the last argument.
1990     constexpr int return_value_offset =
1991         BuiltinContinuationFrameConstants::kFixedSlotCount -
1992         kJSArgcReceiverSlots;
1993     __ addi(r3, r3, Operand(return_value_offset));
1994     __ ShiftLeftU64(r0, r3, Operand(kSystemPointerSizeLog2));
1995     __ StoreU64(scratch, MemOperand(sp, r0));
1996     // Recover arguments count.
1997     __ subi(r3, r3, Operand(return_value_offset));
1998   }
1999   __ LoadU64(
2000       fp,
2001       MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
2002   // Load builtin index (stored as a Smi) and use it to get the builtin start
2003   // address from the builtins table.
2004   UseScratchRegisterScope temps(masm);
2005   Register builtin = temps.Acquire();
2006   __ Pop(builtin);
2007   __ addi(sp, sp,
2008           Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
2009   __ Pop(r0);
2010   __ mtlr(r0);
2011   __ LoadEntryFromBuiltinIndex(builtin);
2012   __ Jump(builtin);
2013 }
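// The unwind sequence above, sketched (slot indices are assumptions based on
// the frame constants used in this function):
//
//   if (with_result) stash(r3);                 // LAZY deopt return value.
//   for (reg : allocatable registers, reverse) reg = pop();
//   // argc is SmiUntagged during the loop for JavaScript builtins.
//   if (java_script_builtin && with_result)
//     stack[argc + kFixedSlotCount - kJSArgcReceiverSlots] = stashed;
//   fp = stack[kFixedFrameSizeFromFp];
//   builtin_index = pop();  lr = pop();
//   goto builtin_entry_table[builtin_index];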
2014 }  // namespace
2015 
2016 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
2017   Generate_ContinueToBuiltinHelper(masm, false, false);
2018 }
2019 
2020 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
2021     MacroAssembler* masm) {
2022   Generate_ContinueToBuiltinHelper(masm, false, true);
2023 }
2024 
2025 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
2026   Generate_ContinueToBuiltinHelper(masm, true, false);
2027 }
2028 
2029 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
2030     MacroAssembler* masm) {
2031   Generate_ContinueToBuiltinHelper(masm, true, true);
2032 }
2033 
2034 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
2035   {
2036     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2037     __ CallRuntime(Runtime::kNotifyDeoptimized);
2038   }
2039 
2040   DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
2041   __ LoadU64(r3, MemOperand(sp, 0 * kSystemPointerSize));
2042   __ addi(sp, sp, Operand(1 * kSystemPointerSize));
2043   __ Ret();
2044 }
2045 
2046 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
2047   return OnStackReplacement(masm, true);
2048 }
2049 
2050 #if ENABLE_SPARKPLUG
2051 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
2052   __ LoadU64(kContextRegister,
2053              MemOperand(fp, BaselineFrameConstants::kContextOffset), r0);
2054   return OnStackReplacement(masm, false);
2055 }
2056 #endif  // ENABLE_SPARKPLUG
2057 
2058 // static
2059 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2060   // ----------- S t a t e -------------
2061   //  -- r3    : argc
2062   //  -- sp[0] : receiver
2063   //  -- sp[4] : thisArg
2064   //  -- sp[8] : argArray
2065   // -----------------------------------
2066 
2067   // 1. Load receiver into r4, argArray into r5 (if present), remove all
2068   // arguments from the stack (including the receiver), and push thisArg (if
2069   // present) instead.
2070   {
2071     __ LoadRoot(r8, RootIndex::kUndefinedValue);
2072     __ mr(r5, r8);
2073 
2074     Label done;
2075     __ LoadU64(r4, MemOperand(sp));  // receiver
2076     __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
2077     __ blt(&done);
2078     __ LoadU64(r8, MemOperand(sp, kSystemPointerSize));  // thisArg
2079     __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
2080     __ blt(&done);
2081     __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
2082 
2083     __ bind(&done);
2084     __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
2085                                        TurboAssembler::kCountIncludesReceiver);
2086   }
2087 
2088   // ----------- S t a t e -------------
2089   //  -- r5    : argArray
2090   //  -- r4    : receiver
2091   //  -- sp[0] : thisArg
2092   // -----------------------------------
2093 
2094   // 2. We don't need to check explicitly for callable receiver here,
2095   // since that's the first thing the Call/CallWithArrayLike builtins
2096   // will do.
2097 
2098   // 3. Tail call with no arguments if argArray is null or undefined.
2099   Label no_arguments;
2100   __ JumpIfRoot(r5, RootIndex::kNullValue, &no_arguments);
2101   __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &no_arguments);
2102 
2103   // 4a. Apply the receiver to the given argArray.
2104   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2105           RelocInfo::CODE_TARGET);
2106 
2107   // 4b. The argArray is either null or undefined, so we tail call without any
2108   // arguments to the receiver.
2109   __ bind(&no_arguments);
2110   {
2111     __ mov(r3, Operand(JSParameterCount(0)));
2112     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2113   }
2114 }
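// Dispatch above, sketched (the apply receiver is the function to call):
//
//   if (argArray == null || argArray == undefined) {
//     return Call(target);                        // No arguments to spread.
//   }
//   return CallWithArrayLike(target, argArray);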
2115 
2116 // static
2117 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2118   // 1. Get the callable to call (passed as receiver) from the stack.
2119   __ Pop(r4);
2120 
2121   // 2. Make sure we have at least one argument.
2122   // r3: actual number of arguments
2123   {
2124     Label done;
2125     __ CmpS64(r3, Operand(JSParameterCount(0)), r0);
2126     __ bne(&done);
2127     __ PushRoot(RootIndex::kUndefinedValue);
2128     __ addi(r3, r3, Operand(1));
2129     __ bind(&done);
2130   }
2131 
2132   // 3. Adjust the actual number of arguments.
2133   __ subi(r3, r3, Operand(1));
2134 
2135   // 4. Call the callable.
2136   __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2137 }
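// Stack effect of Function.prototype.call, sketched:
//
//   callable = pop();                     // The old receiver slot.
//   if (argc == JSParameterCount(0)) {    // Ensure at least one argument.
//     push(undefined); ++argc;
//   }
//   --argc;                               // First argument becomes receiver.
//   TailCall(Call, callable);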
2138 
2139 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2140   // ----------- S t a t e -------------
2141   //  -- r3     : argc
2142   //  -- sp[0]  : receiver
2143   //  -- sp[4]  : target         (if argc >= 1)
2144   //  -- sp[8]  : thisArgument   (if argc >= 2)
2145   //  -- sp[12] : argumentsList  (if argc == 3)
2146   // -----------------------------------
2147 
2148   // 1. Load target into r4 (if present), argumentsList into r5 (if present),
2149   // remove all arguments from the stack (including the receiver), and push
2150   // thisArgument (if present) instead.
2151   {
2152     __ LoadRoot(r4, RootIndex::kUndefinedValue);
2153     __ mr(r8, r4);
2154     __ mr(r5, r4);
2155 
2156     Label done;
2157     __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
2158     __ blt(&done);
2159     __ LoadU64(r4, MemOperand(sp, kSystemPointerSize));  // target
2160     __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
2161     __ blt(&done);
2162     __ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize));  // thisArgument
2163     __ CmpS64(r3, Operand(JSParameterCount(3)), r0);
2164     __ blt(&done);
2165     __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // argumentsList
2166 
2167     __ bind(&done);
2168     __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
2169                                        TurboAssembler::kCountIncludesReceiver);
2170   }
2171 
2172   // ----------- S t a t e -------------
2173   //  -- r5    : argumentsList
2174   //  -- r4    : target
2175   //  -- sp[0] : thisArgument
2176   // -----------------------------------
2177 
2178   // 2. We don't need to check explicitly for callable target here,
2179   // since that's the first thing the Call/CallWithArrayLike builtins
2180   // will do.
2181 
2182   // 3. Apply the target to the given argumentsList.
2183   __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2184           RelocInfo::CODE_TARGET);
2185 }
2186 
2187 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2188   // ----------- S t a t e -------------
2189   //  -- r3     : argc
2190   //  -- sp[0]  : receiver
2191   //  -- sp[4]  : target
2192   //  -- sp[8]  : argumentsList
2193   //  -- sp[12] : new.target (optional)
2194   // -----------------------------------
2195 
2196   // 1. Load target into r4 (if present), argumentsList into r5 (if present),
2197   // new.target into r6 (if present, otherwise use target), remove all
2198   // arguments from the stack (including the receiver), and push thisArgument
2199   // (if present) instead.
2200   {
2201     __ LoadRoot(r4, RootIndex::kUndefinedValue);
2202     __ mr(r5, r4);
2203 
2204     Label done;
2205     __ mr(r7, r4);
2206     __ CmpS64(r3, Operand(JSParameterCount(1)), r0);
2207     __ blt(&done);
2208     __ LoadU64(r4, MemOperand(sp, kSystemPointerSize));  // target
2209     __ mr(r6, r4);
2210     __ CmpS64(r3, Operand(JSParameterCount(2)), r0);
2211     __ blt(&done);
2212     __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize));  // argumentsList
2213     __ CmpS64(r3, Operand(JSParameterCount(3)), r0);
2214     __ blt(&done);
2215     __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize));  // new.target
2216     __ bind(&done);
2217     __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
2218                                        TurboAssembler::kCountIncludesReceiver);
2219   }
2220 
2221   // ----------- S t a t e -------------
2222   //  -- r5    : argumentsList
2223   //  -- r6    : new.target
2224   //  -- r4    : target
2225   //  -- sp[0] : receiver (undefined)
2226   // -----------------------------------
2227 
2228   // 2. We don't need to check explicitly for constructor target here,
2229   // since that's the first thing the Construct/ConstructWithArrayLike
2230   // builtins will do.
2231 
2232   // 3. We don't need to check explicitly for constructor new.target here,
2233   // since that's the second thing the Construct/ConstructWithArrayLike
2234   // builtins will do.
2235 
2236   // 4. Construct the target with the given new.target and argumentsList.
2237   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2238           RelocInfo::CODE_TARGET);
2239 }
2240 
2241 namespace {
2242 
2243 // Allocate new stack space for |count| arguments and shift all existing
2244 // arguments already on the stack. |pointer_to_new_space_out| points to the
2245 // first free slot on the stack to copy additional arguments to and
2246 // |argc_in_out| is updated to include |count|.
2247 void Generate_AllocateSpaceAndShiftExistingArguments(
2248     MacroAssembler* masm, Register count, Register argc_in_out,
2249     Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2250   DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2251                      scratch2));
2252   Register old_sp = scratch1;
2253   Register new_space = scratch2;
2254   __ addi(old_sp, sp, Operand(-kSystemPointerSize));
2255   __ ShiftLeftU64(new_space, count, Operand(kSystemPointerSizeLog2));
2256   __ AllocateStackSpace(new_space);
2257 
2258   Register dest = pointer_to_new_space_out;
2259   __ addi(dest, sp, Operand(-kSystemPointerSize));
2260   Label loop, skip;
2261   __ mr(r0, argc_in_out);
2262   __ cmpi(r0, Operand::Zero());
2263   __ ble(&skip);
2264   __ mtctr(r0);
2265   __ bind(&loop);
2266   __ LoadU64WithUpdate(r0, MemOperand(old_sp, kSystemPointerSize));
2267   __ StoreU64WithUpdate(r0, MemOperand(dest, kSystemPointerSize));
2268   __ bdnz(&loop);
2269 
2270   __ bind(&skip);
2271   // Update total number of arguments, restore dest.
2272   __ add(argc_in_out, argc_in_out, count);
2273   __ addi(dest, dest, Operand(kSystemPointerSize));
2274 }
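// Equivalent C++ sketch of the shift performed above (hypothetical helper):
//
//   intptr_t* ShiftExistingArguments(intptr_t*& sp, int& argc, int count) {
//     intptr_t* old_sp = sp;
//     sp -= count;                                       // AllocateStackSpace.
//     for (int i = 0; i < argc; ++i) sp[i] = old_sp[i];  // Copy downwards.
//     argc += count;
//     return sp + (argc - count);  // First free slot for the new arguments.
//   }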
2275 
2276 }  // namespace
2277 
2278 // static
2279 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2280 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2281                                                Handle<Code> code) {
2282   // ----------- S t a t e -------------
2283   //  -- r4 : target
2284   //  -- r3 : number of parameters on the stack
2285   //  -- r5 : arguments list (a FixedArray)
2286   //  -- r7 : len (number of elements to push from args)
2287   //  -- r6 : new.target (for [[Construct]])
2288   // -----------------------------------
2289 
2290   Register scratch = ip;
2291 
2292   if (FLAG_debug_code) {
2293     // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
2294     Label ok, fail;
2295     __ AssertNotSmi(r5);
2296     __ LoadTaggedPointerField(scratch,
2297                               FieldMemOperand(r5, HeapObject::kMapOffset), r0);
2298     __ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2299     __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
2300     __ beq(&ok);
2301     __ cmpi(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2302     __ bne(&fail);
2303     __ cmpi(r7, Operand::Zero());
2304     __ beq(&ok);
2305     // Fall through.
2306     __ bind(&fail);
2307     __ Abort(AbortReason::kOperandIsNotAFixedArray);
2308 
2309     __ bind(&ok);
2310   }
2311 
2312   // Check for stack overflow.
2313   Label stack_overflow;
2314   __ StackOverflowCheck(r7, scratch, &stack_overflow);
2315 
2316   // Move the arguments already in the stack,
2317   // including the receiver and the return address.
2318   // r7: Number of arguments to make room for.
2319   // r3: Number of arguments already on the stack.
2320   // r8: Points to first free slot on the stack after arguments were shifted.
2321   Generate_AllocateSpaceAndShiftExistingArguments(masm, r7, r3, r8, ip, r9);
2322 
2323   // Push arguments onto the stack (thisArgument is already on the stack).
2324   {
2325     Label loop, no_args, skip;
2326     __ cmpi(r7, Operand::Zero());
2327     __ beq(&no_args);
2328     __ addi(r5, r5,
2329             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
2330     __ mtctr(r7);
2331     __ bind(&loop);
2332     __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
2333     __ addi(r5, r5, Operand(kTaggedSize));
2334     __ CompareRoot(scratch, RootIndex::kTheHoleValue);
2335     __ bne(&skip);
2336     __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2337     __ bind(&skip);
2338     __ StoreU64(scratch, MemOperand(r8));
2339     __ addi(r8, r8, Operand(kSystemPointerSize));
2340     __ bdnz(&loop);
2341     __ bind(&no_args);
2342   }
2343 
2344   // Tail-call to the actual Call or Construct builtin.
2345   __ Jump(code, RelocInfo::CODE_TARGET);
2346 
2347   __ bind(&stack_overflow);
2348   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2349 }
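// The element-pushing loop above, sketched: holes in a holey FixedArray are
// converted on the fly,
//
//   for (int i = 0; i < len; ++i) {
//     Object value = args->get(i);
//     if (value == the_hole) value = undefined;
//     *dest++ = value;
//   }
//
// so missing elements are observed as undefined by the callee.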
2350 
2351 // static
2352 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2353                                                       CallOrConstructMode mode,
2354                                                       Handle<Code> code) {
2355   // ----------- S t a t e -------------
2356   //  -- r3 : the number of arguments
2357   //  -- r6 : the new.target (for [[Construct]] calls)
2358   //  -- r4 : the target to call (can be any Object)
2359   //  -- r5 : start index (to support rest parameters)
2360   // -----------------------------------
2361 
2362   Register scratch = r9;
2363 
2364   if (mode == CallOrConstructMode::kConstruct) {
2365     Label new_target_constructor, new_target_not_constructor;
2366     __ JumpIfSmi(r6, &new_target_not_constructor);
2367     __ LoadTaggedPointerField(scratch,
2368                               FieldMemOperand(r6, HeapObject::kMapOffset), r0);
2369     __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2370     __ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
2371     __ bne(&new_target_constructor, cr0);
2372     __ bind(&new_target_not_constructor);
2373     {
2374       FrameScope scope(masm, StackFrame::MANUAL);
2375       __ EnterFrame(StackFrame::INTERNAL);
2376       __ Push(r6);
2377       __ CallRuntime(Runtime::kThrowNotConstructor);
2378       __ Trap();  // Unreachable.
2379     }
2380     __ bind(&new_target_constructor);
2381   }
2382 
2383   Label stack_done, stack_overflow;
2384   __ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2385   __ subi(r8, r8, Operand(kJSArgcReceiverSlots));
2386   __ sub(r8, r8, r5, LeaveOE, SetRC);
2387   __ ble(&stack_done, cr0);
2388   {
2389     // ----------- S t a t e -------------
2390     //  -- r3 : the number of arguments already in the stack
2391     //  -- r4 : the target to call (can be any Object)
2392     //  -- r5 : start index (to support rest parameters)
2393     //  -- r6 : the new.target (for [[Construct]] calls)
2394     //  -- fp : point to the caller stack frame
2395     //  -- r8 : number of arguments to copy, i.e. arguments count - start index
2396     // -----------------------------------
2397 
2398     // Check for stack overflow.
2399     __ StackOverflowCheck(r8, scratch, &stack_overflow);
2400 
2401     // Forward the arguments from the caller frame.
2402     // Point to the first argument to copy (skipping the receiver).
2403     __ addi(r7, fp,
2404             Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2405                     kSystemPointerSize));
2406     __ ShiftLeftU64(scratch, r5, Operand(kSystemPointerSizeLog2));
2407     __ add(r7, r7, scratch);
2408 
2409     // Move the arguments already in the stack,
2410     // including the receiver and the return address.
2411     // r8: Number of arguments to make room for.
2412     // r3: Number of arguments already on the stack.
2413     // r5: Points to first free slot on the stack after arguments were shifted.
2414     Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, r3, r5, scratch,
2415                                                     ip);
2416 
2417     // Copy arguments from the caller frame.
2418     // TODO(victorgomes): Consider using forward order as potentially more cache
2419     // friendly.
2420     {
2421       Label loop;
2422       __ bind(&loop);
2423       {
2424         __ subi(r8, r8, Operand(1));
2425         __ ShiftLeftU64(scratch, r8, Operand(kSystemPointerSizeLog2));
2426         __ LoadU64(r0, MemOperand(r7, scratch));
2427         __ StoreU64(r0, MemOperand(r5, scratch));
2428         __ cmpi(r8, Operand::Zero());
2429         __ bne(&loop);
2430       }
2431     }
2432   }
2433   __ b(&stack_done);
2434   __ bind(&stack_overflow);
2435   __ TailCallRuntime(Runtime::kThrowStackOverflow);
2436   __ bind(&stack_done);
2437 
2438   // Tail-call to the {code} handler.
2439   __ Jump(code, RelocInfo::CODE_TARGET);
2440 }
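// Rest-parameter forwarding above, sketched:
//
//   int to_copy = caller_argc - kJSArgcReceiverSlots - start_index;
//   if (to_copy > 0) {
//     src = &caller_frame_args[start_index];  // Skipping the receiver.
//     // Shift the existing arguments to make room (helper above), then
//     // copy to_copy slots from src into the freed space.
//   }
//   TailCall(code);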
2441 
2442 // static
2443 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2444                                      ConvertReceiverMode mode) {
2445   // ----------- S t a t e -------------
2446   //  -- r3 : the number of arguments
2447   //  -- r4 : the function to call (checked to be a JSFunction)
2448   // -----------------------------------
2449   __ AssertCallableFunction(r4);
2450 
2451   __ LoadTaggedPointerField(
2452       r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
2453 
2454   // Enter the context of the function; ToObject has to run in the function
2455   // context, and we also need to take the global proxy from the function
2456   // context in case of conversion.
2457   __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
2458                             r0);
2459   // We need to convert the receiver for non-native sloppy mode functions.
2460   Label done_convert;
2461   __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
2462   __ andi(r0, r6,
2463           Operand(SharedFunctionInfo::IsStrictBit::kMask |
2464                   SharedFunctionInfo::IsNativeBit::kMask));
2465   __ bne(&done_convert, cr0);
2466   {
2467     // ----------- S t a t e -------------
2468     //  -- r3 : the number of arguments
2469     //  -- r4 : the function to call (checked to be a JSFunction)
2470     //  -- r5 : the shared function info.
2471     //  -- cp : the function context.
2472     // -----------------------------------
2473 
2474     if (mode == ConvertReceiverMode::kNullOrUndefined) {
2475       // Patch receiver to global proxy.
2476       __ LoadGlobalProxy(r6);
2477     } else {
2478       Label convert_to_object, convert_receiver;
2479       __ LoadReceiver(r6, r3);
2480       __ JumpIfSmi(r6, &convert_to_object);
2481       STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2482       __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
2483       __ bge(&done_convert);
2484       if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2485         Label convert_global_proxy;
2486         __ JumpIfRoot(r6, RootIndex::kUndefinedValue, &convert_global_proxy);
2487         __ JumpIfNotRoot(r6, RootIndex::kNullValue, &convert_to_object);
2488         __ bind(&convert_global_proxy);
2489         {
2490           // Patch receiver to global proxy.
2491           __ LoadGlobalProxy(r6);
2492         }
2493         __ b(&convert_receiver);
2494       }
2495       __ bind(&convert_to_object);
2496       {
2497         // Convert receiver using ToObject.
2498         // TODO(bmeurer): Inline the allocation here to avoid building the frame
2499         // in the fast case? (fall back to AllocateInNewSpace?)
2500         FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2501         __ SmiTag(r3);
2502         __ Push(r3, r4);
2503         __ mr(r3, r6);
2504         __ Push(cp);
2505         __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2506                 RelocInfo::CODE_TARGET);
2507         __ Pop(cp);
2508         __ mr(r6, r3);
2509         __ Pop(r3, r4);
2510         __ SmiUntag(r3);
2511       }
2512       __ LoadTaggedPointerField(
2513           r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
2514       __ bind(&convert_receiver);
2515     }
2516     __ StoreReceiver(r6, r3, r7);
2517   }
2518   __ bind(&done_convert);
2519 
2520   // ----------- S t a t e -------------
2521   //  -- r3 : the number of arguments
2522   //  -- r4 : the function to call (checked to be a JSFunction)
2523   //  -- r5 : the shared function info.
2524   //  -- cp : the function context.
2525   // -----------------------------------
2526 
2527   __ LoadU16(
2528       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
2529   __ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
2530 }
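// Receiver conversion above, sketched (sloppy-mode ES semantics):
//
//   if (!is_strict(fn) && !is_native(fn)) {
//     receiver = args.receiver();
//     if (receiver == undefined || receiver == null) {
//       receiver = global_proxy(fn.context);
//     } else if (!is_js_receiver(receiver)) {
//       receiver = ToObject(receiver);  // May allocate; builds a frame.
//     }
//     args.set_receiver(receiver);
//   }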
2531 
2532 namespace {
2533 
2534 void Generate_PushBoundArguments(MacroAssembler* masm) {
2535   // ----------- S t a t e -------------
2536   //  -- r3 : the number of arguments
2537   //  -- r4 : target (checked to be a JSBoundFunction)
2538   //  -- r6 : new.target (only in case of [[Construct]])
2539   // -----------------------------------
2540 
2541   // Load [[BoundArguments]] into r5 and length of that into r7.
2542   Label no_bound_arguments;
2543   __ LoadTaggedPointerField(
2544       r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
2545   __ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
2546   __ beq(&no_bound_arguments, cr0);
2547   {
2548     // ----------- S t a t e -------------
2549     //  -- r3 : the number of arguments
2550     //  -- r4 : target (checked to be a JSBoundFunction)
2551     //  -- r5 : the [[BoundArguments]] (implemented as FixedArray)
2552     //  -- r6 : new.target (only in case of [[Construct]])
2553     //  -- r7 : the number of [[BoundArguments]]
2554     // -----------------------------------
2555 
2556     Register scratch = r9;
2557     // Reserve stack space for the [[BoundArguments]].
2558     {
2559       Label done;
2560       __ ShiftLeftU64(r10, r7, Operand(kSystemPointerSizeLog2));
2561       __ sub(r0, sp, r10);
2562       // Check the stack for overflow. We are not trying to catch interruptions
2563       // (i.e. debug break and preemption) here, so check the "real stack
2564       // limit".
2565       {
2566         __ LoadStackLimit(scratch, StackLimitKind::kRealStackLimit);
2567         __ CmpU64(r0, scratch);
2568       }
2569       __ bgt(&done);  // Signed comparison.
2570       {
2571         FrameScope scope(masm, StackFrame::MANUAL);
2572         __ EnterFrame(StackFrame::INTERNAL);
2573         __ CallRuntime(Runtime::kThrowStackOverflow);
2574       }
2575       __ bind(&done);
2576     }
2577 
2578     // Pop receiver.
2579     __ Pop(r8);
2580 
2581     // Push [[BoundArguments]].
2582     {
2583       Label loop, done;
2584       __ add(r3, r3, r7);  // Adjust effective number of arguments.
2585       __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2586       __ mtctr(r7);
2587 
2588       __ bind(&loop);
2589       __ subi(r7, r7, Operand(1));
2590       __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
2591       __ add(scratch, scratch, r5);
2592       __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
2593       __ Push(scratch);
2594       __ bdnz(&loop);
2595       __ bind(&done);
2596     }
2597 
2598     // Push receiver.
2599     __ Push(r8);
2600   }
2601   __ bind(&no_bound_arguments);
2602 }
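// Stack effect of Generate_PushBoundArguments, sketched:
//
//   receiver = pop();
//   for (int i = bound_argc - 1; i >= 0; --i) push(bound_args[i]);
//   push(receiver);
//   argc += bound_argc;
//
// i.e. the bound arguments are spliced in directly after the receiver,
// ahead of the arguments that were already pushed for the call.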
2603 
2604 }  // namespace
2605 
2606 // static
2607 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2608   // ----------- S t a t e -------------
2609   //  -- r3 : the number of arguments
2610   //  -- r4 : the function to call (checked to be a JSBoundFunction)
2611   // -----------------------------------
2612   __ AssertBoundFunction(r4);
2613 
2614   // Patch the receiver to [[BoundThis]].
2615   __ LoadAnyTaggedField(
2616       r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
2617   __ StoreReceiver(r6, r3, ip);
2618 
2619   // Push the [[BoundArguments]] onto the stack.
2620   Generate_PushBoundArguments(masm);
2621 
2622   // Call the [[BoundTargetFunction]] via the Call builtin.
2623   __ LoadTaggedPointerField(
2624       r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
2625   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2626           RelocInfo::CODE_TARGET);
2627 }
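
// In JS terms (illustrative): for bound = f.bind(thisArg, ...args), a call
// bound(...) replaces the receiver with bound.[[BoundThis]], splices the
// bound arguments in front of the call's own arguments, and re-enters the
// generic Call builtin with bound.[[BoundTargetFunction]] as the target,
// unwrapping nested bound functions one layer per pass.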
2628 
2629 // static
2630 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2631   // ----------- S t a t e -------------
2632   //  -- r3 : the number of arguments
2633   //  -- r4 : the target to call (can be any Object).
2634   // -----------------------------------
2635   Register argc = r3;
2636   Register target = r4;
2637   Register map = r7;
2638   Register instance_type = r8;
2639   DCHECK(!AreAliased(argc, target, map, instance_type));
2640 
2641   Label non_callable, class_constructor;
2642   __ JumpIfSmi(target, &non_callable);
2643   __ LoadMap(map, target);
2644   __ CompareInstanceTypeRange(map, instance_type,
2645                               FIRST_CALLABLE_JS_FUNCTION_TYPE,
2646                               LAST_CALLABLE_JS_FUNCTION_TYPE);
2647   __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2648           RelocInfo::CODE_TARGET, le);
2649   __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2650   __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2651           RelocInfo::CODE_TARGET, eq);
2652 
2653   // Check if target has a [[Call]] internal method.
2654   {
2655     Register flags = r7;
2656     __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2657     map = no_reg;
2658     __ TestBit(flags, Map::Bits1::IsCallableBit::kShift, r0);
2659     __ beq(&non_callable, cr0);
2660   }
2661 
2662   // Check if target is a proxy; if so, call the CallProxy builtin.
2663   __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
2664   __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
2665 
2666   // Check if target is a wrapped function; if so, call the
2667   // CallWrappedFunction builtin.
2668   __ cmpi(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
2669   __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2670           RelocInfo::CODE_TARGET, eq);
2671 
2672   // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2673   // Check that the function is not a "classConstructor".
2674   __ cmpi(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2675   __ beq(&class_constructor);
2676 
2677   // 2. Call to something else, which might have a [[Call]] internal method
2678   // (if not, we raise an exception).
2679   // Overwrite the original receiver with the (original) target.
2680   __ StoreReceiver(target, argc, r8);
2681   // Let the "call_as_function_delegate" take care of the rest.
2682   __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2683   __ Jump(masm->isolate()->builtins()->CallFunction(
2684               ConvertReceiverMode::kNotNullOrUndefined),
2685           RelocInfo::CODE_TARGET);
2686 
2687   // 3. Call to something that is not callable.
2688   __ bind(&non_callable);
2689   {
2690     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2691     __ Push(target);
2692     __ CallRuntime(Runtime::kThrowCalledNonCallable);
2693     __ Trap();  // Unreachable.
2694   }
2695 
2696   // 4. The function is a "classConstructor", need to raise an exception.
2697   __ bind(&class_constructor);
2698   {
2699     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2700     __ Push(target);
2701     __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2702     __ Trap();  // Unreachable.
2703   }
2704 }
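
// Dispatch summary for the builtin above (illustrative): callable
// JSFunctions go straight to CallFunction; bound functions, proxies and
// wrapped functions are routed to their dedicated builtins; class
// constructors throw, e.g.
//
//   class C {}
//   C();  // throws TypeError: class constructor invoked without 'new'
//
// and any remaining object with a [[Call]] internal method is handled by
// the CALL_AS_FUNCTION_DELEGATE; everything else reaches
// kThrowCalledNonCallable.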
2705 
2706 // static
2707 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2708   // ----------- S t a t e -------------
2709   //  -- r3 : the number of arguments
2710   //  -- r4 : the constructor to call (checked to be a JSFunction)
2711   //  -- r6 : the new target (checked to be a constructor)
2712   // -----------------------------------
2713   __ AssertConstructor(r4);
2714   __ AssertFunction(r4);
2715 
2716   // The calling convention for function-specific ConstructStubs requires
2717   // r5 to contain either an AllocationSite or undefined.
2718   __ LoadRoot(r5, RootIndex::kUndefinedValue);
2719 
2720   Label call_generic_stub;
2721 
2722   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2723   __ LoadTaggedPointerField(
2724       r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
2725   __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
2726   __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2727   __ and_(r7, r7, ip, SetRC);
2728   __ beq(&call_generic_stub, cr0);
2729 
2730   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2731           RelocInfo::CODE_TARGET);
2732 
2733   __ bind(&call_generic_stub);
2734   __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2735           RelocInfo::CODE_TARGET);
2736 }
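
// The ConstructAsBuiltinBit read above distinguishes functions whose
// construction is handled by a C++ builtin stub (JSBuiltinsConstructStub)
// from ordinary user-defined functions, which take the generic
// JSConstructStubGeneric path; only the flag word of the
// SharedFunctionInfo is consulted here.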
2737 
2738 // static
2739 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2740   // ----------- S t a t e -------------
2741   //  -- r3 : the number of arguments
2742   //  -- r4 : the function to call (checked to be a JSBoundFunction)
2743   //  -- r6 : the new target (checked to be a constructor)
2744   // -----------------------------------
2745   __ AssertConstructor(r4);
2746   __ AssertBoundFunction(r4);
2747 
2748   // Push the [[BoundArguments]] onto the stack.
2749   Generate_PushBoundArguments(masm);
2750 
2751   // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2752   Label skip;
2753   __ CompareTagged(r4, r6);
2754   __ bne(&skip);
2755   __ LoadTaggedPointerField(
2756       r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
2757   __ bind(&skip);
2758 
2759   // Construct the [[BoundTargetFunction]] via the Construct builtin.
2760   __ LoadTaggedPointerField(
2761       r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
2762   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2763 }
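
// Illustrative JS-level behaviour (assumed semantics): for
//
//   const B = f.bind(thisArg, a);
//   new B(b);
//
// the bound argument a is spliced in front of b, and because new.target
// equals B at this point, new.target is patched to f
// (B.[[BoundTargetFunction]]) before tail-calling Construct, as the
// [[Construct]] spec for bound functions requires.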
2764 
2765 // static
2766 void Builtins::Generate_Construct(MacroAssembler* masm) {
2767   // ----------- S t a t e -------------
2768   //  -- r3 : the number of arguments
2769   //  -- r4 : the constructor to call (can be any Object)
2770   //  -- r6 : the new target (either the same as the constructor or
2771   //          the JSFunction on which new was invoked initially)
2772   // -----------------------------------
2773   Register argc = r3;
2774   Register target = r4;
2775   Register map = r7;
2776   Register instance_type = r8;
2777   DCHECK(!AreAliased(argc, target, map, instance_type));
2778 
2779   // Check if target is a Smi.
2780   Label non_constructor, non_proxy;
2781   __ JumpIfSmi(target, &non_constructor);
2782 
2783   // Check if target has a [[Construct]] internal method.
2784   __ LoadTaggedPointerField(
2785       map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
2786   {
2787     Register flags = r5;
2788     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
2789     __ lbz(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2790     __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift, r0);
2791     __ beq(&non_constructor, cr0);
2792   }
2793 
2794   // Dispatch based on instance type.
2795   __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
2796                               LAST_JS_FUNCTION_TYPE);
2797   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2798           RelocInfo::CODE_TARGET, le);
2799 
2800   // Only dispatch to bound functions after checking whether they are
2801   // constructors.
2802   __ cmpi(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2803   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2804           RelocInfo::CODE_TARGET, eq);
2805 
2806   // Only dispatch to proxies after checking whether they are constructors.
2807   __ cmpi(instance_type, Operand(JS_PROXY_TYPE));
2808   __ bne(&non_proxy);
2809   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2810           RelocInfo::CODE_TARGET);
2811 
2812   // Called Construct on an exotic Object with a [[Construct]] internal method.
2813   __ bind(&non_proxy);
2814   {
2815     // Overwrite the original receiver with the (original) target.
2816     __ StoreReceiver(target, argc, r8);
2817     // Let the "call_as_constructor_delegate" take care of the rest.
2818     __ LoadNativeContextSlot(target,
2819                              Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2820     __ Jump(masm->isolate()->builtins()->CallFunction(),
2821             RelocInfo::CODE_TARGET);
2822   }
2823 
2824   // Called Construct on an Object that doesn't have a [[Construct]] internal
2825   // method.
2826   __ bind(&non_constructor);
2827   __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2828           RelocInfo::CODE_TARGET);
2829 }
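
// As with Generate_Call above, the non-constructor tail throws
// (illustrative):
//
//   new Math.max(1);  // TypeError: Math.max is not a constructor
//
// while exotic objects that are constructable but have no dedicated
// builtin are routed through the CALL_AS_CONSTRUCTOR_DELEGATE.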
2830 
2831 #if V8_ENABLE_WEBASSEMBLY
2832 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2833   // The function index was put in a register by the jump table trampoline.
2834   // Convert to Smi for the runtime call.
2835   __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2836 
2837   {
2838     HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2839     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2840 
2841     // Save all parameter registers (see wasm-linkage.h). They might be
2842     // overwritten in the runtime call below. We don't have any callee-saved
2843     // registers in wasm, so no need to store anything else.
2844     RegList gp_regs;
2845     for (Register gp_param_reg : wasm::kGpParamRegisters) {
2846       gp_regs.set(gp_param_reg);
2847     }
2848 
2849     DoubleRegList fp_regs;
2850     for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2851       fp_regs.set(fp_param_reg);
2852     }
2853 
2854     // List must match register numbers under kFpParamRegisters.
2855     constexpr Simd128RegList simd_regs = {v1, v2, v3, v4, v5, v6, v7, v8};
2856 
2857     CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2858     CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2859     CHECK_EQ(simd_regs.Count(), arraysize(wasm::kFpParamRegisters));
2860     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2861              gp_regs.Count());
2862     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2863              fp_regs.Count());
2864     CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2865              simd_regs.Count());
2866 
2867     __ MultiPush(gp_regs);
2868     __ MultiPushF64AndV128(fp_regs, simd_regs);
2869 
2870     // Push the Wasm instance for loading the jump table address after the
2871     // runtime call.
2872     __ Push(kWasmInstanceRegister);
2873 
2874     // Push the Wasm instance again as an explicit argument to the runtime
2875     // function.
2876     __ Push(kWasmInstanceRegister);
2877     // Push the function index as second argument.
2878     __ Push(kWasmCompileLazyFuncIndexRegister);
2879     // Initialize the JavaScript context with 0. CEntry will use it to
2880     // set the current context on the isolate.
2881     __ LoadSmiLiteral(cp, Smi::zero());
2882     __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2883     // The runtime function returns the jump table slot offset as a Smi. Use
2884     // that to compute the jump target in r11.
2885     __ Pop(kWasmInstanceRegister);
2886     __ LoadU64(
2887         r11,
2888         MemOperand(kWasmInstanceRegister,
2889                    WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag),
2890         r0);
2891     __ SmiUntag(kReturnRegister0);
2892     __ AddS64(r11, r11, kReturnRegister0);
2893     // r11 now holds the jump table slot where we want to jump to in the end.
2894 
2895     // Restore registers.
2896     __ MultiPopF64AndV128(fp_regs, simd_regs);
2897     __ MultiPop(gp_regs);
2898   }
2899 
2900   // Finally, jump to the jump table slot for the function.
2901   __ Jump(r11);
2902 }
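
// The jump-target computation above, in C-like pseudocode (assumed field
// layout):
//
//   Address target = instance->jump_table_start
//                  + Smi::ToInt(runtime_result);  // slot offset
//
// so later calls through the jump table slot reach the now-compiled wasm
// function directly instead of re-entering this lazy-compile stub.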
2903 
2904 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2905   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
2906   {
2907     FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2908 
2909     // Save all parameter registers. They might hold live values, we restore
2910     // them after the runtime call.
2911     __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2912     __ MultiPushF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
2913                            WasmDebugBreakFrameConstants::kPushedSimd128Regs);
2914 
2915     // Initialize the JavaScript context with 0. CEntry will use it to
2916     // set the current context on the isolate.
2917     __ LoadSmiLiteral(cp, Smi::zero());
2918     __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2919 
2920     // Restore registers.
2921     __ MultiPopF64AndV128(WasmDebugBreakFrameConstants::kPushedFpRegs,
2922                           WasmDebugBreakFrameConstants::kPushedSimd128Regs);
2923     __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2924   }
2925   __ Ret();
2926 }
2927 
2928 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2929   // TODO(v8:10701): Implement for this platform.
2930   __ Trap();
2931 }
2932 
2933 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2934   // TODO(v8:12191): Implement for this platform.
2935   __ Trap();
2936 }
2937 
2938 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2939   // TODO(v8:12191): Implement for this platform.
2940   __ Trap();
2941 }
2942 
2943 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2944   // TODO(v8:12191): Implement for this platform.
2945   __ Trap();
2946 }
2947 
2948 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2949   // Only needed on x64.
2950   __ Trap();
2951 }
2952 #endif  // V8_ENABLE_WEBASSEMBLY
2953 
2954 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2955                                SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2956                                bool builtin_exit_frame) {
2957   // Called from JavaScript; parameters are on stack as if calling JS function.
2958   // r3: number of arguments including receiver
2959   // r4: pointer to builtin function
2960   // fp: frame pointer  (restored after C call)
2961   // sp: stack pointer  (restored as callee's sp after C call)
2962   // cp: current context  (C callee-saved)
2963   //
2964   // If argv_mode == ArgvMode::kRegister:
2965   // r5: pointer to the first argument
2966 
2967   __ mr(r15, r4);
2968 
2969   if (argv_mode == ArgvMode::kRegister) {
2970     // Move argv into the correct register.
2971     __ mr(r4, r5);
2972   } else {
2973     // Compute the argv pointer.
2974     __ ShiftLeftU64(r4, r3, Operand(kSystemPointerSizeLog2));
2975     __ add(r4, r4, sp);
2976     __ subi(r4, r4, Operand(kSystemPointerSize));
2977   }
2978 
2979   // Enter the exit frame that transitions from JavaScript to C++.
2980   FrameScope scope(masm, StackFrame::MANUAL);
2981 
2982   // Need at least one extra slot for return address location.
2983   int arg_stack_space = 1;
2984 
2985   // Pass a buffer for the return value on the stack if necessary.
2986   bool needs_return_buffer =
2987       (result_size == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
2988   if (needs_return_buffer) {
2989     arg_stack_space += result_size;
2990   }
2991 
2992   __ EnterExitFrame(
2993       save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
2994       builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
2995 
2996   // Store a copy of argc in a callee-saved register for later.
2997   __ mr(r14, r3);
2998 
2999   // r3, r14: number of arguments including receiver  (C callee-saved)
3000   // r4: pointer to the first argument
3001   // r15: pointer to builtin function  (C callee-saved)
3002 
3003   // Result returned in registers or on the stack, depending on result size and ABI.
3004 
3005   Register isolate_reg = r5;
3006   if (needs_return_buffer) {
3007     // The return value is a non-scalar value.
3008     // Use frame storage reserved by calling function to pass return
3009     // buffer as implicit first argument.
3010     __ mr(r5, r4);
3011     __ mr(r4, r3);
3012     __ addi(r3, sp,
3013             Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
3014     isolate_reg = r6;
3015   }
3016 
3017   // Call C built-in.
3018   __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
3019 
3020   Register target = r15;
3021   __ StoreReturnAddressAndCall(target);
3022 
3023   // If return value is on the stack, pop it to registers.
3024   if (needs_return_buffer) {
3025     __ LoadU64(r4, MemOperand(r3, kSystemPointerSize));
3026     __ LoadU64(r3, MemOperand(r3));
3027   }
3028 
3029   // Check result for exception sentinel.
3030   Label exception_returned;
3031   __ CompareRoot(r3, RootIndex::kException);
3032   __ beq(&exception_returned);
3033 
3034   // Check that there is no pending exception, otherwise we
3035   // should have returned the exception sentinel.
3036   if (FLAG_debug_code) {
3037     Label okay;
3038     ExternalReference pending_exception_address = ExternalReference::Create(
3039         IsolateAddressId::kPendingExceptionAddress, masm->isolate());
3040 
3041     __ Move(r6, pending_exception_address);
3042     __ LoadU64(r6, MemOperand(r6));
3043     __ CompareRoot(r6, RootIndex::kTheHoleValue);
3044     // Cannot use Check here, as it attempts to generate a call into the runtime.
3045     __ beq(&okay);
3046     __ stop();
3047     __ bind(&okay);
3048   }
3049 
3050   // Exit C frame and return.
3051   // r3:r4: result
3052   // sp: stack pointer
3053   // fp: frame pointer
3054   Register argc = argv_mode == ArgvMode::kRegister
3055                       // We don't want to pop arguments so set argc to no_reg.
3056                       ? no_reg
3057                       // r14: still holds argc (callee-saved).
3058                       : r14;
3059   __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
3060   __ blr();
3061 
3062   // Handling of exception.
3063   __ bind(&exception_returned);
3064 
3065   ExternalReference pending_handler_context_address = ExternalReference::Create(
3066       IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3067   ExternalReference pending_handler_entrypoint_address =
3068       ExternalReference::Create(
3069           IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3070   ExternalReference pending_handler_constant_pool_address =
3071       ExternalReference::Create(
3072           IsolateAddressId::kPendingHandlerConstantPoolAddress,
3073           masm->isolate());
3074   ExternalReference pending_handler_fp_address = ExternalReference::Create(
3075       IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3076   ExternalReference pending_handler_sp_address = ExternalReference::Create(
3077       IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3078 
3079   // Ask the runtime for help to determine the handler. This will set r3 to
3080   // contain the current pending exception; don't clobber it.
3081   ExternalReference find_handler =
3082       ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
3083   {
3084     FrameScope scope(masm, StackFrame::MANUAL);
3085     __ PrepareCallCFunction(3, 0, r3);
3086     __ li(r3, Operand::Zero());
3087     __ li(r4, Operand::Zero());
3088     __ Move(r5, ExternalReference::isolate_address(masm->isolate()));
3089     __ CallCFunction(find_handler, 3);
3090   }
3091 
3092   // Retrieve the handler context, SP and FP.
3093   __ Move(cp, pending_handler_context_address);
3094   __ LoadU64(cp, MemOperand(cp));
3095   __ Move(sp, pending_handler_sp_address);
3096   __ LoadU64(sp, MemOperand(sp));
3097   __ Move(fp, pending_handler_fp_address);
3098   __ LoadU64(fp, MemOperand(fp));
3099 
3100   // If the handler is a JS frame, restore the context to the frame. Note
3101   // that the context (cp) will be 0 for non-JS frames.
3102   Label skip;
3103   __ cmpi(cp, Operand::Zero());
3104   __ beq(&skip);
3105   __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3106   __ bind(&skip);
3107 
3108   // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3109   {
3110     UseScratchRegisterScope temps(masm);
3111     __ Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
3112                                           masm->isolate()));
3113     __ mov(r0, Operand::Zero());
3114     __ StoreU64(r0, MemOperand(ip));
3115   }
3116 
3117   // Compute the handler entry address and jump to it.
3118   ConstantPoolUnavailableScope constant_pool_unavailable(masm);
3119   __ Move(ip, pending_handler_entrypoint_address);
3120   __ LoadU64(ip, MemOperand(ip));
3121   if (FLAG_enable_embedded_constant_pool) {
3122     __ Move(kConstantPoolRegister, pending_handler_constant_pool_address);
3123     __ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
3124   }
3125   __ Jump(ip);
3126 }
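
// Sketch of the return-buffer convention handled above (assumed,
// simplified): when result_size == 2 and the ABI cannot return an object
// pair in registers, the builtin is effectively invoked as
//
//   builtin(&return_buffer, argc, argv, isolate);
//
// with the buffer placed in the reserved exit-frame slots, and r3/r4 are
// reloaded from buffer[0]/buffer[1] after the call; otherwise the pair
// comes back directly in r3/r4.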
3127 
3128 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3129   Label out_of_range, only_low, negate, done, fastpath_done;
3130   Register result_reg = r3;
3131 
3132   HardAbortScope hard_abort(masm);  // Avoid calls to Abort.
3133 
3134   // Immediate values for this stub fit in instructions, so it's safe to use ip.
3135   Register scratch = GetRegisterThatIsNotOneOf(result_reg);
3136   Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
3137   Register scratch_high =
3138       GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
3139   DoubleRegister double_scratch = kScratchDoubleReg;
3140 
3141   __ Push(result_reg, scratch);
3142   // Account for saved regs.
3143   int argument_offset = 2 * kSystemPointerSize;
3144 
3145   // Load double input.
3146   __ lfd(double_scratch, MemOperand(sp, argument_offset));
3147 
3148   // Do fast-path convert from double to int.
3149   __ ConvertDoubleToInt64(double_scratch,
3150 #if !V8_TARGET_ARCH_PPC64
3151                           scratch,
3152 #endif
3153                           result_reg, d0);
3154 
3155 // Test for overflow
3156 #if V8_TARGET_ARCH_PPC64
3157   __ TestIfInt32(result_reg, r0);
3158 #else
3159   __ TestIfInt32(scratch, result_reg, r0);
3160 #endif
3161   __ beq(&fastpath_done);
3162 
3163   __ Push(scratch_high, scratch_low);
3164   // Account for saved regs.
3165   argument_offset += 2 * kSystemPointerSize;
3166 
3167   __ lwz(scratch_high,
3168          MemOperand(sp, argument_offset + Register::kExponentOffset));
3169   __ lwz(scratch_low,
3170          MemOperand(sp, argument_offset + Register::kMantissaOffset));
3171 
3172   __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
3173   // Load scratch with exponent - 1. This is faster than loading
3174   // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
3175   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
3176   __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
3177   // If the exponent is greater than or equal to 84, the 32 least
3178   // significant bits are zero (the 53-bit mantissa, implicit bit included,
3179   // is scaled by at least 2^32), so the result is 0.
3180   // Compare exponent with 84 (compare exponent - 1 with 83).
3181   __ cmpi(scratch, Operand(83));
3182   __ bge(&out_of_range);
3183 
3184   // If we reach this code, 31 <= exponent <= 83.
3185   // So, we don't have to handle cases where 0 <= exponent <= 20 for
3186   // which we would need to shift right the high part of the mantissa.
3187   // Scratch contains exponent - 1.
3188   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
3189   __ subfic(scratch, scratch, Operand(51));
3190   __ cmpi(scratch, Operand::Zero());
3191   __ ble(&only_low);
3192   // 21 <= exponent <= 51, shift scratch_low and scratch_high
3193   // to generate the result.
3194   __ srw(scratch_low, scratch_low, scratch);
3195   // Scratch contains: 52 - exponent.
3196   // We need: exponent - 20.
3197   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
3198   __ subfic(scratch, scratch, Operand(32));
3199   __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
3200   // Set the implicit 1 before the mantissa part in scratch_high.
3201   STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
3202   __ oris(result_reg, result_reg,
3203           Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
3204   __ ShiftLeftU32(r0, result_reg, scratch);
3205   __ orx(result_reg, scratch_low, r0);
3206   __ b(&negate);
3207 
3208   __ bind(&out_of_range);
3209   __ mov(result_reg, Operand::Zero());
3210   __ b(&done);
3211 
3212   __ bind(&only_low);
3213   // 52 <= exponent <= 83, shift only scratch_low.
3214   // On entry, scratch contains: 52 - exponent.
3215   __ neg(scratch, scratch);
3216   __ ShiftLeftU32(result_reg, scratch_low, scratch);
3217 
3218   __ bind(&negate);
3219   // If the input was positive, scratch_high ASR 31 equals 0 and
3220   // scratch_high LSR 31 equals 0.
3221   // New result = (result eor 0) + 0 = result.
3222   // If the input was negative, we have to negate the result:
3223   // scratch_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
3224   // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
3225   __ srawi(r0, scratch_high, 31);
3226 #if V8_TARGET_ARCH_PPC64
3227   __ srdi(r0, r0, Operand(32));
3228 #endif
3229   __ xor_(result_reg, result_reg, r0);
3230   __ srwi(r0, scratch_high, Operand(31));
3231   __ add(result_reg, result_reg, r0);
3232 
3233   __ bind(&done);
3234   __ Pop(scratch_high, scratch_low);
3235   // Account for saved regs.
3236   argument_offset -= 2 * kSystemPointerSize;
3237 
3238   __ bind(&fastpath_done);
3239   __ StoreU64(result_reg, MemOperand(sp, argument_offset));
3240   __ Pop(result_reg, scratch);
3241 
3242   __ Ret();
3243 }
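
// Worked example for the slow path above (illustrative): x = 2^33 + 7 has
// unbiased exponent 33, so scratch = 52 - 33 = 19 and the middle branch is
// taken. The low mantissa word (7 << 19) shifted right by 19 recovers 7;
// the high word (implicit 1 plus top mantissa bits) shifted left by
// 32 - 19 = 13 contributes only bits at 2^32 and above, which vanish in
// 32-bit arithmetic; the sign pass leaves a positive value unchanged.
// Result: 7 == (2^33 + 7) mod 2^32, the correct int32 truncation.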
3244 
3245 namespace {
3246 
3247 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3248   return ref0.address() - ref1.address();
3249 }
3250 
3251 
3252 // Calls an API function. Allocates a HandleScope, extracts the returned
3253 // value from the handle, and propagates exceptions. Restores the context.
3254 // stack_space is the space to be unwound on exit; it includes the JS call
3255 // arguments' space and the additional space allocated for the fast call.
3256 static void CallApiFunctionAndReturn(MacroAssembler* masm,
3257                                      Register function_address,
3258                                      ExternalReference thunk_ref,
3259                                      int stack_space,
3260                                      MemOperand* stack_space_operand,
3261                                      MemOperand return_value_operand) {
3262   Isolate* isolate = masm->isolate();
3263   ExternalReference next_address =
3264       ExternalReference::handle_scope_next_address(isolate);
3265   const int kNextOffset = 0;
3266   const int kLimitOffset = AddressOffset(
3267       ExternalReference::handle_scope_limit_address(isolate), next_address);
3268   const int kLevelOffset = AddressOffset(
3269       ExternalReference::handle_scope_level_address(isolate), next_address);
3270 
3271   // Additional parameter is the address of the actual callback.
3272   DCHECK(function_address == r4 || function_address == r5);
3273   Register scratch = r6;
3274 
3275   __ Move(scratch, ExternalReference::is_profiling_address(isolate));
3276   __ lbz(scratch, MemOperand(scratch, 0));
3277   __ cmpi(scratch, Operand::Zero());
3278 
3279   if (CpuFeatures::IsSupported(PPC_7_PLUS)) {
3280     __ Move(scratch, thunk_ref);
3281     __ isel(eq, scratch, function_address, scratch);
3282   } else {
3283     Label profiler_enabled, end_profiler_check;
3284     __ bne(&profiler_enabled);
3285     __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
3286     __ lwz(scratch, MemOperand(scratch, 0));
3287     __ cmpi(scratch, Operand::Zero());
3288     __ bne(&profiler_enabled);
3289     {
3290       // Call the api function directly.
3291       __ mr(scratch, function_address);
3292       __ b(&end_profiler_check);
3293     }
3294     __ bind(&profiler_enabled);
3295     {
3296       // Additional parameter is the address of the actual callback.
3297       __ Move(scratch, thunk_ref);
3298     }
3299     __ bind(&end_profiler_check);
3300   }
3301 
3302   // Allocate HandleScope in callee-save registers.
3303   // r17 - next_address
3304   // r14 - next_address->kNextOffset
3305   // r15 - next_address->kLimitOffset
3306   // r16 - next_address->kLevelOffset
3307   __ Move(r17, next_address);
3308   __ LoadU64(r14, MemOperand(r17, kNextOffset));
3309   __ LoadU64(r15, MemOperand(r17, kLimitOffset));
3310   __ lwz(r16, MemOperand(r17, kLevelOffset));
3311   __ addi(r16, r16, Operand(1));
3312   __ stw(r16, MemOperand(r17, kLevelOffset));
3313 
3314   __ StoreReturnAddressAndCall(scratch);
3315 
3316   Label promote_scheduled_exception;
3317   Label delete_allocated_handles;
3318   Label leave_exit_frame;
3319   Label return_value_loaded;
3320 
3321   // Load the value from ReturnValue.
3322   __ LoadU64(r3, return_value_operand);
3323   __ bind(&return_value_loaded);
3324   // No more valid handles (the result handle was the last one). Restore
3325   // previous handle scope.
3326   __ StoreU64(r14, MemOperand(r17, kNextOffset));
3327   if (FLAG_debug_code) {
3328     __ lwz(r4, MemOperand(r17, kLevelOffset));
3329     __ CmpS64(r4, r16);
3330     __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
3331   }
3332   __ subi(r16, r16, Operand(1));
3333   __ stw(r16, MemOperand(r17, kLevelOffset));
3334   __ LoadU64(r0, MemOperand(r17, kLimitOffset));
3335   __ CmpS64(r15, r0);
3336   __ bne(&delete_allocated_handles);
3337 
3338   // Leave the API exit frame.
3339   __ bind(&leave_exit_frame);
3340   // LeaveExitFrame expects unwind space to be in a register.
3341   if (stack_space_operand != nullptr) {
3342     __ LoadU64(r14, *stack_space_operand);
3343   } else {
3344     __ mov(r14, Operand(stack_space));
3345   }
3346   __ LeaveExitFrame(false, r14, stack_space_operand != nullptr);
3347 
3348   // Check if the function scheduled an exception.
3349   __ LoadRoot(r14, RootIndex::kTheHoleValue);
3350   __ Move(r15, ExternalReference::scheduled_exception_address(isolate));
3351   __ LoadU64(r15, MemOperand(r15));
3352   __ CmpS64(r14, r15);
3353   __ bne(&promote_scheduled_exception);
3354 
3355   __ blr();
3356 
3357   // Re-throw by promoting a scheduled exception.
3358   __ bind(&promote_scheduled_exception);
3359   __ TailCallRuntime(Runtime::kPromoteScheduledException);
3360 
3361   // HandleScope limit has changed. Delete allocated extensions.
3362   __ bind(&delete_allocated_handles);
3363   __ StoreU64(r15, MemOperand(r17, kLimitOffset));
3364   __ mr(r14, r3);
3365   __ PrepareCallCFunction(1, r15);
3366   __ Move(r3, ExternalReference::isolate_address(isolate));
3367   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3368   __ mr(r3, r14);
3369   __ b(&leave_exit_frame);
3370 }
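
// The register protocol above mirrors the C++ HandleScope (illustrative):
// on entry, save next/limit and increment level (kept in callee-saved
// r14/r15/r16 across the callback); on exit, restore next and decrement
// level, and if the callback changed the limit, call
// delete_handle_scope_extensions to free the extra handle blocks.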
3371 
3372 }  // namespace
3373 
3374 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3375   // ----------- S t a t e -------------
3376   //  -- cp                  : context
3377   //  -- r4                  : api function address
3378   //  -- r5                  : arguments count (not including the receiver)
3379   //  -- r6                  : call data
3380   //  -- r3                  : holder
3381   //  -- sp[0]               : receiver
3382   //  -- sp[8]               : first argument
3383   //  -- ...
3384   //  -- sp[(argc) * 8]      : last argument
3385   // -----------------------------------
3386 
3387   Register api_function_address = r4;
3388   Register argc = r5;
3389   Register call_data = r6;
3390   Register holder = r3;
3391   Register scratch = r7;
3392   DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
3393 
3394   using FCA = FunctionCallbackArguments;
3395 
3396   STATIC_ASSERT(FCA::kArgsLength == 6);
3397   STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3398   STATIC_ASSERT(FCA::kDataIndex == 4);
3399   STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3400   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3401   STATIC_ASSERT(FCA::kIsolateIndex == 1);
3402   STATIC_ASSERT(FCA::kHolderIndex == 0);
3403 
3404   // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3405   //
3406   // Target state:
3407   //   sp[0 * kSystemPointerSize]: kHolder
3408   //   sp[1 * kSystemPointerSize]: kIsolate
3409   //   sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3410   //   sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3411   //   sp[4 * kSystemPointerSize]: kData
3412   //   sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3413 
3414   // Reserve space on the stack.
3415   __ subi(sp, sp, Operand(FCA::kArgsLength * kSystemPointerSize));
3416 
3417   // kHolder.
3418   __ StoreU64(holder, MemOperand(sp, 0 * kSystemPointerSize));
3419 
3420   // kIsolate.
3421   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3422   __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3423 
3424   // kReturnValueDefaultValue and kReturnValue.
3425   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3426   __ StoreU64(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3427   __ StoreU64(scratch, MemOperand(sp, 3 * kSystemPointerSize));
3428 
3429   // kData.
3430   __ StoreU64(call_data, MemOperand(sp, 4 * kSystemPointerSize));
3431 
3432   // kNewTarget.
3433   __ StoreU64(scratch, MemOperand(sp, 5 * kSystemPointerSize));
3434 
3435   // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3436   // We use it below to set up the FunctionCallbackInfo object.
3437   __ mr(scratch, sp);
3438 
3439   // Allocate the v8::Arguments structure in the arguments' space since
3440   // it's not controlled by GC.
3441   // PPC LINUX ABI:
3442   //
3443   // Create 5 extra slots on stack:
3444   //    [0] space for DirectCEntryStub's LR save
3445   //    [1-3] FunctionCallbackInfo
3446   //    [4] number of bytes to drop from the stack after returning
3447   static constexpr int kApiStackSpace = 5;
3448   static constexpr bool kDontSaveDoubles = false;
3449 
3450   FrameScope frame_scope(masm, StackFrame::MANUAL);
3451   __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3452 
3453   // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3454   // Arguments are after the return address (pushed by EnterExitFrame()).
3455   __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
3456                                           kSystemPointerSize));
3457 
3458   // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3459   // on the stack).
3460   __ addi(scratch, scratch,
3461           Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3462   __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
3463                                           kSystemPointerSize));
3464 
3465   // FunctionCallbackInfo::length_.
3466   __ stw(argc,
3467          MemOperand(sp, (kStackFrameExtraParamSlot + 3) * kSystemPointerSize));
3468 
3469   // We also store the number of bytes to drop from the stack after returning
3470   // from the API function here.
3471   __ mov(scratch,
3472          Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
3473   __ ShiftLeftU64(ip, argc, Operand(kSystemPointerSizeLog2));
3474   __ add(scratch, scratch, ip);
3475   __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
3476                                           kSystemPointerSize));
3477 
3478   // v8::InvocationCallback's argument.
3479   __ addi(r3, sp,
3480           Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
3481 
3482   ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3483 
3484   // There are two stack slots above the arguments we constructed on the stack.
3485   // TODO(jgruber): Document what these arguments are.
3486   static constexpr int kStackSlotsAboveFCA = 2;
3487   MemOperand return_value_operand(
3488       fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3489 
3490   static constexpr int kUseStackSpaceOperand = 0;
3491   MemOperand stack_space_operand(
3492       sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
3493 
3494   AllowExternalCallThatCantCauseGC scope(masm);
3495   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3496                            kUseStackSpaceOperand, &stack_space_operand,
3497                            return_value_operand);
3498 }
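
// For reference, the callback reached through this stub has the public
// v8 API shape
//
//   void Callback(const v8::FunctionCallbackInfo<v8::Value>& info);
//
// where info.Length() reads the length_ field and info[i] indexes the
// values_ array set up above.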
3499 
3500 
3501 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3502   int arg0Slot = 0;
3503   int accessorInfoSlot = 0;
3504   int apiStackSpace = 0;
3505   // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3506   // name below the exit frame to make GC aware of them.
3507   STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3508   STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3509   STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3510   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3511   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3512   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3513   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3514   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3515 
3516   Register receiver = ApiGetterDescriptor::ReceiverRegister();
3517   Register holder = ApiGetterDescriptor::HolderRegister();
3518   Register callback = ApiGetterDescriptor::CallbackRegister();
3519   Register scratch = r7;
3520   DCHECK(!AreAliased(receiver, holder, callback, scratch));
3521 
3522   Register api_function_address = r5;
3523 
3524   __ push(receiver);
3525   // Push data from AccessorInfo.
3526   __ LoadAnyTaggedField(
3527       scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
3528   __ push(scratch);
3529   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3530   __ Push(scratch, scratch);
3531   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3532   __ Push(scratch, holder);
3533   __ Push(Smi::zero());  // should_throw_on_error -> false
3534   __ LoadTaggedPointerField(
3535       scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
3536   __ push(scratch);
3537 
3538   // v8::PropertyCallbackInfo::args_ array and name handle.
3539   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3540 
3541   // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
3542   __ mr(r3, sp);                               // r3 = Handle<Name>
3543   __ addi(r4, r3, Operand(1 * kSystemPointerSize));  // r4 = v8::PCI::args_
3544 
3545   // If ABI passes Handles (pointer-sized struct) in a register:
3546   //
3547   // Create 2 extra slots on stack:
3548   //    [0] space for DirectCEntryStub's LR save
3549   //    [1] AccessorInfo&
3550   //
3551   // Otherwise:
3552   //
3553   // Create 3 extra slots on stack:
3554   //    [0] space for DirectCEntryStub's LR save
3555   //    [1] copy of Handle (first arg)
3556   //    [2] AccessorInfo&
3557   if (ABI_PASSES_HANDLES_IN_REGS) {
3558     accessorInfoSlot = kStackFrameExtraParamSlot + 1;
3559     apiStackSpace = 2;
3560   } else {
3561     arg0Slot = kStackFrameExtraParamSlot + 1;
3562     accessorInfoSlot = arg0Slot + 1;
3563     apiStackSpace = 3;
3564   }
3565 
3566   FrameScope frame_scope(masm, StackFrame::MANUAL);
3567   __ EnterExitFrame(false, apiStackSpace);
3568 
3569   if (!ABI_PASSES_HANDLES_IN_REGS) {
3570     // pass 1st arg by reference
3571     __ StoreU64(r3, MemOperand(sp, arg0Slot * kSystemPointerSize));
3572     __ addi(r3, sp, Operand(arg0Slot * kSystemPointerSize));
3573   }
3574 
3575   // Create v8::PropertyCallbackInfo object on the stack and initialize
3576   // its args_ field.
3577   __ StoreU64(r4, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
3578   __ addi(r4, sp, Operand(accessorInfoSlot * kSystemPointerSize));
3579   // r4 = v8::PropertyCallbackInfo&
3580 
3581   ExternalReference thunk_ref =
3582       ExternalReference::invoke_accessor_getter_callback();
3583 
3584   __ LoadTaggedPointerField(
3585       scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset), r0);
3586   __ LoadU64(api_function_address,
3587              FieldMemOperand(scratch, Foreign::kForeignAddressOffset), r0);
3588 
3589   // +3 is to skip prolog, return address and name handle.
3590   MemOperand return_value_operand(
3591       fp,
3592       (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3593   MemOperand* const kUseStackSpaceConstant = nullptr;
3594   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3595                            kStackUnwindSpace, kUseStackSpaceConstant,
3596                            return_value_operand);
3597 }
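
// The getter invoked here has the public v8 API shape
//
//   void Getter(v8::Local<v8::Name> property,
//               const v8::PropertyCallbackInfo<v8::Value>& info);
//
// with the name handle passed as the first argument (by reference on ABIs
// that do not pass handles in registers) and info wrapping the args_
// array built above.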
3598 
3599 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3600   UseScratchRegisterScope temps(masm);
3601   Register temp2 = temps.Acquire();
3602   // Place the return address on the stack, making the call
3603   // GC safe. The RegExp backend also relies on this.
3604   __ mflr(r0);
3605   __ StoreU64(r0,
3606               MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
3607 
3608   if (ABI_USES_FUNCTION_DESCRIPTORS) {
3609     // AIX and big-endian PPC64 Linux use a function descriptor.
3610     __ LoadU64(ToRegister(ABI_TOC_REGISTER),
3611                MemOperand(temp2, kSystemPointerSize));
3612     __ LoadU64(temp2, MemOperand(temp2, 0));  // Instruction address
3613   }
3614 
3615   __ Call(temp2);  // Call the C++ function.
3616   __ LoadU64(r0,
3617              MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
3618   __ mtlr(r0);
3619   __ blr();
3620 }
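
// On ABIs that use function descriptors (e.g. AIX and ELFv1 big-endian
// PPC64 Linux) a C function pointer addresses a descriptor rather than
// code, roughly:
//
//   struct FuncDesc { void* entry; void* toc; void* env; };
//
// which is why the code above reloads the TOC register from the second
// descriptor word before calling through the entry word.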
3621 
3622 namespace {
3623 
3624 // This code tries to be close to ia32 code so that any changes can be
3625 // easily ported.
3626 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3627                                   DeoptimizeKind deopt_kind) {
3628   Isolate* isolate = masm->isolate();
3629 
3630   // Unlike on ARM we don't save all the registers, just the useful ones.
3631   // For the rest, there are gaps on the stack, so the offsets remain the same.
3632   const int kNumberOfRegisters = Register::kNumRegisters;
3633 
3634   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3635   RegList saved_regs = restored_regs | sp;
3636 
3637   const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3638 
3639   // Save all double registers before messing with them.
3640   __ subi(sp, sp, Operand(kDoubleRegsSize));
3641   const RegisterConfiguration* config = RegisterConfiguration::Default();
3642   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3643     int code = config->GetAllocatableDoubleCode(i);
3644     const DoubleRegister dreg = DoubleRegister::from_code(code);
3645     int offset = code * kDoubleSize;
3646     __ stfd(dreg, MemOperand(sp, offset));
3647   }
3648 
3649   // Push saved_regs (needed to populate FrameDescription::registers_).
3650   // Leave gaps for other registers.
3651   __ subi(sp, sp, Operand(kNumberOfRegisters * kSystemPointerSize));
3652   for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
3653     if ((saved_regs.bits() & (1 << i)) != 0) {
3654       __ StoreU64(ToRegister(i), MemOperand(sp, kSystemPointerSize * i));
3655     }
3656   }
3657   {
3658     UseScratchRegisterScope temps(masm);
3659     Register scratch = temps.Acquire();
3660     __ Move(scratch, ExternalReference::Create(
3661                          IsolateAddressId::kCEntryFPAddress, isolate));
3662     __ StoreU64(fp, MemOperand(scratch));
3663   }
3664   const int kSavedRegistersAreaSize =
3665       (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
3666 
3667   // Get the address of the location in the code object (r5) (return
3668   // address for lazy deoptimization) and compute the fp-to-sp delta in
3669   // register r6.
3670   __ mflr(r5);
3671   __ addi(r6, sp, Operand(kSavedRegistersAreaSize));
3672   __ sub(r6, fp, r6);
3673 
3674   // Allocate a new deoptimizer object.
3675   // Pass five arguments in r3 to r7.
3676   __ PrepareCallCFunction(5, r8);
3677   __ li(r3, Operand::Zero());
3678   Label context_check;
3679   __ LoadU64(r4,
3680              MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3681   __ JumpIfSmi(r4, &context_check);
3682   __ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3683   __ bind(&context_check);
3684   __ li(r4, Operand(static_cast<int>(deopt_kind)));
3685   // r5: code address or 0 already loaded.
3686   // r6: Fp-to-sp delta already loaded.
3687   __ Move(r7, ExternalReference::isolate_address(isolate));
3688   // Call Deoptimizer::New().
3689   {
3690     AllowExternalCallThatCantCauseGC scope(masm);
3691     __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3692   }
3693 
3694   // Preserve the "deoptimizer" object in register r3 and load the input
3695   // frame descriptor pointer into r4 (deoptimizer->input_).
3696   __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
3697 
3698   // Copy core registers into FrameDescription::registers_[kNumRegisters].
3699   DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3700   for (int i = 0; i < kNumberOfRegisters; i++) {
3701     int offset =
3702         (i * kSystemPointerSize) + FrameDescription::registers_offset();
3703     __ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize));
3704     __ StoreU64(r5, MemOperand(r4, offset));
3705   }
3706 
3707   int double_regs_offset = FrameDescription::double_registers_offset();
3708   // Copy double registers to
3709   // double_registers_[DoubleRegister::kNumRegisters]
3710   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3711     int code = config->GetAllocatableDoubleCode(i);
3712     int dst_offset = code * kDoubleSize + double_regs_offset;
3713     int src_offset =
3714         code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
3715     __ lfd(d0, MemOperand(sp, src_offset));
3716     __ stfd(d0, MemOperand(r4, dst_offset));
3717   }
3718 
3719   // Mark the stack as not iterable for the CPU profiler which won't be able to
3720   // walk the stack without the return address.
3721   {
3722     UseScratchRegisterScope temps(masm);
3723     Register is_iterable = temps.Acquire();
3724     Register zero = r7;
3725     __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3726     __ li(zero, Operand(0));
3727     __ stb(zero, MemOperand(is_iterable));
3728   }
3729 
3730   // Remove the saved registers from the stack.
3731   __ addi(sp, sp, Operand(kSavedRegistersAreaSize));
3732 
3733   // Compute a pointer to the unwinding limit in register r5; that is
3734   // the first stack slot not part of the input frame.
3735   __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
3736   __ add(r5, r5, sp);
3737 
3738   // Unwind the stack down to - but not including - the unwinding
3739   // limit and copy the contents of the activation frame to the input
3740   // frame description.
3741   __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
3742   Label pop_loop;
3743   Label pop_loop_header;
3744   __ b(&pop_loop_header);
3745   __ bind(&pop_loop);
3746   __ pop(r7);
3747   __ StoreU64(r7, MemOperand(r6, 0));
3748   __ addi(r6, r6, Operand(kSystemPointerSize));
3749   __ bind(&pop_loop_header);
3750   __ CmpS64(r5, sp);
3751   __ bne(&pop_loop);
3752 
3753   // Compute the output frame in the deoptimizer.
3754   __ push(r3);  // Preserve deoptimizer object across call.
3755   // r3: deoptimizer object; r4: scratch.
3756   __ PrepareCallCFunction(1, r4);
3757   // Call Deoptimizer::ComputeOutputFrames().
3758   {
3759     AllowExternalCallThatCantCauseGC scope(masm);
3760     __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3761   }
3762   __ pop(r3);  // Restore deoptimizer object (class Deoptimizer).
3763 
3764   __ LoadU64(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
3765 
3766   // Replace the current (input) frame with the output frames.
3767   Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3768   // Outer loop state: r7 = current "FrameDescription** output_",
3769   // r4 = one past the last FrameDescription**.
3770   __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
3771   __ LoadU64(r7,
3772              MemOperand(r3, Deoptimizer::output_offset()));  // r7 is output_.
3773   __ ShiftLeftU64(r4, r4, Operand(kSystemPointerSizeLog2));
3774   __ add(r4, r7, r4);
3775   __ b(&outer_loop_header);
3776 
3777   __ bind(&outer_push_loop);
3778   // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
3779   __ LoadU64(r5, MemOperand(r7, 0));  // output_[ix]
3780   __ LoadU64(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
3781   __ b(&inner_loop_header);
3782 
3783   __ bind(&inner_push_loop);
3784   __ addi(r6, r6, Operand(-sizeof(intptr_t)));
3785   __ add(r9, r5, r6);
3786   __ LoadU64(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
3787   __ push(r9);
3788 
3789   __ bind(&inner_loop_header);
3790   __ cmpi(r6, Operand::Zero());
3791   __ bne(&inner_push_loop);  // Loop until r6 reaches zero.
3792 
3793   __ addi(r7, r7, Operand(kSystemPointerSize));
3794   __ bind(&outer_loop_header);
3795   __ CmpS64(r7, r4);
3796   __ blt(&outer_push_loop);
3797 
3798   __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
3799   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3800     int code = config->GetAllocatableDoubleCode(i);
3801     const DoubleRegister dreg = DoubleRegister::from_code(code);
3802     int src_offset = code * kDoubleSize + double_regs_offset;
3803     __ lfd(dreg, MemOperand(r4, src_offset));
3804   }
3805 
3806   // Push the pc and the continuation from the last output frame.
3807   __ LoadU64(r9, MemOperand(r5, FrameDescription::pc_offset()));
3808   __ push(r9);
3809   __ LoadU64(r9, MemOperand(r5, FrameDescription::continuation_offset()));
3810   __ push(r9);
3811 
3812   // Restore the registers from the last output frame.
3813   {
3814     UseScratchRegisterScope temps(masm);
3815     Register scratch = temps.Acquire();
3816     DCHECK(!(restored_regs.has(scratch)));
3817     __ mr(scratch, r5);
3818     for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
3819       int offset =
3820           (i * kSystemPointerSize) + FrameDescription::registers_offset();
3821       if ((restored_regs.bits() & (1 << i)) != 0) {
3822         __ LoadU64(ToRegister(i), MemOperand(scratch, offset));
3823       }
3824     }
3825   }
3826 
3827   {
3828     UseScratchRegisterScope temps(masm);
3829     Register is_iterable = temps.Acquire();
3830     Register one = r7;
3831     __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3832     __ li(one, Operand(1));
3833     __ stb(one, MemOperand(is_iterable));
3834   }
3835 
3836   {
3837     UseScratchRegisterScope temps(masm);
3838     Register scratch = temps.Acquire();
3839     __ pop(scratch);  // get continuation, leave pc on stack
3840     __ pop(r0);
3841     __ mtlr(r0);
3842     __ Jump(scratch);
3843   }
3844 
3845   __ stop();
3846 }
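
// Overall flow (illustrative): the entry snapshots all registers and the
// optimized frame into the Deoptimizer's input FrameDescription, has
// Deoptimizer::ComputeOutputFrames() translate it into one or more
// unoptimized frames, rebuilds those frames on the stack word by word,
// and finally resumes at the pc and continuation of the last output
// frame.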
3847 
3848 }  // namespace
3849 
3850 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3851   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3852 }
3853 
3854 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3855   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3856 }
3857 
3858 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3859   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3860 }
3861 
3862 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3863     MacroAssembler* masm) {
3864   // Implemented on this platform in https://crrev.com/c/2695591.
3865   Generate_BaselineOrInterpreterEntry(masm, false);
3866 }
3867 
3868 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3869     MacroAssembler* masm) {
3870   // Implemented on this platform in https://crrev.com/c/2695591.
3871   Generate_BaselineOrInterpreterEntry(masm, true);
3872 }
3873 
3874 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3875     MacroAssembler* masm) {
3876   // Implemented on this platform in https://crrev.com/c/2800112.
3877   Generate_BaselineOrInterpreterEntry(masm, false, true);
3878 }
3879 
3880 #undef __
3881 }  // namespace internal
3882 }  // namespace v8
3883 
3884 #endif  // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
3885