1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_S390
6
7 #include "src/api/api-arguments.h"
8 #include "src/codegen/code-factory.h"
9 #include "src/codegen/interface-descriptors-inl.h"
10 // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
11 #include "src/codegen/macro-assembler-inl.h"
12 #include "src/codegen/register-configuration.h"
13 #include "src/debug/debug.h"
14 #include "src/deoptimizer/deoptimizer.h"
15 #include "src/execution/frame-constants.h"
16 #include "src/execution/frames.h"
17 #include "src/heap/heap-inl.h"
18 #include "src/logging/counters.h"
19 #include "src/objects/cell.h"
20 #include "src/objects/foreign.h"
21 #include "src/objects/heap-number.h"
22 #include "src/objects/js-generator.h"
23 #include "src/objects/smi.h"
24 #include "src/runtime/runtime.h"
25
26 #if V8_ENABLE_WEBASSEMBLY
27 #include "src/wasm/wasm-linkage.h"
28 #include "src/wasm/wasm-objects.h"
29 #endif // V8_ENABLE_WEBASSEMBLY
30
31 namespace v8 {
32 namespace internal {
33
34 #define __ ACCESS_MASM(masm)
35
36 namespace {
37
38 static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
39 Register scratch) {
40 DCHECK(!AreAliased(code, scratch));
41 // Verify that the code kind is baseline code via the CodeKind.
42 __ LoadU32(scratch, FieldMemOperand(code, Code::kFlagsOffset));
43 __ DecodeField<Code::KindField>(scratch);
44 __ CmpS64(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
45 __ Assert(eq, AbortReason::kExpectedBaselineData);
46 }
47
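// Expects |sfi_data| to hold the SharedFunctionInfo's function_data field.
// Branches to |is_baseline| when the data is baseline code; otherwise leaves
// the BytecodeArray (unwrapped from InterpreterData if necessary) in
// |sfi_data|.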
48 static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
49 Register sfi_data,
50 Register scratch1,
51 Label* is_baseline) {
52 USE(GetSharedFunctionInfoBytecodeOrBaseline);
53 ASM_CODE_COMMENT(masm);
54 Label done;
55 __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
56 if (FLAG_debug_code) {
57 Label not_baseline;
58 __ b(ne, &not_baseline);
59 AssertCodeIsBaseline(masm, sfi_data, scratch1);
60 __ beq(is_baseline);
61 __ bind(&not_baseline);
62 } else {
63 __ beq(is_baseline);
64 }
65 __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
66 __ bne(&done);
67 __ LoadTaggedPointerField(
68 sfi_data,
69 FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
70
71 __ bind(&done);
72 }
73
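// Computes the OSR entry point as |entry_address| + |offset|, places it in
// the return-address register (r14) and "returns" to it.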
74 void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
75 intptr_t offset) {
76 if (is_int20(offset)) {
77 __ lay(r14, MemOperand(entry_address, offset));
78 } else {
79 __ AddS64(r14, entry_address, Operand(offset));
80 }
81
82 // "return" to the OSR entry point of the function.
83 __ Ret();
84 }
85
86 void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
87 Register scratch) {
88 // Reset the bytecode age and OSR state (optimized to a single write).
89 static_assert(BytecodeArray::kOsrStateAndBytecodeAgeAreContiguous32Bits);
90 STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
91 __ mov(r0, Operand(0));
92 __ StoreU32(r0,
93 FieldMemOperand(bytecode_array,
94 BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
95 scratch);
96 }
97
98 // Restarts execution either at the current or next (in execution order)
99 // bytecode. If there is baseline code on the shared function info, converts an
100 // interpreter frame into a baseline frame and continues execution in baseline
101 // code. Otherwise execution continues with bytecode.
102 void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
103 bool next_bytecode,
104 bool is_osr = false) {
105 Label start;
106 __ bind(&start);
107
108 // Get function from the frame.
109 Register closure = r3;
110 __ LoadU64(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
111
112 // Get the Code object from the shared function info.
113 Register code_obj = r8;
114 __ LoadTaggedPointerField(
115 code_obj,
116 FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
117 __ LoadTaggedPointerField(
118 code_obj,
119 FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
120
121 // Check if we have baseline code. For OSR entry it is safe to assume we
122 // always have baseline code.
123 if (!is_osr) {
124 Label start_with_baseline;
125 __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
126 __ b(eq, &start_with_baseline);
127
128 // Start with bytecode as there is no baseline code.
129 Builtin builtin_id = next_bytecode
130 ? Builtin::kInterpreterEnterAtNextBytecode
131 : Builtin::kInterpreterEnterAtBytecode;
132 __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
133 RelocInfo::CODE_TARGET);
134
135 // Start with baseline code.
136 __ bind(&start_with_baseline);
137 } else if (FLAG_debug_code) {
138 __ CompareObjectType(code_obj, r5, r5, CODET_TYPE);
139 __ Assert(eq, AbortReason::kExpectedBaselineData);
140 }
141
142 if (FLAG_debug_code) {
143 AssertCodeIsBaseline(masm, code_obj, r5);
144 }
145
146 // Load the feedback vector.
147 Register feedback_vector = r4;
148 __ LoadTaggedPointerField(
149 feedback_vector,
150 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
151 __ LoadTaggedPointerField(
152 feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
153
154 Label install_baseline_code;
155 // Check if feedback vector is valid. If not, call prepare for baseline to
156 // allocate it.
157 __ CompareObjectType(feedback_vector, r5, r5, FEEDBACK_VECTOR_TYPE);
158 __ b(ne, &install_baseline_code);
159
160 // Save BytecodeOffset from the stack frame.
161 __ LoadU64(kInterpreterBytecodeOffsetRegister,
162 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
163 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
164 // Replace BytecodeOffset with the feedback vector.
165 __ StoreU64(feedback_vector,
166 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
167 feedback_vector = no_reg;
168
169 // Compute baseline pc for bytecode offset.
170 ExternalReference get_baseline_pc_extref;
171 if (next_bytecode || is_osr) {
172 get_baseline_pc_extref =
173 ExternalReference::baseline_pc_for_next_executed_bytecode();
174 } else {
175 get_baseline_pc_extref =
176 ExternalReference::baseline_pc_for_bytecode_offset();
177 }
178 Register get_baseline_pc = r5;
179 __ Move(get_baseline_pc, get_baseline_pc_extref);
180
181 // If the code deoptimizes during the implicit function entry stack interrupt
182 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
183 // not a valid bytecode offset.
184 // TODO(pthier): Investigate if it is feasible to handle this special case
185 // in TurboFan instead of here.
186 Label valid_bytecode_offset, function_entry_bytecode;
187 if (!is_osr) {
188 __ CmpS64(kInterpreterBytecodeOffsetRegister,
189 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
190 kFunctionEntryBytecodeOffset));
191 __ b(eq, &function_entry_bytecode);
192 }
193
194 __ SubS64(kInterpreterBytecodeOffsetRegister,
195 kInterpreterBytecodeOffsetRegister,
196 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
197
198 __ bind(&valid_bytecode_offset);
199 // Get bytecode array from the stack frame.
200 __ LoadU64(kInterpreterBytecodeArrayRegister,
201 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
202 // Save the accumulator register, since it's clobbered by the below call.
203 __ Push(kInterpreterAccumulatorRegister);
204 {
205 Register arg_reg_1 = r2;
206 Register arg_reg_2 = r3;
207 Register arg_reg_3 = r4;
208 __ mov(arg_reg_1, code_obj);
209 __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
210 __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
211 FrameScope scope(masm, StackFrame::INTERNAL);
212 __ PrepareCallCFunction(3, 0, r1);
213 __ CallCFunction(get_baseline_pc, 3, 0);
214 }
215 __ AddS64(code_obj, code_obj, kReturnRegister0);
216 __ Pop(kInterpreterAccumulatorRegister);
217
218 if (is_osr) {
219 // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
220 // disarm Sparkplug here.
221 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
222 Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
223 } else {
224 __ AddS64(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
225 __ Jump(code_obj);
226 }
227 __ Trap(); // Unreachable.
228
229 if (!is_osr) {
230 __ bind(&function_entry_bytecode);
231 // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
232 // address of the first bytecode.
233 __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
234 if (next_bytecode) {
235 __ Move(get_baseline_pc,
236 ExternalReference::baseline_pc_for_bytecode_offset());
237 }
238 __ b(&valid_bytecode_offset);
239 }
240
241 __ bind(&install_baseline_code);
242 {
243 FrameScope scope(masm, StackFrame::INTERNAL);
244 __ Push(kInterpreterAccumulatorRegister);
245 __ Push(closure);
246 __ CallRuntime(Runtime::kInstallBaselineCode, 1);
247 __ Pop(kInterpreterAccumulatorRegister);
248 }
249 // Retry from the start after installing baseline code.
250 __ b(&start);
251 }
252
253 enum class OsrSourceTier {
254 kInterpreter,
255 kBaseline,
256 };
257
258 void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
259 ASM_CODE_COMMENT(masm);
260 {
261 FrameScope scope(masm, StackFrame::INTERNAL);
262 __ CallRuntime(Runtime::kCompileOptimizedOSR);
263 }
264
265 // If the code object is null, just return to the caller.
266 Label jump_to_returned_code;
267 __ CmpSmiLiteral(r2, Smi::zero(), r0);
268 __ bne(&jump_to_returned_code);
269 __ Ret();
270
271 __ bind(&jump_to_returned_code);
272
273 if (source == OsrSourceTier::kInterpreter) {
274 // Drop the handler frame that is sitting on top of the actual
275 // JavaScript frame. This is the case when OSR is triggered from bytecode.
276 __ LeaveFrame(StackFrame::STUB);
277 }
278
279 // Load deoptimization data from the code object.
280 // <deopt_data> = <code>[#deoptimization_data_offset]
281 __ LoadTaggedPointerField(
282 r3,
283 FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
284
285 // Load the OSR entrypoint offset from the deoptimization data.
286 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
287 __ SmiUntagField(
288 r3, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(
289 DeoptimizationData::kOsrPcOffsetIndex)));
290
291 // Compute the target address = code_obj + header_size + osr_offset
292 // <entry_addr> = <code_obj> + #header_size + <osr_offset>
293 __ AddS64(r2, r3);
294 Generate_OSREntry(masm, r2, Code::kHeaderSize - kHeapObjectTag);
295 }
296
297 } // namespace
298
299 void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
300 __ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
301 __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
302 RelocInfo::CODE_TARGET);
303 }
304
305 static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
306 Runtime::FunctionId function_id) {
307 // ----------- S t a t e -------------
308 // -- r2 : actual argument count
309 // -- r3 : target function (preserved for callee)
310 // -- r5 : new target (preserved for callee)
311 // -----------------------------------
312 {
313 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
314 // Push a copy of the target function, the new target and the actual
315 // argument count.
316 // Push function as parameter to the runtime call.
317 __ SmiTag(kJavaScriptCallArgCountRegister);
318 __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
319 kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
320
321 __ CallRuntime(function_id, 1);
322 __ mov(r4, r2);
323
324 // Restore target function, new target and actual argument count.
325 __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
326 kJavaScriptCallArgCountRegister);
327 __ SmiUntag(kJavaScriptCallArgCountRegister);
328 }
329 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
330 __ JumpCodeObject(r4);
331 }
332
333 namespace {
334
335 enum class ArgumentsElementType {
336 kRaw, // Push arguments as they are.
337 kHandle // Dereference arguments before pushing.
338 };
339
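// Pushes |argc| (excluding the receiver slot(s)) arguments from |array| onto
// the stack, highest index first. With kHandle the array holds handles that
// are dereferenced before being pushed.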
340 void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
341 Register scratch,
342 ArgumentsElementType element_type) {
343 DCHECK(!AreAliased(array, argc, scratch));
344 Register counter = scratch;
345 Register value = ip;
346 Label loop, entry;
347 __ SubS64(counter, argc, Operand(kJSArgcReceiverSlots));
348 __ b(&entry);
349 __ bind(&loop);
350 __ ShiftLeftU64(value, counter, Operand(kSystemPointerSizeLog2));
351 __ LoadU64(value, MemOperand(array, value));
352 if (element_type == ArgumentsElementType::kHandle) {
353 __ LoadU64(value, MemOperand(value));
354 }
355 __ push(value);
356 __ bind(&entry);
357 __ SubS64(counter, counter, Operand(1));
358 __ bge(&loop);
359 }
360
361 void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
362 // ----------- S t a t e -------------
363 // -- r2 : number of arguments
364 // -- r3 : constructor function
365 // -- r5 : new target
366 // -- cp : context
367 // -- lr : return address
368 // -- sp[...]: constructor arguments
369 // -----------------------------------
370
371 Register scratch = r4;
372 Label stack_overflow;
373
374 __ StackOverflowCheck(r2, scratch, &stack_overflow);
375
376 // Enter a construct frame.
377 {
378 FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
379
380 // Preserve the incoming parameters on the stack.
381 __ SmiTag(r2);
382 __ Push(cp, r2);
383 __ SmiUntag(r2);
384
385 // TODO(victorgomes): When the arguments adaptor is completely removed, we
386 // should get the formal parameter count and copy the arguments in its
387 // correct position (including any undefined), instead of delaying this to
388 // InvokeFunction.
389
390 // Set up pointer to first argument (skip receiver).
391 __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
392 kSystemPointerSize));
393 // Copy arguments and receiver to the expression stack.
394 // r6: Pointer to start of arguments.
395 // r2: Number of arguments.
396 Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kRaw);
397
398 // The receiver for the builtin/api call.
399 __ PushRoot(RootIndex::kTheHoleValue);
400
401 // Call the function.
402 // r2: number of arguments
403 // r3: constructor function
404 // r5: new target
405
406 __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
407
408 // Restore context from the frame.
409 __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
410 // Restore smi-tagged arguments count from the frame.
411 __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
412
413 // Leave construct frame.
414 }
415 // Remove caller arguments from the stack and return.
416 __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
417 TurboAssembler::kCountIncludesReceiver);
418 __ Ret();
419
420 __ bind(&stack_overflow);
421 {
422 FrameScope scope(masm, StackFrame::INTERNAL);
423 __ CallRuntime(Runtime::kThrowStackOverflow);
424 __ bkpt(0); // Unreachable code.
425 }
426 }
427
428 } // namespace
429
430 // The construct stub for ES5 constructor functions and ES6 class constructors.
431 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
432 // ----------- S t a t e -------------
433 // -- r2: number of arguments (untagged)
434 // -- r3: constructor function
435 // -- r5: new target
436 // -- cp: context
437 // -- lr: return address
438 // -- sp[...]: constructor arguments
439 // -----------------------------------
440
441 FrameScope scope(masm, StackFrame::MANUAL);
442 // Enter a construct frame.
443 Label post_instantiation_deopt_entry, not_create_implicit_receiver;
444 __ EnterFrame(StackFrame::CONSTRUCT);
445
446 // Preserve the incoming parameters on the stack.
447 __ SmiTag(r2);
448 __ Push(cp, r2, r3);
449 __ PushRoot(RootIndex::kUndefinedValue);
450 __ Push(r5);
451
452 // ----------- S t a t e -------------
453 // -- sp[0*kSystemPointerSize]: new target
454 // -- sp[1*kSystemPointerSize]: padding
455 // -- r3 and sp[2*kSystemPointerSize]: constructor function
456 // -- sp[3*kSystemPointerSize]: number of arguments (tagged)
457 // -- sp[4*kSystemPointerSize]: context
458 // -----------------------------------
459
460 __ LoadTaggedPointerField(
461 r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
462 __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
463 __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
464 __ JumpIfIsInRange(
465 r6, static_cast<uint8_t>(FunctionKind::kDefaultDerivedConstructor),
466 static_cast<uint8_t>(FunctionKind::kDerivedConstructor),
467 &not_create_implicit_receiver);
468
469 // If not derived class constructor: Allocate the new receiver object.
470 __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, r6,
471 r7);
472 __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
473 __ b(&post_instantiation_deopt_entry);
474
475 // Else: use TheHoleValue as receiver for constructor call
476 __ bind(&not_create_implicit_receiver);
477 __ LoadRoot(r2, RootIndex::kTheHoleValue);
478
479 // ----------- S t a t e -------------
480 // -- r2: receiver
481 // -- Slot 4 / sp[0*kSystemPointerSize]: new target
482 // -- Slot 3 / sp[1*kSystemPointerSize]: padding
483 // -- Slot 2 / sp[2*kSystemPointerSize]: constructor function
484 // -- Slot 1 / sp[3*kSystemPointerSize]: number of arguments (tagged)
485 // -- Slot 0 / sp[4*kSystemPointerSize]: context
486 // -----------------------------------
487 // Deoptimizer enters here.
488 masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
489 masm->pc_offset());
490 __ bind(&post_instantiation_deopt_entry);
491
492 // Restore new target.
493 __ Pop(r5);
494
495 // Push the allocated receiver to the stack.
496 __ Push(r2);
497 // We need two copies because we may have to return the original one
498 // and the calling conventions dictate that the called function pops the
499 // receiver. The second copy is saved in r8 and pushed after the
500 // arguments, since r2 needs to hold the number of arguments until the
501 // function is invoked.
502 __ mov(r8, r2);
503
504 // Set up pointer to first argument (skip receiver).
505 __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
506 kSystemPointerSize));
507
508 // ----------- S t a t e -------------
509 // -- r5: new target
510 // -- sp[0*kSystemPointerSize]: implicit receiver
511 // -- sp[1*kSystemPointerSize]: implicit receiver
512 // -- sp[2*kSystemPointerSize]: padding
513 // -- sp[3*kSystemPointerSize]: constructor function
514 // -- sp[4*kSystemPointerSize]: number of arguments (tagged)
515 // -- sp[5*kSystemPointerSize]: context
516 // -----------------------------------
517
518 // Restore constructor function and argument count.
519 __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
520 __ LoadU64(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
521 __ SmiUntag(r2);
522
523 Label stack_overflow;
524 __ StackOverflowCheck(r2, r7, &stack_overflow);
525
526 // Copy arguments and receiver to the expression stack.
527 // r6: Pointer to start of arguments.
528 // r2: Number of arguments.
529 Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kRaw);
530
531 // Push implicit receiver.
532 __ Push(r8);
533
534 // Call the function.
535 __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
536
537 // ----------- S t a t e -------------
538 // -- r0: constructor result
539 // -- sp[0*kSystemPointerSize]: implicit receiver
540 // -- sp[1*kSystemPointerSize]: padding
541 // -- sp[2*kSystemPointerSize]: constructor function
542 // -- sp[3*kSystemPointerSize]: number of arguments
543 // -- sp[4*kSystemPointerSize]: context
544 // -----------------------------------
545
546 // Store offset of return address for deoptimizer.
547 masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
548 masm->pc_offset());
549
550 // If the result is an object (in the ECMA sense), we should get rid
551 // of the receiver and use the result; see ECMA-262 section 13.2.2-7
552 // on page 74.
553 Label use_receiver, do_throw, leave_and_return, check_receiver;
554
555 // If the result is undefined, we jump out to using the implicit receiver.
556 __ JumpIfNotRoot(r2, RootIndex::kUndefinedValue, &check_receiver);
557
558 // Otherwise we do a smi check and fall through to check if the return value
559 // is a valid receiver.
560
561 // Throw away the result of the constructor invocation and use the
562 // on-stack receiver as the result.
563 __ bind(&use_receiver);
564 __ LoadU64(r2, MemOperand(sp));
565 __ JumpIfRoot(r2, RootIndex::kTheHoleValue, &do_throw);
566
567 __ bind(&leave_and_return);
568 // Restore smi-tagged arguments count from the frame.
569 __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
570 // Leave construct frame.
571 __ LeaveFrame(StackFrame::CONSTRUCT);
572
573 // Remove caller arguments from the stack and return.
574 __ DropArguments(r3, TurboAssembler::kCountIsSmi,
575 TurboAssembler::kCountIncludesReceiver);
576 __ Ret();
577
578 __ bind(&check_receiver);
579 // If the result is a smi, it is *not* an object in the ECMA sense.
580 __ JumpIfSmi(r2, &use_receiver);
581
582 // If the type of the result (stored in its map) is less than
583 // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
584 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
585 __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
586 __ bge(&leave_and_return);
587 __ b(&use_receiver);
588
589 __ bind(&do_throw);
590 // Restore the context from the frame.
591 __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
592 __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
593 __ bkpt(0);
594
595 __ bind(&stack_overflow);
596 // Restore the context from the frame.
597 __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
598 __ CallRuntime(Runtime::kThrowStackOverflow);
599 // Unreachable code.
600 __ bkpt(0);
601 }
602
603 void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
604 Generate_JSBuiltinsConstructStubHelper(masm);
605 }
606
607 // static
608 void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
609 // ----------- S t a t e -------------
610 // -- r2 : the value to pass to the generator
611 // -- r3 : the JSGeneratorObject to resume
612 // -- lr : return address
613 // -----------------------------------
614 // Store input value into generator object.
615 __ StoreTaggedField(
616 r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
617 __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
618 kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
619 // Check that r3 is still valid, RecordWrite might have clobbered it.
620 __ AssertGeneratorObject(r3);
621
622 // Load suspended function and context.
623 __ LoadTaggedPointerField(
624 r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
625 __ LoadTaggedPointerField(cp,
626 FieldMemOperand(r6, JSFunction::kContextOffset));
627
628 // Flood function if we are stepping.
629 Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
630 Label stepping_prepared;
631 Register scratch = r7;
632
633 ExternalReference debug_hook =
634 ExternalReference::debug_hook_on_function_call_address(masm->isolate());
635 __ Move(scratch, debug_hook);
636 __ LoadS8(scratch, MemOperand(scratch));
637 __ CmpSmiLiteral(scratch, Smi::zero(), r0);
638 __ bne(&prepare_step_in_if_stepping);
639
640 // Flood function if we need to continue stepping in the suspended generator.
641
642 ExternalReference debug_suspended_generator =
643 ExternalReference::debug_suspended_generator_address(masm->isolate());
644
645 __ Move(scratch, debug_suspended_generator);
646 __ LoadU64(scratch, MemOperand(scratch));
647 __ CmpS64(scratch, r3);
648 __ beq(&prepare_step_in_suspended_generator);
649 __ bind(&stepping_prepared);
650
651 // Check the stack for overflow. We are not trying to catch interruptions
652 // (i.e. debug break and preemption) here, so check the "real stack limit".
653 Label stack_overflow;
654 __ LoadU64(scratch,
655 __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
656 __ CmpU64(sp, scratch);
657 __ blt(&stack_overflow);
658
659 // ----------- S t a t e -------------
660 // -- r3 : the JSGeneratorObject to resume
661 // -- r6 : generator function
662 // -- cp : generator context
663 // -- lr : return address
664 // -----------------------------------
665
666 // Copy the function arguments from the generator object's register file.
667 __ LoadTaggedPointerField(
668 r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
669 __ LoadU16(
670 r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
671 __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
672 __ LoadTaggedPointerField(
673 r4,
674 FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
675 {
676 Label done_loop, loop;
677 __ bind(&loop);
678 __ SubS64(r5, r5, Operand(1));
679 __ blt(&done_loop);
680 __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
681 __ la(scratch, MemOperand(r4, r1));
682 __ LoadAnyTaggedField(scratch,
683 FieldMemOperand(scratch, FixedArray::kHeaderSize));
684 __ Push(scratch);
685 __ b(&loop);
686 __ bind(&done_loop);
687
688 // Push receiver.
689 __ LoadAnyTaggedField(
690 scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
691 __ Push(scratch);
692 }
693
694 // Underlying function needs to have bytecode available.
695 if (FLAG_debug_code) {
696 Label is_baseline;
697 __ LoadTaggedPointerField(
698 r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
699 __ LoadTaggedPointerField(
700 r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
701 GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
702 __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
703 __ Assert(eq, AbortReason::kMissingBytecodeArray);
704 __ bind(&is_baseline);
705 }
706
707 // Resume (Ignition/TurboFan) generator object.
708 {
709 __ LoadTaggedPointerField(
710 r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
711 __ LoadS16(
712 r2,
713 FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
714 // We abuse new.target both to indicate that this is a resume call and to
715 // pass in the generator object. In ordinary calls, new.target is always
716 // undefined because generator functions are non-constructable.
717 __ mov(r5, r3);
718 __ mov(r3, r6);
719 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
720 __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
721 __ JumpCodeObject(r4);
722 }
723
724 __ bind(&prepare_step_in_if_stepping);
725 {
726 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
727 __ Push(r3, r6);
728 // Push hole as receiver since we do not use it for stepping.
729 __ PushRoot(RootIndex::kTheHoleValue);
730 __ CallRuntime(Runtime::kDebugOnFunctionCall);
731 __ Pop(r3);
732 __ LoadTaggedPointerField(
733 r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
734 }
735 __ b(&stepping_prepared);
736
737 __ bind(&prepare_step_in_suspended_generator);
738 {
739 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
740 __ Push(r3);
741 __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
742 __ Pop(r3);
743 __ LoadTaggedPointerField(
744 r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
745 }
746 __ b(&stepping_prepared);
747
748 __ bind(&stack_overflow);
749 {
750 FrameScope scope(masm, StackFrame::INTERNAL);
751 __ CallRuntime(Runtime::kThrowStackOverflow);
752 __ bkpt(0); // This should be unreachable.
753 }
754 }
755
756 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
757 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
758 __ push(r3);
759 __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
760 __ Trap(); // Unreachable.
761 }
762
763 namespace {
764
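// Number of bytes Generate_JSEntryVariant pushes below the incoming stack
// pointer before setting up fp; Generate_JSEntryTrampolineHelper adds it back
// to fp to locate the argv parameter passed on the stack by the C caller.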
765 constexpr int kPushedStackSpace =
766 (kNumCalleeSaved + 2) * kSystemPointerSize +
767 kNumCalleeSavedDoubles * kDoubleSize + 5 * kSystemPointerSize +
768 EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
769
770 // Called with the native C calling convention. The corresponding function
771 // signature is either:
772 //
773 // using JSEntryFunction = GeneratedCode<Address(
774 // Address root_register_value, Address new_target, Address target,
775 // Address receiver, intptr_t argc, Address** args)>;
776 // or
777 // using JSEntryFunction = GeneratedCode<Address(
778 // Address root_register_value, MicrotaskQueue* microtask_queue)>;
779 void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
780 Builtin entry_trampoline) {
781 // The register state is either:
782 // r2: root register value
783 // r3: code entry
784 // r4: function
785 // r5: receiver
786 // r6: argc
787 // [sp + 20 * kSystemPointerSize]: argv
788 // or
789 // r2: root_register_value
790 // r3: microtask_queue
791
792 Label invoke, handler_entry, exit;
793
794 int pushed_stack_space = 0;
795 {
796 NoRootArrayScope no_root_array(masm);
797
798 // saving floating point registers
799 // 64bit ABI requires f8 to f15 be saved
800 // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_zSeries.html
801 __ lay(sp, MemOperand(sp, -8 * kDoubleSize));
802 __ std(d8, MemOperand(sp));
803 __ std(d9, MemOperand(sp, 1 * kDoubleSize));
804 __ std(d10, MemOperand(sp, 2 * kDoubleSize));
805 __ std(d11, MemOperand(sp, 3 * kDoubleSize));
806 __ std(d12, MemOperand(sp, 4 * kDoubleSize));
807 __ std(d13, MemOperand(sp, 5 * kDoubleSize));
808 __ std(d14, MemOperand(sp, 6 * kDoubleSize));
809 __ std(d15, MemOperand(sp, 7 * kDoubleSize));
810 pushed_stack_space += kNumCalleeSavedDoubles * kDoubleSize;
811
812 // zLinux ABI
813 // Incoming parameters:
814 // r2: root register value
815 // r3: code entry
816 // r4: function
817 // r5: receiver
818 // r6: argc
819 // [sp + 20 * kSystemPointerSize]: argv
820 // Requires us to save the callee-preserved registers r6-r13
821 // General convention is to also save r14 (return addr) and
822 // sp/r15 as well in a single STM/STMG
823 __ lay(sp, MemOperand(sp, -10 * kSystemPointerSize));
824 __ StoreMultipleP(r6, sp, MemOperand(sp, 0));
825 pushed_stack_space += (kNumCalleeSaved + 2) * kSystemPointerSize;
826
827 // Initialize the root register.
828 // C calling convention. The first argument is passed in r2.
829 __ mov(kRootRegister, r2);
830 }
831
832 // save r6 to r0
833 __ mov(r0, r6);
834
835 // Push a frame with special values setup to mark it as an entry frame.
836 // Bad FP (-1)
837 // SMI Marker
838 // SMI Marker
839 // kCEntryFPAddress
840 // Frame type
841 __ lay(sp, MemOperand(sp, -5 * kSystemPointerSize));
842 pushed_stack_space += 5 * kSystemPointerSize;
843
844 // Push a bad frame pointer to fail if it is used.
845 __ mov(r9, Operand(-1));
846
847 __ mov(r8, Operand(StackFrame::TypeToMarker(type)));
848 __ mov(r7, Operand(StackFrame::TypeToMarker(type)));
849 // Save copies of the top frame descriptor on the stack.
850 __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
851 masm->isolate()));
852 __ LoadU64(r6, MemOperand(r1));
853 __ StoreMultipleP(r6, r9, MemOperand(sp, kSystemPointerSize));
854
855 // Clear c_entry_fp, now that we've pushed its previous value to the stack.
856 // If the c_entry_fp is not already zero and we don't clear it, the
857 // SafeStackFrameIterator will assume we are executing C++ and miss the JS
858 // frames on top.
859 __ mov(r6, Operand::Zero());
860 __ StoreU64(r6, MemOperand(r1));
861
862 Register scratch = r8;
863
864 // Set up frame pointer for the frame to be pushed.
865 // Need to add kSystemPointerSize, because sp already holds one extra
866 // slot for the frame type that is pushed later.
867 __ lay(fp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset +
868 kSystemPointerSize));
869 pushed_stack_space +=
870 EntryFrameConstants::kCallerFPOffset - kSystemPointerSize;
871
872 // restore r6
873 __ mov(r6, r0);
874
875 // If this is the outermost JS call, set js_entry_sp value.
876 Label non_outermost_js;
877 ExternalReference js_entry_sp =
878 ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
879 masm->isolate());
880 __ Move(r7, js_entry_sp);
881 __ LoadAndTestP(scratch, MemOperand(r7));
882 __ bne(&non_outermost_js, Label::kNear);
883 __ StoreU64(fp, MemOperand(r7));
884 __ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
885 Label cont;
886 __ b(&cont, Label::kNear);
887 __ bind(&non_outermost_js);
888 __ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
889
890 __ bind(&cont);
891 __ StoreU64(scratch, MemOperand(sp)); // frame-type
892
893 // Jump to a faked try block that does the invoke, with a faked catch
894 // block that sets the pending exception.
895 __ b(&invoke, Label::kNear);
896
897 __ bind(&handler_entry);
898
899 // Store the current pc as the handler offset. It's used later to create the
900 // handler table.
901 masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
902
903 // Caught exception: Store result (exception) in the pending exception
904 // field in the JSEnv and return a failure sentinel. Coming in here the
905 // fp will be invalid because the PushStackHandler below sets it to 0 to
906 // signal the existence of the JSEntry frame.
907 __ Move(scratch,
908 ExternalReference::Create(IsolateAddressId::kPendingExceptionAddress,
909 masm->isolate()));
910
911 __ StoreU64(r2, MemOperand(scratch));
912 __ LoadRoot(r2, RootIndex::kException);
913 __ b(&exit, Label::kNear);
914
915 // Invoke: Link this frame into the handler chain.
916 __ bind(&invoke);
917 // Must preserve r2-r6.
918 __ PushStackHandler();
919 // If an exception not caught by another handler occurs, this handler
920 // returns control to the code after the b(&invoke) above, which
921 // restores all kCalleeSaved registers (including cp and fp) to their
922 // saved values before returning a failure to C.
923
924 // Invoke the function by calling through JS entry trampoline builtin.
925 // Notice that we cannot store a reference to the trampoline code directly in
926 // this stub, because runtime stubs are not traversed when doing GC.
927
928 // Invoke the function by calling through JS entry trampoline builtin and
929 // pop the faked function when we return.
930 Handle<Code> trampoline_code =
931 masm->isolate()->builtins()->code_handle(entry_trampoline);
932 USE(pushed_stack_space);
933 DCHECK_EQ(kPushedStackSpace, pushed_stack_space);
934 __ Call(trampoline_code, RelocInfo::CODE_TARGET);
935
936 // Unlink this frame from the handler chain.
937 __ PopStackHandler();
938 __ bind(&exit); // r2 holds result
939
940 // Check if the current stack frame is marked as the outermost JS frame.
941 Label non_outermost_js_2;
942 __ pop(r7);
943 __ CmpS64(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
944 __ bne(&non_outermost_js_2, Label::kNear);
945 __ mov(scratch, Operand::Zero());
946 __ Move(r7, js_entry_sp);
947 __ StoreU64(scratch, MemOperand(r7));
948 __ bind(&non_outermost_js_2);
949
950 // Restore the top frame descriptors from the stack.
951 __ pop(r5);
952 __ Move(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
953 masm->isolate()));
954 __ StoreU64(r5, MemOperand(scratch));
955
956 // Reset the stack to the callee saved registers.
957 __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset));
958
959 // Reload callee-saved preserved regs, return address reg (r14) and sp
960 __ LoadMultipleP(r6, sp, MemOperand(sp, 0));
961 __ la(sp, MemOperand(sp, 10 * kSystemPointerSize));
962
963 // Restore floating point registers.
964 #if V8_TARGET_ARCH_S390X
965 // 64bit ABI requires f8 to f15 be saved
966 __ ld(d8, MemOperand(sp));
967 __ ld(d9, MemOperand(sp, 1 * kDoubleSize));
968 __ ld(d10, MemOperand(sp, 2 * kDoubleSize));
969 __ ld(d11, MemOperand(sp, 3 * kDoubleSize));
970 __ ld(d12, MemOperand(sp, 4 * kDoubleSize));
971 __ ld(d13, MemOperand(sp, 5 * kDoubleSize));
972 __ ld(d14, MemOperand(sp, 6 * kDoubleSize));
973 __ ld(d15, MemOperand(sp, 7 * kDoubleSize));
974 __ la(sp, MemOperand(sp, 8 * kDoubleSize));
975 #else
976 // 31bit ABI requires you to store f4 and f6:
977 // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417
978 __ ld(d4, MemOperand(sp));
979 __ ld(d6, MemOperand(sp, kDoubleSize));
980 __ la(sp, MemOperand(sp, 2 * kDoubleSize));
981 #endif
982
983 __ b(r14);
984 }
985
986 } // namespace
987
988 void Builtins::Generate_JSEntry(MacroAssembler* masm) {
989 Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
990 }
991
992 void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
993 Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
994 Builtin::kJSConstructEntryTrampoline);
995 }
996
997 void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
998 Generate_JSEntryVariant(masm, StackFrame::ENTRY,
999 Builtin::kRunMicrotasksTrampoline);
1000 }
1001
1002 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
1003 bool is_construct) {
1004 // Called from Generate_JS_Entry
1005 // r3: new.target
1006 // r4: function
1007 // r5: receiver
1008 // r6: argc
1009 // [fp + kPushedStackSpace + 20 * kSystemPointerSize]: argv
1010 // r0,r2,r7-r9, cp may be clobbered
1011
1012 __ mov(r2, r6);
1013 // Load argv from the stack.
1014 __ LoadU64(
1015 r6, MemOperand(fp, kPushedStackSpace + EntryFrameConstants::kArgvOffset));
1016
1017 // r2: argc
1018 // r3: new.target
1019 // r4: function
1020 // r5: receiver
1021 // r6: argv
1022
1023 // Enter an internal frame.
1024 {
1025 // FrameScope ends up calling MacroAssembler::EnterFrame here
1026 FrameScope scope(masm, StackFrame::INTERNAL);
1027
1028 // Setup the context (we need to use the caller context from the isolate).
1029 ExternalReference context_address = ExternalReference::Create(
1030 IsolateAddressId::kContextAddress, masm->isolate());
1031 __ Move(cp, context_address);
1032 __ LoadU64(cp, MemOperand(cp));
1033
1034 // Push the function
1035 __ Push(r4);
1036
1037 // Check if we have enough stack space to push all arguments.
1038 Label enough_stack_space, stack_overflow;
1039 __ mov(r7, r2);
1040 __ StackOverflowCheck(r7, r1, &stack_overflow);
1041 __ b(&enough_stack_space);
1042 __ bind(&stack_overflow);
1043 __ CallRuntime(Runtime::kThrowStackOverflow);
1044 // Unreachable code.
1045 __ bkpt(0);
1046
1047 __ bind(&enough_stack_space);
1048
1049 // Copy arguments to the stack from argv to sp.
1050 // The arguments are actually placed in reverse order on sp
1051 // compared to argv (i.e. arg1 is highest memory in sp).
1052 // r2: argc
1053 // r3: new.target
1054 // r4: function
1055 // r5: receiver
1056 // r6: argv, i.e. points to first arg
1057 // r1: scratch reg used by Generate_PushArguments
1059 Generate_PushArguments(masm, r6, r2, r1, ArgumentsElementType::kHandle);
1060
1061 // Push the receiver.
1062 __ Push(r5);
1063
1064 // Setup new.target, argc and function.
1065 __ mov(r5, r3);
1066 __ mov(r3, r4);
1067 // r2: argc
1068 // r3: function
1069 // r5: new.target
1070
1071 // Initialize all JavaScript callee-saved registers, since they will be seen
1072 // by the garbage collector as part of handlers.
1073 __ LoadRoot(r4, RootIndex::kUndefinedValue);
1074 __ mov(r6, r4);
1075 __ mov(r7, r6);
1076 __ mov(r8, r6);
1077 __ mov(r9, r6);
1078
1079 // Invoke the code.
1080 Handle<Code> builtin = is_construct
1081 ? BUILTIN_CODE(masm->isolate(), Construct)
1082 : masm->isolate()->builtins()->Call();
1083 __ Call(builtin, RelocInfo::CODE_TARGET);
1084
1085 // Exit the JS frame and remove the parameters (except function), and
1086 // return.
1087 }
1088 __ b(r14);
1089
1090 // r2: result
1091 }
1092
1093 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
1094 Generate_JSEntryTrampolineHelper(masm, false);
1095 }
1096
1097 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
1098 Generate_JSEntryTrampolineHelper(masm, true);
1099 }
1100
1101 void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
1102 // This expects two C++ function parameters passed by Invoke() in
1103 // execution.cc.
1104 // r2: root_register_value
1105 // r3: microtask_queue
1106
1107 __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), r3);
1108 __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
1109 }
1110
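// Stores |optimized_code| in the closure's code field and records the write
// with the write barrier.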
1111 static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
1112 Register optimized_code,
1113 Register closure,
1114 Register scratch1,
1115 Register slot_address) {
1116 DCHECK(!AreAliased(optimized_code, closure, scratch1, slot_address));
1117 DCHECK_EQ(closure, kJSFunctionRegister);
1118 DCHECK(!AreAliased(optimized_code, closure));
1119 // Store code entry in the closure.
1120 __ StoreTaggedField(optimized_code,
1121 FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
1122 // Write barrier clobbers scratch1 below.
1123 Register value = scratch1;
1124 __ mov(value, optimized_code);
1125
1126 __ RecordWriteField(closure, JSFunction::kCodeOffset, value, slot_address,
1127 kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
1128 RememberedSetAction::kOmit, SmiCheck::kOmit);
1129 }
1130
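// Drops the interpreter frame and removes the arguments from the stack, using
// the larger of the formal and actual parameter counts.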
1131 static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
1132 Register scratch2) {
1133 Register params_size = scratch1;
1134 // Get the size of the formal parameters + receiver (in bytes).
1135 __ LoadU64(params_size,
1136 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1137 __ LoadU32(params_size,
1138 FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
1139
1140 Register actual_params_size = scratch2;
1141 // Compute the size of the actual parameters + receiver (in bytes).
1142 __ LoadU64(actual_params_size,
1143 MemOperand(fp, StandardFrameConstants::kArgCOffset));
1144 __ ShiftLeftU64(actual_params_size, actual_params_size,
1145 Operand(kSystemPointerSizeLog2));
1146
1147 // If actual is bigger than formal, then we should use it to free up the stack
1148 // arguments.
1149 Label corrected_args_count;
1150 __ CmpS64(params_size, actual_params_size);
1151 __ bge(&corrected_args_count);
1152 __ mov(params_size, actual_params_size);
1153 __ bind(&corrected_args_count);
1154
1155 // Leave the frame (also dropping the register file).
1156 __ LeaveFrame(StackFrame::INTERPRETED);
1157
1158 __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
1159 TurboAssembler::kCountIncludesReceiver);
1160 }
1161
1162 // Tail-call |function_id| if |actual_state| == |expected_state|
1163 static void TailCallRuntimeIfStateEquals(MacroAssembler* masm,
1164 Register actual_state,
1165 TieringState expected_state,
1166 Runtime::FunctionId function_id) {
1167 Label no_match;
1168 __ CmpS64(actual_state, Operand(static_cast<int>(expected_state)));
1169 __ bne(&no_match);
1170 GenerateTailCallToReturnedCode(masm, function_id);
1171 __ bind(&no_match);
1172 }
1173
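// Installs |optimized_code_entry| in the closure and tail-calls it, or
// tail-calls Runtime::kHealOptimizedCodeSlot if the code has been cleared or
// is marked for deoptimization.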
1174 static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
1175 Register optimized_code_entry,
1176 Register scratch) {
1177 // ----------- S t a t e -------------
1178 // -- r2 : actual argument count
1179 // -- r5 : new target (preserved for callee if needed, and caller)
1180 // -- r3 : target function (preserved for callee if needed, and caller)
1181 // -----------------------------------
1182 DCHECK(!AreAliased(r3, r5, optimized_code_entry, scratch));
1183
1184 Register closure = r3;
1185 Label heal_optimized_code_slot;
1186
1187 // If the optimized code is cleared, go to runtime to update the optimization
1188 // marker field.
1189 __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
1190 &heal_optimized_code_slot);
1191
1192 // Check if the optimized code is marked for deopt. If it is, call the
1193 // runtime to clear it.
1194 __ LoadTaggedPointerField(
1195 scratch,
1196 FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
1197 __ LoadS32(scratch, FieldMemOperand(
1198 scratch, CodeDataContainer::kKindSpecificFlagsOffset));
1199 __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
1200 __ bne(&heal_optimized_code_slot);
1201
1202 // Optimized code is good, get it into the closure and link the closure
1203 // into the optimized functions list, then tail call the optimized code.
1204 ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure,
1205 scratch, r7);
1206 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1207 __ LoadCodeObjectEntry(r4, optimized_code_entry);
1208 __ Jump(r4);
1209
1210 // Optimized code slot contains deoptimized code or code is cleared and
1211 // optimized code marker isn't updated. Evict the code, update the marker
1212 // and re-enter the closure's code.
1213 __ bind(&heal_optimized_code_slot);
1214 GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
1215 }
1216
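// Tail-calls the Runtime::kCompileTurbofan_* function matching the requested
// |tiering_state|; any other state is unexpected and falls through to stop().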
1217 static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
1218 Register tiering_state) {
1219 // ----------- S t a t e -------------
1220 // -- r2 : actual argument count
1221 // -- r5 : new target (preserved for callee if needed, and caller)
1222 // -- r3 : target function (preserved for callee if needed, and caller)
1223 // -- feedback vector (preserved for caller if needed)
1224 // -- tiering_state : a int32 containing a non-zero optimization
1225 // marker.
1226 // -----------------------------------
1227 DCHECK(!AreAliased(feedback_vector, r3, r5, tiering_state));
1228
1229 TailCallRuntimeIfStateEquals(masm, tiering_state,
1230 TieringState::kRequestTurbofan_Synchronous,
1231 Runtime::kCompileTurbofan_Synchronous);
1232 TailCallRuntimeIfStateEquals(masm, tiering_state,
1233 TieringState::kRequestTurbofan_Concurrent,
1234 Runtime::kCompileTurbofan_Concurrent);
1235
1236 __ stop();
1237 }
1238
1239 // Advance the current bytecode offset. This simulates what all bytecode
1240 // handlers do upon completion of the underlying operation. Will bail out to a
1241 // label if the bytecode (without prefix) is a return bytecode. Will not advance
1242 // the bytecode offset if the current bytecode is a JumpLoop, instead just
1243 // re-executing the JumpLoop to jump to the correct bytecode.
1244 static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
1245 Register bytecode_array,
1246 Register bytecode_offset,
1247 Register bytecode, Register scratch1,
1248 Register scratch2, Label* if_return) {
1249 Register bytecode_size_table = scratch1;
1250 Register scratch3 = bytecode;
1251
1252 // The bytecode offset value will be increased by one in wide and extra wide
1253 // cases. In the case of a wide or extra wide JumpLoop bytecode, we will
1254 // restore the original bytecode offset. In order to simplify the code, we
1255 // keep a backup of it.
1256 Register original_bytecode_offset = scratch2;
1257 DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
1258 bytecode, original_bytecode_offset));
1259 __ Move(bytecode_size_table,
1260 ExternalReference::bytecode_size_table_address());
1261 __ Move(original_bytecode_offset, bytecode_offset);
1262
1263 // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
1264 Label process_bytecode, extra_wide;
1265 STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
1266 STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
1267 STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
1268 STATIC_ASSERT(3 ==
1269 static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
1270 __ CmpS64(bytecode, Operand(0x3));
1271 __ bgt(&process_bytecode);
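// Bit 0 distinguishes the Wide prefixes (kWide, kDebugBreakWide) from the
// ExtraWide prefixes (kExtraWide, kDebugBreakExtraWide).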
1272 __ tmll(bytecode, Operand(0x1));
1273 __ bne(&extra_wide);
1274
1275 // Load the next bytecode and update table to the wide scaled table.
1276 __ AddS64(bytecode_offset, bytecode_offset, Operand(1));
1277 __ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
1278 __ AddS64(bytecode_size_table, bytecode_size_table,
1279 Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
1280 __ b(&process_bytecode);
1281
1282 __ bind(&extra_wide);
1283 // Load the next bytecode and update table to the extra wide scaled table.
1284 __ AddS64(bytecode_offset, bytecode_offset, Operand(1));
1285 __ LoadU8(bytecode, MemOperand(bytecode_array, bytecode_offset));
1286 __ AddS64(bytecode_size_table, bytecode_size_table,
1287 Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
1288
1289 // Load the size of the current bytecode.
1290 __ bind(&process_bytecode);
1291
1292 // Bailout to the return label if this is a return bytecode.
1293 #define JUMP_IF_EQUAL(NAME) \
1294 __ CmpS64(bytecode, \
1295 Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
1296 __ beq(if_return);
1297 RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
1298 #undef JUMP_IF_EQUAL
1299
1300 // If this is a JumpLoop, re-execute it to perform the jump to the beginning
1301 // of the loop.
1302 Label end, not_jump_loop;
1303 __ CmpS64(bytecode,
1304 Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
1305 __ bne(&not_jump_loop);
1306 // We need to restore the original bytecode_offset since we might have
1307 // increased it to skip the wide / extra-wide prefix bytecode.
1308 __ Move(bytecode_offset, original_bytecode_offset);
1309 __ b(&end);
1310
1311 __ bind(&not_jump_loop);
1312 // Otherwise, load the size of the current bytecode and advance the offset.
1313 __ LoadU8(scratch3, MemOperand(bytecode_size_table, bytecode));
1314 __ AddS64(bytecode_offset, bytecode_offset, scratch3);
1315
1316 __ bind(&end);
1317 }
1318
1319 static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
1320 MacroAssembler* masm, Register optimization_state,
1321 Register feedback_vector) {
1322 DCHECK(!AreAliased(optimization_state, feedback_vector));
1323 Label maybe_has_optimized_code;
1324 // Check if optimized code is available
1325 __ TestBitMask(optimization_state,
1326 FeedbackVector::kTieringStateIsAnyRequestMask, r0);
1327 __ beq(&maybe_has_optimized_code);
1328
1329 Register tiering_state = optimization_state;
1330 __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
1331 MaybeOptimizeCode(masm, feedback_vector, tiering_state);
1332
1333 __ bind(&maybe_has_optimized_code);
1334 Register optimized_code_entry = optimization_state;
1335 __ LoadAnyTaggedField(
1336 tiering_state,
1337 FieldMemOperand(feedback_vector,
1338 FeedbackVector::kMaybeOptimizedCodeOffset));
1339 TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8);
1340 }
1341
1342 // Read off the optimization state in the feedback vector and check if there
1343 // is optimized code or a tiering state that needs to be processed.
1344 static void LoadTieringStateAndJumpIfNeedsProcessing(
1345 MacroAssembler* masm, Register optimization_state, Register feedback_vector,
1346 Label* has_optimized_code_or_state) {
1347 ASM_CODE_COMMENT(masm);
1348 USE(LoadTieringStateAndJumpIfNeedsProcessing);
1349 DCHECK(!AreAliased(optimization_state, feedback_vector));
1350 __ LoadU32(optimization_state,
1351 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1352 CHECK(is_uint16(
1353 FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
1354 __ tmll(
1355 optimization_state,
1356 Operand(FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask));
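// Condition mask 7 (CC != 0): branch if any of the tested bits are set.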
1357 __ b(Condition(7), has_optimized_code_or_state);
1358 }
1359
1360 #if ENABLE_SPARKPLUG
1361 // static
1362 void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
1363 // UseScratchRegisterScope temps(masm);
1364 // Need a few extra registers
1365 // temps.Include(r8, r9);
1366
1367 auto descriptor =
1368 Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
1369 Register closure = descriptor.GetRegisterParameter(
1370 BaselineOutOfLinePrologueDescriptor::kClosure);
1371 // Load the feedback vector from the closure.
1372 Register feedback_vector = ip;
1373 __ LoadTaggedPointerField(
1374 feedback_vector,
1375 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1376 __ LoadTaggedPointerField(
1377 feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1378
1379 if (FLAG_debug_code) {
1380 Register scratch = r1;
1381 __ CompareObjectType(feedback_vector, scratch, scratch,
1382 FEEDBACK_VECTOR_TYPE);
1383 __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1384 }
1385
1386 // Check for a tiering state.
1387 Label has_optimized_code_or_state;
1388 Register optimization_state = r9;
1389 {
1390 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1391 feedback_vector,
1392 &has_optimized_code_or_state);
1393 }
1394
1395 // Increment invocation count for the function.
1396 {
1397 Register invocation_count = r1;
1398 __ LoadU64(invocation_count,
1399 FieldMemOperand(feedback_vector,
1400 FeedbackVector::kInvocationCountOffset));
1401 __ AddU64(invocation_count, Operand(1));
1402 __ StoreU64(invocation_count,
1403 FieldMemOperand(feedback_vector,
1404 FeedbackVector::kInvocationCountOffset));
1405 }
1406
1407 FrameScope frame_scope(masm, StackFrame::MANUAL);
1408 {
1409 ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
1410 // Normally the first thing we'd do here is Push(lr, fp), but we already
1411 // entered the frame in BaselineCompiler::Prologue, as we had to use the
1412 // value lr before the call to this BaselineOutOfLinePrologue builtin.
1413
1414 Register callee_context = descriptor.GetRegisterParameter(
1415 BaselineOutOfLinePrologueDescriptor::kCalleeContext);
1416 Register callee_js_function = descriptor.GetRegisterParameter(
1417 BaselineOutOfLinePrologueDescriptor::kClosure);
1418 __ Push(callee_context, callee_js_function);
1419 DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
1420 DCHECK_EQ(callee_js_function, kJSFunctionRegister);
1421
1422 Register argc = descriptor.GetRegisterParameter(
1423 BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
1424 // We'll use the bytecode for both code age/OSR resetting, and pushing onto
1425 // the frame, so load it into a register.
1426 Register bytecodeArray = descriptor.GetRegisterParameter(
1427 BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
1428 ResetBytecodeAgeAndOsrState(masm, bytecodeArray, r1);
1429
1430 __ Push(argc, bytecodeArray);
1431
1432 // Baseline code frames store the feedback vector where interpreter would
1433 // store the bytecode offset.
1434 if (FLAG_debug_code) {
1435 Register scratch = r1;
1436 __ CompareObjectType(feedback_vector, scratch, scratch,
1437 FEEDBACK_VECTOR_TYPE);
1438 __ Assert(eq, AbortReason::kExpectedFeedbackVector);
1439 }
1440 __ Push(feedback_vector);
1441 }
1442
1443 Label call_stack_guard;
1444 Register frame_size = descriptor.GetRegisterParameter(
1445 BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
1446 {
1447 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
1448 // Stack check. This folds the checks for both the interrupt stack limit
1449 // check and the real stack limit into one by just checking for the
1450 // interrupt limit. The interrupt limit is either equal to the real stack
1451 // limit or tighter. By ensuring we have space until that limit after
1452 // building the frame we can quickly precheck both at once.
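// (If the check fails only because of a pending interrupt, the stack guard
// call below simply services it.)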
1453
1454 Register sp_minus_frame_size = r1;
1455 Register interrupt_limit = r0;
1456 __ SubS64(sp_minus_frame_size, sp, frame_size);
1457 __ LoadStackLimit(interrupt_limit, StackLimitKind::kInterruptStackLimit);
1458 __ CmpU64(sp_minus_frame_size, interrupt_limit);
1459 __ blt(&call_stack_guard);
1460 }
1461
1462 // Do "fast" return to the caller pc in lr.
1463 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1464 __ Ret();
1465
1466 __ bind(&has_optimized_code_or_state);
1467 {
1468 ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
1469
1470 // Drop the frame created by the baseline call.
1471 __ Pop(r14, fp);
1472 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1473 feedback_vector);
1474 __ Trap();
1475 }
1476
1477 __ bind(&call_stack_guard);
1478 {
1479 ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
1480 FrameScope frame_scope(masm, StackFrame::INTERNAL);
1481 // Save the incoming new target or generator.
1482 __ Push(kJavaScriptCallNewTargetRegister);
1483 __ SmiTag(frame_size);
1484 __ Push(frame_size);
1485 __ CallRuntime(Runtime::kStackGuardWithGap);
1486 __ Pop(kJavaScriptCallNewTargetRegister);
1487 }
1488
1489 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1490 __ Ret();
1491 }
1492 #endif  // ENABLE_SPARKPLUG
1493
1494 // Generate code for entering a JS function with the interpreter.
1495 // On entry to the function the receiver and arguments have been pushed on the
1496 // stack left to right.
1497 //
1498 // The live registers are:
1499 // o r2: actual argument count
1500 // o r3: the JS function object being called.
1501 // o r5: the incoming new target or generator object
1502 // o cp: our context
1503 // o pp: the caller's constant pool pointer (if enabled)
1504 // o fp: the caller's frame pointer
1505 // o sp: stack pointer
1506 // o lr: return address
1507 //
1508 // The function builds an interpreter frame. See InterpreterFrameConstants in
1509 // frame-constants.h for its layout.
1510 void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
1511 Register closure = r3;
1512 Register feedback_vector = r4;
1513
1514 // Get the bytecode array from the function object and load it into
1515 // kInterpreterBytecodeArrayRegister.
1516 __ LoadTaggedPointerField(
1517 r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
1518 // Load original bytecode array or the debug copy.
1519 __ LoadTaggedPointerField(
1520 kInterpreterBytecodeArrayRegister,
1521 FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
1522
1523 Label is_baseline;
1524 GetSharedFunctionInfoBytecodeOrBaseline(
1525 masm, kInterpreterBytecodeArrayRegister, ip, &is_baseline);
1526
1527 // The bytecode array could have been flushed from the shared function info;
1528 // if so, call into CompileLazy.
1529 Label compile_lazy;
1530 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r6, no_reg,
1531 BYTECODE_ARRAY_TYPE);
1532 __ bne(&compile_lazy);
1533
1534 // Load the feedback vector from the closure.
1535 __ LoadTaggedPointerField(
1536 feedback_vector,
1537 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1538 __ LoadTaggedPointerField(
1539 feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1540
1541 Label push_stack_frame;
1542 // Check if feedback vector is valid. If valid, check for optimized code
1543 // and update invocation count. Otherwise, set up the stack frame.
1544 __ LoadTaggedPointerField(
1545 r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1546 __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
1547 __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
1548 __ bne(&push_stack_frame);
1549
1550 Register optimization_state = r6;
1551
1552 // Read off the optimization state in the feedback vector.
1553 __ LoadS32(optimization_state,
1554 FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
1555
1556 // Check if the optimized code slot is not empty or has a tiering state.
1557 Label has_optimized_code_or_state;
1558 __ TestBitMask(
1559 optimization_state,
1560 FeedbackVector::kHasOptimizedCodeOrTieringStateIsAnyRequestMask, r0);
1561 __ bne(&has_optimized_code_or_state);
1562
1563 Label not_optimized;
1564 __ bind(&not_optimized);
1565
1566 // Increment invocation count for the function.
1567 __ LoadS32(r1, FieldMemOperand(feedback_vector,
1568 FeedbackVector::kInvocationCountOffset));
1569 __ AddS64(r1, r1, Operand(1));
1570 __ StoreU32(r1, FieldMemOperand(feedback_vector,
1571 FeedbackVector::kInvocationCountOffset));
1572
1573 // Open a frame scope to indicate that there is a frame on the stack. The
1574 // MANUAL indicates that the scope shouldn't actually generate code to set up
1575 // the frame (that is done below).
1576 __ bind(&push_stack_frame);
1577 FrameScope frame_scope(masm, StackFrame::MANUAL);
1578 __ PushStandardFrame(closure);
1579
1580 ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r1);
1581
1582 // Load the initial bytecode offset.
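// (BytecodeArray::kHeaderSize - kHeapObjectTag is the offset of the first
// bytecode relative to the tagged BytecodeArray pointer.)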
1583 __ mov(kInterpreterBytecodeOffsetRegister,
1584 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1585
1586 // Push bytecode array and Smi tagged bytecode array offset.
1587 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1588 __ Push(kInterpreterBytecodeArrayRegister, r4);
1589
1590 // Allocate the local and temporary register file on the stack.
1591 Label stack_overflow;
1592 {
1593 // Load frame size (word) from the BytecodeArray object.
1594 __ LoadU32(r4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
1595 BytecodeArray::kFrameSizeOffset));
1596
1597 // Do a stack check to ensure we don't go over the limit.
1598 __ SubS64(r8, sp, r4);
1599 __ CmpU64(r8, __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
1600 __ blt(&stack_overflow);
1601
1602 // If ok, push undefined as the initial value for all register file entries.
1603 // TODO(rmcilroy): Consider doing more than one push per loop iteration.
1604 Label loop, no_args;
1605 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1606 __ ShiftRightU64(r4, r4, Operand(kSystemPointerSizeLog2));
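// r4 now holds the number of register-file slots to initialize (the frame
// size in bytes divided by the pointer size).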
1607 __ LoadAndTestP(r4, r4);
1608 __ beq(&no_args);
1609 __ mov(r1, r4);
1610 __ bind(&loop);
1611 __ push(kInterpreterAccumulatorRegister);
1612 __ SubS64(r1, Operand(1));
1613 __ bne(&loop);
1614 __ bind(&no_args);
1615 }
1616
1617 // If the bytecode array has a valid incoming new target or generator object
1618 // register, initialize it with the incoming value, which was passed in r5.
1619 Label no_incoming_new_target_or_generator_register;
1620 __ LoadS32(r8, FieldMemOperand(
1621 kInterpreterBytecodeArrayRegister,
1622 BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
1623 __ CmpS64(r8, Operand::Zero());
1624 __ beq(&no_incoming_new_target_or_generator_register);
1625 __ ShiftLeftU64(r8, r8, Operand(kSystemPointerSizeLog2));
1626 __ StoreU64(r5, MemOperand(fp, r8));
1627 __ bind(&no_incoming_new_target_or_generator_register);
1628
1629 // Perform interrupt stack check.
1630 // TODO(solanes): Merge with the real stack limit check above.
1631 Label stack_check_interrupt, after_stack_check_interrupt;
1632 __ LoadU64(r0,
1633 __ StackLimitAsMemOperand(StackLimitKind::kInterruptStackLimit));
1634 __ CmpU64(sp, r0);
1635 __ blt(&stack_check_interrupt);
1636 __ bind(&after_stack_check_interrupt);
1637
1638 // The accumulator is already loaded with undefined.
1639
1640 // Load the dispatch table into a register and dispatch to the bytecode
1641 // handler at the current bytecode offset.
1642 Label do_dispatch;
1643 __ bind(&do_dispatch);
1644 __ Move(
1645 kInterpreterDispatchTableRegister,
1646 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1647
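// The dispatch table is a flat array of handler code entry addresses indexed
// by bytecode value, so the bytecode (scaled by the pointer size) selects the
// handler to call.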
1648 __ LoadU8(r5, MemOperand(kInterpreterBytecodeArrayRegister,
1649 kInterpreterBytecodeOffsetRegister));
1650 __ ShiftLeftU64(r5, r5, Operand(kSystemPointerSizeLog2));
1651 __ LoadU64(kJavaScriptCallCodeStartRegister,
1652 MemOperand(kInterpreterDispatchTableRegister, r5));
1653 __ Call(kJavaScriptCallCodeStartRegister);
1654
1655 masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
1656
1657 // Any returns to the entry trampoline are either due to the return bytecode
1658 // or the interpreter tail calling a builtin and then a dispatch.
1659
1660 // Get bytecode array and bytecode offset from the stack frame.
1661 __ LoadU64(kInterpreterBytecodeArrayRegister,
1662 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1663 __ LoadU64(kInterpreterBytecodeOffsetRegister,
1664 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1665 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1666
1667 // Either return, or advance to the next bytecode and dispatch.
1668 Label do_return;
1669 __ LoadU8(r3, MemOperand(kInterpreterBytecodeArrayRegister,
1670 kInterpreterBytecodeOffsetRegister));
1671 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1672 kInterpreterBytecodeOffsetRegister, r3, r4, r5,
1673 &do_return);
1674 __ b(&do_dispatch);
1675
1676 __ bind(&do_return);
1677 // The return value is in r2.
1678 LeaveInterpreterFrame(masm, r4, r6);
1679 __ Ret();
1680
1681 __ bind(&stack_check_interrupt);
1682 // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
1683 // for the call to the StackGuard.
1684 __ mov(kInterpreterBytecodeOffsetRegister,
1685 Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
1686 kFunctionEntryBytecodeOffset)));
1687 __ StoreU64(kInterpreterBytecodeOffsetRegister,
1688 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1689 __ CallRuntime(Runtime::kStackGuard);
1690
1691 // After the call, restore the bytecode array, bytecode offset and accumulator
1692 // registers again. Also, restore the bytecode offset in the stack to its
1693 // previous value.
1694 __ LoadU64(kInterpreterBytecodeArrayRegister,
1695 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1696 __ mov(kInterpreterBytecodeOffsetRegister,
1697 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1698 __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
1699
1700 __ SmiTag(r0, kInterpreterBytecodeOffsetRegister);
1701 __ StoreU64(r0,
1702 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1703
1704 __ jmp(&after_stack_check_interrupt);
1705
1706 __ bind(&has_optimized_code_or_state);
1707 MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
1708 feedback_vector);
1709
1710 __ bind(&is_baseline);
1711 {
1712 // Load the feedback vector from the closure.
1713 __ LoadTaggedPointerField(
1714 feedback_vector,
1715 FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
1716 __ LoadTaggedPointerField(
1717 feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
1718
1719 Label install_baseline_code;
1720 // Check if feedback vector is valid. If not, call prepare for baseline to
1721 // allocate it.
1722 __ LoadTaggedPointerField(
1723 ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
1724 __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
1725 __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
1726 __ b(ne, &install_baseline_code);
1727
1728 // Check for a tiering state.
1729 LoadTieringStateAndJumpIfNeedsProcessing(masm, optimization_state,
1730 feedback_vector,
1731 &has_optimized_code_or_state);
1732
1733 // Load the baseline code into the closure.
1734 __ mov(r4, kInterpreterBytecodeArrayRegister);
1735 static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
1736 ReplaceClosureCodeWithOptimizedCode(masm, r4, closure, ip, r1);
1737 __ JumpCodeObject(r4);
1738
1739 __ bind(&install_baseline_code);
1740 GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
1741 }
1742
1743 __ bind(&compile_lazy);
1744 GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
1745
1746 __ bind(&stack_overflow);
1747 __ CallRuntime(Runtime::kThrowStackOverflow);
1748 __ bkpt(0); // Should not return.
1749 }
1750
1751 static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
1752 Register start_address,
1753 Register scratch) {
1754 ASM_CODE_COMMENT(masm);
1755 __ SubS64(scratch, num_args, Operand(1));
1756 __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
1757 __ SubS64(start_address, start_address, scratch);
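// The arguments occupy consecutive interpreter registers at decreasing
// addresses, so after the subtraction start_address points at the last
// argument; PushArray then pushes the whole range in reverse order.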
1758 // Push the arguments.
1759 __ PushArray(start_address, num_args, r1, scratch,
1760 TurboAssembler::PushArrayOrder::kReverse);
1761 }
1762
1763 // static
1764 void Builtins::Generate_InterpreterPushArgsThenCallImpl(
1765 MacroAssembler* masm, ConvertReceiverMode receiver_mode,
1766 InterpreterPushArgsMode mode) {
1767 DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
1768 // ----------- S t a t e -------------
1769 // -- r2 : the number of arguments
1770 // -- r4 : the address of the first argument to be pushed. Subsequent
1771 // arguments should be consecutive above this, in the same order as
1772 // they are to be pushed onto the stack.
1773 // -- r3 : the target to call (can be any Object).
1774 // -----------------------------------
1775 Label stack_overflow;
1776 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1777 // The spread argument should not be pushed.
1778 __ SubS64(r2, r2, Operand(1));
1779 }
1780
1781 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1782 __ SubS64(r5, r2, Operand(kJSArgcReceiverSlots));
1783 } else {
1784 __ mov(r5, r2);
1785 }
1786
1787 __ StackOverflowCheck(r5, ip, &stack_overflow);
1788
1789 // Push the arguments.
1790 GenerateInterpreterPushArgs(masm, r5, r4, r6);
1791
1792 if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
1793 __ PushRoot(RootIndex::kUndefinedValue);
1794 }
1795
1796 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1797 // Pass the spread in the register r4.
1798 // r4 already points to the penultimate argument, the spread
1799 // lies in the next interpreter register.
1800 __ LoadU64(r4, MemOperand(r4, -kSystemPointerSize));
1801 }
1802
1803 // Call the target.
1804 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1805 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
1806 RelocInfo::CODE_TARGET);
1807 } else {
1808 __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
1809 RelocInfo::CODE_TARGET);
1810 }
1811
1812 __ bind(&stack_overflow);
1813 {
1814 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1815 // Unreachable Code.
1816 __ bkpt(0);
1817 }
1818 }
1819
1820 // static
1821 void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
1822 MacroAssembler* masm, InterpreterPushArgsMode mode) {
1823 // ----------- S t a t e -------------
1824 // -- r2 : argument count
1825 // -- r5 : new target
1826 // -- r3 : constructor to call
1827 // -- r4 : allocation site feedback if available, undefined otherwise.
1828 // -- r6 : address of the first argument
1829 // -----------------------------------
1830 Label stack_overflow;
1831 __ StackOverflowCheck(r2, ip, &stack_overflow);
1832
1833 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1834 // The spread argument should not be pushed.
1835 __ SubS64(r2, r2, Operand(1));
1836 }
1837
1838 Register argc_without_receiver = ip;
1839 __ SubS64(argc_without_receiver, r2, Operand(kJSArgcReceiverSlots));
1840 // Push the arguments. r4 and r5 will be modified.
1841 GenerateInterpreterPushArgs(masm, argc_without_receiver, r6, r7);
1842
1843 // Push a slot for the receiver to be constructed.
1844 __ mov(r0, Operand::Zero());
1845 __ push(r0);
1846
1847 if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1848 // Pass the spread in the register r4.
1849 // r6 already points to the penultimate argument, the spread
1850 // lies in the next interpreter register.
1851 __ lay(r6, MemOperand(r6, -kSystemPointerSize));
1852 __ LoadU64(r4, MemOperand(r6));
1853 } else {
1854 __ AssertUndefinedOrAllocationSite(r4, r7);
1855 }
1856
1857 if (mode == InterpreterPushArgsMode::kArrayFunction) {
1858 __ AssertFunction(r3);
1859
1860 // Tail call to the array construct stub (still in the caller
1861 // context at this point).
1862 Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
1863 __ Jump(code, RelocInfo::CODE_TARGET);
1864 } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
1865 // Call the constructor with r2, r3, and r5 unmodified.
1866 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
1867 RelocInfo::CODE_TARGET);
1868 } else {
1869 DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
1870 // Call the constructor with r2, r3, and r5 unmodified.
1871 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
1872 }
1873
1874 __ bind(&stack_overflow);
1875 {
1876 __ TailCallRuntime(Runtime::kThrowStackOverflow);
1877 // Unreachable Code.
1878 __ bkpt(0);
1879 }
1880 }
1881
1882 static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
1883 // Set the return address to the correct point in the interpreter entry
1884 // trampoline.
1885 Label builtin_trampoline, trampoline_loaded;
1886 Smi interpreter_entry_return_pc_offset(
1887 masm->isolate()->heap()->interpreter_entry_return_pc_offset());
1888 DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
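// This offset was recorded via SetInterpreterEntryReturnPCOffset when the
// interpreter entry trampoline was generated (see
// Generate_InterpreterEntryTrampoline above), so adding it to a trampoline's
// start address yields the return address into the dispatch loop.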
1889
1890 // If the SFI function_data is an InterpreterData, the function will have a
1891 // custom copy of the interpreter entry trampoline for profiling. If so,
1892 // get the custom trampoline, otherwise grab the entry address of the global
1893 // trampoline.
1894 __ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
1895 __ LoadTaggedPointerField(
1896 r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1897 __ LoadTaggedPointerField(
1898 r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
1899 __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
1900 kInterpreterDispatchTableRegister,
1901 INTERPRETER_DATA_TYPE);
1902 __ bne(&builtin_trampoline);
1903
1904 __ LoadTaggedPointerField(
1905 r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
1906 __ AddS64(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
1907 __ b(&trampoline_loaded);
1908
1909 __ bind(&builtin_trampoline);
1910 __ Move(r4, ExternalReference::
1911 address_of_interpreter_entry_trampoline_instruction_start(
1912 masm->isolate()));
1913 __ LoadU64(r4, MemOperand(r4));
1914
1915 __ bind(&trampoline_loaded);
1916 __ AddS64(r14, r4, Operand(interpreter_entry_return_pc_offset.value()));
1917
1918 // Initialize the dispatch table register.
1919 __ Move(
1920 kInterpreterDispatchTableRegister,
1921 ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
1922
1923 // Get the bytecode array pointer from the frame.
1924 __ LoadU64(kInterpreterBytecodeArrayRegister,
1925 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1926
1927 if (FLAG_debug_code) {
1928 // Check function data field is actually a BytecodeArray object.
1929 __ TestIfSmi(kInterpreterBytecodeArrayRegister);
1930 __ Assert(
1931 ne, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1932 __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
1933 BYTECODE_ARRAY_TYPE);
1934 __ Assert(
1935 eq, AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
1936 }
1937
1938 // Get the target bytecode offset from the frame.
1939 __ LoadU64(kInterpreterBytecodeOffsetRegister,
1940 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1941 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1942
1943 if (FLAG_debug_code) {
1944 Label okay;
1945 __ CmpS64(kInterpreterBytecodeOffsetRegister,
1946 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
1947 __ bge(&okay);
1948 __ bkpt(0);
1949 __ bind(&okay);
1950 }
1951
1952 // Dispatch to the target bytecode.
1953 UseScratchRegisterScope temps(masm);
1954 Register scratch = temps.Acquire();
1955 __ LoadU8(scratch, MemOperand(kInterpreterBytecodeArrayRegister,
1956 kInterpreterBytecodeOffsetRegister));
1957 __ ShiftLeftU64(scratch, scratch, Operand(kSystemPointerSizeLog2));
1958 __ LoadU64(kJavaScriptCallCodeStartRegister,
1959 MemOperand(kInterpreterDispatchTableRegister, scratch));
1960 __ Jump(kJavaScriptCallCodeStartRegister);
1961 }
1962
1963 void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
1964 // Get bytecode array and bytecode offset from the stack frame.
1965 __ LoadU64(kInterpreterBytecodeArrayRegister,
1966 MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
1967 __ LoadU64(kInterpreterBytecodeOffsetRegister,
1968 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1969 __ SmiUntag(kInterpreterBytecodeOffsetRegister);
1970
1971 Label enter_bytecode, function_entry_bytecode;
1972 __ CmpS64(kInterpreterBytecodeOffsetRegister,
1973 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
1974 kFunctionEntryBytecodeOffset));
1975 __ beq(&function_entry_bytecode);
1976
1977 // Load the current bytecode.
1978 __ LoadU8(r3, MemOperand(kInterpreterBytecodeArrayRegister,
1979 kInterpreterBytecodeOffsetRegister));
1980
1981 // Advance to the next bytecode.
1982 Label if_return;
1983 AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
1984 kInterpreterBytecodeOffsetRegister, r3, r4, r5,
1985 &if_return);
1986
1987 __ bind(&enter_bytecode);
1988 // Convert new bytecode offset to a Smi and save in the stackframe.
1989 __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
1990 __ StoreU64(r4,
1991 MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
1992
1993 Generate_InterpreterEnterBytecode(masm);
1994
1995 __ bind(&function_entry_bytecode);
1996 // If the code deoptimizes during the implicit function entry stack interrupt
1997 // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
1998 // not a valid bytecode offset. Detect this case and advance to the first
1999 // actual bytecode.
2000 __ mov(kInterpreterBytecodeOffsetRegister,
2001 Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
2002 __ b(&enter_bytecode);
2003
2004 // We should never take the if_return path.
2005 __ bind(&if_return);
2006 __ Abort(AbortReason::kInvalidBytecodeAdvance);
2007 }
2008
2009 void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
2010 Generate_InterpreterEnterBytecode(masm);
2011 }
2012
2013 namespace {
2014 void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
2015 bool java_script_builtin,
2016 bool with_result) {
2017 const RegisterConfiguration* config(RegisterConfiguration::Default());
2018 int allocatable_register_count = config->num_allocatable_general_registers();
2019 Register scratch = ip;
2020 if (with_result) {
2021 if (java_script_builtin) {
2022 __ mov(scratch, r2);
2023 } else {
2024 // Overwrite the hole inserted by the deoptimizer with the return value
2025 // from the LAZY deopt point.
2026 __ StoreU64(
2027 r2, MemOperand(
2028 sp, config->num_allocatable_general_registers() *
2029 kSystemPointerSize +
2030 BuiltinContinuationFrameConstants::kFixedFrameSize));
2031 }
2032 }
2033 for (int i = allocatable_register_count - 1; i >= 0; --i) {
2034 int code = config->GetAllocatableGeneralCode(i);
2035 __ Pop(Register::from_code(code));
2036 if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
2037 __ SmiUntag(Register::from_code(code));
2038 }
2039 }
2040 if (java_script_builtin && with_result) {
2041 // Overwrite the hole inserted by the deoptimizer with the return value from
2042 // the LAZY deopt point. r2 contains the arguments count; the return value
2043 // from LAZY is always the last argument.
2044 constexpr int return_value_offset =
2045 BuiltinContinuationFrameConstants::kFixedSlotCount -
2046 kJSArgcReceiverSlots;
2047 __ AddS64(r2, r2, Operand(return_value_offset));
2048 __ ShiftLeftU64(r1, r2, Operand(kSystemPointerSizeLog2));
2049 __ StoreU64(scratch, MemOperand(sp, r1));
2050 // Recover arguments count.
2051 __ SubS64(r2, r2, Operand(return_value_offset));
2052 }
2053 __ LoadU64(
2054 fp,
2055 MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
2056 // Load builtin index (stored as a Smi) and use it to get the builtin start
2057 // address from the builtins table.
2058 UseScratchRegisterScope temps(masm);
2059 Register builtin = temps.Acquire();
2060 __ Pop(builtin);
2061 __ AddS64(sp, sp,
2062 Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
2063 __ Pop(r0);
2064 __ mov(r14, r0);
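// The value just popped is the frame's saved return address; it is moved into
// r14 (the link register) before jumping, so the builtin returns to the
// continuation's caller.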
2065 __ LoadEntryFromBuiltinIndex(builtin);
2066 __ Jump(builtin);
2067 }
2068 } // namespace
2069
2070 void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
2071 Generate_ContinueToBuiltinHelper(masm, false, false);
2072 }
2073
2074 void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
2075 MacroAssembler* masm) {
2076 Generate_ContinueToBuiltinHelper(masm, false, true);
2077 }
2078
2079 void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
2080 Generate_ContinueToBuiltinHelper(masm, true, false);
2081 }
2082
2083 void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
2084 MacroAssembler* masm) {
2085 Generate_ContinueToBuiltinHelper(masm, true, true);
2086 }
2087
2088 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
2089 {
2090 FrameScope scope(masm, StackFrame::INTERNAL);
2091 __ CallRuntime(Runtime::kNotifyDeoptimized);
2092 }
2093
2094 DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r2.code());
2095 __ pop(r2);
2096 __ Ret();
2097 }
2098
2099 // static
2100 void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
2101 // ----------- S t a t e -------------
2102 // -- r2 : argc
2103 // -- sp[0] : receiver
2104 // -- sp[4] : thisArg
2105 // -- sp[8] : argArray
2106 // -----------------------------------
2107
2108 // 1. Load receiver into r3, argArray into r4 (if present), remove all
2109 // arguments from the stack (including the receiver), and push thisArg (if
2110 // present) instead.
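// For example, for fn.apply(thisArg, argArray) with both arguments present
// this leaves r3 = fn, r7 = thisArg and r4 = argArray; missing arguments
// default to undefined.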
2111 {
2112 __ LoadRoot(r7, RootIndex::kUndefinedValue);
2113 __ mov(r4, r7);
2114 Label done;
2115
2116 __ LoadU64(r3, MemOperand(sp)); // receiver
2117 __ CmpS64(r2, Operand(JSParameterCount(1)));
2118 __ blt(&done);
2119 __ LoadU64(r7, MemOperand(sp, kSystemPointerSize)); // thisArg
2120 __ CmpS64(r2, Operand(JSParameterCount(2)));
2121 __ blt(&done);
2122 __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
2123
2124 __ bind(&done);
2125 __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
2126 TurboAssembler::kCountIncludesReceiver);
2127 }
2128
2129 // ----------- S t a t e -------------
2130 // -- r4 : argArray
2131 // -- r3 : receiver
2132 // -- sp[0] : thisArg
2133 // -----------------------------------
2134
2135 // 2. We don't need to check explicitly for callable receiver here,
2136 // since that's the first thing the Call/CallWithArrayLike builtins
2137 // will do.
2138
2139 // 3. Tail call with no arguments if argArray is null or undefined.
2140 Label no_arguments;
2141 __ JumpIfRoot(r4, RootIndex::kNullValue, &no_arguments);
2142 __ JumpIfRoot(r4, RootIndex::kUndefinedValue, &no_arguments);
2143
2144 // 4a. Apply the receiver to the given argArray.
2145 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2146 RelocInfo::CODE_TARGET);
2147
2148 // 4b. The argArray is either null or undefined, so we tail call without any
2149 // arguments to the receiver.
2150 __ bind(&no_arguments);
2151 {
2152 __ mov(r2, Operand(JSParameterCount(0)));
2153 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2154 }
2155 }
2156
2157 // static
2158 void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
2159 // 1. Get the callable to call (passed as receiver) from the stack.
2160 __ Pop(r3);
2161
2162 // 2. Make sure we have at least one argument.
2163 // r2: actual number of arguments
2164 {
2165 Label done;
2166 __ CmpS64(r2, Operand(JSParameterCount(0)));
2167 __ b(ne, &done);
2168 __ PushRoot(RootIndex::kUndefinedValue);
2169 __ AddS64(r2, r2, Operand(1));
2170 __ bind(&done);
2171 }
2172
2173 // 3. Adjust the actual number of arguments.
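// (The callable was popped from the receiver slot above, and the first
// argument, or the undefined just pushed, becomes the new receiver, so the
// count drops by one.)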
2174 __ SubS64(r2, r2, Operand(1));
2175
2176 // 4. Call the callable.
2177 __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
2178 }
2179
2180 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
2181 // ----------- S t a t e -------------
2182 // -- r2 : argc
2183 // -- sp[0] : receiver
2184 // -- sp[4] : target (if argc >= 1)
2185 // -- sp[8] : thisArgument (if argc >= 2)
2186 // -- sp[12] : argumentsList (if argc == 3)
2187 // -----------------------------------
2188
2189 // 1. Load target into r3 (if present), argumentsList into r4 (if present),
2190 // remove all arguments from the stack (including the receiver), and push
2191 // thisArgument (if present) instead.
2192 {
2193 __ LoadRoot(r3, RootIndex::kUndefinedValue);
2194 __ mov(r7, r3);
2195 __ mov(r4, r3);
2196
2197 Label done;
2198
2199 __ CmpS64(r2, Operand(JSParameterCount(1)));
2200 __ blt(&done);
2201 __ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // target
2202 __ CmpS64(r2, Operand(JSParameterCount(2)));
2203 __ blt(&done);
2204 __ LoadU64(r7, MemOperand(sp, 2 * kSystemPointerSize)); // thisArgument
2205 __ CmpS64(r2, Operand(JSParameterCount(3)));
2206 __ blt(&done);
2207 __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argumentsList
2208
2209 __ bind(&done);
2210 __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
2211 TurboAssembler::kCountIncludesReceiver);
2212 }
2213
2214 // ----------- S t a t e -------------
2215 // -- r4 : argumentsList
2216 // -- r3 : target
2217 // -- sp[0] : thisArgument
2218 // -----------------------------------
2219
2220 // 2. We don't need to check explicitly for callable target here,
2221 // since that's the first thing the Call/CallWithArrayLike builtins
2222 // will do.
2223
2224 // 3 Apply the target to the given argumentsList.
2225 __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
2226 RelocInfo::CODE_TARGET);
2227 }
2228
2229 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
2230 // ----------- S t a t e -------------
2231 // -- r2 : argc
2232 // -- sp[0] : receiver
2233 // -- sp[4] : target
2234 // -- sp[8] : argumentsList
2235 // -- sp[12] : new.target (optional)
2236 // -----------------------------------
2237
2238 // 1. Load target into r3 (if present), argumentsList into r4 (if present),
2239 // new.target into r5 (if present, otherwise use target), remove all
2240 // arguments from the stack (including the receiver), and push thisArgument
2241 // (if present) instead.
2242 {
2243 __ LoadRoot(r3, RootIndex::kUndefinedValue);
2244 __ mov(r4, r3);
2245
2246 Label done;
2247
2248 __ mov(r6, r3);
2249 __ CmpS64(r2, Operand(JSParameterCount(1)));
2250 __ blt(&done);
2251 __ LoadU64(r3, MemOperand(sp, kSystemPointerSize)); // target
2252 __ mov(r5, r3);
2253 __ CmpS64(r2, Operand(JSParameterCount(2)));
2254 __ blt(&done);
2255 __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argumentsList
2256 __ CmpS64(r2, Operand(JSParameterCount(3)));
2257 __ blt(&done);
2258 __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // new.target
2259 __ bind(&done);
2260 __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
2261 TurboAssembler::kCountIncludesReceiver);
2262 }
2263
2264 // ----------- S t a t e -------------
2265 // -- r4 : argumentsList
2266 // -- r5 : new.target
2267 // -- r3 : target
2268 // -- sp[0] : receiver (undefined)
2269 // -----------------------------------
2270
2271 // 2. We don't need to check explicitly for constructor target here,
2272 // since that's the first thing the Construct/ConstructWithArrayLike
2273 // builtins will do.
2274
2275 // 3. We don't need to check explicitly for constructor new.target here,
2276 // since that's the second thing the Construct/ConstructWithArrayLike
2277 // builtins will do.
2278
2279 // 4. Construct the target with the given new.target and argumentsList.
2280 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
2281 RelocInfo::CODE_TARGET);
2282 }
2283
2284 namespace {
2285
2286 // Allocate new stack space for |count| arguments and shift all existing
2287 // arguments already on the stack. |pointer_to_new_space_out| points to the
2288 // first free slot on the stack to copy additional arguments to and
2289 // |argc_in_out| is updated to include |count|.
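// The existing arguments (including the receiver) are copied in order from
// the old stack pointer to the newly reserved, lower area; the returned
// pointer marks the first slot after them, ready for the new arguments.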
2290 void Generate_AllocateSpaceAndShiftExistingArguments(
2291 MacroAssembler* masm, Register count, Register argc_in_out,
2292 Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
2293 DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
2294 scratch2));
2295 Register old_sp = scratch1;
2296 Register new_space = scratch2;
2297 __ mov(old_sp, sp);
2298 __ ShiftLeftU64(new_space, count, Operand(kSystemPointerSizeLog2));
2299 __ AllocateStackSpace(new_space);
2300
2301 Register end = scratch2;
2302 Register value = r1;
2303 Register dest = pointer_to_new_space_out;
2304 __ mov(dest, sp);
2305 __ ShiftLeftU64(r0, argc_in_out, Operand(kSystemPointerSizeLog2));
2306 __ AddS64(end, old_sp, r0);
2307 Label loop, done;
2308 __ bind(&loop);
2309 __ CmpS64(old_sp, end);
2310 __ bge(&done);
2311 __ LoadU64(value, MemOperand(old_sp));
2312 __ lay(old_sp, MemOperand(old_sp, kSystemPointerSize));
2313 __ StoreU64(value, MemOperand(dest));
2314 __ lay(dest, MemOperand(dest, kSystemPointerSize));
2315 __ b(&loop);
2316 __ bind(&done);
2317
2318 // Update total number of arguments.
2319 __ AddS64(argc_in_out, argc_in_out, count);
2320 }
2321
2322 } // namespace
2323
2324 // static
2325 // TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
2326 void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
2327 Handle<Code> code) {
2328 // ----------- S t a t e -------------
2329 // -- r3 : target
2330 // -- r2 : number of parameters on the stack
2331 // -- r4 : arguments list (a FixedArray)
2332 // -- r6 : len (number of elements to push from args)
2333 // -- r5 : new.target (for [[Construct]])
2334 // -----------------------------------
2335
2336 Register scratch = ip;
2337
2338 if (FLAG_debug_code) {
2339 // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
2340 Label ok, fail;
2341 __ AssertNotSmi(r4);
2342 __ LoadTaggedPointerField(scratch,
2343 FieldMemOperand(r4, HeapObject::kMapOffset));
2344 __ LoadS16(scratch,
2345 FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2346 __ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
2347 __ beq(&ok);
2348 __ CmpS64(scratch, Operand(FIXED_DOUBLE_ARRAY_TYPE));
2349 __ bne(&fail);
2350 __ CmpS64(r6, Operand::Zero());
2351 __ beq(&ok);
2352 // Fall through.
2353 __ bind(&fail);
2354 __ Abort(AbortReason::kOperandIsNotAFixedArray);
2355
2356 __ bind(&ok);
2357 }
2358
2359 // Check for stack overflow.
2360 Label stack_overflow;
2361 __ StackOverflowCheck(r6, scratch, &stack_overflow);
2362
2363 // Move the arguments already in the stack,
2364 // including the receiver and the return address.
2365 // r6: Number of arguments to make room for.
2366 // r2: Number of arguments already on the stack.
2367 // r7: Points to first free slot on the stack after arguments were shifted.
2368 Generate_AllocateSpaceAndShiftExistingArguments(masm, r6, r2, r7, ip, r8);
2369
2370 // Push arguments onto the stack (thisArgument is already on the stack).
2371 {
2372 Label loop, no_args, skip;
2373 __ CmpS64(r6, Operand::Zero());
2374 __ beq(&no_args);
2375 __ AddS64(r4, r4,
2376 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
2377 __ mov(r1, r6);
2378 __ bind(&loop);
2379 __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
2380 __ la(r4, MemOperand(r4, kTaggedSize));
2381 __ CompareRoot(scratch, RootIndex::kTheHoleValue);
2382 __ bne(&skip, Label::kNear);
2383 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
2384 __ bind(&skip);
2385 __ StoreU64(scratch, MemOperand(r7));
2386 __ lay(r7, MemOperand(r7, kSystemPointerSize));
2387 __ BranchOnCount(r1, &loop);
2388 __ bind(&no_args);
2389 }
2390
2391 // Tail-call to the actual Call or Construct builtin.
2392 __ Jump(code, RelocInfo::CODE_TARGET);
2393
2394 __ bind(&stack_overflow);
2395 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2396 }
2397
2398 // static
2399 void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
2400 CallOrConstructMode mode,
2401 Handle<Code> code) {
2402 // ----------- S t a t e -------------
2403 // -- r2 : the number of arguments
2404 // -- r5 : the new.target (for [[Construct]] calls)
2405 // -- r3 : the target to call (can be any Object)
2406 // -- r4 : start index (to support rest parameters)
2407 // -----------------------------------
2408
2409 Register scratch = r8;
2410
2411 if (mode == CallOrConstructMode::kConstruct) {
2412 Label new_target_constructor, new_target_not_constructor;
2413 __ JumpIfSmi(r5, &new_target_not_constructor);
2414 __ LoadTaggedPointerField(scratch,
2415 FieldMemOperand(r5, HeapObject::kMapOffset));
2416 __ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
2417 __ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
2418 __ bne(&new_target_constructor);
2419 __ bind(&new_target_not_constructor);
2420 {
2421 FrameScope scope(masm, StackFrame::MANUAL);
2422 __ EnterFrame(StackFrame::INTERNAL);
2423 __ Push(r5);
2424 __ CallRuntime(Runtime::kThrowNotConstructor);
2425 __ Trap(); // Unreachable.
2426 }
2427 __ bind(&new_target_constructor);
2428 }
2429
2430 Label stack_done, stack_overflow;
2431 __ LoadU64(r7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
2432 __ SubS64(r7, r7, Operand(kJSArgcReceiverSlots));
2433 __ SubS64(r7, r7, r4);
2434 __ ble(&stack_done);
2435 {
2436 // ----------- S t a t e -------------
2437 // -- r2 : the number of arguments already in the stack
2438 // -- r3 : the target to call (can be any Object)
2439 // -- r4 : start index (to support rest parameters)
2440 // -- r5 : the new.target (for [[Construct]] calls)
2441 // -- r6 : point to the caller stack frame
2442 // -- r7 : number of arguments to copy, i.e. arguments count - start index
2443 // -----------------------------------
2444
2445 // Check for stack overflow.
2446 __ StackOverflowCheck(r7, scratch, &stack_overflow);
2447
2448 // Forward the arguments from the caller frame.
2449 __ mov(r5, r5);
2450 // Point to the first argument to copy (skipping the receiver).
2451 __ AddS64(r6, fp,
2452 Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
2453 kSystemPointerSize));
2454 __ ShiftLeftU64(scratch, r4, Operand(kSystemPointerSizeLog2));
2455 __ AddS64(r6, r6, scratch);
2456
2457 // Move the arguments already in the stack,
2458 // including the receiver and the return address.
2459 // r7: Number of arguments to make room for.
2460 // r2: Number of arguments already on the stack.
2461 // r4: Points to first free slot on the stack after arguments were shifted.
2462 Generate_AllocateSpaceAndShiftExistingArguments(masm, r7, r2, r4, scratch,
2463 ip);
2464
2465 // Copy arguments from the caller frame.
2466 // TODO(victorgomes): Consider using forward order as potentially more cache
2467 // friendly.
2468 {
2469 Label loop;
2470 __ bind(&loop);
2471 {
2472 __ SubS64(r7, r7, Operand(1));
2473 __ ShiftLeftU64(r1, r7, Operand(kSystemPointerSizeLog2));
2474 __ LoadU64(scratch, MemOperand(r6, r1));
2475 __ StoreU64(scratch, MemOperand(r4, r1));
2476 __ CmpS64(r7, Operand::Zero());
2477 __ bne(&loop);
2478 }
2479 }
2480 }
2481 __ b(&stack_done);
2482 __ bind(&stack_overflow);
2483 __ TailCallRuntime(Runtime::kThrowStackOverflow);
2484 __ bind(&stack_done);
2485
2486 // Tail-call to the {code} handler.
2487 __ Jump(code, RelocInfo::CODE_TARGET);
2488 }
2489
2490 // static
2491 void Builtins::Generate_CallFunction(MacroAssembler* masm,
2492 ConvertReceiverMode mode) {
2493 // ----------- S t a t e -------------
2494 // -- r2 : the number of arguments
2495 // -- r3 : the function to call (checked to be a JSFunction)
2496 // -----------------------------------
2497 __ AssertCallableFunction(r3);
2498
2499 __ LoadTaggedPointerField(
2500 r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2501
2502 // Enter the context of the function; ToObject has to run in the function
2503 // context, and we also need to take the global proxy from the function
2504 // context in case of conversion.
2505 __ LoadTaggedPointerField(cp,
2506 FieldMemOperand(r3, JSFunction::kContextOffset));
2507 // We need to convert the receiver for non-native sloppy mode functions.
2508 Label done_convert;
2509 __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
2510 __ AndP(r0, r5,
2511 Operand(SharedFunctionInfo::IsStrictBit::kMask |
2512 SharedFunctionInfo::IsNativeBit::kMask));
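// If either bit is set (strict-mode or native function), the receiver is
// used as-is and no conversion is needed.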
2513 __ bne(&done_convert);
2514 {
2515 // ----------- S t a t e -------------
2516 // -- r2 : the number of arguments
2517 // -- r3 : the function to call (checked to be a JSFunction)
2518 // -- r4 : the shared function info.
2519 // -- cp : the function context.
2520 // -----------------------------------
2521
2522 if (mode == ConvertReceiverMode::kNullOrUndefined) {
2523 // Patch receiver to global proxy.
2524 __ LoadGlobalProxy(r5);
2525 } else {
2526 Label convert_to_object, convert_receiver;
2527 __ LoadReceiver(r5, r2);
2528 __ JumpIfSmi(r5, &convert_to_object);
2529 STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
2530 __ CompareObjectType(r5, r6, r6, FIRST_JS_RECEIVER_TYPE);
2531 __ bge(&done_convert);
2532 if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
2533 Label convert_global_proxy;
2534 __ JumpIfRoot(r5, RootIndex::kUndefinedValue, &convert_global_proxy);
2535 __ JumpIfNotRoot(r5, RootIndex::kNullValue, &convert_to_object);
2536 __ bind(&convert_global_proxy);
2537 {
2538 // Patch receiver to global proxy.
2539 __ LoadGlobalProxy(r5);
2540 }
2541 __ b(&convert_receiver);
2542 }
2543 __ bind(&convert_to_object);
2544 {
2545 // Convert receiver using ToObject.
2546 // TODO(bmeurer): Inline the allocation here to avoid building the frame
2547 // in the fast case? (fall back to AllocateInNewSpace?)
2548 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2549 __ SmiTag(r2);
2550 __ Push(r2, r3);
2551 __ mov(r2, r5);
2552 __ Push(cp);
2553 __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
2554 RelocInfo::CODE_TARGET);
2555 __ Pop(cp);
2556 __ mov(r5, r2);
2557 __ Pop(r2, r3);
2558 __ SmiUntag(r2);
2559 }
2560 __ LoadTaggedPointerField(
2561 r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2562 __ bind(&convert_receiver);
2563 }
2564 __ StoreReceiver(r5, r2, r6);
2565 }
2566 __ bind(&done_convert);
2567
2568 // ----------- S t a t e -------------
2569 // -- r2 : the number of arguments
2570 // -- r3 : the function to call (checked to be a JSFunction)
2571 // -- r4 : the shared function info.
2572 // -- cp : the function context.
2573 // -----------------------------------
2574
2575 __ LoadU16(
2576 r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
2577 __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
2578 }
2579
2580 namespace {
2581
2582 void Generate_PushBoundArguments(MacroAssembler* masm) {
2583 // ----------- S t a t e -------------
2584 // -- r2 : the number of arguments
2585 // -- r3 : target (checked to be a JSBoundFunction)
2586 // -- r5 : new.target (only in case of [[Construct]])
2587 // -----------------------------------
2588
2589 // Load [[BoundArguments]] into r4 and length of that into r6.
2590 Label no_bound_arguments;
2591 __ LoadTaggedPointerField(
2592 r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
2593 __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2594 __ LoadAndTestP(r6, r6);
2595 __ beq(&no_bound_arguments);
2596 {
2597 // ----------- S t a t e -------------
2598 // -- r2 : the number of arguments
2599 // -- r3 : target (checked to be a JSBoundFunction)
2600 // -- r4 : the [[BoundArguments]] (implemented as FixedArray)
2601 // -- r5 : new.target (only in case of [[Construct]])
2602 // -- r6 : the number of [[BoundArguments]]
2603 // -----------------------------------
2604
2605 Register scratch = r8;
2606 // Reserve stack space for the [[BoundArguments]].
2607 {
2608 Label done;
2609 __ ShiftLeftU64(r9, r6, Operand(kSystemPointerSizeLog2));
2610 __ SubS64(r1, sp, r9);
2611 // Check the stack for overflow. We are not trying to catch interruptions
2612 // (i.e. debug break and preemption) here, so check the "real stack
2613 // limit".
2614 __ CmpU64(r1, __ StackLimitAsMemOperand(StackLimitKind::kRealStackLimit));
2615 __ bgt(&done); // Unsigned comparison.
2616 // Restore the stack pointer.
2617 {
2618 FrameScope scope(masm, StackFrame::MANUAL);
2619 __ EnterFrame(StackFrame::INTERNAL);
2620 __ CallRuntime(Runtime::kThrowStackOverflow);
2621 }
2622 __ bind(&done);
2623 }
2624
2625 // Pop receiver.
2626 __ Pop(r7);
2627
2628 // Push [[BoundArguments]].
2629 {
2630 Label loop, done;
2631 __ AddS64(r2, r2, r6); // Adjust effective number of arguments.
2632 __ AddS64(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2633
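// Bound arguments are pushed from the last one down to the first, so on the
// downward-growing stack they end up in source order directly above the
// receiver that is re-pushed below.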
2634 __ bind(&loop);
2635 __ SubS64(r1, r6, Operand(1));
2636 __ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
2637 __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
2638 __ Push(scratch);
2639 __ SubS64(r6, r6, Operand(1));
2640 __ bgt(&loop);
2641 __ bind(&done);
2642 }
2643
2644 // Push receiver.
2645 __ Push(r7);
2646 }
2647 __ bind(&no_bound_arguments);
2648 }
2649
2650 } // namespace
2651
2652 // static
2653 void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
2654 // ----------- S t a t e -------------
2655 // -- r2 : the number of arguments
2656 // -- r3 : the function to call (checked to be a JSBoundFunction)
2657 // -----------------------------------
2658 __ AssertBoundFunction(r3);
2659
2660 // Patch the receiver to [[BoundThis]].
2661 __ LoadAnyTaggedField(r5,
2662 FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
2663 __ StoreReceiver(r5, r2, r1);
2664
2665 // Push the [[BoundArguments]] onto the stack.
2666 Generate_PushBoundArguments(masm);
2667
2668 // Call the [[BoundTargetFunction]] via the Call builtin.
2669 __ LoadTaggedPointerField(
2670 r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2671 __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
2672 RelocInfo::CODE_TARGET);
2673 }
2674
2675 // static
2676 void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
2677 // ----------- S t a t e -------------
2678 // -- r2 : the number of arguments
2679 // -- r3 : the target to call (can be any Object).
2680 // -----------------------------------
2681 Register argc = r2;
2682 Register target = r3;
2683 Register map = r6;
2684 Register instance_type = r7;
2685 DCHECK(!AreAliased(argc, target, map, instance_type));
2686
2687 Label non_callable, class_constructor;
2688 __ JumpIfSmi(target, &non_callable);
2689 __ LoadMap(map, target);
2690 __ CompareInstanceTypeRange(map, instance_type,
2691 FIRST_CALLABLE_JS_FUNCTION_TYPE,
2692 LAST_CALLABLE_JS_FUNCTION_TYPE);
2693 __ Jump(masm->isolate()->builtins()->CallFunction(mode),
2694 RelocInfo::CODE_TARGET, le);
2695 __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2696 __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
2697 RelocInfo::CODE_TARGET, eq);
2698
2699 // Check if target has a [[Call]] internal method.
2700 {
2701 Register flags = r6;
2702 __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2703 map = no_reg;
2704 __ TestBit(flags, Map::Bits1::IsCallableBit::kShift);
2705 __ beq(&non_callable);
2706 }
2707
2708 // Check if target is a proxy and call CallProxy external builtin
2709 __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
2710 __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq);
2711
2712 // Check if target is a wrapped function and call CallWrappedFunction external
2713 // builtin
2714 __ CmpS64(instance_type, Operand(JS_WRAPPED_FUNCTION_TYPE));
2715 __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction),
2716 RelocInfo::CODE_TARGET, eq);
2717
2718 // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
2719 // Check that the function is not a "classConstructor".
2720 __ CmpS64(instance_type, Operand(JS_CLASS_CONSTRUCTOR_TYPE));
2721 __ beq(&class_constructor);
2722
2723 // 2. Call to something else, which might have a [[Call]] internal method (if
2724 // not we raise an exception).
2725 // Overwrite the original receiver with the (original) target.
2726 __ StoreReceiver(target, argc, r7);
2727 // Let the "call_as_function_delegate" take care of the rest.
2728 __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
2729 __ Jump(masm->isolate()->builtins()->CallFunction(
2730 ConvertReceiverMode::kNotNullOrUndefined),
2731 RelocInfo::CODE_TARGET);
2732
2733 // 3. Call to something that is not callable.
2734 __ bind(&non_callable);
2735 {
2736 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2737 __ Push(target);
2738 __ CallRuntime(Runtime::kThrowCalledNonCallable);
2739 __ Trap(); // Unreachable.
2740 }
2741
2742 // 4. The function is a "classConstructor", need to raise an exception.
2743 __ bind(&class_constructor);
2744 {
2745 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2746 __ Push(target);
2747 __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
2748 __ Trap(); // Unreachable.
2749 }
2750 }
2751
2752 // static
2753 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
2754 // ----------- S t a t e -------------
2755 // -- r2 : the number of arguments
2756 // -- r3 : the constructor to call (checked to be a JSFunction)
2757 // -- r5 : the new target (checked to be a constructor)
2758 // -----------------------------------
2759 __ AssertConstructor(r3, r1);
2760 __ AssertFunction(r3);
2761
2762 // The calling convention for function-specific ConstructStubs requires
2763 // r4 to contain either an AllocationSite or undefined.
2764 __ LoadRoot(r4, RootIndex::kUndefinedValue);
2765
2766 Label call_generic_stub;
2767
2768 // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
2769 __ LoadTaggedPointerField(
2770 r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
2771 __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
2772 __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
2773 __ beq(&call_generic_stub);
2774
2775 __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
2776 RelocInfo::CODE_TARGET);
2777
2778 __ bind(&call_generic_stub);
2779 __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
2780 RelocInfo::CODE_TARGET);
2781 }
2782
2783 // static
2784 void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
2785 // ----------- S t a t e -------------
2786 // -- r2 : the number of arguments
2787 // -- r3 : the function to call (checked to be a JSBoundFunction)
2788 // -- r5 : the new target (checked to be a constructor)
2789 // -----------------------------------
2790 __ AssertConstructor(r3, r1);
2791 __ AssertBoundFunction(r3);
2792
2793 // Push the [[BoundArguments]] onto the stack.
2794 Generate_PushBoundArguments(masm);
2795
2796 // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
2797 Label skip;
2798 __ CompareTagged(r3, r5);
2799 __ bne(&skip);
2800 __ LoadTaggedPointerField(
2801 r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2802 __ bind(&skip);
2803
2804 // Construct the [[BoundTargetFunction]] via the Construct builtin.
2805 __ LoadTaggedPointerField(
2806 r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
2807 __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
2808 }
2809
2810 // static
2811 void Builtins::Generate_Construct(MacroAssembler* masm) {
2812 // ----------- S t a t e -------------
2813 // -- r2 : the number of arguments
2814 // -- r3 : the constructor to call (can be any Object)
2815 // -- r5 : the new target (either the same as the constructor or
2816 // the JSFunction on which new was invoked initially)
2817 // -----------------------------------
2818 Register argc = r2;
2819 Register target = r3;
2820 Register map = r6;
2821 Register instance_type = r7;
2822 DCHECK(!AreAliased(argc, target, map, instance_type));
2823
2824 // Check if target is a Smi.
2825 Label non_constructor, non_proxy;
2826 __ JumpIfSmi(target, &non_constructor);
2827
2828 // Check if target has a [[Construct]] internal method.
2829 __ LoadTaggedPointerField(map,
2830 FieldMemOperand(target, HeapObject::kMapOffset));
2831 {
2832 Register flags = r4;
2833 DCHECK(!AreAliased(argc, target, map, instance_type, flags));
2834 __ LoadU8(flags, FieldMemOperand(map, Map::kBitFieldOffset));
2835 __ TestBit(flags, Map::Bits1::IsConstructorBit::kShift);
2836 __ beq(&non_constructor);
2837 }
2838
2839 // Dispatch based on instance type.
2840 __ CompareInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE,
2841 LAST_JS_FUNCTION_TYPE);
2842 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
2843 RelocInfo::CODE_TARGET, le);
2844
2845 // Only dispatch to bound functions after checking whether they are
2846 // constructors.
2847 __ CmpS64(instance_type, Operand(JS_BOUND_FUNCTION_TYPE));
2848 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
2849 RelocInfo::CODE_TARGET, eq);
2850
2851 // Only dispatch to proxies after checking whether they are constructors.
2852 __ CmpS64(instance_type, Operand(JS_PROXY_TYPE));
2853 __ bne(&non_proxy);
2854 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
2855 RelocInfo::CODE_TARGET);
2856
2857 // Called Construct on an exotic Object with a [[Construct]] internal method.
2858 __ bind(&non_proxy);
2859 {
2860 // Overwrite the original receiver with the (original) target.
2861 __ StoreReceiver(target, argc, r7);
2862 // Let the "call_as_constructor_delegate" take care of the rest.
2863 __ LoadNativeContextSlot(target,
2864 Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
2865 __ Jump(masm->isolate()->builtins()->CallFunction(),
2866 RelocInfo::CODE_TARGET);
2867 }
2868
2869 // Called Construct on an Object that doesn't have a [[Construct]] internal
2870 // method.
2871 __ bind(&non_constructor);
2872 __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
2873 RelocInfo::CODE_TARGET);
2874 }
2875
2876 #if V8_ENABLE_WEBASSEMBLY
2877 void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
2878 // The function index was put in a register by the jump table trampoline.
2879 // Convert to Smi for the runtime call.
2880 __ SmiTag(kWasmCompileLazyFuncIndexRegister);
2881
2882 {
2883 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2884 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
2885
2886 // Save all parameter registers (see wasm-linkage.h). They might be
2887 // overwritten in the runtime call below. We don't have any callee-saved
2888 // registers in wasm, so no need to store anything else.
2889 RegList gp_regs;
2890 for (Register gp_param_reg : wasm::kGpParamRegisters) {
2891 gp_regs.set(gp_param_reg);
2892 }
2893
2894 DoubleRegList fp_regs;
2895 for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
2896 fp_regs.set(fp_param_reg);
2897 }
2898
2899 CHECK_EQ(gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
2900 CHECK_EQ(fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
2901 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
2902 gp_regs.Count());
2903 CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
2904 fp_regs.Count());
2905
2906 __ MultiPush(gp_regs);
2907 __ MultiPushF64OrV128(fp_regs, ip);
2908
2909 // Push the Wasm instance for loading the jump table address after the
2910 // runtime call.
2911 __ Push(kWasmInstanceRegister);
2912
2913 // Push the Wasm instance again as an explicit argument to the runtime
2914 // function.
2915 __ Push(kWasmInstanceRegister);
2916 // Push the function index as second argument.
2917 __ Push(kWasmCompileLazyFuncIndexRegister);
2918 // Initialize the JavaScript context with 0. CEntry will use it to
2919 // set the current context on the isolate.
2920 __ LoadSmiLiteral(cp, Smi::zero());
2921 __ CallRuntime(Runtime::kWasmCompileLazy, 2);
2922 // The runtime function returns the jump table slot offset as a Smi. Use
2923 // that to compute the jump target in ip.
2924 __ Pop(kWasmInstanceRegister);
2925 __ LoadU64(ip, MemOperand(kWasmInstanceRegister,
2926 WasmInstanceObject::kJumpTableStartOffset -
2927 kHeapObjectTag));
2928 __ SmiUntag(kReturnRegister0);
2929 __ AddS64(ip, ip, kReturnRegister0);
2930 // ip now holds the jump table slot where we want to jump to in the end.
2931
2932 // Restore registers.
2933 __ MultiPopF64OrV128(fp_regs, ip);
2934 __ MultiPop(gp_regs);
2935 }
2936
2937 // Finally, jump to the jump table slot for the function.
2938 __ Jump(ip);
2939 }
2940
2941 void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
2942 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
2943 {
2944 FrameAndConstantPoolScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
2945
2946 // Save all parameter registers. They might hold live values, we restore
2947 // them after the runtime call.
2948 __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
2949 __ MultiPushF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs, ip);
2950
2951 // Initialize the JavaScript context with 0. CEntry will use it to
2952 // set the current context on the isolate.
2953 __ LoadSmiLiteral(cp, Smi::zero());
2954 __ CallRuntime(Runtime::kWasmDebugBreak, 0);
2955
2956 // Restore registers.
2957 __ MultiPopF64OrV128(WasmDebugBreakFrameConstants::kPushedFpRegs, ip);
2958 __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
2959 }
2960 __ Ret();
2961 }
2962
2963 void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
2964 // TODO(v8:10701): Implement for this platform.
2965 __ Trap();
2966 }
2967
2968 void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) {
2969 // TODO(v8:12191): Implement for this platform.
2970 __ Trap();
2971 }
2972
2973 void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
2974 // TODO(v8:12191): Implement for this platform.
2975 __ Trap();
2976 }
2977
2978 void Builtins::Generate_WasmResume(MacroAssembler* masm) {
2979 // TODO(v8:12191): Implement for this platform.
2980 __ Trap();
2981 }
2982
2983 void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
2984 // Only needed on x64.
2985 __ Trap();
2986 }
2987 #endif // V8_ENABLE_WEBASSEMBLY
2988
2989 void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
2990 SaveFPRegsMode save_doubles, ArgvMode argv_mode,
2991 bool builtin_exit_frame) {
2992 // Called from JavaScript; parameters are on stack as if calling JS function.
2993 // r2: number of arguments including receiver
2994 // r3: pointer to builtin function
2995 // fp: frame pointer (restored after C call)
2996 // sp: stack pointer (restored as callee's sp after C call)
2997 // cp: current context (C callee-saved)
2998 //
2999 // If argv_mode == ArgvMode::kRegister:
3000 // r4: pointer to the first argument
3001
3002 __ mov(r7, r3);
3003
3004 if (argv_mode == ArgvMode::kRegister) {
3005 // Move argv into the correct register.
3006 __ mov(r3, r4);
3007 } else {
3008 // Compute the argv pointer.
3009 __ ShiftLeftU64(r3, r2, Operand(kSystemPointerSizeLog2));
3010 __ lay(r3, MemOperand(r3, sp, -kSystemPointerSize));
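// r3 now holds sp + argc * kSystemPointerSize - kSystemPointerSize, the
// highest-addressed slot of the argument area on the stack.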
3011 }
3012
3013 // Enter the exit frame that transitions from JavaScript to C++.
3014 FrameScope scope(masm, StackFrame::MANUAL);
3015
3016 // Need at least one extra slot for return address location.
3017 int arg_stack_space = 1;
3018
3019 // Pass a buffer for the return value on the stack if necessary.
3020 bool needs_return_buffer =
3021 result_size == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS;
3022 if (needs_return_buffer) {
3023 arg_stack_space += result_size;
3024 }
3025
3026 #if V8_TARGET_ARCH_S390X
3027 // 64-bit Linux passes the Argument object by reference, not by value.
3028 arg_stack_space += 2;
3029 #endif
3030
3031 __ EnterExitFrame(
3032 save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
3033 builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
3034
3035 // Store a copy of argc, argv in callee-saved registers for later.
3036 __ mov(r6, r2);
3037 __ mov(r8, r3);
3038 // r2, r6: number of arguments including receiver (C callee-saved)
3039 // r3, r8: pointer to the first argument
3040 // r7: pointer to builtin function (C callee-saved)
3041
3042 // Result returned in registers or stack, depending on result size and ABI.
3043
3044 Register isolate_reg = r4;
3045 if (needs_return_buffer) {
3046 // The return value is a 16-byte non-scalar value.
3047 // Use frame storage reserved by the calling function to pass the return
3048 // buffer as an implicit first argument in R2. Shift the original
3049 // parameters by one register each.
3050 __ mov(r4, r3);
3051 __ mov(r3, r2);
3052 __ la(r2,
3053 MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
3054 isolate_reg = r5;
3055 // Clang doesn't preserve r2 (the result buffer);
3056 // write it to r8 (preserved) before the call.
3057 __ mov(r8, r2);
3058 }
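// When a return buffer is used, the callee writes its two-word result into
// the buffer addressed by r2 (saved in r8); the words are reloaded into
// r2/r3 after the call below.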
3059 // Call C built-in.
3060 __ Move(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
3061
3062 __ StoreReturnAddressAndCall(r7);
3063
3064 // If return value is on the stack, pop it to registers.
3065 if (needs_return_buffer) {
3066 __ mov(r2, r8);
3067 __ LoadU64(r3, MemOperand(r2, kSystemPointerSize));
3068 __ LoadU64(r2, MemOperand(r2));
3069 }
3070
3071 // Check result for exception sentinel.
3072 Label exception_returned;
3073 __ CompareRoot(r2, RootIndex::kException);
3074 __ beq(&exception_returned, Label::kNear);
3075
3076 // Check that there is no pending exception, otherwise we
3077 // should have returned the exception sentinel.
3078 if (FLAG_debug_code) {
3079 Label okay;
3080 ExternalReference pending_exception_address = ExternalReference::Create(
3081 IsolateAddressId::kPendingExceptionAddress, masm->isolate());
3082 __ Move(r1, pending_exception_address);
3083 __ LoadU64(r1, MemOperand(r1));
3084 __ CompareRoot(r1, RootIndex::kTheHoleValue);
3085 // Cannot use Check here, as it attempts to generate a call into the runtime.
3086 __ beq(&okay, Label::kNear);
3087 __ stop();
3088 __ bind(&okay);
3089 }
3090
3091 // Exit C frame and return.
3092 // r2:r3: result
3093 // sp: stack pointer
3094 // fp: frame pointer
3095 Register argc = argv_mode == ArgvMode::kRegister
3096 // We don't want to pop arguments so set argc to no_reg.
3097 ? no_reg
3098 // r6: still holds argc (callee-saved).
3099 : r6;
3100 __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
3101 __ b(r14);
3102
3103 // Handling of exception.
3104 __ bind(&exception_returned);
3105
3106 ExternalReference pending_handler_context_address = ExternalReference::Create(
3107 IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
3108 ExternalReference pending_handler_entrypoint_address =
3109 ExternalReference::Create(
3110 IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
3111 ExternalReference pending_handler_fp_address = ExternalReference::Create(
3112 IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
3113 ExternalReference pending_handler_sp_address = ExternalReference::Create(
3114 IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
3115
3116 // Ask the runtime for help to determine the handler. This will set r3 to
3117 // contain the current pending exception; don't clobber it.
3118 ExternalReference find_handler =
3119 ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
3120 {
3121 FrameScope scope(masm, StackFrame::MANUAL);
3122 __ PrepareCallCFunction(3, 0, r2);
3123 __ mov(r2, Operand::Zero());
3124 __ mov(r3, Operand::Zero());
3125 __ Move(r4, ExternalReference::isolate_address(masm->isolate()));
3126 __ CallCFunction(find_handler, 3);
3127 }
3128
3129 // Retrieve the handler context, SP and FP.
3130 __ Move(cp, pending_handler_context_address);
3131 __ LoadU64(cp, MemOperand(cp));
3132 __ Move(sp, pending_handler_sp_address);
3133 __ LoadU64(sp, MemOperand(sp));
3134 __ Move(fp, pending_handler_fp_address);
3135 __ LoadU64(fp, MemOperand(fp));
3136
3137 // If the handler is a JS frame, restore the context to the frame. Note that
3138 // the context will be set to (cp == 0) for non-JS frames.
3139 Label skip;
3140 __ CmpS64(cp, Operand::Zero());
3141 __ beq(&skip, Label::kNear);
3142 __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3143 __ bind(&skip);
3144
3145 // Clear c_entry_fp, like we do in `LeaveExitFrame`.
3146 {
3147 UseScratchRegisterScope temps(masm);
3148 __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
3149 masm->isolate()));
3150 __ mov(r0, Operand::Zero());
3151 __ StoreU64(r0, MemOperand(r1));
3152 }
3153
3154 // Compute the handler entry address and jump to it.
3155 __ Move(r3, pending_handler_entrypoint_address);
3156 __ LoadU64(r3, MemOperand(r3));
3157 __ Jump(r3);
3158 }
3159
3160 void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
3161 Label out_of_range, only_low, negate, done, fastpath_done;
3162 Register result_reg = r2;
3163
3164 HardAbortScope hard_abort(masm); // Avoid calls to Abort.
3165
3166 // Immediate values for this stub fit in instructions, so it's safe to use ip.
3167 Register scratch = GetRegisterThatIsNotOneOf(result_reg);
3168 Register scratch_low = GetRegisterThatIsNotOneOf(result_reg, scratch);
3169 Register scratch_high =
3170 GetRegisterThatIsNotOneOf(result_reg, scratch, scratch_low);
3171 DoubleRegister double_scratch = kScratchDoubleReg;
3172
3173 __ Push(result_reg, scratch);
3174 // Account for saved regs.
3175 int argument_offset = 2 * kSystemPointerSize;
3176
3177 // Load double input.
3178 __ LoadF64(double_scratch, MemOperand(sp, argument_offset));
3179
3180 // Do fast-path convert from double to int.
3181 __ ConvertDoubleToInt64(result_reg, double_scratch);
3182
3183 // Test for overflow
3184 __ TestIfInt32(result_reg);
3185 __ beq(&fastpath_done, Label::kNear);
3186
3187 __ Push(scratch_high, scratch_low);
3188 // Account for saved regs.
3189 argument_offset += 2 * kSystemPointerSize;
3190
3191 __ LoadU32(scratch_high,
3192 MemOperand(sp, argument_offset + Register::kExponentOffset));
3193 __ LoadU32(scratch_low,
3194 MemOperand(sp, argument_offset + Register::kMantissaOffset));
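// scratch_high now holds the word with the sign, the 11-bit exponent and the
// top 20 mantissa bits of the IEEE-754 double; scratch_low holds the low 32
// mantissa bits.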
3195
3196 __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
3197 // Load scratch with exponent - 1. This is faster than loading
3198 // with exponent because Bias + 1 = 1024, which is an *S390* immediate value.
3199 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
3200 __ SubS64(scratch, Operand(HeapNumber::kExponentBias + 1));
3201 // If the exponent is greater than or equal to 84, the 32 least significant
3202 // bits are 0s (the 52-bit mantissa scaled by 2^(exponent - 52) >= 2^32
3203 // leaves the low 32 bits clear), so the result is 0.
3204 // Compare exponent with 84 (compare exponent - 1 with 83).
3205 __ CmpS64(scratch, Operand(83));
3206 __ bge(&out_of_range, Label::kNear);
3207
3208 // If we reach this code, 31 <= exponent <= 83.
3209 // So, we don't have to handle cases where 0 <= exponent <= 20 for
3210 // which we would need to shift right the high part of the mantissa.
3211 // Scratch contains exponent - 1.
3212 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
3213 __ mov(r0, Operand(51));
3214 __ SubS64(scratch, r0, scratch);
3215 __ CmpS64(scratch, Operand::Zero());
3216 __ ble(&only_low, Label::kNear);
3217 // 21 <= exponent <= 51, shift scratch_low and scratch_high
3218 // to generate the result.
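// The combined result is
//   ((mantissa_high | implicit 1) << (exponent - 20)) |
//   (mantissa_low >> (52 - exponent)),
// computed with the shift amounts derived below.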
3219 __ ShiftRightU32(scratch_low, scratch_low, scratch);
3220 // Scratch contains: 52 - exponent.
3221 // We need: exponent - 20.
3222 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
3223 __ mov(r0, Operand(32));
3224 __ SubS64(scratch, r0, scratch);
3225 __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
3226 // Set the implicit 1 above the extracted mantissa bits in result_reg.
3227 STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
3228 __ mov(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16)));
3229 __ ShiftLeftU64(r0, r0, Operand(16));
3230 __ OrP(result_reg, result_reg, r0);
3231 __ ShiftLeftU32(r0, result_reg, scratch);
3232 __ OrP(result_reg, scratch_low, r0);
3233 __ b(&negate, Label::kNear);
3234
3235 __ bind(&out_of_range);
3236 __ mov(result_reg, Operand::Zero());
3237 __ b(&done, Label::kNear);
3238
3239 __ bind(&only_low);
3240 // 52 <= exponent <= 83, shift only scratch_low.
3241 // On entry, scratch contains: 52 - exponent.
3242 __ lcgr(scratch, scratch);
3243 __ ShiftLeftU32(result_reg, scratch_low, scratch);
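// result = mantissa_low << (exponent - 52), i.e. the low 32 bits of the
// integer value modulo 2^32.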
3244
3245 __ bind(&negate);
3246 // If input was positive, scratch_high ASR 31 equals 0 and
3247 // scratch_high LSR 31 equals zero.
3248 // New result = (result eor 0) + 0 = result.
3249 // If the input was negative, we have to negate the result.
3250 // scratch_high ASR 31 equals 0xFFFFFFFF and scratch_high LSR 31 equals 1.
3251 // New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
3252 __ ShiftRightS32(r0, scratch_high, Operand(31));
3253 #if V8_TARGET_ARCH_S390X
3254 __ lgfr(r0, r0);
3255 __ ShiftRightU64(r0, r0, Operand(32));
3256 #endif
3257 __ XorP(result_reg, r0);
3258 __ ShiftRightU32(r0, scratch_high, Operand(31));
3259 __ AddS64(result_reg, r0);
3260
3261 __ bind(&done);
3262 __ Pop(scratch_high, scratch_low);
3263 argument_offset -= 2 * kSystemPointerSize;
3264
3265 __ bind(&fastpath_done);
3266 __ StoreU64(result_reg, MemOperand(sp, argument_offset));
3267 __ Pop(result_reg, scratch);
3268
3269 __ Ret();
3270 }
3271
3272 namespace {
3273
3274 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3275 return ref0.address() - ref1.address();
3276 }
3277
3278 // Calls an API function. Allocates HandleScope, extracts returned value
3279 // from handle and propagates exceptions. Restores context. stack_space
3280 // - space to be unwound on exit (includes the JS call arguments space and
3281 // the additional space allocated for the fast call).
3282 static void CallApiFunctionAndReturn(MacroAssembler* masm,
3283 Register function_address,
3284 ExternalReference thunk_ref,
3285 int stack_space,
3286 MemOperand* stack_space_operand,
3287 MemOperand return_value_operand) {
3288 Isolate* isolate = masm->isolate();
3289 ExternalReference next_address =
3290 ExternalReference::handle_scope_next_address(isolate);
3291 const int kNextOffset = 0;
3292 const int kLimitOffset = AddressOffset(
3293 ExternalReference::handle_scope_limit_address(isolate), next_address);
3294 const int kLevelOffset = AddressOffset(
3295 ExternalReference::handle_scope_level_address(isolate), next_address);
3296
3297 // Additional parameter is the address of the actual callback.
3298 DCHECK(function_address == r3 || function_address == r4);
3299 Register scratch = r5;
3300
3301 __ Move(scratch, ExternalReference::is_profiling_address(isolate));
3302 __ LoadU8(scratch, MemOperand(scratch, 0));
3303 __ CmpS64(scratch, Operand::Zero());
3304
3305 Label profiler_enabled, end_profiler_check;
3306 __ bne(&profiler_enabled, Label::kNear);
3307 __ Move(scratch, ExternalReference::address_of_runtime_stats_flag());
3308 __ LoadU32(scratch, MemOperand(scratch, 0));
3309 __ CmpS64(scratch, Operand::Zero());
3310 __ bne(&profiler_enabled, Label::kNear);
3311 {
3312 // Call the api function directly.
3313 __ mov(scratch, function_address);
3314 __ b(&end_profiler_check, Label::kNear);
3315 }
3316 __ bind(&profiler_enabled);
3317 {
3318 // Additional parameter is the address of the actual callback.
3319 __ Move(scratch, thunk_ref);
3320 }
3321 __ bind(&end_profiler_check);
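// scratch now holds the call target: the API function itself when neither
// the profiler nor runtime call stats are active, otherwise the thunk that
// wraps the callback for profiling.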
3322
3323 // Allocate HandleScope in callee-save registers.
3324 // r9 - next_address
3325 // r6 - next_address->kNextOffset
3326 // r7 - next_address->kLimitOffset
3327 // r8 - next_address->kLevelOffset
3328 __ Move(r9, next_address);
3329 __ LoadU64(r6, MemOperand(r9, kNextOffset));
3330 __ LoadU64(r7, MemOperand(r9, kLimitOffset));
3331 __ LoadU32(r8, MemOperand(r9, kLevelOffset));
3332 __ AddS64(r8, Operand(1));
3333 __ StoreU32(r8, MemOperand(r9, kLevelOffset));
3334
3335 __ StoreReturnAddressAndCall(scratch);
3336
3337 Label promote_scheduled_exception;
3338 Label delete_allocated_handles;
3339 Label leave_exit_frame;
3340 Label return_value_loaded;
3341
3342 // load value from ReturnValue
3343 __ LoadU64(r2, return_value_operand);
3344 __ bind(&return_value_loaded);
3345 // No more valid handles (the result handle was the last one). Restore
3346 // previous handle scope.
3347 __ StoreU64(r6, MemOperand(r9, kNextOffset));
3348 if (FLAG_debug_code) {
3349 __ LoadU32(r3, MemOperand(r9, kLevelOffset));
3350 __ CmpS64(r3, r8);
3351 __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
3352 }
3353 __ SubS64(r8, Operand(1));
3354 __ StoreU32(r8, MemOperand(r9, kLevelOffset));
3355 __ CmpS64(r7, MemOperand(r9, kLimitOffset));
3356 __ bne(&delete_allocated_handles, Label::kNear);
3357
3358 // Leave the API exit frame.
3359 __ bind(&leave_exit_frame);
3360 // LeaveExitFrame expects unwind space to be in a register.
3361 if (stack_space_operand == nullptr) {
3362 DCHECK_NE(stack_space, 0);
3363 __ mov(r6, Operand(stack_space));
3364 } else {
3365 DCHECK_EQ(stack_space, 0);
3366 __ LoadU64(r6, *stack_space_operand);
3367 }
3368 __ LeaveExitFrame(false, r6, stack_space_operand != nullptr);
3369
3370 // Check if the function scheduled an exception.
3371 __ Move(r7, ExternalReference::scheduled_exception_address(isolate));
3372 __ LoadU64(r7, MemOperand(r7));
3373 __ CompareRoot(r7, RootIndex::kTheHoleValue);
3374 __ bne(&promote_scheduled_exception, Label::kNear);
3375
3376 __ b(r14);
3377
3378 // Re-throw by promoting a scheduled exception.
3379 __ bind(&promote_scheduled_exception);
3380 __ TailCallRuntime(Runtime::kPromoteScheduledException);
3381
3382 // HandleScope limit has changed. Delete allocated extensions.
3383 __ bind(&delete_allocated_handles);
3384 __ StoreU64(r7, MemOperand(r9, kLimitOffset));
3385 __ mov(r6, r2);
3386 __ PrepareCallCFunction(1, r7);
3387 __ Move(r2, ExternalReference::isolate_address(isolate));
3388 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
3389 __ mov(r2, r6);
3390 __ b(&leave_exit_frame, Label::kNear);
3391 }
3392
3393 } // namespace
3394
3395 void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
3396 // ----------- S t a t e -------------
3397 // -- cp : context
3398 // -- r3 : api function address
3399 // -- r4 : arguments count (not including the receiver)
3400 // -- r5 : call data
3401 // -- r2 : holder
3402 // -- sp[0] : receiver
3403 // -- sp[8] : first argument
3404 // -- ...
3405 // -- sp[(argc) * 8] : last argument
3406 // -----------------------------------
3407
3408 Register api_function_address = r3;
3409 Register argc = r4;
3410 Register call_data = r5;
3411 Register holder = r2;
3412 Register scratch = r6;
3413 DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch));
3414
3415 using FCA = FunctionCallbackArguments;
3416
3417 STATIC_ASSERT(FCA::kArgsLength == 6);
3418 STATIC_ASSERT(FCA::kNewTargetIndex == 5);
3419 STATIC_ASSERT(FCA::kDataIndex == 4);
3420 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3421 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3422 STATIC_ASSERT(FCA::kIsolateIndex == 1);
3423 STATIC_ASSERT(FCA::kHolderIndex == 0);
3424
3425 // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
3426 //
3427 // Target state:
3428 // sp[0 * kSystemPointerSize]: kHolder
3429 // sp[1 * kSystemPointerSize]: kIsolate
3430 // sp[2 * kSystemPointerSize]: undefined (kReturnValueDefaultValue)
3431 // sp[3 * kSystemPointerSize]: undefined (kReturnValue)
3432 // sp[4 * kSystemPointerSize]: kData
3433 // sp[5 * kSystemPointerSize]: undefined (kNewTarget)
3434
3435 // Reserve space on the stack.
3436 __ lay(sp, MemOperand(sp, -(FCA::kArgsLength * kSystemPointerSize)));
3437
3438 // kHolder.
3439 __ StoreU64(holder, MemOperand(sp, 0 * kSystemPointerSize));
3440
3441 // kIsolate.
3442 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3443 __ StoreU64(scratch, MemOperand(sp, 1 * kSystemPointerSize));
3444
3445 // kReturnValueDefaultValue and kReturnValue.
3446 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3447 __ StoreU64(scratch, MemOperand(sp, 2 * kSystemPointerSize));
3448 __ StoreU64(scratch, MemOperand(sp, 3 * kSystemPointerSize));
3449
3450 // kData.
3451 __ StoreU64(call_data, MemOperand(sp, 4 * kSystemPointerSize));
3452
3453 // kNewTarget.
3454 __ StoreU64(scratch, MemOperand(sp, 5 * kSystemPointerSize));
3455
3456 // Keep a pointer to kHolder (= implicit_args) in a scratch register.
3457 // We use it below to set up the FunctionCallbackInfo object.
3458 __ mov(scratch, sp);
3459
3460 // Allocate the v8::FunctionCallbackInfo structure in the arguments' space,
3461 // since it's not controlled by the GC.
3462 // S390 LINUX ABI:
3463 //
3464 // Create 4 extra slots on stack:
3465 // [0] space for DirectCEntryStub's LR save
3466 // [1-3] FunctionCallbackInfo
3467 // [4] number of bytes to drop from the stack after returning
3468 static constexpr int kApiStackSpace = 5;
3469 static constexpr bool kDontSaveDoubles = false;
3470
3471 FrameScope frame_scope(masm, StackFrame::MANUAL);
3472 __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
3473
3474 // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
3475 // Arguments are after the return address (pushed by EnterExitFrame()).
3476 __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 1) *
3477 kSystemPointerSize));
3478
3479 // FunctionCallbackInfo::values_ (points at the first varargs argument passed
3480 // on the stack).
3481 __ AddS64(scratch, scratch,
3482 Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
3483 __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 2) *
3484 kSystemPointerSize));
3485
3486 // FunctionCallbackInfo::length_.
3487 __ StoreU32(argc, MemOperand(sp, (kStackFrameExtraParamSlot + 3) *
3488 kSystemPointerSize));
3489
3490 // We also store the number of bytes to drop from the stack after returning
3491 // from the API function here.
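// bytes_to_drop = (FCA::kArgsLength + 1 /* receiver */ + argc) *
// kSystemPointerSize, covering the implicit_args block, the receiver and the
// JS arguments.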
3492 __ mov(scratch,
3493 Operand((FCA::kArgsLength + 1 /* receiver */) * kSystemPointerSize));
3494 __ ShiftLeftU64(r1, argc, Operand(kSystemPointerSizeLog2));
3495 __ AddS64(scratch, r1);
3496 __ StoreU64(scratch, MemOperand(sp, (kStackFrameExtraParamSlot + 4) *
3497 kSystemPointerSize));
3498
3499 // v8::InvocationCallback's argument.
3500 __ lay(r2,
3501 MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
3502
3503 ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
3504
3505 // There are two stack slots above the arguments we constructed on the stack.
3506 // TODO(jgruber): Document what these arguments are.
3507 static constexpr int kStackSlotsAboveFCA = 2;
3508 MemOperand return_value_operand(
3509 fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kSystemPointerSize);
3510
3511 static constexpr int kUseStackSpaceOperand = 0;
3512 MemOperand stack_space_operand(
3513 sp, (kStackFrameExtraParamSlot + 4) * kSystemPointerSize);
3514
3515 AllowExternalCallThatCantCauseGC scope(masm);
3516 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3517 kUseStackSpaceOperand, &stack_space_operand,
3518 return_value_operand);
3519 }
3520
3521 void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
3522 int arg0Slot = 0;
3523 int accessorInfoSlot = 0;
3524 int apiStackSpace = 0;
3525 // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3526 // name below the exit frame to make GC aware of them.
3527 STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3528 STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3529 STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3530 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3531 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3532 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3533 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3534 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3535
3536 Register receiver = ApiGetterDescriptor::ReceiverRegister();
3537 Register holder = ApiGetterDescriptor::HolderRegister();
3538 Register callback = ApiGetterDescriptor::CallbackRegister();
3539 Register scratch = r6;
3540 DCHECK(!AreAliased(receiver, holder, callback, scratch));
3541
3542 Register api_function_address = r4;
3543
3544 __ push(receiver);
3545 // Push data from AccessorInfo.
3546 __ LoadAnyTaggedField(
3547 scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
3548 __ push(scratch);
3549 __ LoadRoot(scratch, RootIndex::kUndefinedValue);
3550 __ Push(scratch, scratch);
3551 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
3552 __ Push(scratch, holder);
3553 __ Push(Smi::zero()); // should_throw_on_error -> false
3554 __ LoadTaggedPointerField(
3555 scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
3556 __ push(scratch);
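// From sp upward the stack now holds: the name handle, then the
// PropertyCallbackArguments array in index order (kShouldThrowOnError,
// kHolder, kIsolate, kReturnValueDefaultValue, kReturnValue, kData,
// kThis = receiver).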
3557
3558 // v8::PropertyCallbackInfo::args_ array and name handle.
3559 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3560
3561 // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3562 __ mov(r2, sp); // r2 = Handle<Name>
3563 __ AddS64(r3, r2, Operand(1 * kSystemPointerSize)); // r3 = v8::PCI::args_
3564
3565 // If ABI passes Handles (pointer-sized struct) in a register:
3566 //
3567 // Create 2 extra slots on stack:
3568 // [0] space for DirectCEntryStub's LR save
3569 // [1] AccessorInfo&
3570 //
3571 // Otherwise:
3572 //
3573 // Create 3 extra slots on stack:
3574 // [0] space for DirectCEntryStub's LR save
3575 // [1] copy of Handle (first arg)
3576 // [2] AccessorInfo&
3577 if (ABI_PASSES_HANDLES_IN_REGS) {
3578 accessorInfoSlot = kStackFrameExtraParamSlot + 1;
3579 apiStackSpace = 2;
3580 } else {
3581 arg0Slot = kStackFrameExtraParamSlot + 1;
3582 accessorInfoSlot = arg0Slot + 1;
3583 apiStackSpace = 3;
3584 }
3585
3586 FrameScope frame_scope(masm, StackFrame::MANUAL);
3587 __ EnterExitFrame(false, apiStackSpace);
3588
3589 if (!ABI_PASSES_HANDLES_IN_REGS) {
3590 // pass 1st arg by reference
3591 __ StoreU64(r2, MemOperand(sp, arg0Slot * kSystemPointerSize));
3592 __ AddS64(r2, sp, Operand(arg0Slot * kSystemPointerSize));
3593 }
3594
3595 // Create the v8::PropertyCallbackInfo object on the stack and initialize
3596 // its args_ field.
3597 __ StoreU64(r3, MemOperand(sp, accessorInfoSlot * kSystemPointerSize));
3598 __ AddS64(r3, sp, Operand(accessorInfoSlot * kSystemPointerSize));
3599 // r3 = v8::PropertyCallbackInfo&
3600
3601 ExternalReference thunk_ref =
3602 ExternalReference::invoke_accessor_getter_callback();
3603
3604 __ LoadTaggedPointerField(
3605 scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3606 __ LoadU64(api_function_address,
3607 FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3608
3609 // +3 is to skip prolog, return address and name handle.
3610 MemOperand return_value_operand(
3611 fp,
3612 (PropertyCallbackArguments::kReturnValueOffset + 3) * kSystemPointerSize);
3613 MemOperand* const kUseStackSpaceConstant = nullptr;
3614 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3615 kStackUnwindSpace, kUseStackSpaceConstant,
3616 return_value_operand);
3617 }
3618
3619 void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
3620 // Unused.
3621 __ stop();
3622 }
3623
3624 namespace {
3625
3626 // This code tries to be close to ia32 code so that any changes can be
3627 // easily ported.
3628 void Generate_DeoptimizationEntry(MacroAssembler* masm,
3629 DeoptimizeKind deopt_kind) {
3630 Isolate* isolate = masm->isolate();
3631
3632 // Save all the registers onto the stack
3633 const int kNumberOfRegisters = Register::kNumRegisters;
3634
3635 RegList restored_regs = kJSCallerSaved | kCalleeSaved;
3636
3637 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
3638
3639 // Save all double registers before messing with them.
3640 __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
3641 const RegisterConfiguration* config = RegisterConfiguration::Default();
3642 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3643 int code = config->GetAllocatableDoubleCode(i);
3644 const DoubleRegister dreg = DoubleRegister::from_code(code);
3645 int offset = code * kDoubleSize;
3646 __ StoreF64(dreg, MemOperand(sp, offset));
3647 }
3648
3649 // Push all GPRs onto the stack
3650 __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kSystemPointerSize));
3651 __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
3652
3653 __ Move(r1, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
3654 isolate));
3655 __ StoreU64(fp, MemOperand(r1));
3656
3657 static constexpr int kSavedRegistersAreaSize =
3658 (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
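// At this point the stack holds, from sp upward: all 16 general-purpose
// registers, then the saved double registers, then the caller's frame;
// kSavedRegistersAreaSize covers both saved-register blocks.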
3659
3660 // Cleanse the return address for 31-bit mode.
3661 __ CleanseP(r14);
3662 // Get the address of the location in the code object (r5)(return
3663 // address for lazy deoptimization) and compute the fp-to-sp delta in
3664 // register r6.
3665 __ mov(r4, r14);
3666 __ la(r5, MemOperand(sp, kSavedRegistersAreaSize));
3667 __ SubS64(r5, fp, r5);
3668
3669 // Allocate a new deoptimizer object.
3670 // Pass the arguments in registers r2 to r6.
3671 __ PrepareCallCFunction(5, r7);
3672 __ mov(r2, Operand::Zero());
3673 Label context_check;
3674 __ LoadU64(r3,
3675 MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
3676 __ JumpIfSmi(r3, &context_check);
3677 __ LoadU64(r2, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
3678 __ bind(&context_check);
3679 __ mov(r3, Operand(static_cast<int>(deopt_kind)));
3680 // r4: code address or 0 already loaded.
3681 // r5: Fp-to-sp delta already loaded.
3682 // The isolate is placed in r6 and also stored in the stack parameter slot.
3683 __ Move(r6, ExternalReference::isolate_address(isolate));
3684 __ StoreU64(r6,
3685 MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
3686
3687 // Call Deoptimizer::New().
3688 {
3689 AllowExternalCallThatCantCauseGC scope(masm);
3690 __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
3691 }
3692
3693 // Preserve "deoptimizer" object in register r2 and get the input
3694 // frame descriptor pointer to r3 (deoptimizer->input_);
3695 __ LoadU64(r3, MemOperand(r2, Deoptimizer::input_offset()));
3696
3697 // Copy core registers into FrameDescription::registers_[kNumRegisters].
3698 // TODO(john.yan): optimize the following code by using the mvc instruction:
3699 // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
3700 // MemOperand(sp), kNumberOfRegisters * kSystemPointerSize);
3703 DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
3704 for (int i = 0; i < kNumberOfRegisters; i++) {
3705 int offset =
3706 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3707 __ LoadU64(r4, MemOperand(sp, i * kSystemPointerSize));
3708 __ StoreU64(r4, MemOperand(r3, offset));
3709 }
3710
3711 int double_regs_offset = FrameDescription::double_registers_offset();
3712 // Copy double registers to
3713 // double_registers_[DoubleRegister::kNumRegisters]
3714 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3715 int code = config->GetAllocatableDoubleCode(i);
3716 int dst_offset = code * kDoubleSize + double_regs_offset;
3717 int src_offset =
3718 code * kDoubleSize + kNumberOfRegisters * kSystemPointerSize;
3719 // TODO(joransiu): MVC opportunity
3720 __ LoadF64(d0, MemOperand(sp, src_offset));
3721 __ StoreF64(d0, MemOperand(r3, dst_offset));
3722 }
3723
3724 // Mark the stack as not iterable for the CPU profiler which won't be able to
3725 // walk the stack without the return address.
3726 {
3727 UseScratchRegisterScope temps(masm);
3728 Register is_iterable = temps.Acquire();
3729 Register zero = r6;
3730 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3731 __ lhi(zero, Operand(0));
3732 __ StoreU8(zero, MemOperand(is_iterable));
3733 }
3734
3735 // Remove the saved registers from the stack.
3736 __ la(sp, MemOperand(sp, kSavedRegistersAreaSize));
3737
3738 // Compute a pointer to the unwinding limit in register r4; that is
3739 // the first stack slot not part of the input frame.
3740 __ LoadU64(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
3741 __ AddS64(r4, sp);
3742
3743 // Unwind the stack down to - but not including - the unwinding
3744 // limit and copy the contents of the activation frame to the input
3745 // frame description.
3746 __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
3747 Label pop_loop;
3748 Label pop_loop_header;
3749 __ b(&pop_loop_header, Label::kNear);
3750 __ bind(&pop_loop);
3751 __ pop(r6);
3752 __ StoreU64(r6, MemOperand(r5, 0));
3753 __ la(r5, MemOperand(r5, kSystemPointerSize));
3754 __ bind(&pop_loop_header);
3755 __ CmpS64(r4, sp);
3756 __ bne(&pop_loop);
3757
3758 // Compute the output frame in the deoptimizer.
3759 __ push(r2); // Preserve deoptimizer object across call.
3760 // r2: deoptimizer object; r3: scratch.
3761 __ PrepareCallCFunction(1, r3);
3762 // Call Deoptimizer::ComputeOutputFrames().
3763 {
3764 AllowExternalCallThatCantCauseGC scope(masm);
3765 __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
3766 }
3767 __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
3768
3769 __ LoadU64(sp, MemOperand(r2, Deoptimizer::caller_frame_top_offset()));
3770
3771 // Replace the current (input) frame with the output frames.
3772 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
3773 // Outer loop state: r6 = current "FrameDescription** output_",
3774 // r3 = one past the last FrameDescription**.
3775 __ LoadU32(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
3776 __ LoadU64(r6,
3777 MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
3778 __ ShiftLeftU64(r3, r3, Operand(kSystemPointerSizeLog2));
3779 __ AddS64(r3, r6, r3);
3780 __ b(&outer_loop_header, Label::kNear);
3781
3782 __ bind(&outer_push_loop);
3783 // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
3784 __ LoadU64(r4, MemOperand(r6, 0)); // output_[ix]
3785 __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
3786 __ b(&inner_loop_header, Label::kNear);
3787
3788 __ bind(&inner_push_loop);
3789 __ SubS64(r5, Operand(sizeof(intptr_t)));
3790 __ AddS64(r8, r4, r5);
3791 __ LoadU64(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
3792 __ push(r8);
3793
3794 __ bind(&inner_loop_header);
3795 __ CmpS64(r5, Operand::Zero());
3796 __ bne(&inner_push_loop); // test for gt?
3797
3798 __ AddS64(r6, r6, Operand(kSystemPointerSize));
3799 __ bind(&outer_loop_header);
3800 __ CmpS64(r6, r3);
3801 __ blt(&outer_push_loop);
3802
3803 __ LoadU64(r3, MemOperand(r2, Deoptimizer::input_offset()));
3804 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
3805 int code = config->GetAllocatableDoubleCode(i);
3806 const DoubleRegister dreg = DoubleRegister::from_code(code);
3807 int src_offset = code * kDoubleSize + double_regs_offset;
3808 __ ld(dreg, MemOperand(r3, src_offset));
3809 }
3810
3811 // Push pc and continuation from the last output frame.
3812 __ LoadU64(r8, MemOperand(r4, FrameDescription::pc_offset()));
3813 __ push(r8);
3814 __ LoadU64(r8, MemOperand(r4, FrameDescription::continuation_offset()));
3815 __ push(r8);
3816
3817 // Restore the registers from the last output frame.
3818 __ mov(r1, r4);
3819 for (int i = kNumberOfRegisters - 1; i > 0; i--) {
3820 int offset =
3821 (i * kSystemPointerSize) + FrameDescription::registers_offset();
3822 if ((restored_regs.bits() & (1 << i)) != 0) {
3823 __ LoadU64(ToRegister(i), MemOperand(r1, offset));
3824 }
3825 }
3826
3827 {
3828 UseScratchRegisterScope temps(masm);
3829 Register is_iterable = temps.Acquire();
3830 Register one = r6;
3831 __ Move(is_iterable, ExternalReference::stack_is_iterable_address(isolate));
3832 __ lhi(one, Operand(1));
3833 __ StoreU8(one, MemOperand(is_iterable));
3834 }
3835
3836 __ pop(ip); // get continuation, leave pc on stack
3837 __ pop(r14);
3838 __ Jump(ip);
3839
3840 __ stop();
3841 }
3842
3843 } // namespace
3844
3845 void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
3846 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
3847 }
3848
3849 void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
3850 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
3851 }
3852
3853 void Builtins::Generate_DeoptimizationEntry_Unused(MacroAssembler* masm) {
3854 Generate_DeoptimizationEntry(masm, DeoptimizeKind::kUnused);
3855 }
3856
3857 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
3858 OnStackReplacement(masm, OsrSourceTier::kInterpreter);
3859 }
3860
3861 #if ENABLE_SPARKPLUG
3862 void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
3863 __ LoadU64(kContextRegister,
3864 MemOperand(fp, BaselineFrameConstants::kContextOffset));
3865 OnStackReplacement(masm, OsrSourceTier::kBaseline);
3866 }
3867 #endif
3868
3869 void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
3870 MacroAssembler* masm) {
3871 Generate_BaselineOrInterpreterEntry(masm, false);
3872 }
3873
3874 void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
3875 MacroAssembler* masm) {
3876 Generate_BaselineOrInterpreterEntry(masm, true);
3877 }
3878
3879 void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
3880 MacroAssembler* masm) {
3881 Generate_BaselineOrInterpreterEntry(masm, false, true);
3882 }
3883
3884 #undef __
3885
3886 } // namespace internal
3887 } // namespace v8
3888
3889 #endif // V8_TARGET_ARCH_S390
3890