// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_
#define V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_

#include "src/baseline/baseline-assembler.h"
#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen/interface-descriptors.h"

namespace v8 {
namespace internal {
namespace baseline {

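// Scratch register scope for baseline code. It wraps the MacroAssembler's
// UseScratchRegisterScope and, for the outermost scope only, makes a few
// extra registers available as scratch registers.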
class BaselineAssembler::ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(BaselineAssembler* assembler)
      : assembler_(assembler),
        prev_scope_(assembler->scratch_register_scope_),
        wrapped_scope_(assembler->masm()) {
    if (!assembler_->scratch_register_scope_) {
      // If we haven't opened a scratch scope yet, for the first one add a
      // couple of extra registers.
      wrapped_scope_.Include(x14, x15);
      wrapped_scope_.Include(x19);
    }
    assembler_->scratch_register_scope_ = this;
  }
  ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }

  Register AcquireScratch() { return wrapped_scope_.AcquireX(); }

 private:
  BaselineAssembler* assembler_;
  ScratchRegisterScope* prev_scope_;
  UseScratchRegisterScope wrapped_scope_;
};

// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
  kEqual = eq,
  kNotEqual = ne,

  kLessThan = lt,
  kGreaterThan = gt,
  kLessThanEqual = le,
  kGreaterThanEqual = ge,

  kUnsignedLessThan = lo,
  kUnsignedGreaterThan = hi,
  kUnsignedLessThanEqual = ls,
  kUnsignedGreaterThanEqual = hs,

  kOverflow = vs,
  kNoOverflow = vc,

  kZero = eq,
  kNotZero = ne,
};

inline internal::Condition AsMasmCondition(Condition cond) {
  return static_cast<internal::Condition>(cond);
}

namespace detail {

#ifdef DEBUG
inline bool Clobbers(Register target, MemOperand op) {
  return op.base() == target || op.regoffset() == target;
}
#endif

}  // namespace detail

#define __ masm_->

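// Interpreter register slots live in the baseline frame and are addressed
// relative to fp; interpreter::Register::ToOperand() gives the signed slot
// index, which is scaled by the system pointer size here.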
MemOperand BaselineAssembler::RegisterFrameOperand(
    interpreter::Register interpreter_register) {
  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
}
void BaselineAssembler::RegisterFrameAddress(
    interpreter::Register interpreter_register, Register rscratch) {
  return __ Add(rscratch, fp,
                interpreter_register.ToOperand() * kSystemPointerSize);
}
MemOperand BaselineAssembler::FeedbackVectorOperand() {
  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
}

void BaselineAssembler::Bind(Label* label) {
  // All baseline compiler binds on arm64 are assumed to be for jump targets.
  __ BindJumpTarget(label);
}

void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ Bind(label); }

void BaselineAssembler::JumpTarget() { __ JumpTarget(); }

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ B(target);
}

void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}

void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}

void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}

void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
}

void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
                                        Label* target,
                                        Label::Distance distance) {
  JumpIf(cc, left, Immediate(right), target, distance);
}

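// When short builtin calls are enabled the builtin code is close enough for a
// pc-relative call; otherwise the builtin's entry point is loaded into a
// scratch register and called (or jumped to) indirectly.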
void BaselineAssembler::CallBuiltin(Builtin builtin) {
  if (masm()->options().short_builtin_calls) {
    // Generate pc-relative call.
    __ CallBuiltin(builtin);
  } else {
    ScratchRegisterScope temps(this);
    Register temp = temps.AcquireScratch();
    __ LoadEntryFromBuiltin(builtin, temp);
    __ Call(temp);
  }
}

void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
  if (masm()->options().short_builtin_calls) {
    // Generate pc-relative call.
    __ TailCallBuiltin(builtin);
  } else {
    // The control flow integrity (CFI) feature allows us to "sign" code entry
    // points as a target for calls, jumps or both. Arm64 has special
    // instructions for this purpose, so-called "landing pads" (see
    // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and
    // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call"
    // landing pads for CPP builtins. In order to allow tail calling to those
    // builtins we have to use a workaround.
    // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump"
    // (i.e. `bti j`) landing pads for the tail-called code.
    Register temp = x17;

    // Make sure we don't use this register as a temporary.
    UseScratchRegisterScope temps(masm());
    temps.Exclude(temp);

    __ LoadEntryFromBuiltin(builtin, temp);
    __ Jump(temp);
  }
}

void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                      Label* target, Label::Distance) {
  __ Tst(value, Immediate(mask));
  __ B(AsMasmCondition(cc), target);
}

void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                               Label* target, Label::Distance) {
  __ CompareAndBranch(lhs, rhs, AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
                                         InstanceType instance_type,
                                         Register map, Label* target,
                                         Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  __ LoadMap(map, object);
  __ Ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, instance_type, target);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
                                           InstanceType instance_type,
                                           Label* target, Label::Distance) {
  ScratchRegisterScope temps(this);
  Register type = temps.AcquireScratch();
  if (FLAG_debug_code) {
    __ AssertNotSmi(map);
    __ CompareObjectType(map, type, type, MAP_TYPE);
    __ Assert(eq, AbortReason::kUnexpectedValue);
  }
  __ Ldrh(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  JumpIf(cc, type, instance_type, target);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  JumpIf(cc, value, tmp, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
                                  Label* target, Label::Distance distance) {
  __ AssertSmi(value);
  __ CompareTaggedAndBranch(value, smi, AsMasmCondition(cc), target);
}

void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
                                  Label* target, Label::Distance) {
  __ AssertSmi(lhs);
  __ AssertSmi(rhs);
  __ CompareTaggedAndBranch(lhs, rhs, AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CompareTaggedAndBranch(value, tmp, AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                     Register value, Label* target,
                                     Label::Distance) {
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Ldr(tmp, operand);
  __ CompareTaggedAndBranch(tmp, value, AsMasmCondition(cc), target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
                                   Label* target, Label::Distance) {
  JumpIf(cc, value, Immediate(byte), target);
}

void BaselineAssembler::Move(interpreter::Register output, Register source) {
  Move(RegisterFrameOperand(output), source);
}
void BaselineAssembler::Move(Register output, TaggedIndex value) {
  __ Mov(output, Immediate(value.ptr()));
}
void BaselineAssembler::Move(MemOperand output, Register source) {
  __ Str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
  __ Mov(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
  __ Mov(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
  __ Mov(output, Immediate(value));
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
  __ Mov(output, source);
}
void BaselineAssembler::MoveSmi(Register output, Register source) {
  __ Mov(output, source);
}

namespace detail {

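// On arm64 the stack pointer must stay 16-byte aligned, so values are pushed
// and popped in pairs; an odd number of values is padded with padreg. The
// helpers below take care of the pairing and padding.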
template <typename Arg>
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Arg arg) {
  Register reg = scope->AcquireScratch();
  basm->Move(reg, arg);
  return reg;
}
inline Register ToRegister(BaselineAssembler* basm,
                           BaselineAssembler::ScratchRegisterScope* scope,
                           Register reg) {
  return reg;
}

template <typename... Args>
struct CountPushHelper;
template <>
struct CountPushHelper<> {
  static int Count() { return 0; }
};
template <typename Arg, typename... Args>
struct CountPushHelper<Arg, Args...> {
  static int Count(Arg arg, Args... args) {
    return 1 + CountPushHelper<Args...>::Count(args...);
  }
};
template <typename... Args>
struct CountPushHelper<interpreter::RegisterList, Args...> {
  static int Count(interpreter::RegisterList list, Args... args) {
    return list.register_count() + CountPushHelper<Args...>::Count(args...);
  }
};

template <typename... Args>
struct PushAllHelper;
template <typename... Args>
inline void PushAll(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::Push(basm, args...);
}
template <typename... Args>
inline void PushAllReverse(BaselineAssembler* basm, Args... args) {
  PushAllHelper<Args...>::PushReverse(basm, args...);
}

template <>
struct PushAllHelper<> {
  static void Push(BaselineAssembler* basm) {}
  static void PushReverse(BaselineAssembler* basm) {}
};
template <typename Arg>
struct PushAllHelper<Arg> {
  static void Push(BaselineAssembler* basm, Arg) { FATAL("Unaligned push"); }
  static void PushReverse(BaselineAssembler* basm, Arg arg) {
    // Push the padding register to round up the number of values pushed.
    return PushAllReverse(basm, arg, padreg);
  }
};
template <typename Arg1, typename Arg2, typename... Args>
struct PushAllHelper<Arg1, Arg2, Args...> {
  static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                   Args... args) {
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg1),
                         ToRegister(basm, &scope, arg2));
    }
    PushAll(basm, args...);
  }
  static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
                          Args... args) {
    PushAllReverse(basm, args...);
    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      basm->masm()->Push(ToRegister(basm, &scope, arg2),
                         ToRegister(basm, &scope, arg1));
    }
  }
};
// Currently RegisterLists are always the last argument, so we don't
// specialize for the case where they're not. We do still specialize for the
// aligned and unaligned cases.
template <typename Arg>
struct PushAllHelper<Arg, interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, Arg arg,
                   interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 1);
    PushAll(basm, arg, list[0], list.PopLeft());
  }
  static void PushReverse(BaselineAssembler* basm, Arg arg,
                          interpreter::RegisterList list) {
    if (list.register_count() == 0) {
      PushAllReverse(basm, arg);
    } else {
      PushAllReverse(basm, arg, list[0], list.PopLeft());
    }
  }
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
  static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
    DCHECK_EQ(list.register_count() % 2, 0);
    for (int reg_index = 0; reg_index < list.register_count();
         reg_index += 2) {
      PushAll(basm, list[reg_index], list[reg_index + 1]);
    }
  }
  static void PushReverse(BaselineAssembler* basm,
                          interpreter::RegisterList list) {
    int reg_index = list.register_count() - 1;
    if (reg_index % 2 == 0) {
      // Push the padding register to round up the number of values pushed.
      PushAllReverse(basm, list[reg_index], padreg);
      reg_index--;
    }
    for (; reg_index >= 1; reg_index -= 2) {
      PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
    }
  }
};

template <typename... T>
struct PopAllHelper;
template <>
struct PopAllHelper<> {
  static void Pop(BaselineAssembler* basm) {}
};
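// Popping a single register also pops the padding register that was pushed
// alongside it, keeping sp 16-byte aligned.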
template <>
struct PopAllHelper<Register> {
  static void Pop(BaselineAssembler* basm, Register reg) {
    basm->masm()->Pop(reg, padreg);
  }
};
template <typename... T>
struct PopAllHelper<Register, Register, T...> {
  static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
                  T... tail) {
    basm->masm()->Pop(reg1, reg2);
    PopAllHelper<T...>::Pop(basm, tail...);
  }
};

}  // namespace detail

template <typename... T>
int BaselineAssembler::Push(T... vals) {
  // We have to count the pushes first, to decide whether to add padding before
  // the first push.
  int push_count = detail::CountPushHelper<T...>::Count(vals...);
  if (push_count % 2 == 0) {
    detail::PushAll(this, vals...);
  } else {
    detail::PushAll(this, padreg, vals...);
  }
  return push_count;
}

template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
  detail::PushAllReverse(this, vals...);
}

template <typename... T>
void BaselineAssembler::Pop(T... registers) {
  detail::PopAllHelper<T...>::Pop(this, registers...);
}

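// Tagged field loads and stores go through the MacroAssembler so that, when
// pointer compression is enabled, (de)compression is handled there.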
void BaselineAssembler::LoadTaggedPointerField(Register output,
                                               Register source, int offset) {
  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                  Register source,
                                                  int offset) {
  __ Ldrh(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::LoadWord8Field(Register output, Register source,
                                       int offset) {
  __ Ldrb(output, FieldMemOperand(source, offset));
}

void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope temps(this);
  Register tmp = temps.AcquireScratch();
  __ Mov(tmp, Operand(value));
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}

void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
  ASM_CODE_COMMENT(masm_);
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}

void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
  __ StoreTaggedField(value, FieldMemOperand(target, offset));
}

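// The interrupt budget lives in the function's FeedbackCell. The (typically
// negative) weight is added with flags set, so the branch below can skip the
// interrupt path as long as the budget has not dropped below zero.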
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    int32_t weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight);
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) {
    // Use compare flags set by Adds.
    DCHECK_LT(weight, 0);
    __ B(ge, skip_interrupt_label);
  }
}

void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
    Register weight, Label* skip_interrupt_label) {
  ASM_CODE_COMMENT(masm_);
  ScratchRegisterScope scratch_scope(this);
  Register feedback_cell = scratch_scope.AcquireScratch();
  LoadFunction(feedback_cell);
  LoadTaggedPointerField(feedback_cell, feedback_cell,
                         JSFunction::kFeedbackCellOffset);

  Register interrupt_budget = scratch_scope.AcquireScratch().W();
  __ Ldr(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  // Remember to set flags as part of the add!
  __ Adds(interrupt_budget, interrupt_budget, weight.W());
  __ Str(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
  if (skip_interrupt_label) __ B(ge, skip_interrupt_label);
}

void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  if (SmiValuesAre31Bits()) {
    __ Add(lhs.W(), lhs.W(), Immediate(rhs));
  } else {
    DCHECK(lhs.IsX());
    __ Add(lhs, lhs, Immediate(rhs));
  }
}

void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
  __ And(output, lhs, Immediate(rhs));
}

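// Switch is lowered to an inline jump table: each entry is a single branch,
// preceded by a BTI landing pad when control flow integrity is enabled, which
// is what entry_size_log2 below accounts for.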
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
  Label fallthrough;
  if (case_value_base != 0) {
    __ Sub(reg, reg, Immediate(case_value_base));
  }

  // Mostly copied from code-generator-arm64.cc
  ScratchRegisterScope scope(this);
  Register temp = scope.AcquireScratch();
  Label table;
  JumpIf(Condition::kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
  __ Adr(temp, &table);
  int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
  ++entry_size_log2;  // Account for BTI.
#endif
  __ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
  __ Br(temp);
  {
    TurboAssembler::BlockPoolsScope block_pools(masm_,
                                                num_labels * kInstrSize);
    __ Bind(&table);
    for (int i = 0; i < num_labels; ++i) {
      __ JumpTarget();
      __ B(labels[i]);
    }
    __ JumpTarget();
    __ Bind(&fallthrough);
  }
}

#undef __
#define __ basm.

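// Generates the epilogue of a baseline function: charges the return weight to
// the interrupt budget (calling the budget interrupt runtime function if it
// is exhausted), then drops the frame and the arguments, using whichever of
// the formal and actual parameter counts is larger.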
void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
  ASM_CODE_COMMENT(masm);
  BaselineAssembler basm(masm);

  Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
  Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();

  {
    ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");

    Label skip_interrupt_label;
    __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
    __ masm()->SmiTag(params_size);
    __ masm()->Push(params_size, kInterpreterAccumulatorRegister);

    __ LoadContext(kContextRegister);
    __ LoadFunction(kJSFunctionRegister);
    __ masm()->PushArgument(kJSFunctionRegister);
    __ CallRuntime(Runtime::kBytecodeBudgetInterrupt, 1);

    __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
    __ masm()->SmiUntag(params_size);

    __ Bind(&skip_interrupt_label);
  }

  BaselineAssembler::ScratchRegisterScope temps(&basm);
  Register actual_params_size = temps.AcquireScratch();
  // Compute the size of the actual parameters + receiver (in bytes).
  __ Move(actual_params_size,
          MemOperand(fp, StandardFrameConstants::kArgCOffset));

  // If actual is bigger than formal, then we should use it to free up the
  // stack arguments.
  Label corrected_args_count;
  __ JumpIf(Condition::kGreaterThanEqual, params_size, actual_params_size,
            &corrected_args_count);
  __ masm()->Mov(params_size, actual_params_size);
  __ Bind(&corrected_args_count);

  // Leave the frame (also dropping the register file).
  __ masm()->LeaveFrame(StackFrame::BASELINE);

  // Drop receiver + arguments.
  __ masm()->DropArguments(params_size,
                           TurboAssembler::kCountIncludesReceiver);
  __ masm()->Ret();
}

#undef __

inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
    Register reg) {
  assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_ARM64_BASELINE_ASSEMBLER_ARM64_INL_H_