// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
#error This header must be included via macro-assembler.h
#endif

#ifndef V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_

#include "src/codegen/arm/assembler-arm.h"
#include "src/codegen/bailout-reason.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"

namespace v8 {
namespace internal {

// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
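
// Illustrative usage sketch (not part of the original header; the register
// choice and the byte offset below are assumptions): with a tagged HeapObject
// pointer in r1, a field at offset 8 from the start of the object could be
// loaded as
//   __ ldr(r0, FieldMemOperand(r1, 8));
// The helper subtracts kHeapObjectTag, so the untagged field address is used.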

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  using TurboAssemblerBase::TurboAssemblerBase;

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);
  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type);

// Allocate stack space of given size (i.e. decrement {sp} by the value
// stored in the given register, or by a constant). If you need to perform a
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
#ifdef V8_OS_WIN
  void AllocateStackSpace(Register bytes_scratch);
  void AllocateStackSpace(int bytes);
#else
  void AllocateStackSpace(Register bytes) { sub(sp, sp, bytes); }
  void AllocateStackSpace(int bytes) { sub(sp, sp, Operand(bytes)); }
#endif
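
  // Usage sketch for AllocateStackSpace (illustrative only; the slot count and
  // the use of kPointerSize are assumptions): reserve two word-sized spill
  // slots before a call and release them again afterwards.
  //   __ AllocateStackSpace(2 * kPointerSize);
  //   ...
  //   __ add(sp, sp, Operand(2 * kPointerSize));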

  // Push a fixed frame, consisting of lr, fp
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Push a standard frame, consisting of lr, fp, context and JS function
  void PushStandardFrame(Register function_reg);

  void InitializeRootRegister();

  void Push(Register src) { push(src); }

  void Push(Handle<HeapObject> handle);
  void Push(Smi smi);

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }
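
  // Layout example (illustrative; the registers are assumptions): Push(r1, r0)
  // stores r1 at the higher address and r0 at sp[0], matching a single stm,
  // which places lower-numbered registers at lower addresses.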

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Push five registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5, Condition cond = al) {
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          if (src4.code() > src5.code()) {
            stm(db_w, sp,
                src1.bit() | src2.bit() | src3.bit() | src4.bit() | src5.bit(),
                cond);
          } else {
            stm(db_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
                cond);
            str(src5, MemOperand(sp, 4, NegPreIndex), cond);
          }
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          Push(src4, src5, cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, src5, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, src5, cond);
    }
  }

  enum class PushArrayOrder { kNormal, kReverse };
  // `array` points to the first element (the lowest address).
  // `array` and `size` are not modified.
  void PushArray(Register array, Register size, Register scratch,
                 PushArrayOrder order = PushArrayOrder::kNormal);

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    DCHECK(src1 != src2);
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop three registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        ldr(src3, MemOperand(sp, 4, PostIndex), cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Pop four registers.  Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Condition cond = al) {
    DCHECK(!AreAliased(src1, src2, src3, src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          ldr(src4, MemOperand(sp, 4, PostIndex), cond);
          ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
        }
      } else {
        Pop(src3, src4, cond);
        ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
      }
    } else {
      Pop(src2, src3, src4, cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers = 0,
                            Register scratch = no_reg);

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // Both |callee_args_count| and |caller_args_count| do not include
  // receiver. |callee_args_count| is not modified. |caller_args_count|
  // is trashed.
  void PrepareForTailCall(Register callee_args_count,
                          Register caller_args_count, Register scratch0,
                          Register scratch1);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void MovToFloatParameter(DwVfpRegister src);
  void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2);
  void MovToFloatResult(DwVfpRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);
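
  // Typical pairing of PrepareCallCFunction and CallCFunction (a sketch, not
  // taken from this header; the external reference and argument values are
  // assumptions):
  //   __ PrepareCallCFunction(2, scratch);
  //   __ mov(r0, Operand(first_arg));   // integer arguments start in r0..r3
  //   __ mov(r1, Operand(second_arg));
  //   __ CallCFunction(ExternalReference::some_helper_function(), 2);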

  void MovFromFloatParameter(DwVfpRegister dst);
  void MovFromFloatResult(DwVfpRegister dst);

  void Trap() override;
  void DebugBreak() override;

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug-code to enable.
  void Assert(Condition cond, AbortReason reason);

  // Like Assert(), but without condition.
  // Use --debug-code to enable.
  void AssertUnreachable(AbortReason reason);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason msg);

  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LslPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void LsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, Register shift);
  void AsrPair(Register dst_low, Register dst_high, Register src_low,
               Register src_high, uint32_t shift);

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Call(Register target, Condition cond = al);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al,
            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
            bool check_constant_pool = true);
  void Call(Label* target);

  // Load the builtin given by the Smi in |builtin_index| into the same
  // register.
  void LoadEntryFromBuiltinIndex(Register builtin_index);
  void CallBuiltinByIndex(Register builtin_index) override;
  void CallBuiltin(int builtin_index, Condition cond = al);

  void LoadCodeObjectEntry(Register destination, Register code_object) override;
  void CallCodeObject(Register code_object) override;
  void JumpCodeObject(Register code_object) override;

  // Generates an instruction sequence s.t. the return address points to the
  // instruction following the call.
  // The return address on the stack is used by frame iteration.
  void StoreReturnAddressAndCall(Register target);

  void CallForDeoptimization(Builtins::Name target, int deopt_id, Label* exit,
                             DeoptimizeKind kind,
                             Label* jump_deoptimization_entry_label);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);
  void Drop(Register count, Condition cond = al);

  void Ret(Condition cond = al);
  void Ret(int drop, Condition cond = al);

  // Compare single values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2,
                             const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2,
                             const Condition cond = al);

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src,
                          const Condition cond = al);
  void VFPCanonicalizeNaN(const DwVfpRegister value,
                          const Condition cond = al) {
    VFPCanonicalizeNaN(value, value, cond);
  }

  void VmovHigh(Register dst, DwVfpRegister src);
  void VmovHigh(DwVfpRegister dst, Register src);
  void VmovLow(Register dst, DwVfpRegister src);
  void VmovLow(DwVfpRegister dst, Register src);

  void CheckPageFlag(Register object, int mask, Condition cc,
                     Label* condition_met);

  // Check whether d16-d31 are available on the CPU. The result is given by the
  // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
  void CheckFor32DRegs(Register scratch);

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);
  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Address wasm_target);
  void CallEphemeronKeyBarrier(Register object, Operand offset,
                               SaveFPRegsMode fp_mode);

  // For a given |object| and |offset|:
  //   - Move |object| to |dst_object|.
  //   - Compute the address of the slot pointed to by |offset| in |object| and
  //     write it to |dst_slot|. |offset| can be either an immediate or a
  //     register.
  // This method makes sure |object| and |offset| are allowed to overlap with
  // the destination registers.
  void MoveObjectAndSlot(Register dst_object, Register dst_slot,
                         Register object, Operand offset);

  // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double
  // values to location, saving [d0..(d15|d31)].
  void SaveFPRegs(Register location, Register scratch);

  // Does a runtime check for 16/32 FP registers. Either way, pops 32 double
  // values to location, restoring [d0..(d15|d31)].
  void RestoreFPRegs(Register location, Register scratch);

  // As above, but with heap semantics instead of stack semantics, i.e.: the
  // location starts at the lowest address and grows towards higher addresses,
  // for both saves and restores.
  void SaveFPRegsToHeap(Register location, Register scratch);
  void RestoreFPRegsFromHeap(Register location, Register scratch);

  // Calculate how much stack space (in bytes) is required to store caller-saved
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller-saved registers on the stack, and return the number of bytes
  // by which the stack pointer is adjusted.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller-saved registers from the stack, and return the number of
  // bytes by which the stack pointer is adjusted.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);
  void Jump(Register target, Condition cond = al);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(const ExternalReference& reference) override;

  // Perform a floating-point min or max operation with the
  // (IEEE-754-compatible) semantics of ARM64's fmin/fmax. Some cases, typically
  // NaNs or +/-0.0, are expected to be rare and are handled in out-of-line
  // code. The specific behaviour depends on supported instructions.
  //
  // These functions assume (and assert) that left!=right. It is permitted
  // for the result to alias either input register.
  void FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right,
                Label* out_of_line);
  void FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);
  void FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right,
                Label* out_of_line);

  // Generate out-of-line cases for the macros above.
  void FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left,
                         SwVfpRegister right);
  void FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
  void FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left,
                         DwVfpRegister right);
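
  // Sketch of the intended FloatMax/FloatMin pattern (illustrative; the labels
  // and registers are assumptions). The inline part handles the common case,
  // the out-of-line part handles NaNs and +/-0.0:
  //   Label ool, done;
  //   __ FloatMax(d0, d1, d2, &ool);
  //   __ b(&done);
  //   __ bind(&ool);
  //   __ FloatMaxOutOfLine(d0, d1, d2);
  //   __ bind(&done);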

  void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
  void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
  void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
  void ExtractLane(DwVfpRegister dst, QwNeonRegister src, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
                   NeonDataType dt, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   SwVfpRegister src_lane, int lane);
  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
                   DwVfpRegister src_lane, int lane);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi smi);
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
            Condition cond = al) {
    if (!src.IsRegister() || src.rm() != dst || sbit != LeaveCC) {
      mov(dst, src, sbit, cond);
    }
  }
  // Move src0 to dst0 and src1 to dst1, handling possible overlaps.
  void MovePair(Register dst0, Register src0, Register dst1, Register src1);

  void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
  void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
  void Move(QwNeonRegister dst, QwNeonRegister src);

  // Simulate s-register moves for imaginary s32 - s63 registers.
  void VmovExtended(Register dst, int src_code);
  void VmovExtended(int dst_code, Register src);
  // Move between s-registers and imaginary s-registers.
  void VmovExtended(int dst_code, int src_code);
  void VmovExtended(int dst_code, const MemOperand& src);
  void VmovExtended(const MemOperand& dst, int src_code);

  // Register swap. Note that the register operands should be distinct.
  void Swap(Register srcdst0, Register srcdst1);
  void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
  void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand::SmiUntag(reg), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand::SmiUntag(src), s);
  }

  // Load an object from the root table.
  void LoadRoot(Register destination, RootIndex index) override {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, RootIndex index, Condition cond);

  // Jump if the register contains a smi.
  void JumpIfSmi(Register value, Label* smi_label);

  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
  // succeeds, otherwise falls through if result is saturated. On return
  // 'result' either holds answer, or is clobbered on fall through.
  void TryInlineTruncateDoubleToI(Register result, DwVfpRegister input,
                                  Label* done);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer.
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DwVfpRegister double_input, StubCallMode stub_mode);

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#ifdef __arm__
    return base::OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // Compute the start of the generated instruction stream from the current PC.
  // This is an alternative to embedding the {CodeObject} handle as a reference.
  void ComputeCodeStartAddress(Register dst);

  void ResetSpeculationPoisonRegister();

  // Control-flow integrity:

  // Define a function entrypoint. This doesn't emit any code for this
  // architecture, as control-flow integrity is not supported for it.
  void CodeEntry() {}
  // Define an exception handler.
  void ExceptionHandler() {}
  // Define an exception handler and bind a label.
  void BindExceptionHandler(Label* label) { bind(label); }

 private:
  // Compare single values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
                              const SwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

  // Implementation helpers for FloatMin and FloatMax.
  template <typename T>
  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
  template <typename T>
  void FloatMaxOutOfLineHelper(T result, T left, T right);
  template <typename T>
  void FloatMinOutOfLineHelper(T result, T left, T right);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);

  void CallRecordWriteStub(Register object, Operand offset,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode, Handle<Code> code_target,
                           Address wasm_target);
};

// MacroAssembler implements a collection of frequently used macros.
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 public:
  using TurboAssembler::TurboAssembler;

  void Mls(Register dst, Register src1, Register src2, Register srcA,
           Condition cond = al);
  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  // ---------------------------------------------------------------------------
  // GC Support

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
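
  // Illustrative write-barrier sequence (a sketch; the register names and the
  // offset are assumptions): store the value into the field, then notify the
  // garbage collector.
  //   __ str(value, FieldMemOperand(object, offset));
  //   __ RecordWriteField(object, offset, value, kLRHasBeenSaved,
  //                       kDontSaveFPRegs);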

  // For a given |object| notify the garbage collector that the slot at |offset|
  // has been written. |value| is the object being stored.
  void RecordWrite(
      Register object, Operand offset, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects the number of values to remove (pushed prior to the exit frame)
  // in a register, or no_reg if there is nothing to remove.
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  void LoadMap(Register destination, Register object);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  void LoadNativeContextSlot(int index, Register dst);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          Register expected_parameter_count,
                          Register actual_parameter_count, InvokeFlag flag);

  // On function call, call into the debugger.
  void CallDebugOnFunctionCall(Register fun, Register new_target,
                               Register expected_parameter_count,
                               Register actual_parameter_count);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunctionWithNewTarget(Register function, Register new_target,
                                   Register actual_parameter_count,
                                   InvokeFlag flag);

  void InvokeFunction(Register function, Register expected_parameter_count,
                      Register actual_parameter_count, InvokeFlag flag);

  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // Type_reg can be no_reg. In that case a scratch register is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);
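
  // Example use (illustrative; the registers and label are assumptions):
  //   __ CompareObjectType(receiver, scratch, scratch, JS_FUNCTION_TYPE);
  //   __ b(ne, &not_a_function);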

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare the object in a register to a value from the root list.
  // Acquires a scratch register.
  void CompareRoot(Register obj, RootIndex index);
  void PushRoot(RootIndex index) {
    UseScratchRegisterScope temps(this);
    Register scratch = temps.Acquire();
    LoadRoot(scratch, index);
    Push(scratch);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
    CompareRoot(with, index);
    b(eq, if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
    CompareRoot(with, index);
    b(ne, if_not_equal);
  }

  // Checks if value is in range [lower_limit, higher_limit] using a single
  // comparison.
  void JumpIfIsInRange(Register value, unsigned lower_limit,
                       unsigned higher_limit, Label* on_in_range);
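
  // The single comparison works by subtracting lower_limit and comparing the
  // result unsigned against (higher_limit - lower_limit), so values below
  // lower_limit wrap around and fail the check. Illustrative call (the limits
  // and label are assumptions):
  //   __ JumpIfIsInRange(type_reg, FIRST_JS_RECEIVER_TYPE,
  //                      LAST_JS_RECEIVER_TYPE, &is_receiver);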

  // It assumes that the arguments are located below the stack pointer.
  // argc is the number of arguments not including the receiver.
  // TODO(victorgomes): Remove this function once we stick with the reversed
  // arguments order.
  MemOperand ReceiverOperand(Register argc) {
    return MemOperand(sp, 0);
  }

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
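
  // Illustrative call through the convenience overload (the particular runtime
  // function is an assumption):
  //   __ CallRuntime(Runtime::kThrowStackOverflow);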

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Stack limit utilities
  void LoadStackLimit(Register destination, StackLimitKind kind);
  void StackOverflowCheck(Register num_args, Register scratch,
                          Label* stack_overflow);

  // ---------------------------------------------------------------------------
  // Smi utilities

  void SmiTag(Register reg, SBit s = LeaveCC);
  void SmiTag(Register dst, Register src, SBit s = LeaveCC);
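
  // Note (illustrative, not from the original header): on 32-bit ARM a Smi is
  // the integer value shifted left by kSmiTagSize with a clear tag bit, so
  // SmiTag amounts to a left shift and SmiUntag (declared in TurboAssembler
  // above) to the matching arithmetic right shift.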

  // Test if the register contains a smi (sets the Z flag, i.e. eq, if true).
  void SmiTst(Register value);
  // Jump if the register contains a non-smi.
  void JumpIfNotSmi(Register value, Label* not_smi_label);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    Ubfx(dst, src, Field::kShift, Field::kSize);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
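
  // Example (illustrative): Field can be any BitField-style type exposing
  // kShift and kSize; the field name below is hypothetical.
  //   __ DecodeField<SomeBitField>(r0, r1);  // extract the bits of r1 into r0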

 private:
  // Helper functions for generating invokes.
  void InvokePrologue(Register expected_parameter_count,
                      Register actual_parameter_count, Label* done,
                      InvokeFlag flag);

  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_CODEGEN_ARM_MACRO_ASSEMBLER_ARM_H_