1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32 
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36 
37 // A light-weight ARM Assembler
38 // Generates user mode instructions for the ARM architecture up to version 5
39 
40 #ifndef V8_ARM_ASSEMBLER_ARM_H_
41 #define V8_ARM_ASSEMBLER_ARM_H_
42 
43 #include <stdio.h>
44 #include <vector>
45 
46 #include "src/arm/constants-arm.h"
47 #include "src/assembler.h"
48 #include "src/boxed-float.h"
49 #include "src/double.h"
50 
51 namespace v8 {
52 namespace internal {
53 
54 // clang-format off
55 #define GENERAL_REGISTERS(V)                              \
56   V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  \
57   V(r8)  V(r9)  V(r10) V(fp)  V(ip)  V(sp)  V(lr)  V(pc)
58 
59 #define ALLOCATABLE_GENERAL_REGISTERS(V)                  \
60   V(r0)  V(r1)  V(r2)  V(r3)  V(r4)  V(r5)  V(r6)  V(r7)  \
61   V(r8)  V(r9)
62 
63 #define FLOAT_REGISTERS(V)                                \
64   V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
65   V(s8)  V(s9)  V(s10) V(s11) V(s12) V(s13) V(s14) V(s15) \
66   V(s16) V(s17) V(s18) V(s19) V(s20) V(s21) V(s22) V(s23) \
67   V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
68 
69 #define LOW_DOUBLE_REGISTERS(V)                           \
70   V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
71   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
72 
73 #define NON_LOW_DOUBLE_REGISTERS(V)                       \
74   V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
75   V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
76 
77 #define DOUBLE_REGISTERS(V) \
78   LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
79 
80 #define SIMD128_REGISTERS(V)                              \
81   V(q0)  V(q1)  V(q2)  V(q3)  V(q4)  V(q5)  V(q6)  V(q7)  \
82   V(q8)  V(q9)  V(q10) V(q11) V(q12) V(q13) V(q14) V(q15)
83 
84 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
85   V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
86   V(d8)  V(d9)  V(d10) V(d11) V(d12)                      \
87   V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
88   V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
89 
90 #define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V)          \
91   V(d0)  V(d1)  V(d2)  V(d3)  V(d4)  V(d5)  V(d6)  V(d7)  \
92   V(d8)  V(d9)  V(d10) V(d11) V(d12) V(d15)
93 
94 #define C_REGISTERS(V)                                            \
95   V(cr0)  V(cr1)  V(cr2)  V(cr3)  V(cr4)  V(cr5)  V(cr6)  V(cr7)  \
96   V(cr8)  V(cr9)  V(cr10) V(cr11) V(cr12) V(cr15)
97 // clang-format on
98 
99 // The ARM ABI does not specify the usage of register r9, which may be reserved
100 // as the static base or thread register on some platforms, in which case we
101 // leave it alone. Adjust the value of kR9Available accordingly:
102 const int kR9Available = 1;  // 1 if available to us, 0 if reserved
103 
104 // Register list in load/store instructions
105 // Note that the bit values must match those used in actual instruction encoding
106 const int kNumRegs = 16;
107 
108 // Caller-saved/arguments registers
109 const RegList kJSCallerSaved =
110   1 << 0 |  // r0 a1
111   1 << 1 |  // r1 a2
112   1 << 2 |  // r2 a3
113   1 << 3;   // r3 a4
114 
115 const int kNumJSCallerSaved = 4;
116 
117 // Callee-saved registers preserved when switching from C to JavaScript
118 const RegList kCalleeSaved =
119   1 <<  4 |  //  r4 v1
120   1 <<  5 |  //  r5 v2
121   1 <<  6 |  //  r6 v3
122   1 <<  7 |  //  r7 v4 (cp in JavaScript code)
123   1 <<  8 |  //  r8 v5 (pp in JavaScript code)
124   kR9Available <<  9 |  //  r9 v6
125   1 << 10 |  // r10 v7
126   1 << 11;   // r11 v8 (fp in JavaScript code)
127 
128 // When calling into C++ (only for C++ calls that can't cause a GC).
129 // The call code will take care of lr, fp, etc.
130 const RegList kCallerSaved =
131   1 <<  0 |  // r0
132   1 <<  1 |  // r1
133   1 <<  2 |  // r2
134   1 <<  3 |  // r3
135   1 <<  9;   // r9
136 
137 const int kNumCalleeSaved = 7 + kR9Available;
138 
139 // Double registers d8 to d15 are callee-saved.
140 const int kNumDoubleCalleeSaved = 8;
141 
142 // Number of registers for which space is reserved in safepoints. Must be a
143 // multiple of 8.
144 // TODO(regis): Only 8 registers may actually be sufficient. Revisit.
145 const int kNumSafepointRegisters = 16;
146 
147 // Define the list of registers actually saved at safepoints.
148 // Note that the number of saved registers may be smaller than the reserved
149 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
150 const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
151 const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
152 
153 enum RegisterCode {
154 #define REGISTER_CODE(R) kRegCode_##R,
155   GENERAL_REGISTERS(REGISTER_CODE)
156 #undef REGISTER_CODE
157       kRegAfterLast
158 };
159 
160 class Register : public RegisterBase<Register, kRegAfterLast> {
161   friend class RegisterBase;
162   explicit constexpr Register(int code) : RegisterBase(code) {}
163 };
164 
165 ASSERT_TRIVIALLY_COPYABLE(Register);
166 static_assert(sizeof(Register) == sizeof(int),
167               "Register can efficiently be passed by value");
168 
169 // r7: context register
170 #define DECLARE_REGISTER(R) \
171   constexpr Register R = Register::from_code<kRegCode_##R>();
172 GENERAL_REGISTERS(DECLARE_REGISTER)
173 #undef DECLARE_REGISTER
174 constexpr Register no_reg = Register::no_reg();
175 
176 constexpr bool kPadArguments = false;
177 constexpr bool kSimpleFPAliasing = false;
178 constexpr bool kSimdMaskRegisters = false;
179 
180 enum SwVfpRegisterCode {
181 #define REGISTER_CODE(R) kSwVfpCode_##R,
182   FLOAT_REGISTERS(REGISTER_CODE)
183 #undef REGISTER_CODE
184       kSwVfpAfterLast
185 };
186 
187 // Representation of a list of non-overlapping VFP registers. This list
188 // represents the data layout of VFP registers as a bitfield:
189 //   S registers cover 1 bit
190 //   D registers cover 2 bits
191 //   Q registers cover 4 bits
192 //
193 // This way, we make sure no registers in the list ever overlap. However, a list
194 // may represent multiple different sets of registers,
195 // e.g. [d0 s2 s3] <=> [s0 s1 d1].
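// As an illustrative sketch of this layout (the values follow directly from the
// ToVfpRegList() implementations below):
//   s2.ToVfpRegList() == 0b0100  (bit 2)
//   d1.ToVfpRegList() == 0b1100  (bits 2-3, i.e. s2:s3)
//   q0.ToVfpRegList() == 0b1111  (bits 0-3, i.e. s0..s3, or d0:d1)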
196 typedef uint64_t VfpRegList;
197 
198 // Single word VFP register.
199 class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
200  public:
201   static constexpr int kSizeInBytes = 4;
202 
203   static void split_code(int reg_code, int* vm, int* m) {
204     DCHECK(from_code(reg_code).is_valid());
205     *m = reg_code & 0x1;
206     *vm = reg_code >> 1;
207   }
208   void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
209   VfpRegList ToVfpRegList() const {
210     DCHECK(is_valid());
211     // Each bit in the list corresponds to a S register.
212     return uint64_t{0x1} << code();
213   }
214 
215  private:
216   friend class RegisterBase;
217   explicit constexpr SwVfpRegister(int code) : RegisterBase(code) {}
218 };
219 
220 ASSERT_TRIVIALLY_COPYABLE(SwVfpRegister);
221 static_assert(sizeof(SwVfpRegister) == sizeof(int),
222               "SwVfpRegister can efficiently be passed by value");
223 
224 typedef SwVfpRegister FloatRegister;
225 
226 enum DoubleRegisterCode {
227 #define REGISTER_CODE(R) kDoubleCode_##R,
228   DOUBLE_REGISTERS(REGISTER_CODE)
229 #undef REGISTER_CODE
230       kDoubleAfterLast
231 };
232 
233 // Double word VFP register.
234 class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
235  public:
236   static constexpr int kSizeInBytes = 8;
237 
238   inline static int NumRegisters();
239 
240   static void split_code(int reg_code, int* vm, int* m) {
241     DCHECK(from_code(reg_code).is_valid());
242     *m = (reg_code & 0x10) >> 4;
243     *vm = reg_code & 0x0F;
244   }
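  // For example, d17 (code 17 = 0b10001) splits into vm = 0b0001 and m = 1,
  // i.e. the low four bits and the top bit of the register code, matching the
  // split 4-bit/1-bit register fields used in VFP instruction encodings.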
245   void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
246   VfpRegList ToVfpRegList() const {
247     DCHECK(is_valid());
248     // A D register overlaps two S registers.
249     return uint64_t{0x3} << (code() * 2);
250   }
251 
252  private:
253   friend class RegisterBase;
254   friend class LowDwVfpRegister;
255   explicit constexpr DwVfpRegister(int code) : RegisterBase(code) {}
256 };
257 
258 ASSERT_TRIVIALLY_COPYABLE(DwVfpRegister);
259 static_assert(sizeof(DwVfpRegister) == sizeof(int),
260               "DwVfpRegister can efficiently be passed by value");
261 
262 typedef DwVfpRegister DoubleRegister;
263 
264 
265 // Double word VFP register d0-15.
266 class LowDwVfpRegister
267     : public RegisterBase<LowDwVfpRegister, kDoubleCode_d16> {
268  public:
269   constexpr operator DwVfpRegister() const { return DwVfpRegister(reg_code_); }
270 
271   SwVfpRegister low() const { return SwVfpRegister::from_code(code() * 2); }
272   SwVfpRegister high() const {
273     return SwVfpRegister::from_code(code() * 2 + 1);
274   }
275   VfpRegList ToVfpRegList() const {
276     DCHECK(is_valid());
277     // A D register overlaps two S registers.
278     return uint64_t{0x3} << (code() * 2);
279   }
280 
281  private:
282   friend class RegisterBase;
283   explicit constexpr LowDwVfpRegister(int code) : RegisterBase(code) {}
284 };
285 
286 enum Simd128RegisterCode {
287 #define REGISTER_CODE(R) kSimd128Code_##R,
288   SIMD128_REGISTERS(REGISTER_CODE)
289 #undef REGISTER_CODE
290       kSimd128AfterLast
291 };
292 
293 // Quad word NEON register.
294 class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
295  public:
296   static void split_code(int reg_code, int* vm, int* m) {
297     DCHECK(from_code(reg_code).is_valid());
298     int encoded_code = reg_code << 1;
299     *m = (encoded_code & 0x10) >> 4;
300     *vm = encoded_code & 0x0F;
301   }
302   void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
303   DwVfpRegister low() const { return DwVfpRegister::from_code(code() * 2); }
304   DwVfpRegister high() const {
305     return DwVfpRegister::from_code(code() * 2 + 1);
306   }
307   VfpRegList ToVfpRegList() const {
308     DCHECK(is_valid());
309     // A Q register overlaps four S registers.
310     return uint64_t{0xf} << (code() * 4);
311   }
312 
313  private:
314   friend class RegisterBase;
315   explicit constexpr QwNeonRegister(int code) : RegisterBase(code) {}
316 };
317 
318 
319 typedef QwNeonRegister QuadRegister;
320 
321 typedef QwNeonRegister Simd128Register;
322 
323 enum CRegisterCode {
324 #define REGISTER_CODE(R) kCCode_##R,
325   C_REGISTERS(REGISTER_CODE)
326 #undef REGISTER_CODE
327       kCAfterLast
328 };
329 
330 // Coprocessor register
331 class CRegister : public RegisterBase<CRegister, kCAfterLast> {
332   friend class RegisterBase;
333   explicit constexpr CRegister(int code) : RegisterBase(code) {}
334 };
335 
336 // Support for the VFP registers s0 to s31 (d0 to d15).
337 // Note that "s(N):s(N+1)" is the same as "d(N/2)".
338 #define DECLARE_FLOAT_REGISTER(R) \
339   constexpr SwVfpRegister R = SwVfpRegister::from_code<kSwVfpCode_##R>();
340 FLOAT_REGISTERS(DECLARE_FLOAT_REGISTER)
341 #undef DECLARE_FLOAT_REGISTER
342 
343 #define DECLARE_LOW_DOUBLE_REGISTER(R) \
344   constexpr LowDwVfpRegister R = LowDwVfpRegister::from_code<kDoubleCode_##R>();
345 LOW_DOUBLE_REGISTERS(DECLARE_LOW_DOUBLE_REGISTER)
346 #undef DECLARE_LOW_DOUBLE_REGISTER
347 
348 #define DECLARE_DOUBLE_REGISTER(R) \
349   constexpr DwVfpRegister R = DwVfpRegister::from_code<kDoubleCode_##R>();
350 NON_LOW_DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
351 #undef DECLARE_DOUBLE_REGISTER
352 
353 constexpr DwVfpRegister no_dreg = DwVfpRegister::no_reg();
354 
355 #define DECLARE_SIMD128_REGISTER(R) \
356   constexpr Simd128Register R = Simd128Register::from_code<kSimd128Code_##R>();
357 SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
358 #undef DECLARE_SIMD128_REGISTER
359 
360 // Aliases for double registers.
361 constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
362 constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
363 constexpr LowDwVfpRegister kDoubleRegZero  = d13;
364 
365 constexpr CRegister no_creg = CRegister::no_reg();
366 
367 #define DECLARE_C_REGISTER(R) \
368   constexpr CRegister R = CRegister::from_code<kCCode_##R>();
369 C_REGISTERS(DECLARE_C_REGISTER)
370 #undef DECLARE_C_REGISTER
371 
372 // Coprocessor number
373 enum Coprocessor {
374   p0  = 0,
375   p1  = 1,
376   p2  = 2,
377   p3  = 3,
378   p4  = 4,
379   p5  = 5,
380   p6  = 6,
381   p7  = 7,
382   p8  = 8,
383   p9  = 9,
384   p10 = 10,
385   p11 = 11,
386   p12 = 12,
387   p13 = 13,
388   p14 = 14,
389   p15 = 15
390 };
391 
392 // -----------------------------------------------------------------------------
393 // Machine instruction Operands
394 
395 // Class Operand represents a shifter operand in data processing instructions
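// Illustrative forms, matching the constructors declared below (a sketch; the
// ShiftOp values such as LSL and ASR are defined in constants-arm.h):
//   Operand(42)            // #42         - immediate
//   Operand(r2)            // r2          - register
//   Operand(r2, LSL, 2)    // r2, LSL #2  - register shifted by an immediate
//   Operand(r2, ASR, r3)   // r2, ASR r3  - register shifted by a register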
396 class Operand BASE_EMBEDDED {
397  public:
398   // immediate
399   V8_INLINE explicit Operand(int32_t immediate,
400                              RelocInfo::Mode rmode = RelocInfo::NONE);
401   V8_INLINE static Operand Zero();
402   V8_INLINE explicit Operand(const ExternalReference& f);
403   explicit Operand(Handle<HeapObject> handle);
404   V8_INLINE explicit Operand(Smi* value);
405 
406   // rm
407   V8_INLINE explicit Operand(Register rm);
408 
409   // rm <shift_op> shift_imm
410   explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
411   V8_INLINE static Operand SmiUntag(Register rm) {
412     return Operand(rm, ASR, kSmiTagSize);
413   }
414   V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
415     STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
416     return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
417   }
418   V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
419     STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
420     return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
421   }
422 
423   // rm <shift_op> rs
424   explicit Operand(Register rm, ShiftOp shift_op, Register rs);
425 
426   static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
427   static Operand EmbeddedCode(CodeStub* stub);
428 
429   // Return true if this is a register operand.
430   bool IsRegister() const {
431     return rm_.is_valid() && rs_ == no_reg && shift_op_ == LSL &&
432            shift_imm_ == 0;
433   }
434   // Return true if this is a register operand shifted with an immediate.
435   bool IsImmediateShiftedRegister() const {
436     return rm_.is_valid() && !rs_.is_valid();
437   }
438   // Return true if this is a register operand shifted with a register.
439   bool IsRegisterShiftedRegister() const {
440     return rm_.is_valid() && rs_.is_valid();
441   }
442 
443   // Return the number of actual instructions required to implement the given
444   // instruction for this particular operand. This can be a single instruction,
445   // if no load into a scratch register is necessary, or anything between 2 and
446   // 4 instructions when we need to load from the constant pool (depending upon
447   // whether the constant pool entry is in the small or extended section). If
448   // the instruction this operand is used for is a MOV or MVN instruction the
449   // actual instruction to use is required for this calculation. For other
450   // instructions instr is ignored.
451   //
452   // The value returned is only valid as long as no entries are added to the
453   // constant pool between this call and the actual instruction being emitted.
454   int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
455   bool MustOutputRelocInfo(const Assembler* assembler) const;
456 
457   inline int32_t immediate() const {
458     DCHECK(IsImmediate());
459     DCHECK(!IsHeapObjectRequest());
460     return value_.immediate;
461   }
462   bool IsImmediate() const {
463     return !rm_.is_valid();
464   }
465 
466   HeapObjectRequest heap_object_request() const {
467     DCHECK(IsHeapObjectRequest());
468     return value_.heap_object_request;
469   }
470   bool IsHeapObjectRequest() const {
471     DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
472     DCHECK_IMPLIES(is_heap_object_request_,
473         rmode_ == RelocInfo::EMBEDDED_OBJECT ||
474         rmode_ == RelocInfo::CODE_TARGET);
475     return is_heap_object_request_;
476   }
477 
478   Register rm() const { return rm_; }
479   Register rs() const { return rs_; }
480   ShiftOp shift_op() const { return shift_op_; }
481 
482 
483  private:
484   Register rm_ = no_reg;
485   Register rs_ = no_reg;
486   ShiftOp shift_op_;
487   int shift_imm_;                // valid if rm_ != no_reg && rs_ == no_reg
488   union Value {
489     Value() {}
490     HeapObjectRequest heap_object_request;  // if is_heap_object_request_
491     int32_t immediate;                      // otherwise
492   } value_;                                 // valid if rm_ == no_reg
493   bool is_heap_object_request_ = false;
494   RelocInfo::Mode rmode_;
495 
496   friend class Assembler;
497 };
498 
499 
500 // Class MemOperand represents a memory operand in load and store instructions
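// Illustrative forms, matching the constructors declared below (a sketch; the
// AddrMode values such as Offset, PreIndex and PostIndex are defined in
// constants-arm.h):
//   MemOperand(r0, 4)                  // [r0, #+4]          Offset
//   MemOperand(r0, 4, PreIndex)        // [r0, #+4]!         PreIndex
//   MemOperand(r0, 4, PostIndex)       // [r0], #+4          PostIndex
//   MemOperand(r0, r1, LSL, 2)         // [r0, +r1, LSL #2]  Offset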
501 class MemOperand BASE_EMBEDDED {
502  public:
503   // [rn +/- offset]      Offset/NegOffset
504   // [rn +/- offset]!     PreIndex/NegPreIndex
505   // [rn], +/- offset     PostIndex/NegPostIndex
506   // offset is any signed 32-bit value; offset is first loaded to a scratch
507   // register if it does not fit the addressing mode (12-bit unsigned and sign
508   // bit)
509   explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
510 
511   // [rn +/- rm]          Offset/NegOffset
512   // [rn +/- rm]!         PreIndex/NegPreIndex
513   // [rn], +/- rm         PostIndex/NegPostIndex
514   explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
515 
516   // [rn +/- rm <shift_op> shift_imm]      Offset/NegOffset
517   // [rn +/- rm <shift_op> shift_imm]!     PreIndex/NegPreIndex
518   // [rn], +/- rm <shift_op> shift_imm     PostIndex/NegPostIndex
519   explicit MemOperand(Register rn, Register rm,
520                       ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
521   V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
522                                                        Register key,
523                                                        AddrMode am = Offset) {
524     STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
525     return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
526   }
527 
528   void set_offset(int32_t offset) {
529     DCHECK(rm_ == no_reg);
530     offset_ = offset;
531   }
532 
533   uint32_t offset() const {
534     DCHECK(rm_ == no_reg);
535     return offset_;
536   }
537 
538   Register rn() const { return rn_; }
539   Register rm() const { return rm_; }
540   AddrMode am() const { return am_; }
541 
542   bool OffsetIsUint12Encodable() const {
543     return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
544   }
545 
546  private:
547   Register rn_;  // base
548   Register rm_;  // register offset
549   int32_t offset_;  // valid if rm_ == no_reg
550   ShiftOp shift_op_;
551   int shift_imm_;  // valid if rm_ != no_reg && rs_ == no_reg
552   AddrMode am_;  // bits P, U, and W
553 
554   friend class Assembler;
555 };
556 
557 
558 // Class NeonMemOperand represents a memory operand in load and
559 // store NEON instructions
560 class NeonMemOperand BASE_EMBEDDED {
561  public:
562   // [rn {:align}]       Offset
563   // [rn {:align}]!      PostIndex
564   explicit NeonMemOperand(Register rn, AddrMode am = Offset, int align = 0);
565 
566   // [rn {:align}], rm   PostIndex
567   explicit NeonMemOperand(Register rn, Register rm, int align = 0);
568 
569   Register rn() const { return rn_; }
570   Register rm() const { return rm_; }
571   int align() const { return align_; }
572 
573  private:
574   void SetAlignment(int align);
575 
576   Register rn_;  // base
577   Register rm_;  // register increment
578   int align_;
579 };
580 
581 
582 // Class NeonListOperand represents a list of NEON registers
583 class NeonListOperand BASE_EMBEDDED {
584  public:
585   explicit NeonListOperand(DoubleRegister base, int register_count = 1)
586     : base_(base), register_count_(register_count) {}
587   explicit NeonListOperand(QwNeonRegister q_reg)
588     : base_(q_reg.low()), register_count_(2) {}
589   DoubleRegister base() const { return base_; }
590   int register_count() { return register_count_; }
591   int length() const { return register_count_ - 1; }
592   NeonListType type() const {
593     switch (register_count_) {
594       default: UNREACHABLE();
595       // Fall through.
596       case 1: return nlt_1;
597       case 2: return nlt_2;
598       case 3: return nlt_3;
599       case 4: return nlt_4;
600     }
601   }
602  private:
603   DoubleRegister base_;
604   int register_count_;
605 };
606 
607 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
608  public:
609   // Create an assembler. Instructions and relocation information are emitted
610   // into a buffer, with the instructions starting from the beginning and the
611   // relocation information starting from the end of the buffer. See CodeDesc
612   // for a detailed comment on the layout (globals.h).
613   //
614   // If the provided buffer is nullptr, the assembler allocates and grows its
615   // own buffer, and buffer_size determines the initial buffer size. The buffer
616   // is owned by the assembler and deallocated upon destruction of the
617   // assembler.
618   //
619   // If the provided buffer is not nullptr, the assembler uses the provided
620   // buffer for code generation and assumes its size to be buffer_size. If the
621   // buffer is too small, a fatal error occurs. No deallocation of the buffer is
622   // done upon destruction of the assembler.
623   Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
624   virtual ~Assembler();
625 
626   // GetCode emits any pending (non-emitted) code and fills the descriptor
627   // desc. GetCode() is idempotent; it returns the same result if no other
628   // Assembler functions are invoked in between GetCode() calls.
629   void GetCode(Isolate* isolate, CodeDesc* desc);
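// A minimal usage sketch (assuming an AssemblerOptions value `options` and an
// Isolate* `isolate` are available):
//   Assembler assm(options, nullptr, 256);  // assembler-owned, growable buffer
//   ... emit instructions ...
//   CodeDesc desc;
//   assm.GetCode(isolate, &desc);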
630 
631   // Label operations & relative jumps (PPUM Appendix D)
632   //
633   // Takes a branch opcode (cc) and a label (L) and generates
634   // either a backward branch or a forward branch and links it
635   // to the label fixup chain. Usage:
636   //
637   // Label L;    // unbound label
638   // j(cc, &L);  // forward branch to unbound label
639   // bind(&L);   // bind label to the current pc
640   // j(cc, &L);  // backward branch to bound label
641   // bind(&L);   // illegal: a label may be bound only once
642   //
643   // Note: The same Label can be used for forward and backward branches
644   // but it may be bound only once.
645 
646   void bind(Label* L);  // binds an unbound label L to the current code position
647 
648   // Returns the branch offset to the given label from the current code position.
649   // Links the label to the current position if it is still unbound, i.e. adds it
650   // to the label fixup chain.
651   int branch_offset(Label* L);
652 
653   // Returns true if the given pc address is the start of a constant pool load
654   // instruction sequence.
655   V8_INLINE static bool is_constant_pool_load(Address pc);
656 
657   // Return the address in the constant pool of the code target address used by
658   // the branch/call instruction at pc, or the object in a mov.
659   V8_INLINE static Address constant_pool_entry_address(Address pc,
660                                                        Address constant_pool);
661 
662   // Read/Modify the code target address in the branch/call instruction at pc.
663   // The isolate argument is unused (and may be nullptr) when skipping flushing.
664   V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
665   V8_INLINE static void set_target_address_at(
666       Address pc, Address constant_pool, Address target,
667       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
668 
669   // Return the code target address at a call site from the return address
670   // of that call in the instruction stream.
671   V8_INLINE static Address target_address_from_return_address(Address pc);
672 
673   // Given the address of the beginning of a call, return the address
674   // in the instruction stream that the call will return from.
675   V8_INLINE static Address return_address_from_call_start(Address pc);
676 
677   // This sets the branch destination (which is in the constant pool on ARM).
678   // This is for calls and branches within generated code.
679   inline static void deserialization_set_special_target_at(
680       Address constant_pool_entry, Code* code, Address target);
681 
682   // Get the size of the special target encoded at 'location'.
683   inline static int deserialization_special_target_size(Address location);
684 
685   // This sets the internal reference at the pc.
686   inline static void deserialization_set_target_internal_reference_at(
687       Address pc, Address target,
688       RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
689 
690   // Here we are patching the address in the constant pool, not the actual call
691   // instruction.  The address in the constant pool is the same size as a
692   // pointer.
693   static constexpr int kSpecialTargetSize = kPointerSize;
694 
695   RegList* GetScratchRegisterList() { return &scratch_register_list_; }
696   VfpRegList* GetScratchVfpRegisterList() {
697     return &scratch_vfp_register_list_;
698   }
699 
700   // ---------------------------------------------------------------------------
701   // Code generation
702 
703   // Insert the smallest number of nop instructions
704   // possible to align the pc offset to a multiple
705   // of m. m must be a power of 2 (>= 4).
706   void Align(int m);
707   // Insert the smallest number of zero bytes possible to align the pc offset
708   // to a multiple of m. m must be a power of 2 (>= 2).
709   void DataAlign(int m);
710   // Aligns code to something that's optimal for a jump target for the platform.
711   void CodeTargetAlign();
712 
713   // Branch instructions
714   void b(int branch_offset, Condition cond = al,
715          RelocInfo::Mode rmode = RelocInfo::NONE);
716   void bl(int branch_offset, Condition cond = al,
717           RelocInfo::Mode rmode = RelocInfo::NONE);
718   void blx(int branch_offset);  // v5 and above
719   void blx(Register target, Condition cond = al);  // v5 and above
720   void bx(Register target, Condition cond = al);  // v5 and above, plus v4t
721 
722   // Convenience branch instructions using labels
723   void b(Label* L, Condition cond = al);
724   void b(Condition cond, Label* L) { b(L, cond); }
725   void bl(Label* L, Condition cond = al);
726   void bl(Condition cond, Label* L) { bl(L, cond); }
727   void blx(Label* L);  // v5 and above
728 
729   // Data-processing instructions
730 
731   void and_(Register dst, Register src1, const Operand& src2,
732             SBit s = LeaveCC, Condition cond = al);
733   void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
734             Condition cond = al);
735 
736   void eor(Register dst, Register src1, const Operand& src2,
737            SBit s = LeaveCC, Condition cond = al);
738 
739   void sub(Register dst, Register src1, const Operand& src2,
740            SBit s = LeaveCC, Condition cond = al);
741   void sub(Register dst, Register src1, Register src2,
742            SBit s = LeaveCC, Condition cond = al);
743 
744   void rsb(Register dst, Register src1, const Operand& src2,
745            SBit s = LeaveCC, Condition cond = al);
746 
747   void add(Register dst, Register src1, const Operand& src2,
748            SBit s = LeaveCC, Condition cond = al);
749   void add(Register dst, Register src1, Register src2,
750            SBit s = LeaveCC, Condition cond = al);
751 
752   void adc(Register dst, Register src1, const Operand& src2,
753            SBit s = LeaveCC, Condition cond = al);
754 
755   void sbc(Register dst, Register src1, const Operand& src2,
756            SBit s = LeaveCC, Condition cond = al);
757 
758   void rsc(Register dst, Register src1, const Operand& src2,
759            SBit s = LeaveCC, Condition cond = al);
760 
761   void tst(Register src1, const Operand& src2, Condition cond = al);
762   void tst(Register src1, Register src2, Condition cond = al);
763 
764   void teq(Register src1, const Operand& src2, Condition cond = al);
765 
766   void cmp(Register src1, const Operand& src2, Condition cond = al);
767   void cmp(Register src1, Register src2, Condition cond = al);
768 
769   void cmp_raw_immediate(Register src1, int raw_immediate, Condition cond = al);
770 
771   void cmn(Register src1, const Operand& src2, Condition cond = al);
772 
773   void orr(Register dst, Register src1, const Operand& src2,
774            SBit s = LeaveCC, Condition cond = al);
775   void orr(Register dst, Register src1, Register src2,
776            SBit s = LeaveCC, Condition cond = al);
777 
778   void mov(Register dst, const Operand& src,
779            SBit s = LeaveCC, Condition cond = al);
780   void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al);
781 
782   // Load the position of the label relative to the generated code object
783   // pointer in a register.
784   void mov_label_offset(Register dst, Label* label);
785 
786   // ARMv7 instructions for loading a 32 bit immediate in two instructions.
787   // The constant for movw and movt should be in the range 0-0xffff.
788   void movw(Register reg, uint32_t immediate, Condition cond = al);
789   void movt(Register reg, uint32_t immediate, Condition cond = al);
790 
791   void bic(Register dst, Register src1, const Operand& src2,
792            SBit s = LeaveCC, Condition cond = al);
793 
794   void mvn(Register dst, const Operand& src,
795            SBit s = LeaveCC, Condition cond = al);
796 
797   // Shift instructions
798 
799   void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
800            Condition cond = al);
801 
802   void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
803            Condition cond = al);
804 
805   void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
806            Condition cond = al);
807 
808   // Multiply instructions
809 
810   void mla(Register dst, Register src1, Register src2, Register srcA,
811            SBit s = LeaveCC, Condition cond = al);
812 
813   void mls(Register dst, Register src1, Register src2, Register srcA,
814            Condition cond = al);
815 
816   void sdiv(Register dst, Register src1, Register src2,
817             Condition cond = al);
818 
819   void udiv(Register dst, Register src1, Register src2, Condition cond = al);
820 
821   void mul(Register dst, Register src1, Register src2,
822            SBit s = LeaveCC, Condition cond = al);
823 
824   void smmla(Register dst, Register src1, Register src2, Register srcA,
825              Condition cond = al);
826 
827   void smmul(Register dst, Register src1, Register src2, Condition cond = al);
828 
829   void smlal(Register dstL, Register dstH, Register src1, Register src2,
830              SBit s = LeaveCC, Condition cond = al);
831 
832   void smull(Register dstL, Register dstH, Register src1, Register src2,
833              SBit s = LeaveCC, Condition cond = al);
834 
835   void umlal(Register dstL, Register dstH, Register src1, Register src2,
836              SBit s = LeaveCC, Condition cond = al);
837 
838   void umull(Register dstL, Register dstH, Register src1, Register src2,
839              SBit s = LeaveCC, Condition cond = al);
840 
841   // Miscellaneous arithmetic instructions
842 
843   void clz(Register dst, Register src, Condition cond = al);  // v5 and above
844 
845   // Saturating instructions. v6 and above.
846 
847   // Unsigned saturate.
848   //
849   // Saturate an optionally shifted signed value to an unsigned range.
850   //
851   //   usat dst, #satpos, src
852   //   usat dst, #satpos, src, lsl #sh
853   //   usat dst, #satpos, src, asr #sh
854   //
855   // Register dst will contain:
856   //
857   //   0,                 if s < 0
858   //   (1 << satpos) - 1, if s > ((1 << satpos) - 1)
859   //   s,                 otherwise
860   //
861   // where s is the contents of src after shifting (if used.)
862   void usat(Register dst, int satpos, const Operand& src, Condition cond = al);
863 
864   // Bitfield manipulation instructions. v7 and above.
865 
866   void ubfx(Register dst, Register src, int lsb, int width,
867             Condition cond = al);
868 
869   void sbfx(Register dst, Register src, int lsb, int width,
870             Condition cond = al);
871 
872   void bfc(Register dst, int lsb, int width, Condition cond = al);
873 
874   void bfi(Register dst, Register src, int lsb, int width,
875            Condition cond = al);
876 
877   void pkhbt(Register dst, Register src1, const Operand& src2,
878              Condition cond = al);
879 
880   void pkhtb(Register dst, Register src1, const Operand& src2,
881              Condition cond = al);
882 
883   void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
884   void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
885              Condition cond = al);
886   void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
887   void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
888              Condition cond = al);
889 
890   void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
891   void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
892              Condition cond = al);
893   void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
894   void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
895   void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
896              Condition cond = al);
897 
898   // Reverse the bits in a register.
899   void rbit(Register dst, Register src, Condition cond = al);
900   void rev(Register dst, Register src, Condition cond = al);
901 
902   // Status register access instructions
903 
904   void mrs(Register dst, SRegister s, Condition cond = al);
905   void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
906 
907   // Load/Store instructions
908   void ldr(Register dst, const MemOperand& src, Condition cond = al);
909   void str(Register src, const MemOperand& dst, Condition cond = al);
910   void ldrb(Register dst, const MemOperand& src, Condition cond = al);
911   void strb(Register src, const MemOperand& dst, Condition cond = al);
912   void ldrh(Register dst, const MemOperand& src, Condition cond = al);
913   void strh(Register src, const MemOperand& dst, Condition cond = al);
914   void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
915   void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
916   void ldrd(Register dst1,
917             Register dst2,
918             const MemOperand& src, Condition cond = al);
919   void strd(Register src1,
920             Register src2,
921             const MemOperand& dst, Condition cond = al);
922 
923   // Load literal from a pc relative address.
924   void ldr_pcrel(Register dst, int imm12, Condition cond = al);
925 
926   // Load/Store exclusive instructions
927   void ldrex(Register dst, Register src, Condition cond = al);
928   void strex(Register src1, Register src2, Register dst, Condition cond = al);
929   void ldrexb(Register dst, Register src, Condition cond = al);
930   void strexb(Register src1, Register src2, Register dst, Condition cond = al);
931   void ldrexh(Register dst, Register src, Condition cond = al);
932   void strexh(Register src1, Register src2, Register dst, Condition cond = al);
933   void ldrexd(Register dst1, Register dst2, Register src, Condition cond = al);
934   void strexd(Register res, Register src1, Register src2, Register dst,
935               Condition cond = al);
936 
937   // Preload instructions
938   void pld(const MemOperand& address);
939 
940   // Load/Store multiple instructions
941   void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
942   void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
943 
944   // Exception-generating instructions and debugging support
945   void stop(const char* msg,
946             Condition cond = al,
947             int32_t code = kDefaultStopCode);
948 
949   void bkpt(uint32_t imm16);  // v5 and above
950   void svc(uint32_t imm24, Condition cond = al);
951 
952   // Synchronization instructions.
953   // On ARMv6, an equivalent CP15 operation will be used.
954   void dmb(BarrierOption option);
955   void dsb(BarrierOption option);
956   void isb(BarrierOption option);
957 
958   // Conditional speculation barrier.
959   void csdb();
960 
961   // Coprocessor instructions
962 
963   void cdp(Coprocessor coproc, int opcode_1,
964            CRegister crd, CRegister crn, CRegister crm,
965            int opcode_2, Condition cond = al);
966 
967   void cdp2(Coprocessor coproc, int opcode_1,
968             CRegister crd, CRegister crn, CRegister crm,
969             int opcode_2);  // v5 and above
970 
971   void mcr(Coprocessor coproc, int opcode_1,
972            Register rd, CRegister crn, CRegister crm,
973            int opcode_2 = 0, Condition cond = al);
974 
975   void mcr2(Coprocessor coproc, int opcode_1,
976             Register rd, CRegister crn, CRegister crm,
977             int opcode_2 = 0);  // v5 and above
978 
979   void mrc(Coprocessor coproc, int opcode_1,
980            Register rd, CRegister crn, CRegister crm,
981            int opcode_2 = 0, Condition cond = al);
982 
983   void mrc2(Coprocessor coproc, int opcode_1,
984             Register rd, CRegister crn, CRegister crm,
985             int opcode_2 = 0);  // v5 and above
986 
987   void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
988            LFlag l = Short, Condition cond = al);
989   void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
990            LFlag l = Short, Condition cond = al);
991 
992   void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
993             LFlag l = Short);  // v5 and above
994   void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
995             LFlag l = Short);  // v5 and above
996 
997   // Support for VFP.
998   // All these APIs support S0 to S31 and D0 to D31.
999 
1000   void vldr(const DwVfpRegister dst,
1001             const Register base,
1002             int offset,
1003             const Condition cond = al);
1004   void vldr(const DwVfpRegister dst,
1005             const MemOperand& src,
1006             const Condition cond = al);
1007 
1008   void vldr(const SwVfpRegister dst,
1009             const Register base,
1010             int offset,
1011             const Condition cond = al);
1012   void vldr(const SwVfpRegister dst,
1013             const MemOperand& src,
1014             const Condition cond = al);
1015 
1016   void vstr(const DwVfpRegister src,
1017             const Register base,
1018             int offset,
1019             const Condition cond = al);
1020   void vstr(const DwVfpRegister src,
1021             const MemOperand& dst,
1022             const Condition cond = al);
1023 
1024   void vstr(const SwVfpRegister src,
1025             const Register base,
1026             int offset,
1027             const Condition cond = al);
1028   void vstr(const SwVfpRegister src,
1029             const MemOperand& dst,
1030             const Condition cond = al);
1031 
1032   void vldm(BlockAddrMode am,
1033             Register base,
1034             DwVfpRegister first,
1035             DwVfpRegister last,
1036             Condition cond = al);
1037 
1038   void vstm(BlockAddrMode am,
1039             Register base,
1040             DwVfpRegister first,
1041             DwVfpRegister last,
1042             Condition cond = al);
1043 
1044   void vldm(BlockAddrMode am,
1045             Register base,
1046             SwVfpRegister first,
1047             SwVfpRegister last,
1048             Condition cond = al);
1049 
1050   void vstm(BlockAddrMode am,
1051             Register base,
1052             SwVfpRegister first,
1053             SwVfpRegister last,
1054             Condition cond = al);
1055 
1056   void vmov(const SwVfpRegister dst, Float32 imm);
1057   void vmov(const DwVfpRegister dst,
1058             Double imm,
1059             const Register extra_scratch = no_reg);
1060   void vmov(const SwVfpRegister dst,
1061             const SwVfpRegister src,
1062             const Condition cond = al);
1063   void vmov(const DwVfpRegister dst,
1064             const DwVfpRegister src,
1065             const Condition cond = al);
1066   void vmov(const DwVfpRegister dst,
1067             const Register src1,
1068             const Register src2,
1069             const Condition cond = al);
1070   void vmov(const Register dst1,
1071             const Register dst2,
1072             const DwVfpRegister src,
1073             const Condition cond = al);
1074   void vmov(const SwVfpRegister dst,
1075             const Register src,
1076             const Condition cond = al);
1077   void vmov(const Register dst,
1078             const SwVfpRegister src,
1079             const Condition cond = al);
1080   void vcvt_f64_s32(const DwVfpRegister dst,
1081                     const SwVfpRegister src,
1082                     VFPConversionMode mode = kDefaultRoundToZero,
1083                     const Condition cond = al);
1084   void vcvt_f32_s32(const SwVfpRegister dst,
1085                     const SwVfpRegister src,
1086                     VFPConversionMode mode = kDefaultRoundToZero,
1087                     const Condition cond = al);
1088   void vcvt_f64_u32(const DwVfpRegister dst,
1089                     const SwVfpRegister src,
1090                     VFPConversionMode mode = kDefaultRoundToZero,
1091                     const Condition cond = al);
1092   void vcvt_f32_u32(const SwVfpRegister dst,
1093                     const SwVfpRegister src,
1094                     VFPConversionMode mode = kDefaultRoundToZero,
1095                     const Condition cond = al);
1096   void vcvt_s32_f32(const SwVfpRegister dst,
1097                     const SwVfpRegister src,
1098                     VFPConversionMode mode = kDefaultRoundToZero,
1099                     const Condition cond = al);
1100   void vcvt_u32_f32(const SwVfpRegister dst,
1101                     const SwVfpRegister src,
1102                     VFPConversionMode mode = kDefaultRoundToZero,
1103                     const Condition cond = al);
1104   void vcvt_s32_f64(const SwVfpRegister dst,
1105                     const DwVfpRegister src,
1106                     VFPConversionMode mode = kDefaultRoundToZero,
1107                     const Condition cond = al);
1108   void vcvt_u32_f64(const SwVfpRegister dst,
1109                     const DwVfpRegister src,
1110                     VFPConversionMode mode = kDefaultRoundToZero,
1111                     const Condition cond = al);
1112   void vcvt_f64_f32(const DwVfpRegister dst,
1113                     const SwVfpRegister src,
1114                     VFPConversionMode mode = kDefaultRoundToZero,
1115                     const Condition cond = al);
1116   void vcvt_f32_f64(const SwVfpRegister dst,
1117                     const DwVfpRegister src,
1118                     VFPConversionMode mode = kDefaultRoundToZero,
1119                     const Condition cond = al);
1120   void vcvt_f64_s32(const DwVfpRegister dst,
1121                     int fraction_bits,
1122                     const Condition cond = al);
1123 
1124   void vmrs(const Register dst, const Condition cond = al);
1125   void vmsr(const Register dst, const Condition cond = al);
1126 
1127   void vneg(const DwVfpRegister dst,
1128             const DwVfpRegister src,
1129             const Condition cond = al);
1130   void vneg(const SwVfpRegister dst, const SwVfpRegister src,
1131             const Condition cond = al);
1132   void vabs(const DwVfpRegister dst,
1133             const DwVfpRegister src,
1134             const Condition cond = al);
1135   void vabs(const SwVfpRegister dst, const SwVfpRegister src,
1136             const Condition cond = al);
1137   void vadd(const DwVfpRegister dst,
1138             const DwVfpRegister src1,
1139             const DwVfpRegister src2,
1140             const Condition cond = al);
1141   void vadd(const SwVfpRegister dst, const SwVfpRegister src1,
1142             const SwVfpRegister src2, const Condition cond = al);
1143   void vsub(const DwVfpRegister dst,
1144             const DwVfpRegister src1,
1145             const DwVfpRegister src2,
1146             const Condition cond = al);
1147   void vsub(const SwVfpRegister dst, const SwVfpRegister src1,
1148             const SwVfpRegister src2, const Condition cond = al);
1149   void vmul(const DwVfpRegister dst,
1150             const DwVfpRegister src1,
1151             const DwVfpRegister src2,
1152             const Condition cond = al);
1153   void vmul(const SwVfpRegister dst, const SwVfpRegister src1,
1154             const SwVfpRegister src2, const Condition cond = al);
1155   void vmla(const DwVfpRegister dst,
1156             const DwVfpRegister src1,
1157             const DwVfpRegister src2,
1158             const Condition cond = al);
1159   void vmla(const SwVfpRegister dst, const SwVfpRegister src1,
1160             const SwVfpRegister src2, const Condition cond = al);
1161   void vmls(const DwVfpRegister dst,
1162             const DwVfpRegister src1,
1163             const DwVfpRegister src2,
1164             const Condition cond = al);
1165   void vmls(const SwVfpRegister dst, const SwVfpRegister src1,
1166             const SwVfpRegister src2, const Condition cond = al);
1167   void vdiv(const DwVfpRegister dst,
1168             const DwVfpRegister src1,
1169             const DwVfpRegister src2,
1170             const Condition cond = al);
1171   void vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
1172             const SwVfpRegister src2, const Condition cond = al);
1173   void vcmp(const DwVfpRegister src1,
1174             const DwVfpRegister src2,
1175             const Condition cond = al);
1176   void vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
1177             const Condition cond = al);
1178   void vcmp(const DwVfpRegister src1,
1179             const double src2,
1180             const Condition cond = al);
1181   void vcmp(const SwVfpRegister src1, const float src2,
1182             const Condition cond = al);
1183 
1184   void vmaxnm(const DwVfpRegister dst,
1185               const DwVfpRegister src1,
1186               const DwVfpRegister src2);
1187   void vmaxnm(const SwVfpRegister dst,
1188               const SwVfpRegister src1,
1189               const SwVfpRegister src2);
1190   void vminnm(const DwVfpRegister dst,
1191               const DwVfpRegister src1,
1192               const DwVfpRegister src2);
1193   void vminnm(const SwVfpRegister dst,
1194               const SwVfpRegister src1,
1195               const SwVfpRegister src2);
1196 
1197   // VSEL supports cond in {eq, ne, ge, lt, gt, le, vs, vc}.
1198   void vsel(const Condition cond,
1199             const DwVfpRegister dst,
1200             const DwVfpRegister src1,
1201             const DwVfpRegister src2);
1202   void vsel(const Condition cond,
1203             const SwVfpRegister dst,
1204             const SwVfpRegister src1,
1205             const SwVfpRegister src2);
1206 
1207   void vsqrt(const DwVfpRegister dst,
1208              const DwVfpRegister src,
1209              const Condition cond = al);
1210   void vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
1211              const Condition cond = al);
1212 
1213   // ARMv8 rounding instructions.
1214   void vrinta(const SwVfpRegister dst, const SwVfpRegister src);
1215   void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
1216   void vrintn(const SwVfpRegister dst, const SwVfpRegister src);
1217   void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
1218   void vrintm(const SwVfpRegister dst, const SwVfpRegister src);
1219   void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
1220   void vrintp(const SwVfpRegister dst, const SwVfpRegister src);
1221   void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
1222   void vrintz(const SwVfpRegister dst, const SwVfpRegister src,
1223               const Condition cond = al);
1224   void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
1225               const Condition cond = al);
1226 
1227   // Support for NEON.
1228 
1229   // All these APIs support D0 to D31 and Q0 to Q15.
1230   void vld1(NeonSize size,
1231             const NeonListOperand& dst,
1232             const NeonMemOperand& src);
1233   void vst1(NeonSize size,
1234             const NeonListOperand& src,
1235             const NeonMemOperand& dst);
1236   // dt represents the narrower type
1237   void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
1238   // dt represents the narrower type.
1239   void vqmovn(NeonDataType dt, DwVfpRegister dst, QwNeonRegister src);
1240 
1241   // Only unconditional core <-> scalar moves are currently supported.
1242   void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
1243   void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
1244 
1245   void vmov(QwNeonRegister dst, QwNeonRegister src);
1246   void vdup(NeonSize size, QwNeonRegister dst, Register src);
1247   void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
1248   void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);
1249 
1250   void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
1251   void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
1252   void vcvt_s32_f32(QwNeonRegister dst, QwNeonRegister src);
  void vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src);

  void vmvn(QwNeonRegister dst, QwNeonRegister src);
  void vswp(DwVfpRegister dst, DwVfpRegister src);
  void vswp(QwNeonRegister dst, QwNeonRegister src);
  void vabs(QwNeonRegister dst, QwNeonRegister src);
  void vabs(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vneg(QwNeonRegister dst, QwNeonRegister src);
  void vneg(NeonSize size, QwNeonRegister dst, QwNeonRegister src);

  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
             QwNeonRegister src2);
  void vmul(QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmin(NeonDataType dt, QwNeonRegister dst,
            QwNeonRegister src1, QwNeonRegister src2);
  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vmax(NeonDataType dt, QwNeonRegister dst,
            QwNeonRegister src1, QwNeonRegister src2);
  void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
  void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
             DwVfpRegister src2);
  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
  void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
  // vrecpe and vrsqrte only support floating point lanes.
  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
  void vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
            QwNeonRegister src2);
  void vext(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2,
            int bytes);
  void vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vrev16(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev32(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vrev64(NeonSize size, QwNeonRegister dst, QwNeonRegister src);
  void vtrn(NeonSize size, DwVfpRegister src1, DwVfpRegister src2);
  void vtrn(NeonSize size, QwNeonRegister src1, QwNeonRegister src2);
  void vtbl(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);
  void vtbx(DwVfpRegister dst, const NeonListOperand& list,
            DwVfpRegister index);

  // Pseudo instructions

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
  };

  void nop(int type = 0);   // 0 is the default non-marking type.
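  // Illustrative sketch (not part of the original interface): a code
  // generator can emit a marking nop and later recognize it with IsNop(),
  // assuming an Assembler instance "assm" and a buffer offset "pos" that
  // points at the emitted instruction:
  //
  //   assm.nop(Assembler::DEBUG_BREAK_NOP);
  //   ...
  //   bool is_marker = Assembler::IsNop(assm.instr_at(pos),
  //                                     Assembler::DEBUG_BREAK_NOP);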

  void push(Register src, Condition cond = al) {
    str(src, MemOperand(sp, 4, NegPreIndex), cond);
  }

  void pop(Register dst, Condition cond = al) {
    ldr(dst, MemOperand(sp, 4, PostIndex), cond);
  }

  void pop();
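  // Usage sketch (illustrative only, assuming an Assembler instance "assm"):
  // push/pop are balanced pseudo instructions operating on sp, so
  //
  //   assm.push(r4);
  //   ...  // code that may clobber r4
  //   assm.pop(r4);
  //
  // saves and restores r4 across the bracketed sequence.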

  void vpush(QwNeonRegister src, Condition cond = al) {
    vstm(db_w, sp, src.low(), src.high(), cond);
  }

  void vpush(DwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpush(SwVfpRegister src, Condition cond = al) {
    vstm(db_w, sp, src, src, cond);
  }

  void vpop(DwVfpRegister dst, Condition cond = al) {
    vldm(ia_w, sp, dst, dst, cond);
  }

  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L, al); }

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }
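  // Illustrative sketch (not part of the original interface): both helpers
  // measure from a bound label, e.g. assuming an Assembler instance "assm":
  //
  //   Label start;
  //   assm.bind(&start);
  //   assm.nop();
  //   assm.nop();
  //   // assm.SizeOfCodeGeneratedSince(&start) == 2 * kInstrSize (8 bytes)
  //   // assm.InstructionsGeneratedSince(&start) == 2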

  // Check whether an immediate fits an addressing mode 1 instruction.
  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);

  // Check whether an immediate fits an addressing mode 2 instruction.
  bool ImmediateFitsAddrMode2Instruction(int32_t imm32);

  // Scope class that postpones constant pool generation while it is alive.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
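  // Usage sketch (illustrative only): instantiate the scope on the stack to
  // keep the constant pool from being emitted in the middle of a sequence
  // that must stay contiguous, e.g. with an Assembler instance "assm":
  //
  //   {
  //     Assembler::BlockConstPoolScope block_const_pool(&assm);
  //     assm.ldr(r0, MemOperand(r1, 0));
  //     assm.str(r0, MemOperand(r2, 0));
  //   }  // pool emission may resume once the scope is destroyed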

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);
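  // For example (illustrative only), a code generator might annotate a
  // generated sequence so it shows up in --code-comments disassembly:
  //
  //   assm.RecordComment("[ stack check");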

  // Record a deoptimization reason that can be used by a log or CPU profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
                         int id);

  // Record the emission of a constant pool.
  //
  // The emission of the constant pool depends on the size of the generated
  // code and the number of RelocInfo entries recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the
  // constant pools and cause the version of the code with debugger support to
  // have constant pools generated in different places.
  // Recording the position and size of emitted constant pools makes it
  // possible to correctly compute the offset mappings between the different
  // versions of a function in all situations.
  //
  // The parameter indicates the size of the constant pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);

  // Write a single byte (db), 32-bit word (dd), or 64-bit word (dq) of data
  // into the code stream. Used for inline tables, e.g. jump tables.
  // CheckConstPool() should be called before any use of db/dd/dq/dp to ensure
  // that constant pools are not emitted as part of the generated tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  void dp(uintptr_t data) { dd(data); }
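  // Illustrative sketch (not from the original header), assuming an Assembler
  // instance "assm": emit a small inline data table.
  //
  //   // Flush pending constants first so they do not land inside the table;
  //   // whether a jump over the pool is needed depends on the context.
  //   assm.CheckConstPool(/*force_emit=*/true, /*require_jump=*/false);
  //   assm.dd(0x00000000);  // table entry 0 (placeholder value)
  //   assm.dd(0xFFFFFFFF);  // table entry 1 (placeholder value)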

  // Read/patch instructions
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(Address pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
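  // Illustrative sketch (not part of the original interface): the int-offset
  // overloads read and patch instructions inside the assembler's own buffer,
  // e.g. for Assembler "assm" and a buffer offset "pos":
  //
  //   Instr instr = assm.instr_at(pos);
  //   if (Assembler::IsNop(instr, Assembler::DEBUG_BREAK_NOP)) {
  //     // ... rewrite the instruction as needed, then write it back:
  //     assm.instr_at_put(pos, instr);
  //   }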
  static Condition GetCondition(Instr instr);
  static bool IsLdrRegisterImmediate(Instr instr);
  static bool IsVldrDRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static int GetVldrDRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
  static bool IsStrRegisterImmediate(Instr instr);
  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
  static bool IsAddRegisterImmediate(Instr instr);
  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static Register GetRn(Instr instr);
  static Register GetRm(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);
  static bool IsLdrPcImmediateOffset(Instr instr);
  static bool IsVldrDPcImmediateOffset(Instr instr);
  static bool IsBlxReg(Instr instr);
  static bool IsBlxIp(Instr instr);
  static bool IsTstImmediate(Instr instr);
  static bool IsCmpRegister(Instr instr);
  static bool IsCmpImmediate(Instr instr);
  static Register GetCmpImmediateRegister(Instr instr);
  static int GetCmpImmediateRawImmediate(Instr instr);
  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
  static bool IsMovImmed(Instr instr);
  static bool IsOrrImmed(Instr instr);
  static bool IsMovT(Instr instr);
  static Instr GetMovTPattern();
  static bool IsMovW(Instr instr);
  static Instr GetMovWPattern();
  static Instr EncodeMovwImmediate(uint32_t immediate);
  static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
  static int DecodeShiftImm(Instr instr);
  static Instr PatchShiftImm(Instr instr, int immed);

  // Constants in pools are accessed via PC-relative addressing, which can
  // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
  // PC-relative loads, thereby defining a maximum distance between the
  // instruction and the accessed constant.
  static constexpr int kMaxDistToIntPool = 4 * KB;
  static constexpr int kMaxDistToFPPool = 1 * KB;
  // All pending relocations could be integer ones, so the integer pool
  // distance acts as the limit.
  static constexpr int kMinNumPendingConstants = 4;
  static constexpr int kMaxNumPending32Constants =
      kMaxDistToIntPool / kInstrSize;
  static constexpr int kMaxNumPending64Constants =
      kMaxDistToFPPool / kInstrSize;
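  // With 4-byte ARM instructions (kInstrSize == 4) these bounds work out to
  // 4096 / 4 = 1024 pending 32-bit constants and 1024 / 4 = 256 pending
  // 64-bit constants.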

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Check whether it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  void MaybeCheckConstPool() {
    if (pc_offset() >= next_buffer_check_) {
      CheckConstPool(false, true);
    }
  }

  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  // Move a 32-bit immediate into a register, potentially via the constant pool.
  void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);

  // Get the code target object for a pc-relative call or jump.
  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
      Address pc_) const;

 protected:
  int buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode the branch instruction at pos and return the branch target pos.
  int target_at(int pos);

  // Patch the branch instruction at pos to branch to the given target pos.
  void target_at_put(int pos, int target_pos);

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be matched by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool() {
    if (const_pool_blocked_nesting_++ == 0) {
      // Prevent constant pool checks from happening by setting the next check
      // to the biggest possible offset.
      next_buffer_check_ = kMaxInt;
    }
  }

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool() {
    if (--const_pool_blocked_nesting_ == 0) {
#ifdef DEBUG
      // Max pool start (if we need a jump and an alignment).
      int start = pc_offset() + kInstrSize + 2 * kPointerSize;
      // Check the constant pool hasn't been blocked for too long.
      DCHECK(pending_32_bit_constants_.empty() ||
             (start + pending_64_bit_constants_.size() * kDoubleSize <
              static_cast<size_t>(first_const_pool_32_use_ +
                                  kMaxDistToIntPool)));
      DCHECK(pending_64_bit_constants_.empty() ||
             (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
#endif
      // Two cases:
      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
      //    still blocked
      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
      //    trigger a check.
      next_buffer_check_ = no_const_pool_before_;
    }
  }

  bool is_const_pool_blocked() const {
    return (const_pool_blocked_nesting_ > 0) ||
           (pc_offset() < no_const_pool_before_);
  }

  bool VfpRegisterIsAvailable(DwVfpRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters);
  }

  bool VfpRegisterIsAvailable(QwNeonRegister reg) {
    DCHECK(reg.is_valid());
    return IsEnabled(VFP32DREGS) ||
           (reg.code() < LowDwVfpRegister::kNumRegisters / 2);
  }

  inline void emit(Instr x);

  // Code generation
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static constexpr int kGap = 32;

  // Relocation info generation
  // Each relocation is encoded as a variable size value.
  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // ConstantPoolEntry records are used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These records are temporarily stored in a separate
  // buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The buffers of pending constant pool entries.
  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
  std::vector<ConstantPoolEntry> pending_64_bit_constants_;

  // Scratch registers available for use by the Assembler.
  RegList scratch_register_list_;
  VfpRegList scratch_vfp_register_list_;

 private:
  // Avoid overflows for displacements etc.
  static const int kMaximalBufferSize = 512 * MB;

  int next_buffer_check_;  // pc offset of next buffer check

  // Constant pool generation
  // Pools are emitted in the instruction stream, preferably after unconditional
  // jumps or after returns from functions (in dead code locations).
  // If a long code sequence does not contain unconditional jumps, it is
  // necessary to emit the constant pool before the pool gets too far from the
  // location it is accessed from. In this case, we emit a jump over the emitted
  // constant pool.
  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and we rely on some slop to avoid overrunning them.
  static constexpr int kCheckPoolIntervalInst = 32;
  static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
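  // With 4-byte ARM instructions (kInstrSize == 4), this re-checks the pool
  // roughly every 32 * 4 = 128 bytes of emitted code.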

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_const_pool_32_use_;
  int first_const_pool_64_use_;

  // The bound position; before this we cannot do instruction elimination.
  int last_bound_pos_;

  inline void CheckBuffer();
  void GrowBuffer();

  // Instruction generation
  void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
  // Attempt to encode operand |x| for instruction |instr| and return true on
  // success. The result will be encoded in |instr| directly. This method may
  // change the opcode if deemed beneficial; for instance, MOV may be turned
  // into MVN, ADD into SUB, AND into BIC, and so on (see the example below).
  // The only reason this method may fail is that the operand is an immediate
  // that cannot be encoded.
  bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);
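  // Worked example (illustrative, not from the original header): ARM data
  // processing immediates are an 8-bit value rotated right by an even amount,
  // so 0xFFFF00FF cannot be encoded directly, but its bitwise complement
  // 0x0000FF00 can. A requested "mov rd, #0xFFFF00FF" may therefore be
  // rewritten as "mvn rd, #0x0000FF00", which computes the same result.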

  void AddrMode2(Instr instr, Register rd, const MemOperand& x);
  void AddrMode3(Instr instr, Register rd, const MemOperand& x);
  void AddrMode4(Instr instr, Register rn, RegList rl);
  void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);

  // Labels
  void print(const Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L);

  // Record reloc info for current pc_
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
                            intptr_t value);
  void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);

  friend class RelocInfo;
  friend class BlockConstPoolScope;
  friend class EnsureSpace;
  friend class UseScratchRegisterScope;
};

class EnsureSpace BASE_EMBEDDED {
 public:
  V8_INLINE explicit EnsureSpace(Assembler* assembler);
};

class PatchingAssembler : public Assembler {
 public:
  PatchingAssembler(const AssemblerOptions& options, byte* address,
                    int instructions);
  ~PatchingAssembler();

  void Emit(Address addr);
};

// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list has been modified by some other means. Note that
// this scope can be nested, but the destructors need to run in the opposite
// order from the constructors. We do not have assertions for this.
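//
// Usage sketch (illustrative only, assuming an Assembler instance "assm"):
//
//   {
//     UseScratchRegisterScope temps(&assm);
//     Register scratch = temps.Acquire();      // core scratch register
//     DwVfpRegister dtemp = temps.AcquireD();  // VFP scratch register
//     // ... use scratch and dtemp ...
//   }  // both registers are returned to the pool here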
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assembler);
  ~UseScratchRegisterScope();

  // Take a register from the list and return it.
  Register Acquire();
  SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
  LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
  DwVfpRegister AcquireD() {
    DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }
  QwNeonRegister AcquireQ() {
    QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
    DCHECK(assembler_->VfpRegisterIsAvailable(reg));
    return reg;
  }

  // Check if we have registers available to acquire.
  bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
  bool CanAcquireD() const { return CanAcquireVfp<DwVfpRegister>(); }

 private:
  friend class Assembler;
  friend class TurboAssembler;

  template <typename T>
  bool CanAcquireVfp() const;

  template <typename T>
  T AcquireVfp();

  Assembler* assembler_;
  // Available scratch registers at the start of this scope.
  RegList old_available_;
  VfpRegList old_available_vfp_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_ARM_ASSEMBLER_ARM_H_